From 42553e72d2b70a223dc981104c94096915837c15 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 12 Jun 2023 11:10:43 -0400 Subject: [PATCH 001/131] no idea what I changed --- ocpmodels/common/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index dfa48f51d4..b5cae81886 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1115,6 +1115,7 @@ def build_config(args, args_override, silent=False): if loaded_config: # update dirs + print("hi") new_dirs = [ (k, v) for k, v in config.items() if "dir" in k and k != "cp_data_to_tmpdir" ] From fe76d2959d8813fc80455cb8f61a3566336592e5 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 13 Jun 2023 10:27:59 -0400 Subject: [PATCH 002/131] Created a file with disconnected gnn and an edge deleter --- configs/exps/alvaro/split-ads-cats.yaml | 43 ++ configs/models/disconnected.yaml | 271 +++++++++ ocpmodels/models/disconnected.py | 712 ++++++++++++++++++++++++ 3 files changed, 1026 insertions(+) create mode 100644 configs/exps/alvaro/split-ads-cats.yaml create mode 100644 configs/models/disconnected.yaml create mode 100644 ocpmodels/models/disconnected.py diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml new file mode 100644 index 0000000000..792dc80926 --- /dev/null +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -0,0 +1,43 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + +default: + config: disconnected-is2re-all + test_ri: True + mode: train + wandb_tags: 'baseline' + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + cp_data_to_tmpdir: true + fa_frames: se3-random + frame_averaging: 2D + graph_rewiring: remove-tag-0 + model: + pg_hidden_channels: 64 + phys_embeds: true + phys_hidden_channels: 0 + tag_hidden_channels: 64 + energy_head: weighted-av-final-embeds + optim: + max_epochs: 12 + +runs: + # Run 1 + - {} + + # Run 2 +# - model: +# pg_hidden_channels: 0 + + # Run 3 +# - optim: +# lr_initial: 0.0001 + + # Run 4 +# - model: +# pg_hidden_channels: 32 +# optim: +# lr_initial: 0.0001 diff --git a/configs/models/disconnected.yaml b/configs/models/disconnected.yaml new file mode 100644 index 0000000000..2b1319d717 --- /dev/null +++ b/configs/models/disconnected.yaml @@ -0,0 +1,271 @@ +default: + model: + name: disconnected + act: swish + hidden_channels: 128 + num_filters: 100 + num_interactions: 3 + num_gaussians: 100 + cutoff: 6.0 + use_pbc: True + regress_forces: False + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + # faenet new features + skip_co: False # output skip connections {False, "add", "concat"} + second_layer_MLP: False # in EmbeddingBlock + complex_mp: False + edge_embed_type: rij # {'rij','all_rij','sh', 'all'}) + mp_type: base # {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env'} + graph_norm: False # bool + att_heads: 1 # int + force_decoder_type: "mlp" # can be {"" or "simple"} | only used if regress_forces is True + force_decoder_model_config: + simple: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + mlp: + hidden_channels: 256 + norm: batch1d # batch1d, layer or null + res: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + res_updown: + hidden_channels: 128 + norm: batch1d # 
batch1d, layer or null + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + lr_initial: 0.001 + warmup_factor: 0.2 + max_epochs: 20 + energy_grad_coefficient: 10 + force_coefficient: 30 + energy_coefficient: 1 + + frame_averaging: False # 2D, 3D, da, False + fa_frames: False # can be {None, full, random, det, e3, e3-random, e3-det} + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + 100k: + model: + hidden_channels: 256 + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 18000 + - 27000 + - 37000 + warmup_steps: 6000 + max_epochs: 20 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +# For 2 GPUs + +s2ef: + default: + model: + num_interactions: 4 + hidden_channels: 750 + num_gaussians: 200 + num_filters: 256 + regress_forces: "direct" + force_coefficient: 30 + energy_grad_coefficient: 10 + optim: + batch_size: 96 + eval_batch_size: 96 + warmup_factor: 0.2 + lr_gamma: 0.1 + lr_initial: 0.0001 + max_epochs: 15 + warmup_steps: 30000 + lr_milestones: + - 55000 + - 75000 + - 10000 + + 200k: {} + + # 1 gpus + 2M: + model: + num_interactions: 5 + hidden_channels: 1024 + num_gaussians: 200 + num_filters: 256 + optim: + batch_size: 192 + eval_batch_size: 192 + + 20M: {} + + all: {} + +qm9: + default: + model: + act: swish + att_heads: 1 + complex_mp: true + cutoff: 6.0 + edge_embed_type: all_rij + energy_head: '' + graph_norm: true + graph_rewiring: null + hidden_channels: 400 + max_num_neighbors: 30 + mp_type: updownscale_base + num_filters: 480 + num_gaussians: 100 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: false + phys_hidden_channels: 0 + regress_forces: '' + second_layer_MLP: true + skip_co: true + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 64 + es_min_abs_change: 1.0e-06 + es_patience: 20 + es_warmup_epochs: 600 + eval_batch_size: 64 + factor: 0.9 + lr_initial: 0.0003 + loss_energy: mse + lr_gamma: 0.1 + lr_initial: 0.001 + max_epochs: 1500 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + patience: 15 + scheduler: ReduceLROnPlateau + threshold: 0.0001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + 10k: {} + all: {} + +qm7x: + default: + model: # SOTA settings + act: swish + att_heads: 1 + complex_mp: true + cutoff: 5.0 + edge_embed_type: all_rij + energy_head: false + force_decoder_model_config: + mlp: + hidden_channels: 256 + norm: batch1d + res: + hidden_channels: 128 + norm: batch1d + res_updown: + hidden_channels: 128 + norm: layer + simple: + hidden_channels: 128 + norm: batch1d + force_decoder_type: res_updown + graph_norm: false + hidden_channels: 500 + max_num_neighbors: 40 + mp_type: updownscale_base + num_filters: 400 + num_gaussians: 50 + num_interactions: 5 + otf_graph: false + 
pg_hidden_channels: 32 + phys_embeds: true + phys_hidden_channels: 0 + regress_forces: direct_with_gradient_target + second_layer_MLP: true + skip_co: false + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 100 + energy_grad_coefficient: 5 + eval_batch_size: 100 + eval_every: 0.34 + factor: 0.75 + force_coefficient: 75 + loss_energy: mae + loss_force: mse + lr_gamma: 0.1 + lr_initial: 0.000193 + max_steps: 4000000 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + scheduler: ReduceLROnPlateau + threshold: 0.001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + all: {} + 1k: {} + +qm9: + default: + model: + use_pbc: False + all: {} + 10k: {} diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py new file mode 100644 index 0000000000..00f66d50c7 --- /dev/null +++ b/ocpmodels/models/disconnected.py @@ -0,0 +1,712 @@ +""" Code of the Scalable Frame Averaging (Rotation Invariant) GNN +""" +import torch +from e3nn.o3 import spherical_harmonics +from torch import nn +from torch.nn import Embedding, Linear +from torch_geometric.nn import MessagePassing, TransformerConv, radius_graph +from torch_geometric.nn.norm import BatchNorm, GraphNorm +from torch_scatter import scatter + +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import conditional_grad, get_pbc_distances +from ocpmodels.models.base_model import BaseModel +from ocpmodels.models.force_decoder import ForceDecoder +from ocpmodels.models.utils.attention_model import TransfoAttConv +from ocpmodels.models.utils.pos_encodings import PositionalEncoding +from ocpmodels.modules.phys_embeddings import PhysEmbedding +from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling +from ocpmodels.models.utils.activations import swish + +NUM_CLUSTERS = 20 +NUM_POOLING_LAYERS = 1 + + +class GaussianSmearing(nn.Module): + def __init__(self, start=0.0, stop=5.0, num_gaussians=50): + super().__init__() + offset = torch.linspace(start, stop, num_gaussians) + self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2 + self.register_buffer("offset", offset) + + def forward(self, dist): + dist = dist.view(-1, 1) - self.offset.view(1, -1) + return torch.exp(self.coeff * torch.pow(dist, 2)) + + +class EmbeddingBlock(nn.Module): + def __init__( + self, + num_gaussians, + num_filters, + hidden_channels, + tag_hidden_channels, + pg_hidden_channels, + phys_hidden_channels, + phys_embeds, + graph_rewiring, + act, + second_layer_MLP, + edge_embed_type, + ): + super().__init__() + self.act = act + self.use_tag = tag_hidden_channels > 0 + self.use_pg = pg_hidden_channels > 0 + self.use_mlp_phys = phys_hidden_channels > 0 and phys_embeds + self.use_positional_embeds = graph_rewiring in { + "one-supernode-per-graph", + "one-supernode-per-atom-type", + "one-supernode-per-atom-type-dist", + } + self.second_layer_MLP = second_layer_MLP + self.edge_embed_type = edge_embed_type + + # --- Node embedding --- + + # Phys embeddings + self.phys_emb = PhysEmbedding( + props=phys_embeds, props_grad=phys_hidden_channels > 0, pg=self.use_pg + ) + # With MLP + if self.use_mlp_phys: + self.phys_lin = Linear(self.phys_emb.n_properties, phys_hidden_channels) + else: + phys_hidden_channels = self.phys_emb.n_properties + + # Period + group embeddings + if self.use_pg: + self.period_embedding = Embedding( + self.phys_emb.period_size, pg_hidden_channels + ) + self.group_embedding = Embedding( + self.phys_emb.group_size, pg_hidden_channels + ) + + # Tag embedding + if 
tag_hidden_channels: + self.tag_embedding = Embedding(3, tag_hidden_channels) + + # Positional encoding + if self.use_positional_embeds: + self.pe = PositionalEncoding(hidden_channels, 210) + + # Main embedding + self.emb = Embedding( + 85, + hidden_channels + - tag_hidden_channels + - phys_hidden_channels + - 2 * pg_hidden_channels, + ) + + # MLP + self.lin = Linear(hidden_channels, hidden_channels) + if self.second_layer_MLP: + self.lin_2 = Linear(hidden_channels, hidden_channels) + + # --- Edge embedding --- + + # TODO: change some num_filters to edge_embed_hidden + if self.edge_embed_type == "rij": + self.lin_e1 = Linear(3, num_filters) + elif self.edge_embed_type == "all_rij": + self.lin_e1 = Linear(3, num_filters // 2) # r_ij + self.lin_e12 = Linear( + num_gaussians, num_filters - (num_filters // 2) + ) # d_ij + elif self.edge_embed_type == "sh": + self.lin_e1 = Linear(15, num_filters) + elif self.edge_embed_type == "all": + self.lin_e1 = Linear(18 + num_gaussians, num_filters) + else: + raise ValueError("edge_embedding_type does not exist") + + if self.second_layer_MLP: + self.lin_e2 = Linear(num_filters, num_filters) + + self.reset_parameters() + + def reset_parameters(self): + self.emb.reset_parameters() + if self.use_mlp_phys: + nn.init.xavier_uniform_(self.phys_lin.weight) + if self.use_tag: + self.tag_embedding.reset_parameters() + if self.use_pg: + self.period_embedding.reset_parameters() + self.group_embedding.reset_parameters() + nn.init.xavier_uniform_(self.lin.weight) + self.lin.bias.data.fill_(0) + nn.init.xavier_uniform_(self.lin_e1.weight) + self.lin_e1.bias.data.fill_(0) + if self.second_layer_MLP: + nn.init.xavier_uniform_(self.lin_2.weight) + self.lin_2.bias.data.fill_(0) + nn.init.xavier_uniform_(self.lin_e2.weight) + self.lin_e2.bias.data.fill_(0) + if self.edge_embed_type == "all_rij": + nn.init.xavier_uniform_(self.lin_e12.weight) + self.lin_e12.bias.data.fill_(0) + + def forward( + self, z, rel_pos, edge_attr, tag=None, normalised_rel_pos=None, subnodes=None + ): + # --- Edge embedding -- + + if self.edge_embed_type == "rij": + e = self.lin_e1(rel_pos) + elif self.edge_embed_type == "all_rij": + rel_pos = self.lin_e1(rel_pos) # r_ij + edge_attr = self.lin_e12(edge_attr) # d_ij + e = torch.cat((rel_pos, edge_attr), dim=1) + elif self.edge_embed_type == "sh": + self.sh = spherical_harmonics( + l=[1, 2, 3], + x=normalised_rel_pos, + normalize=False, + normalization="component", + ) + e = self.lin_e1(self.sh) + elif self.edge_embed_type == "all": + self.sh = spherical_harmonics( + l=[1, 2, 3], + x=normalised_rel_pos, + normalize=False, + normalization="component", + ) + e = torch.cat((rel_pos, self.sh, edge_attr), dim=1) + e = self.lin_e1(e) + + e = self.act(e) # can comment out + + if self.second_layer_MLP: + # e = self.lin_e2(e) + e = self.act(self.lin_e2(e)) + + # --- Node embedding -- + + # Create atom embeddings based on its characteristic number + h = self.emb(z) + + if self.phys_emb.device != h.device: + self.phys_emb = self.phys_emb.to(h.device) + + # Concat tag embedding + if self.use_tag: + h_tag = self.tag_embedding(tag) + h = torch.cat((h, h_tag), dim=1) + + # Concat physics embeddings + if self.phys_emb.n_properties > 0: + h_phys = self.phys_emb.properties[z] + if self.use_mlp_phys: + h_phys = self.phys_lin(h_phys) + h = torch.cat((h, h_phys), dim=1) + + # Concat period & group embedding + if self.use_pg: + h_period = self.period_embedding(self.phys_emb.period[z]) + h_group = self.group_embedding(self.phys_emb.group[z]) + h = torch.cat((h, h_period, 
h_group), dim=1) + + # Add positional embedding + if self.use_positional_embeds: + idx_of_non_zero_val = (tag == 0).nonzero().T.squeeze(0) + h_pos = torch.zeros_like(h, device=h.device) + h_pos[idx_of_non_zero_val, :] = self.pe(subnodes).to(device=h_pos.device) + h += h_pos + + # MLP + h = self.act(self.lin(h)) + if self.second_layer_MLP: + h = self.act(self.lin_2(h)) + + return h, e + + +class InteractionBlock(MessagePassing): + def __init__( + self, + hidden_channels, + num_filters, + act, + mp_type, + complex_mp, + att_heads, + graph_norm, + ): + super(InteractionBlock, self).__init__() + self.act = act + self.mp_type = mp_type + self.hidden_channels = hidden_channels + self.complex_mp = complex_mp + self.graph_norm = graph_norm + if graph_norm: + self.graph_norm = GraphNorm( + hidden_channels if "updown" not in self.mp_type else num_filters + ) + + if self.mp_type == "simple": + self.lin_geom = nn.Linear(num_filters, hidden_channels) + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + + elif self.mp_type == "sfarinet": + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + + elif self.mp_type == "updownscale": + self.lin_geom = nn.Linear(num_filters, num_filters) # like 'simple' + self.lin_down = nn.Linear(hidden_channels, num_filters) + self.lin_up = nn.Linear(num_filters, hidden_channels) + + elif self.mp_type == "updownscale_base": + self.lin_geom = nn.Linear(num_filters + 2 * hidden_channels, num_filters) + self.lin_down = nn.Linear(hidden_channels, num_filters) + self.lin_up = nn.Linear(num_filters, hidden_channels) + + elif self.mp_type == "base_with_att": + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + # self.lin_geom = AttConv(hidden_channels, heads=1, concat=True, bias=True) + self.lin_geom = TransfoAttConv( + hidden_channels, + hidden_channels, + heads=att_heads, + concat=False, + root_weight=False, + edge_dim=num_filters, + ) + elif self.mp_type == "att": + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + self.lin_geom = TransformerConv( + hidden_channels, + hidden_channels, + heads=att_heads, + concat=False, + root_weight=False, + edge_dim=num_filters, + ) + + elif self.mp_type == "local_env": + self.lin_geom = nn.Linear(num_filters, hidden_channels) + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + + elif self.mp_type == "updown_local_env": + self.lin_down = nn.Linear(hidden_channels, num_filters) + self.lin_geom = nn.Linear(num_filters, num_filters) + self.lin_up = nn.Linear(2 * num_filters, hidden_channels) + + else: # base + self.lin_geom = nn.Linear( + num_filters + 2 * hidden_channels, hidden_channels + ) + self.lin_h = nn.Linear(hidden_channels, hidden_channels) + + if self.complex_mp: + self.other_mlp = nn.Linear(hidden_channels, hidden_channels) + + self.reset_parameters() + + def reset_parameters(self): + if self.mp_type not in {"sfarinet", "att", "base_with_att"}: + nn.init.xavier_uniform_(self.lin_geom.weight) + self.lin_geom.bias.data.fill_(0) + if self.complex_mp: + nn.init.xavier_uniform_(self.other_mlp.weight) + self.other_mlp.bias.data.fill_(0) + if self.mp_type in {"updownscale", "updownscale_base", "updown_local_env"}: + nn.init.xavier_uniform_(self.lin_up.weight) + self.lin_up.bias.data.fill_(0) + nn.init.xavier_uniform_(self.lin_down.weight) + self.lin_down.bias.data.fill_(0) + else: + nn.init.xavier_uniform_(self.lin_h.weight) + self.lin_h.bias.data.fill_(0) + + def forward(self, h, edge_index, e): + # Define edge embedding + if self.mp_type in {"base", "updownscale_base"}: + e = torch.cat([e, 
h[edge_index[0]], h[edge_index[1]]], dim=1) + + if self.mp_type in { + "simple", + "updownscale", + "base", + "updownscale_base", + "local_env", + }: + e = self.act(self.lin_geom(e)) # TODO: remove act() ? + + # --- Message Passing block -- + + if self.mp_type == "updownscale" or self.mp_type == "updownscale_base": + h = self.act(self.lin_down(h)) # downscale node rep. + h = self.propagate(edge_index, x=h, W=e) # propagate + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = self.act(self.lin_up(h)) # upscale node rep. + + elif self.mp_type == "att": + h = self.lin_geom(h, edge_index, edge_attr=e) + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = self.act(self.lin_h(h)) + + elif self.mp_type == "base_with_att": + h = self.lin_geom(h, edge_index, edge_attr=e) # propagate is inside + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = self.act(self.lin_h(h)) + + elif self.mp_type == "local_env": + chi = self.propagate(edge_index, x=h, W=e, local_env=True) + h = self.propagate(edge_index, x=h, W=e) # propagate + h = h + chi + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = h = self.act(self.lin_h(h)) + + elif self.mp_type == "updown_local_env": + h = self.act(self.lin_down(h)) + chi = self.propagate(edge_index, x=h, W=e, local_env=True) + e = self.lin_geom(e) + h = self.propagate(edge_index, x=h, W=e) # propagate + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = torch.cat((h, chi), dim=1) + h = self.lin_up(h) + + elif self.mp_type in {"base", "simple", "sfarinet"}: + h = self.propagate(edge_index, x=h, W=e) # propagate + if self.graph_norm: + h = self.act(self.graph_norm(h)) + h = self.act(self.lin_h(h)) + + else: + raise ValueError("mp_type provided does not exist") + + if self.complex_mp: + h = self.act(self.other_mlp(h)) + + return h + + def message(self, x_j, W, local_env=None): + if local_env is not None: + return W + else: + return x_j * W + + +class OutputBlock(nn.Module): + def __init__(self, energy_head, hidden_channels, act): + super().__init__() + self.energy_head = energy_head + self.act = act + + self.lin1 = Linear(hidden_channels, hidden_channels // 2) + self.lin2 = Linear(hidden_channels // 2, 1) + + # weighted average & pooling + if self.energy_head in {"pooling", "random"}: + self.hierarchical_pooling = Hierarchical_Pooling( + hidden_channels, + self.act, + NUM_POOLING_LAYERS, + NUM_CLUSTERS, + self.energy_head, + ) + elif self.energy_head == "graclus": + self.graclus = Graclus(hidden_channels, self.act) + elif self.energy_head == "weighted-av-final-embeds": + self.w_lin = Linear(hidden_channels, 1) + + def reset_parameters(self): + nn.init.xavier_uniform_(self.lin1.weight) + self.lin1.bias.data.fill_(0) + nn.init.xavier_uniform_(self.lin2.weight) + self.lin2.bias.data.fill_(0) + if self.energy_head == "weighted-av-final-embeds": + nn.init.xavier_uniform_(self.w_lin.weight) + self.w_lin.bias.data.fill_(0) + + def forward(self, h, edge_index, edge_weight, batch, alpha): + if self.energy_head == "weighted-av-final-embeds": + alpha = self.w_lin(h) + + elif self.energy_head == "graclus": + h, batch = self.graclus(h, edge_index, edge_weight, batch) + + elif self.energy_head in {"pooling", "random"}: + h, batch, pooling_loss = self.hierarchical_pooling( + h, edge_index, edge_weight, batch + ) + + # MLP + h = self.lin1(h) + h = self.act(h) + h = self.lin2(h) + + if self.energy_head in { + "weighted-av-initial-embeds", + "weighted-av-final-embeds", + }: + h = h * alpha + + # Global pooling + out = scatter(h, batch, dim=0, 
reduce="add") + + return out + + +@registry.register_model("disconnected") +class Disconnected(BaseModel): + r"""Frame Averaging GNN model FAENet. + + Args: + cutoff (float): Cutoff distance for interatomic interactions. + (default: :obj:`6.0`) + use_pbc (bool): Use of periodic boundary conditions. + (default: true) + act (str): activation function + (default: swish) + max_num_neighbors (int): The maximum number of neighbors to + collect for each node within the :attr:`cutoff` distance. + (default: :obj:`32`) + graph_rewiring (str): Method used to create the graph, + among "", remove-tag-0, supernodes. + energy_head (str): Method to compute energy prediction + from atom representations. + hidden_channels (int): Hidden embedding size. + (default: :obj:`128`) + tag_hidden_channels (int): Hidden tag embedding size. + (default: :obj:`32`) + pg_hidden_channels (int): Hidden period and group embed size. + (default: obj:`32`) + phys_embed (bool): Concat fixed physics-aware embeddings. + phys_hidden_channels (int): Hidden size of learnable phys embed. + (default: obj:`32`) + num_interactions (int): The number of interaction blocks. + (default: :obj:`4`) + num_gaussians (int): The number of gaussians :math:`\mu`. + (default: :obj:`50`) + second_layer_MLP (bool): use 2-layers MLP at the end of the Embedding block. + skip_co (str): add a skip connection between each interaction block and + energy-head. ("add", False, "concat", "concat_atom") + edge_embed_type (str, in {'rij','all_rij','sh', 'all'}): input feature + of the edge embedding block. + edge_embed_hidden (int): size of edge representation. + could be num_filters or hidden_channels. + mp_type (str, in {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env' + 'updownscale_base', 'updownscale', 'updown_local_env', 'sfarinet'}}): + specificies the MP of the interaction block. + graph_norm (bool): whether to apply batch norm after every linear layer. + complex_mp (bool): whether to add a second layer MLP at the end of each Interaction + force_decoder_model_config (dict): config of the force decoder model. + keys: "model_type", "hidden_channels", "num_layers", "num_heads", + force_decoder_type (str): type of the force decoder model. 
+ (options: "mlp", "simple", "res", "res_updown") + """ + + def __init__(self, **kwargs): + super(Disconnected, self).__init__() + + self.cutoff = kwargs["cutoff"] + self.energy_head = kwargs["energy_head"] + self.regress_forces = kwargs["regress_forces"] + self.use_pbc = kwargs["use_pbc"] + self.max_num_neighbors = kwargs["max_num_neighbors"] + self.edge_embed_type = kwargs["edge_embed_type"] + self.skip_co = kwargs["skip_co"] + if kwargs["mp_type"] == "sfarinet": + kwargs["num_filters"] = kwargs["hidden_channels"] + + self.act = ( + getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish + ) + self.use_positional_embeds = kwargs["graph_rewiring"] in { + "one-supernode-per-graph", + "one-supernode-per-atom-type", + "one-supernode-per-atom-type-dist", + } + # Gaussian Basis + self.distance_expansion = GaussianSmearing( + 0.0, self.cutoff, kwargs["num_gaussians"] + ) + + # Embedding block + self.embed_block = EmbeddingBlock( + kwargs["num_gaussians"], + kwargs["num_filters"], + kwargs["hidden_channels"], + kwargs["tag_hidden_channels"], + kwargs["pg_hidden_channels"], + kwargs["phys_hidden_channels"], + kwargs["phys_embeds"], + kwargs["graph_rewiring"], + self.act, + kwargs["second_layer_MLP"], + kwargs["edge_embed_type"], + ) + + # Interaction block + self.interaction_blocks = nn.ModuleList( + [ + InteractionBlock( + kwargs["hidden_channels"], + kwargs["num_filters"], + self.act, + kwargs["mp_type"], + kwargs["complex_mp"], + kwargs["att_heads"], + kwargs["graph_norm"], + ) + for _ in range(kwargs["num_interactions"]) + ] + ) + + # Output block + self.output_block = OutputBlock( + self.energy_head, kwargs["hidden_channels"], self.act + ) + + # Energy head + if self.energy_head == "weighted-av-initial-embeds": + self.w_lin = Linear(kwargs["hidden_channels"], 1) + + # Force head + self.decoder = ( + ForceDecoder( + kwargs["force_decoder_type"], + kwargs["hidden_channels"], + kwargs["force_decoder_model_config"], + self.act, + ) + if "direct" in self.regress_forces + else None + ) + + # Skip co + if self.skip_co == "concat": + self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) + elif self.skip_co == "concat_atom": + self.mlp_skip_co = Linear( + ((kwargs["num_interactions"] + 1) * kwargs["hidden_channels"]), + kwargs["hidden_channels"], + ) + + @conditional_grad(torch.enable_grad()) + def forces_forward(self, preds): + return self.decoder(preds["hidden_state"]) + + def edge_classifier(self, edge_index, tags, batch = None): + values = torch.ones([edge_index.shape[-1]]) + if batch is not None: + + #values = torch.transpose(edge_index, 0, 1) + + for i, (head, tail) in enumerate(zip(edge_index[0], edge_index[1])): + if ( + batch[int(head.item())] == batch[int(tail.item())] + and set((tags[int(head.item())].item(), tags[int(tail.item())].item())) == set((1, 2)) + ): + values[i] = False + + else: + for i, (head, tail) in enumerate(zip(edge_index[0], edge_index[1])): + if set((tags[int(head.item())].item(), tags[int(tail.item())].item())) == set((1, 2)): + values[i] = False + + return values.bool() + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + import ipdb, time + start = time.time() + # Rewire the graph + z = data.atomic_numbers.long() + pos = data.pos + batch = data.batch + + # Use periodic boundary conditions + if self.use_pbc: + assert z.dim() == 1 and z.dtype == torch.long + + out = get_pbc_distances( + pos, + data.edge_index, + data.cell, + data.cell_offsets, + data.neighbors, + return_distance_vec=True, + ) + + edge_index = 
out["edge_index"] + edge_weight = out["distances"] + rel_pos = out["distance_vec"] + edge_attr = self.distance_expansion(edge_weight) + else: + edge_index = radius_graph( + pos, + r=self.cutoff, + batch=batch, + max_num_neighbors=self.max_num_neighbors, + ) + # edge_index = data.edge_index + row, col = edge_index + rel_pos = pos[row] - pos[col] + edge_weight = rel_pos.norm(dim=-1) + edge_attr = self.distance_expansion(edge_weight) + + # Removing unnecessary edges + edges_to_keep = self.edge_classifier(edge_index, data.tags, batch) + + edge_index = edge_index[:, edges_to_keep] + edge_weight = edge_weight[edges_to_keep] + edge_attr = edge_attr[edges_to_keep, :] + rel_pos = rel_pos[edges_to_keep, :] + + # Normalize and squash to [0,1] for gaussian basis + rel_pos_normalized = None + if self.edge_embed_type in {"sh", "all_rij", "all"}: + rel_pos_normalized = (rel_pos / edge_weight.view(-1, 1) + 1) / 2.0 + + pooling_loss = None # deal with pooling loss + + # Embedding block + h, e = self.embed_block(z, rel_pos, edge_attr, data.tags, rel_pos_normalized) + + # Compute atom weights for late energy head + if self.energy_head == "weighted-av-initial-embeds": + alpha = self.w_lin(h) + else: + alpha = None + + # Interaction blocks + energy_skip_co = [] + for interaction in self.interaction_blocks: + if self.skip_co == "concat_atom": + energy_skip_co.append(h) + elif self.skip_co: + energy_skip_co.append( + self.output_block(h, edge_index, edge_weight, batch, alpha) + ) + h = h + interaction(h, edge_index, e) + + # Atom skip-co + if self.skip_co == "concat_atom": + energy_skip_co.append(h) + h = self.act(self.mlp_skip_co(torch.cat(energy_skip_co, dim=1))) + + energy = self.output_block(h, edge_index, edge_weight, batch, alpha) + + # Skip-connection + energy_skip_co.append(energy) + if self.skip_co == "concat": + energy = self.mlp_skip_co(torch.cat(energy_skip_co, dim=1)) + elif self.skip_co == "add": + energy = sum(energy_skip_co) + + preds = {"energy": energy, "pooling_loss": pooling_loss, "hidden_state": h} + + end = time.time() + ipdb.set_trace() + return preds From 064699fa7f2d60f4668cc8113ca4ade7fd31035a Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 13 Jun 2023 16:36:38 -0400 Subject: [PATCH 003/131] vectorized implementation of edge deleter --- ocpmodels/common/utils.py | 1 - ocpmodels/models/disconnected.py | 29 +++++------------------------ 2 files changed, 5 insertions(+), 25 deletions(-) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index b5cae81886..dfa48f51d4 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1115,7 +1115,6 @@ def build_config(args, args_override, silent=False): if loaded_config: # update dirs - print("hi") new_dirs = [ (k, v) for k, v in config.items() if "dir" in k and k != "cp_data_to_tmpdir" ] diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py index 00f66d50c7..0264d4280a 100644 --- a/ocpmodels/models/disconnected.py +++ b/ocpmodels/models/disconnected.py @@ -597,30 +597,13 @@ def __init__(self, **kwargs): def forces_forward(self, preds): return self.decoder(preds["hidden_state"]) - def edge_classifier(self, edge_index, tags, batch = None): - values = torch.ones([edge_index.shape[-1]]) - if batch is not None: - - #values = torch.transpose(edge_index, 0, 1) - - for i, (head, tail) in enumerate(zip(edge_index[0], edge_index[1])): - if ( - batch[int(head.item())] == batch[int(tail.item())] - and set((tags[int(head.item())].item(), tags[int(tail.item())].item())) == set((1, 2)) - ): - values[i] = 
False - - else: - for i, (head, tail) in enumerate(zip(edge_index[0], edge_index[1])): - if set((tags[int(head.item())].item(), tags[int(tail.item())].item())) == set((1, 2)): - values[i] = False - - return values.bool() + def edge_classifier(self, edge_index, tags): + edges_with_tags = tags[edge_index.type(torch.long)] + values = (edges_with_tags[0] == edges_with_tags[1]) + return values @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - import ipdb, time - start = time.time() # Rewire the graph z = data.atomic_numbers.long() pos = data.pos @@ -657,7 +640,7 @@ def energy_forward(self, data): edge_attr = self.distance_expansion(edge_weight) # Removing unnecessary edges - edges_to_keep = self.edge_classifier(edge_index, data.tags, batch) + edges_to_keep = self.edge_classifier(edge_index, data.tags) edge_index = edge_index[:, edges_to_keep] edge_weight = edge_weight[edges_to_keep] @@ -707,6 +690,4 @@ def energy_forward(self, data): preds = {"energy": energy, "pooling_loss": pooling_loss, "hidden_state": h} - end = time.time() - ipdb.set_trace() return preds From e8976159879d89ab95cccd53b0b9a154b164fa87 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 14 Jun 2023 10:06:40 -0400 Subject: [PATCH 004/131] Changed config file to run faenet vs disconnected. --- configs/exps/alvaro/split-ads-cats.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 792dc80926..e48a5c7e08 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -8,7 +8,7 @@ default: config: disconnected-is2re-all test_ri: True mode: train - wandb_tags: 'baseline' + # wandb_tags: 'baseine' wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro cp_data_to_tmpdir: true @@ -29,8 +29,7 @@ runs: - {} # Run 2 -# - model: -# pg_hidden_channels: 0 + - config: faenet-is2re-all # Run 3 # - optim: From e29b571e7c2895e55baf55c7631aa431f854f0cb Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 14 Jun 2023 10:42:32 -0400 Subject: [PATCH 005/131] Created new flag 'is_disconnected' --- ocpmodels/common/flags.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index 8389fa373f..bd4eb6cc3f 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -287,6 +287,12 @@ def add_core_args(self): help="Number of validation loops to run in order to collect inference" + " timing stats", ) + self.parser.add_argument( + "--is_disconnected", + action="store_true", + help="Eliminates edges between catalyst and adsorbate.", + default=False + ) flags = Flags() From 83fa9a98ea4e68927ac7fa24517f6dc608b4fd3d Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 14 Jun 2023 11:51:55 -0400 Subject: [PATCH 006/131] Corrected some typos. 
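
Besides the yaml typo fixes, this patch adds a Disconnected transform to
ocpmodels/datasets/data_transforms.py and registers it in get_transforms. A minimal
sketch of the mask it applies, on a made-up toy graph (tags follow the usual OC20
convention of 0 = sub-surface, 1 = surface, 2 = adsorbate; the tensors below are
illustrative only):

    import torch

    tags = torch.tensor([0, 1, 2])
    edge_index = torch.tensor([[0, 1, 1, 2],
                               [1, 0, 2, 2]])

    edges_with_tags = tags[edge_index]               # (2, num_edges) endpoint tags
    keep = edges_with_tags[0] == edges_with_tags[1]  # keep edges whose endpoints share a tag
    edge_index = edge_index[:, keep]                 # only the 2-2 adsorbate edge survives here

Note that this rule drops every mixed-tag edge, including tag-0/tag-1 edges within the
catalyst; with graph_rewiring: remove-tag-0 only catalyst-adsorbate edges are left to cut.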
--- configs/exps/alvaro/split-ads-cats.yaml | 6 +++--- ocpmodels/datasets/data_transforms.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index e48a5c7e08..b3d5153862 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -8,16 +8,16 @@ default: config: disconnected-is2re-all test_ri: True mode: train - # wandb_tags: 'baseine' + wandb_tags: 'baseline' wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro - cp_data_to_tmpdir: true + cp_data_to_tmpdir: True fa_frames: se3-random frame_averaging: 2D graph_rewiring: remove-tag-0 model: pg_hidden_channels: 64 - phys_embeds: true + phys_embeds: True phys_hidden_channels: 0 tag_hidden_channels: 64 energy_head: weighted-av-final-embeds diff --git a/ocpmodels/datasets/data_transforms.py b/ocpmodels/datasets/data_transforms.py index 64556a0038..8562bac254 100644 --- a/ocpmodels/datasets/data_transforms.py +++ b/ocpmodels/datasets/data_transforms.py @@ -99,6 +99,24 @@ def __call__(self, data): return self.rewiring_func(data) +class Disconnected(Transform): + def __init__(self, is_disconnected=False) -> None: + self.inactive = not is_disconnected + + def edge_classifier(self, edge_index, tags): + edges_with_tags = tags[edge_index.type(torch.long)] + values = (edges_with_tags[0] == edges_with_tags[1]) + return values + + def __call__(self, data): + if self.inactive: + return data + + values = self.edge_classifier(data.edge_index, data.tags) + data.edge_index = data.edge_index[:, values] + + return data + class Compose: # https://pytorch.org/vision/stable/_modules/torchvision/transforms/transforms.html#Compose @@ -140,5 +158,6 @@ def get_transforms(trainer_config): AddAttributes(), GraphRewiring(trainer_config.get("graph_rewiring")), FrameAveraging(trainer_config["frame_averaging"], trainer_config["fa_frames"]), + Disconnected(trainer_config["is_disconnected"]) ] return Compose(transforms) From afa0ae084520ec30c5fff9dcc531fcc2bffce202 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 14 Jun 2023 11:56:48 -0400 Subject: [PATCH 007/131] Deleted disconnected model, as now it is a feature on data transforms. Changed a config file to reflect this. 
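
The standalone model goes away because the same behaviour is now obtained through the
data pipeline: get_transforms appends a Disconnected transform driven by the
'is_disconnected' flag. A rough usage sketch (the trainer_config dict below is a
hypothetical minimum, listing only the keys that get_transforms reads):

    from ocpmodels.datasets.data_transforms import get_transforms

    trainer_config = {
        "graph_rewiring": "remove-tag-0",
        "frame_averaging": "2D",
        "fa_frames": "se3-random",
        "is_disconnected": True,  # toggles catalyst/adsorbate edge deletion
    }

    transforms = get_transforms(trainer_config)
    # Each torch_geometric Data sample can then be passed through transforms(data),
    # which applies AddAttributes, GraphRewiring, FrameAveraging and Disconnected in order.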
--- configs/exps/alvaro/split-ads-cats.yaml | 2 +- ocpmodels/models/disconnected.py | 693 ------------------------ 2 files changed, 1 insertion(+), 694 deletions(-) delete mode 100644 ocpmodels/models/disconnected.py diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index b3d5153862..797b855b0f 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -5,7 +5,7 @@ job: partition: long default: - config: disconnected-is2re-all + config: faenet-is2re-all test_ri: True mode: train wandb_tags: 'baseline' diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py deleted file mode 100644 index 0264d4280a..0000000000 --- a/ocpmodels/models/disconnected.py +++ /dev/null @@ -1,693 +0,0 @@ -""" Code of the Scalable Frame Averaging (Rotation Invariant) GNN -""" -import torch -from e3nn.o3 import spherical_harmonics -from torch import nn -from torch.nn import Embedding, Linear -from torch_geometric.nn import MessagePassing, TransformerConv, radius_graph -from torch_geometric.nn.norm import BatchNorm, GraphNorm -from torch_scatter import scatter - -from ocpmodels.common.registry import registry -from ocpmodels.common.utils import conditional_grad, get_pbc_distances -from ocpmodels.models.base_model import BaseModel -from ocpmodels.models.force_decoder import ForceDecoder -from ocpmodels.models.utils.attention_model import TransfoAttConv -from ocpmodels.models.utils.pos_encodings import PositionalEncoding -from ocpmodels.modules.phys_embeddings import PhysEmbedding -from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling -from ocpmodels.models.utils.activations import swish - -NUM_CLUSTERS = 20 -NUM_POOLING_LAYERS = 1 - - -class GaussianSmearing(nn.Module): - def __init__(self, start=0.0, stop=5.0, num_gaussians=50): - super().__init__() - offset = torch.linspace(start, stop, num_gaussians) - self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2 - self.register_buffer("offset", offset) - - def forward(self, dist): - dist = dist.view(-1, 1) - self.offset.view(1, -1) - return torch.exp(self.coeff * torch.pow(dist, 2)) - - -class EmbeddingBlock(nn.Module): - def __init__( - self, - num_gaussians, - num_filters, - hidden_channels, - tag_hidden_channels, - pg_hidden_channels, - phys_hidden_channels, - phys_embeds, - graph_rewiring, - act, - second_layer_MLP, - edge_embed_type, - ): - super().__init__() - self.act = act - self.use_tag = tag_hidden_channels > 0 - self.use_pg = pg_hidden_channels > 0 - self.use_mlp_phys = phys_hidden_channels > 0 and phys_embeds - self.use_positional_embeds = graph_rewiring in { - "one-supernode-per-graph", - "one-supernode-per-atom-type", - "one-supernode-per-atom-type-dist", - } - self.second_layer_MLP = second_layer_MLP - self.edge_embed_type = edge_embed_type - - # --- Node embedding --- - - # Phys embeddings - self.phys_emb = PhysEmbedding( - props=phys_embeds, props_grad=phys_hidden_channels > 0, pg=self.use_pg - ) - # With MLP - if self.use_mlp_phys: - self.phys_lin = Linear(self.phys_emb.n_properties, phys_hidden_channels) - else: - phys_hidden_channels = self.phys_emb.n_properties - - # Period + group embeddings - if self.use_pg: - self.period_embedding = Embedding( - self.phys_emb.period_size, pg_hidden_channels - ) - self.group_embedding = Embedding( - self.phys_emb.group_size, pg_hidden_channels - ) - - # Tag embedding - if tag_hidden_channels: - self.tag_embedding = Embedding(3, tag_hidden_channels) - - # Positional encoding - if 
self.use_positional_embeds: - self.pe = PositionalEncoding(hidden_channels, 210) - - # Main embedding - self.emb = Embedding( - 85, - hidden_channels - - tag_hidden_channels - - phys_hidden_channels - - 2 * pg_hidden_channels, - ) - - # MLP - self.lin = Linear(hidden_channels, hidden_channels) - if self.second_layer_MLP: - self.lin_2 = Linear(hidden_channels, hidden_channels) - - # --- Edge embedding --- - - # TODO: change some num_filters to edge_embed_hidden - if self.edge_embed_type == "rij": - self.lin_e1 = Linear(3, num_filters) - elif self.edge_embed_type == "all_rij": - self.lin_e1 = Linear(3, num_filters // 2) # r_ij - self.lin_e12 = Linear( - num_gaussians, num_filters - (num_filters // 2) - ) # d_ij - elif self.edge_embed_type == "sh": - self.lin_e1 = Linear(15, num_filters) - elif self.edge_embed_type == "all": - self.lin_e1 = Linear(18 + num_gaussians, num_filters) - else: - raise ValueError("edge_embedding_type does not exist") - - if self.second_layer_MLP: - self.lin_e2 = Linear(num_filters, num_filters) - - self.reset_parameters() - - def reset_parameters(self): - self.emb.reset_parameters() - if self.use_mlp_phys: - nn.init.xavier_uniform_(self.phys_lin.weight) - if self.use_tag: - self.tag_embedding.reset_parameters() - if self.use_pg: - self.period_embedding.reset_parameters() - self.group_embedding.reset_parameters() - nn.init.xavier_uniform_(self.lin.weight) - self.lin.bias.data.fill_(0) - nn.init.xavier_uniform_(self.lin_e1.weight) - self.lin_e1.bias.data.fill_(0) - if self.second_layer_MLP: - nn.init.xavier_uniform_(self.lin_2.weight) - self.lin_2.bias.data.fill_(0) - nn.init.xavier_uniform_(self.lin_e2.weight) - self.lin_e2.bias.data.fill_(0) - if self.edge_embed_type == "all_rij": - nn.init.xavier_uniform_(self.lin_e12.weight) - self.lin_e12.bias.data.fill_(0) - - def forward( - self, z, rel_pos, edge_attr, tag=None, normalised_rel_pos=None, subnodes=None - ): - # --- Edge embedding -- - - if self.edge_embed_type == "rij": - e = self.lin_e1(rel_pos) - elif self.edge_embed_type == "all_rij": - rel_pos = self.lin_e1(rel_pos) # r_ij - edge_attr = self.lin_e12(edge_attr) # d_ij - e = torch.cat((rel_pos, edge_attr), dim=1) - elif self.edge_embed_type == "sh": - self.sh = spherical_harmonics( - l=[1, 2, 3], - x=normalised_rel_pos, - normalize=False, - normalization="component", - ) - e = self.lin_e1(self.sh) - elif self.edge_embed_type == "all": - self.sh = spherical_harmonics( - l=[1, 2, 3], - x=normalised_rel_pos, - normalize=False, - normalization="component", - ) - e = torch.cat((rel_pos, self.sh, edge_attr), dim=1) - e = self.lin_e1(e) - - e = self.act(e) # can comment out - - if self.second_layer_MLP: - # e = self.lin_e2(e) - e = self.act(self.lin_e2(e)) - - # --- Node embedding -- - - # Create atom embeddings based on its characteristic number - h = self.emb(z) - - if self.phys_emb.device != h.device: - self.phys_emb = self.phys_emb.to(h.device) - - # Concat tag embedding - if self.use_tag: - h_tag = self.tag_embedding(tag) - h = torch.cat((h, h_tag), dim=1) - - # Concat physics embeddings - if self.phys_emb.n_properties > 0: - h_phys = self.phys_emb.properties[z] - if self.use_mlp_phys: - h_phys = self.phys_lin(h_phys) - h = torch.cat((h, h_phys), dim=1) - - # Concat period & group embedding - if self.use_pg: - h_period = self.period_embedding(self.phys_emb.period[z]) - h_group = self.group_embedding(self.phys_emb.group[z]) - h = torch.cat((h, h_period, h_group), dim=1) - - # Add positional embedding - if self.use_positional_embeds: - idx_of_non_zero_val = (tag == 
0).nonzero().T.squeeze(0) - h_pos = torch.zeros_like(h, device=h.device) - h_pos[idx_of_non_zero_val, :] = self.pe(subnodes).to(device=h_pos.device) - h += h_pos - - # MLP - h = self.act(self.lin(h)) - if self.second_layer_MLP: - h = self.act(self.lin_2(h)) - - return h, e - - -class InteractionBlock(MessagePassing): - def __init__( - self, - hidden_channels, - num_filters, - act, - mp_type, - complex_mp, - att_heads, - graph_norm, - ): - super(InteractionBlock, self).__init__() - self.act = act - self.mp_type = mp_type - self.hidden_channels = hidden_channels - self.complex_mp = complex_mp - self.graph_norm = graph_norm - if graph_norm: - self.graph_norm = GraphNorm( - hidden_channels if "updown" not in self.mp_type else num_filters - ) - - if self.mp_type == "simple": - self.lin_geom = nn.Linear(num_filters, hidden_channels) - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - - elif self.mp_type == "sfarinet": - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - - elif self.mp_type == "updownscale": - self.lin_geom = nn.Linear(num_filters, num_filters) # like 'simple' - self.lin_down = nn.Linear(hidden_channels, num_filters) - self.lin_up = nn.Linear(num_filters, hidden_channels) - - elif self.mp_type == "updownscale_base": - self.lin_geom = nn.Linear(num_filters + 2 * hidden_channels, num_filters) - self.lin_down = nn.Linear(hidden_channels, num_filters) - self.lin_up = nn.Linear(num_filters, hidden_channels) - - elif self.mp_type == "base_with_att": - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - # self.lin_geom = AttConv(hidden_channels, heads=1, concat=True, bias=True) - self.lin_geom = TransfoAttConv( - hidden_channels, - hidden_channels, - heads=att_heads, - concat=False, - root_weight=False, - edge_dim=num_filters, - ) - elif self.mp_type == "att": - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - self.lin_geom = TransformerConv( - hidden_channels, - hidden_channels, - heads=att_heads, - concat=False, - root_weight=False, - edge_dim=num_filters, - ) - - elif self.mp_type == "local_env": - self.lin_geom = nn.Linear(num_filters, hidden_channels) - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - - elif self.mp_type == "updown_local_env": - self.lin_down = nn.Linear(hidden_channels, num_filters) - self.lin_geom = nn.Linear(num_filters, num_filters) - self.lin_up = nn.Linear(2 * num_filters, hidden_channels) - - else: # base - self.lin_geom = nn.Linear( - num_filters + 2 * hidden_channels, hidden_channels - ) - self.lin_h = nn.Linear(hidden_channels, hidden_channels) - - if self.complex_mp: - self.other_mlp = nn.Linear(hidden_channels, hidden_channels) - - self.reset_parameters() - - def reset_parameters(self): - if self.mp_type not in {"sfarinet", "att", "base_with_att"}: - nn.init.xavier_uniform_(self.lin_geom.weight) - self.lin_geom.bias.data.fill_(0) - if self.complex_mp: - nn.init.xavier_uniform_(self.other_mlp.weight) - self.other_mlp.bias.data.fill_(0) - if self.mp_type in {"updownscale", "updownscale_base", "updown_local_env"}: - nn.init.xavier_uniform_(self.lin_up.weight) - self.lin_up.bias.data.fill_(0) - nn.init.xavier_uniform_(self.lin_down.weight) - self.lin_down.bias.data.fill_(0) - else: - nn.init.xavier_uniform_(self.lin_h.weight) - self.lin_h.bias.data.fill_(0) - - def forward(self, h, edge_index, e): - # Define edge embedding - if self.mp_type in {"base", "updownscale_base"}: - e = torch.cat([e, h[edge_index[0]], h[edge_index[1]]], dim=1) - - if self.mp_type in { - "simple", - "updownscale", - "base", - 
"updownscale_base", - "local_env", - }: - e = self.act(self.lin_geom(e)) # TODO: remove act() ? - - # --- Message Passing block -- - - if self.mp_type == "updownscale" or self.mp_type == "updownscale_base": - h = self.act(self.lin_down(h)) # downscale node rep. - h = self.propagate(edge_index, x=h, W=e) # propagate - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = self.act(self.lin_up(h)) # upscale node rep. - - elif self.mp_type == "att": - h = self.lin_geom(h, edge_index, edge_attr=e) - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = self.act(self.lin_h(h)) - - elif self.mp_type == "base_with_att": - h = self.lin_geom(h, edge_index, edge_attr=e) # propagate is inside - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = self.act(self.lin_h(h)) - - elif self.mp_type == "local_env": - chi = self.propagate(edge_index, x=h, W=e, local_env=True) - h = self.propagate(edge_index, x=h, W=e) # propagate - h = h + chi - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = h = self.act(self.lin_h(h)) - - elif self.mp_type == "updown_local_env": - h = self.act(self.lin_down(h)) - chi = self.propagate(edge_index, x=h, W=e, local_env=True) - e = self.lin_geom(e) - h = self.propagate(edge_index, x=h, W=e) # propagate - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = torch.cat((h, chi), dim=1) - h = self.lin_up(h) - - elif self.mp_type in {"base", "simple", "sfarinet"}: - h = self.propagate(edge_index, x=h, W=e) # propagate - if self.graph_norm: - h = self.act(self.graph_norm(h)) - h = self.act(self.lin_h(h)) - - else: - raise ValueError("mp_type provided does not exist") - - if self.complex_mp: - h = self.act(self.other_mlp(h)) - - return h - - def message(self, x_j, W, local_env=None): - if local_env is not None: - return W - else: - return x_j * W - - -class OutputBlock(nn.Module): - def __init__(self, energy_head, hidden_channels, act): - super().__init__() - self.energy_head = energy_head - self.act = act - - self.lin1 = Linear(hidden_channels, hidden_channels // 2) - self.lin2 = Linear(hidden_channels // 2, 1) - - # weighted average & pooling - if self.energy_head in {"pooling", "random"}: - self.hierarchical_pooling = Hierarchical_Pooling( - hidden_channels, - self.act, - NUM_POOLING_LAYERS, - NUM_CLUSTERS, - self.energy_head, - ) - elif self.energy_head == "graclus": - self.graclus = Graclus(hidden_channels, self.act) - elif self.energy_head == "weighted-av-final-embeds": - self.w_lin = Linear(hidden_channels, 1) - - def reset_parameters(self): - nn.init.xavier_uniform_(self.lin1.weight) - self.lin1.bias.data.fill_(0) - nn.init.xavier_uniform_(self.lin2.weight) - self.lin2.bias.data.fill_(0) - if self.energy_head == "weighted-av-final-embeds": - nn.init.xavier_uniform_(self.w_lin.weight) - self.w_lin.bias.data.fill_(0) - - def forward(self, h, edge_index, edge_weight, batch, alpha): - if self.energy_head == "weighted-av-final-embeds": - alpha = self.w_lin(h) - - elif self.energy_head == "graclus": - h, batch = self.graclus(h, edge_index, edge_weight, batch) - - elif self.energy_head in {"pooling", "random"}: - h, batch, pooling_loss = self.hierarchical_pooling( - h, edge_index, edge_weight, batch - ) - - # MLP - h = self.lin1(h) - h = self.act(h) - h = self.lin2(h) - - if self.energy_head in { - "weighted-av-initial-embeds", - "weighted-av-final-embeds", - }: - h = h * alpha - - # Global pooling - out = scatter(h, batch, dim=0, reduce="add") - - return out - - -@registry.register_model("disconnected") -class Disconnected(BaseModel): - 
r"""Frame Averaging GNN model FAENet. - - Args: - cutoff (float): Cutoff distance for interatomic interactions. - (default: :obj:`6.0`) - use_pbc (bool): Use of periodic boundary conditions. - (default: true) - act (str): activation function - (default: swish) - max_num_neighbors (int): The maximum number of neighbors to - collect for each node within the :attr:`cutoff` distance. - (default: :obj:`32`) - graph_rewiring (str): Method used to create the graph, - among "", remove-tag-0, supernodes. - energy_head (str): Method to compute energy prediction - from atom representations. - hidden_channels (int): Hidden embedding size. - (default: :obj:`128`) - tag_hidden_channels (int): Hidden tag embedding size. - (default: :obj:`32`) - pg_hidden_channels (int): Hidden period and group embed size. - (default: obj:`32`) - phys_embed (bool): Concat fixed physics-aware embeddings. - phys_hidden_channels (int): Hidden size of learnable phys embed. - (default: obj:`32`) - num_interactions (int): The number of interaction blocks. - (default: :obj:`4`) - num_gaussians (int): The number of gaussians :math:`\mu`. - (default: :obj:`50`) - second_layer_MLP (bool): use 2-layers MLP at the end of the Embedding block. - skip_co (str): add a skip connection between each interaction block and - energy-head. ("add", False, "concat", "concat_atom") - edge_embed_type (str, in {'rij','all_rij','sh', 'all'}): input feature - of the edge embedding block. - edge_embed_hidden (int): size of edge representation. - could be num_filters or hidden_channels. - mp_type (str, in {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env' - 'updownscale_base', 'updownscale', 'updown_local_env', 'sfarinet'}}): - specificies the MP of the interaction block. - graph_norm (bool): whether to apply batch norm after every linear layer. - complex_mp (bool): whether to add a second layer MLP at the end of each Interaction - force_decoder_model_config (dict): config of the force decoder model. - keys: "model_type", "hidden_channels", "num_layers", "num_heads", - force_decoder_type (str): type of the force decoder model. 
- (options: "mlp", "simple", "res", "res_updown") - """ - - def __init__(self, **kwargs): - super(Disconnected, self).__init__() - - self.cutoff = kwargs["cutoff"] - self.energy_head = kwargs["energy_head"] - self.regress_forces = kwargs["regress_forces"] - self.use_pbc = kwargs["use_pbc"] - self.max_num_neighbors = kwargs["max_num_neighbors"] - self.edge_embed_type = kwargs["edge_embed_type"] - self.skip_co = kwargs["skip_co"] - if kwargs["mp_type"] == "sfarinet": - kwargs["num_filters"] = kwargs["hidden_channels"] - - self.act = ( - getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish - ) - self.use_positional_embeds = kwargs["graph_rewiring"] in { - "one-supernode-per-graph", - "one-supernode-per-atom-type", - "one-supernode-per-atom-type-dist", - } - # Gaussian Basis - self.distance_expansion = GaussianSmearing( - 0.0, self.cutoff, kwargs["num_gaussians"] - ) - - # Embedding block - self.embed_block = EmbeddingBlock( - kwargs["num_gaussians"], - kwargs["num_filters"], - kwargs["hidden_channels"], - kwargs["tag_hidden_channels"], - kwargs["pg_hidden_channels"], - kwargs["phys_hidden_channels"], - kwargs["phys_embeds"], - kwargs["graph_rewiring"], - self.act, - kwargs["second_layer_MLP"], - kwargs["edge_embed_type"], - ) - - # Interaction block - self.interaction_blocks = nn.ModuleList( - [ - InteractionBlock( - kwargs["hidden_channels"], - kwargs["num_filters"], - self.act, - kwargs["mp_type"], - kwargs["complex_mp"], - kwargs["att_heads"], - kwargs["graph_norm"], - ) - for _ in range(kwargs["num_interactions"]) - ] - ) - - # Output block - self.output_block = OutputBlock( - self.energy_head, kwargs["hidden_channels"], self.act - ) - - # Energy head - if self.energy_head == "weighted-av-initial-embeds": - self.w_lin = Linear(kwargs["hidden_channels"], 1) - - # Force head - self.decoder = ( - ForceDecoder( - kwargs["force_decoder_type"], - kwargs["hidden_channels"], - kwargs["force_decoder_model_config"], - self.act, - ) - if "direct" in self.regress_forces - else None - ) - - # Skip co - if self.skip_co == "concat": - self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) - elif self.skip_co == "concat_atom": - self.mlp_skip_co = Linear( - ((kwargs["num_interactions"] + 1) * kwargs["hidden_channels"]), - kwargs["hidden_channels"], - ) - - @conditional_grad(torch.enable_grad()) - def forces_forward(self, preds): - return self.decoder(preds["hidden_state"]) - - def edge_classifier(self, edge_index, tags): - edges_with_tags = tags[edge_index.type(torch.long)] - values = (edges_with_tags[0] == edges_with_tags[1]) - return values - - @conditional_grad(torch.enable_grad()) - def energy_forward(self, data): - # Rewire the graph - z = data.atomic_numbers.long() - pos = data.pos - batch = data.batch - - # Use periodic boundary conditions - if self.use_pbc: - assert z.dim() == 1 and z.dtype == torch.long - - out = get_pbc_distances( - pos, - data.edge_index, - data.cell, - data.cell_offsets, - data.neighbors, - return_distance_vec=True, - ) - - edge_index = out["edge_index"] - edge_weight = out["distances"] - rel_pos = out["distance_vec"] - edge_attr = self.distance_expansion(edge_weight) - else: - edge_index = radius_graph( - pos, - r=self.cutoff, - batch=batch, - max_num_neighbors=self.max_num_neighbors, - ) - # edge_index = data.edge_index - row, col = edge_index - rel_pos = pos[row] - pos[col] - edge_weight = rel_pos.norm(dim=-1) - edge_attr = self.distance_expansion(edge_weight) - - # Removing unnecessary edges - edges_to_keep = 
self.edge_classifier(edge_index, data.tags) - - edge_index = edge_index[:, edges_to_keep] - edge_weight = edge_weight[edges_to_keep] - edge_attr = edge_attr[edges_to_keep, :] - rel_pos = rel_pos[edges_to_keep, :] - - # Normalize and squash to [0,1] for gaussian basis - rel_pos_normalized = None - if self.edge_embed_type in {"sh", "all_rij", "all"}: - rel_pos_normalized = (rel_pos / edge_weight.view(-1, 1) + 1) / 2.0 - - pooling_loss = None # deal with pooling loss - - # Embedding block - h, e = self.embed_block(z, rel_pos, edge_attr, data.tags, rel_pos_normalized) - - # Compute atom weights for late energy head - if self.energy_head == "weighted-av-initial-embeds": - alpha = self.w_lin(h) - else: - alpha = None - - # Interaction blocks - energy_skip_co = [] - for interaction in self.interaction_blocks: - if self.skip_co == "concat_atom": - energy_skip_co.append(h) - elif self.skip_co: - energy_skip_co.append( - self.output_block(h, edge_index, edge_weight, batch, alpha) - ) - h = h + interaction(h, edge_index, e) - - # Atom skip-co - if self.skip_co == "concat_atom": - energy_skip_co.append(h) - h = self.act(self.mlp_skip_co(torch.cat(energy_skip_co, dim=1))) - - energy = self.output_block(h, edge_index, edge_weight, batch, alpha) - - # Skip-connection - energy_skip_co.append(energy) - if self.skip_co == "concat": - energy = self.mlp_skip_co(torch.cat(energy_skip_co, dim=1)) - elif self.skip_co == "add": - energy = sum(energy_skip_co) - - preds = {"energy": energy, "pooling_loss": pooling_loss, "hidden_state": h} - - return preds From fca863cef19723a338eb018dbeff9c0815c06f73 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 14 Jun 2023 14:55:28 -0400 Subject: [PATCH 008/131] Fixed problem that the function assumed there were no tag 0 vertices. --- ocpmodels/datasets/data_transforms.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ocpmodels/datasets/data_transforms.py b/ocpmodels/datasets/data_transforms.py index 8562bac254..307b9c80da 100644 --- a/ocpmodels/datasets/data_transforms.py +++ b/ocpmodels/datasets/data_transforms.py @@ -104,9 +104,14 @@ def __init__(self, is_disconnected=False) -> None: self.inactive = not is_disconnected def edge_classifier(self, edge_index, tags): - edges_with_tags = tags[edge_index.type(torch.long)] - values = (edges_with_tags[0] == edges_with_tags[1]) - return values + edges_with_tags = tags[edge_index.type(torch.long)] # Tensor with shape=edge_index.shape where every entry is a tag + filt1 = (edges_with_tags[0] == edges_with_tags[1]) + filt2 = (edges_with_tags[0] != 2) * (edges_with_tags[1] != 2) + + # Edge is removed if tags are different (R1), and at least one end has tag 2 (R2). We want ~(R1*R2) = ~R1+~R2. + # filt1 = ~R1. Let L1 be that head has tag 2, and L2 is that tail has tag 2. Then R2 = L1+L2, so ~R2 = ~L1*~L2 = filt2. 
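+        # Example: endpoint tag pairs (0, 0), (1, 1), (2, 2) and (0, 1) are kept, while
+        # (0, 2) and (1, 2) are dropped, i.e. only catalyst-adsorbate edges are removed.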
+ + return filt1 + filt2 def __call__(self, data): if self.inactive: From 78e829e6af75ad2ccc85438575d16c311f5331aa Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 15 Jun 2023 11:24:06 -0400 Subject: [PATCH 009/131] Fixed a mistake where not all edge attributes are filtered --- ocpmodels/datasets/data_transforms.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ocpmodels/datasets/data_transforms.py b/ocpmodels/datasets/data_transforms.py index 307b9c80da..89d1d997ae 100644 --- a/ocpmodels/datasets/data_transforms.py +++ b/ocpmodels/datasets/data_transforms.py @@ -118,7 +118,10 @@ def __call__(self, data): return data values = self.edge_classifier(data.edge_index, data.tags) + data.edge_index = data.edge_index[:, values] + data.cell_offsets = data.cell_offsets[values, :] + data.distances = data.distances[values] return data From e91c007965fbd9fe331c1cccb830aa49808322ba Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 21 Jun 2023 17:25:01 -0400 Subject: [PATCH 010/131] Modified the is_disconnected flag to be a variable so it can be used in yaml files. Also modified some yaml experiment files. --- configs/exps/alvaro/default-config.yaml | 20 ++++++++++++++++++++ configs/exps/alvaro/split-ads-cats.yaml | 20 ++++++++++---------- ocpmodels/common/flags.py | 4 ++-- 3 files changed, 32 insertions(+), 12 deletions(-) create mode 100644 configs/exps/alvaro/default-config.yaml diff --git a/configs/exps/alvaro/default-config.yaml b/configs/exps/alvaro/default-config.yaml new file mode 100644 index 0000000000..b1ddf7e65c --- /dev/null +++ b/configs/exps/alvaro/default-config.yaml @@ -0,0 +1,20 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + +default: + config: faenet-is2re-10k + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + cp_data_to_tmpdir: True + fa_frames: se3-random + frame_averaging: 2D + graph_rewiring: remove-tag-0 + optim: + max_epochs: 12 + +runs: + # Run 1 + - is_disconnected: True diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 797b855b0f..146efbb071 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -5,7 +5,7 @@ job: partition: long default: - config: faenet-is2re-all + config: dimenetplus-is2re-10k test_ri: True mode: train wandb_tags: 'baseline' @@ -16,7 +16,9 @@ default: frame_averaging: 2D graph_rewiring: remove-tag-0 model: - pg_hidden_channels: 64 + pg_hidden_channels: 384 + act: swish + num_gaussians: 104 phys_embeds: True phys_hidden_channels: 0 tag_hidden_channels: 64 @@ -26,17 +28,15 @@ default: runs: # Run 1 - - {} + #- {} # Run 2 - - config: faenet-is2re-all + - config: faenet-is2re-10k # Run 3 -# - optim: -# lr_initial: 0.0001 + #- is_disconnected: True # Run 4 -# - model: -# pg_hidden_channels: 32 -# optim: -# lr_initial: 0.0001 + #- is_disconnected: True + #config: faenet-is2re-10k + diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index bd4eb6cc3f..afd7ecfce2 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -289,9 +289,9 @@ def add_core_args(self): ) self.parser.add_argument( "--is_disconnected", - action="store_true", - help="Eliminates edges between catalyst and adsorbate.", + type=bool default=False + help="Eliminates edges between catalyst and adsorbate." 
) From 0719a7f455994974608a9e375ab3becea6217417 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 21 Jun 2023 17:30:32 -0400 Subject: [PATCH 011/131] fixed some typos --- ocpmodels/common/flags.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index afd7ecfce2..c5da2efdb6 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -289,8 +289,8 @@ def add_core_args(self): ) self.parser.add_argument( "--is_disconnected", - type=bool - default=False + type=bool, + default=False, help="Eliminates edges between catalyst and adsorbate." ) From 61887f368ca572745db04daad496432ceacbc691 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 26 Jun 2023 16:23:38 -0400 Subject: [PATCH 012/131] Deleted disconnected model, and copied best configs to my experiments folder --- configs/exps/alvaro/best-configs-all.yaml | 43 ++++ configs/exps/alvaro/default-config.yaml | 14 +- configs/exps/alvaro/faenet-orion.yaml | 62 +++++ configs/exps/alvaro/split-ads-cats.yaml | 62 ++--- configs/models/disconnected.yaml | 271 ---------------------- 5 files changed, 144 insertions(+), 308 deletions(-) create mode 100644 configs/exps/alvaro/best-configs-all.yaml create mode 100644 configs/exps/alvaro/faenet-orion.yaml delete mode 100644 configs/models/disconnected.yaml diff --git a/configs/exps/alvaro/best-configs-all.yaml b/configs/exps/alvaro/best-configs-all.yaml new file mode 100644 index 0000000000..0c673cdef0 --- /dev/null +++ b/configs/exps/alvaro/best-configs-all.yaml @@ -0,0 +1,43 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + model: + edge_embed_type: all_rij + wandb_tags: 'best-config' + optim: + batch_size: 256 + eval_batch_size: 256 + cp_data_to_tmpdir: true + config: faenet-is2re-10k + note: 'best-config-??' # Insert what model you're running if running one by one. + frame_averaging: 2D + fa_frames: se3-random + model: + mp_type: updownscale + phys_embeds: False + tag_hidden_channels: 32 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: False + graph_norm: True + hidden_channels: 352 + num_filters: 448 + num_gaussians: 99 + num_interactions: 6 + second_layer_MLP: True + skip_co: concat + optim: + lr_initial: 0.0019 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 diff --git a/configs/exps/alvaro/default-config.yaml b/configs/exps/alvaro/default-config.yaml index b1ddf7e65c..c04e1ee533 100644 --- a/configs/exps/alvaro/default-config.yaml +++ b/configs/exps/alvaro/default-config.yaml @@ -5,16 +5,12 @@ job: partition: long default: - config: faenet-is2re-10k - wandb_name: alvaro-carbonero-math + test_ri: True + mode: train + wandb_name: alvaro-carbonero-math # DO NOT USE THIS CONFIG FILE. IT'S BAD! 
wandb_project: ocp-alvaro - cp_data_to_tmpdir: True - fa_frames: se3-random - frame_averaging: 2D - graph_rewiring: remove-tag-0 - optim: - max_epochs: 12 runs: # Run 1 - - is_disconnected: True + - config: faenet-is2re-10k + note: 'default-with-train-mode' diff --git a/configs/exps/alvaro/faenet-orion.yaml b/configs/exps/alvaro/faenet-orion.yaml new file mode 100644 index 0000000000..229d8bcc0b --- /dev/null +++ b/configs/exps/alvaro/faenet-orion.yaml @@ -0,0 +1,62 @@ +# more epochs, larger batch size, explore faenet: larger model & skip-co & mlp_rij +job: + mem: 8GB + cpus: 4 + gres: gpu:1 + time: 30:00 + partition: main + # code_loc: /home/mila/s/schmidtv/ocp-project/ocp-drlab + # env: ocp-a100 + +default: + wandb_project: ocp-alvaro + config: faenet-is2re-10k + mode: train + test_ri: true + wandb_tags: is2re-10k, orion + cp_data_to_tmpdir: false + graph_rewiring: remove-tag-0 + log_train_every: 20 + optim: + warmup_steps: 100 + # parameters EMA + ema_decay: 0.999 + decay_steps: max_steps + scheduler: LinearWarmupCosineAnnealingLR + batch_size: 64 + note: + model: name, num_gaussians, hidden_channels, num_filters, num_interactions, phys_embeds, pg_hidden_channels, phys_hidden_channels, tag_hidden_channels, energy_head, edge_embed_type, mp_type, graph_norm + optim: batch_size, lr_initial + _root_: frame_averaging, fa_frames + orion_mult_factor: + value: 32 + targets: hidden_channels, num_filters, pg_hidden_channels, phys_hidden_channels, tag_hidden_channels + +orion: + # Remember to change the experiment name if you change anything in the search space + n_jobs: 20 + + unique_exp_name: faenet-is2re-10k-v1.3.0 + + space: + optim/max_epochs: fidelity(20, 100, base=4) + optim/lr_initial: loguniform(1e-4, 5e-3, precision=2) + model/graph_norm: choices([True, False]) + model/edge_embed_type: choices(["rij", "all_rij", "sh", "all"]) + model/energy_head: choices(["", "weighted-av-final-embeds", "weighted-av-initial-embeds"]) + model/hidden_channels: uniform(4, 16, discrete=True) + model/mp_type: choices(["simple", "base", "sfarinet", "updownscale", "updownscale_base", "base_with_att", "att", "local_env", "updown_local_env"]) + model/num_filters: uniform(1, 16, discrete=True) + model/num_gaussians: uniform(20, 150, discrete=True) + model/num_interactions: uniform(1, 7, discrete=True) + model/pg_hidden_channels: uniform(0, 2, discrete=True) + model/phys_embeds: choices([True, False]) + model/phys_hidden_channels: uniform(0, 2, discrete=True) + model/tag_hidden_channels: uniform(0, 2, discrete=True) + frame_averaging: choices(["", "2D", "3D", "DA"]) + fa_frames: choices(["", "random", "det", "all", "se3-all", "se3-random", "se3-det", "multiple", "se3-multiple"]) + algorithms: + asha: + seed: 123 + num_rungs: 5 + num_brackets: 1 diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 146efbb071..ac818e6a1c 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -1,42 +1,48 @@ +# RIGHT NOW, THIS IS THE SAME AS BEST-CONFIGS-ALL. 
+ job: mem: 32GB cpus: 4 gres: gpu:rtx8000:1 partition: long + time: 15:00:00 default: - config: dimenetplus-is2re-10k - test_ri: True - mode: train - wandb_tags: 'baseline' wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro - cp_data_to_tmpdir: True - fa_frames: se3-random - frame_averaging: 2D + test_ri: True + mode: train graph_rewiring: remove-tag-0 model: - pg_hidden_channels: 384 - act: swish - num_gaussians: 104 - phys_embeds: True - phys_hidden_channels: 0 - tag_hidden_channels: 64 - energy_head: weighted-av-final-embeds + edge_embed_type: all_rij + wandb_tags: 'best-config' optim: - max_epochs: 12 + batch_size: 256 + eval_batch_size: 256 + cp_data_to_tmpdir: true runs: - # Run 1 - #- {} - - # Run 2 - - config: faenet-is2re-10k - - # Run 3 - #- is_disconnected: True - - # Run 4 - #- is_disconnected: True - #config: faenet-is2re-10k - + - config: faenet-is2re-all + note: 'best-config-all-disconected' # Insert what model you're running if running one by one. + is_disconnected: True + frame_averaging: 2D + fa_frames: se3-random + model: + mp_type: updownscale + phys_embeds: False + tag_hidden_channels: 32 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: False + graph_norm: True + hidden_channels: 352 + num_filters: 448 + num_gaussians: 99 + num_interactions: 6 + second_layer_MLP: True + skip_co: concat + optim: + lr_initial: 0.0019 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 diff --git a/configs/models/disconnected.yaml b/configs/models/disconnected.yaml deleted file mode 100644 index 2b1319d717..0000000000 --- a/configs/models/disconnected.yaml +++ /dev/null @@ -1,271 +0,0 @@ -default: - model: - name: disconnected - act: swish - hidden_channels: 128 - num_filters: 100 - num_interactions: 3 - num_gaussians: 100 - cutoff: 6.0 - use_pbc: True - regress_forces: False - # drlab attributes: - tag_hidden_channels: 0 # 32 - pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels - phys_embeds: False # True - phys_hidden_channels: 0 - energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} - # faenet new features - skip_co: False # output skip connections {False, "add", "concat"} - second_layer_MLP: False # in EmbeddingBlock - complex_mp: False - edge_embed_type: rij # {'rij','all_rij','sh', 'all'}) - mp_type: base # {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env'} - graph_norm: False # bool - att_heads: 1 # int - force_decoder_type: "mlp" # can be {"" or "simple"} | only used if regress_forces is True - force_decoder_model_config: - simple: - hidden_channels: 128 - norm: batch1d # batch1d, layer or null - mlp: - hidden_channels: 256 - norm: batch1d # batch1d, layer or null - res: - hidden_channels: 128 - norm: batch1d # batch1d, layer or null - res_updown: - hidden_channels: 128 - norm: batch1d # batch1d, layer or null - optim: - batch_size: 64 - eval_batch_size: 64 - num_workers: 4 - lr_gamma: 0.1 - lr_initial: 0.001 - warmup_factor: 0.2 - max_epochs: 20 - energy_grad_coefficient: 10 - force_coefficient: 30 - energy_coefficient: 1 - - frame_averaging: False # 2D, 3D, da, False - fa_frames: False # can be {None, full, random, det, e3, e3-random, e3-det} - -# ------------------- -# ----- IS2RE ----- -# ------------------- - -is2re: - # *** Important note *** - # The total number of gpus used for this run was 1. 
- # If the global batch size (num_gpus * batch_size) is modified - # the lr_milestones and warmup_steps need to be adjusted accordingly. - 10k: - optim: - lr_initial: 0.005 - lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma - - 1562 - - 2343 - - 3125 - warmup_steps: 468 - max_epochs: 20 - - 100k: - model: - hidden_channels: 256 - optim: - lr_initial: 0.005 - lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma - - 1562 - - 2343 - - 3125 - warmup_steps: 468 - max_epochs: 20 - - all: - model: - hidden_channels: 384 - num_interactions: 4 - optim: - batch_size: 256 - eval_batch_size: 256 - lr_initial: 0.001 - lr_gamma: 0.1 - lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma - - 18000 - - 27000 - - 37000 - warmup_steps: 6000 - max_epochs: 20 - -# ------------------ -# ----- S2EF ----- -# ------------------ - -# For 2 GPUs - -s2ef: - default: - model: - num_interactions: 4 - hidden_channels: 750 - num_gaussians: 200 - num_filters: 256 - regress_forces: "direct" - force_coefficient: 30 - energy_grad_coefficient: 10 - optim: - batch_size: 96 - eval_batch_size: 96 - warmup_factor: 0.2 - lr_gamma: 0.1 - lr_initial: 0.0001 - max_epochs: 15 - warmup_steps: 30000 - lr_milestones: - - 55000 - - 75000 - - 10000 - - 200k: {} - - # 1 gpus - 2M: - model: - num_interactions: 5 - hidden_channels: 1024 - num_gaussians: 200 - num_filters: 256 - optim: - batch_size: 192 - eval_batch_size: 192 - - 20M: {} - - all: {} - -qm9: - default: - model: - act: swish - att_heads: 1 - complex_mp: true - cutoff: 6.0 - edge_embed_type: all_rij - energy_head: '' - graph_norm: true - graph_rewiring: null - hidden_channels: 400 - max_num_neighbors: 30 - mp_type: updownscale_base - num_filters: 480 - num_gaussians: 100 - num_interactions: 5 - otf_graph: false - pg_hidden_channels: 32 - phys_embeds: false - phys_hidden_channels: 0 - regress_forces: '' - second_layer_MLP: true - skip_co: true - tag_hidden_channels: 0 - use_pbc: false - - optim: - batch_size: 64 - es_min_abs_change: 1.0e-06 - es_patience: 20 - es_warmup_epochs: 600 - eval_batch_size: 64 - factor: 0.9 - lr_initial: 0.0003 - loss_energy: mse - lr_gamma: 0.1 - lr_initial: 0.001 - max_epochs: 1500 - min_lr: 1.0e-06 - mode: min - optimizer: AdamW - patience: 15 - scheduler: ReduceLROnPlateau - threshold: 0.0001 - threshold_mode: abs - verbose: true - warmup_factor: 0.2 - warmup_steps: 3000 - - 10k: {} - all: {} - -qm7x: - default: - model: # SOTA settings - act: swish - att_heads: 1 - complex_mp: true - cutoff: 5.0 - edge_embed_type: all_rij - energy_head: false - force_decoder_model_config: - mlp: - hidden_channels: 256 - norm: batch1d - res: - hidden_channels: 128 - norm: batch1d - res_updown: - hidden_channels: 128 - norm: layer - simple: - hidden_channels: 128 - norm: batch1d - force_decoder_type: res_updown - graph_norm: false - hidden_channels: 500 - max_num_neighbors: 40 - mp_type: updownscale_base - num_filters: 400 - num_gaussians: 50 - num_interactions: 5 - otf_graph: false - pg_hidden_channels: 32 - phys_embeds: true - phys_hidden_channels: 0 - regress_forces: direct_with_gradient_target - second_layer_MLP: true - skip_co: false - tag_hidden_channels: 0 - use_pbc: false - - optim: - batch_size: 100 - energy_grad_coefficient: 5 - eval_batch_size: 100 - eval_every: 0.34 - factor: 0.75 - force_coefficient: 75 - loss_energy: mae - loss_force: mse - lr_gamma: 0.1 - lr_initial: 0.000193 - max_steps: 4000000 - min_lr: 1.0e-06 - mode: min - optimizer: AdamW - scheduler: ReduceLROnPlateau - threshold: 0.001 - 
threshold_mode: abs - verbose: true - warmup_factor: 0.2 - warmup_steps: 3000 - - all: {} - 1k: {} - -qm9: - default: - model: - use_pbc: False - all: {} - 10k: {} From cec76631b991971391c6f5915c75f2747b464c5f Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 26 Jun 2023 17:29:56 -0400 Subject: [PATCH 013/131] Added flag for only using a subset of the dataset with a unique datapoint for every (catalyst, adsorbate, cell) tuple. --- ocpmodels/common/flags.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index c5da2efdb6..ed66ac1f12 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -293,6 +293,12 @@ def add_core_args(self): default=False, help="Eliminates edges between catalyst and adsorbate." ) + self.parser.add_argument( + "--lowest_energy_only", + type=bool, + default=False, + help="Makes trainer use the lowest energy data point for every (catalyst, adsorbate, cell) tuple." + ) flags = Flags() From a2e6b334b60a5d24c29600a8ea6218cd92db386a Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 26 Jun 2023 17:44:03 -0400 Subject: [PATCH 014/131] Added if statement in base trainer to use only the desired subset of dataset for training purposes --- ocpmodels/trainers/base_trainer.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 43f52d4a25..172e631ceb 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -10,6 +10,7 @@ import os import random import time +import pickle from abc import ABC, abstractmethod from collections import defaultdict from copy import deepcopy @@ -22,7 +23,7 @@ from rich.console import Console from rich.table import Table from torch.nn.parallel.distributed import DistributedDataParallel -from torch.utils.data import DataLoader +from torch.utils.data import DataLoader, Subset from torch_geometric.data import Batch from tqdm import tqdm from uuid import uuid4 @@ -243,6 +244,13 @@ def load_datasets(self): self.config["task"]["dataset"] )(ds_conf, transform=transform) + if self.config["lowest_energy_only"]: + with open('/network/scratch/a/alvaro.carbonero/lowest_energy.pkl', 'rb') as fp: + good_indices = pickle.load(fp) + good_indices = list(good_indices) + + self.dataset["train"] = Subset(self.dataset["train"], good_indices) + shuffle = False if split == "train": shuffle = True From 82c7c59f39efa35271e09ad603bddf29a4c9c364 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 26 Jun 2023 17:51:40 -0400 Subject: [PATCH 015/131] fixed some typos --- ocpmodels/trainers/base_trainer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 172e631ceb..8facfea93e 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -244,12 +244,15 @@ def load_datasets(self): self.config["task"]["dataset"] )(ds_conf, transform=transform) + import ipdb + ipdb.set_trace() + if self.config["lowest_energy_only"]: with open('/network/scratch/a/alvaro.carbonero/lowest_energy.pkl', 'rb') as fp: good_indices = pickle.load(fp) good_indices = list(good_indices) - self.dataset["train"] = Subset(self.dataset["train"], good_indices) + self.datasets["train"] = Subset(self.datasets["train"], good_indices) shuffle = False if split == "train": From 431a7c7d4acdbae7ded3815ee794eb098eb5191a Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 28 Jun 2023 10:28:57 -0400 
Subject: [PATCH 016/131] Fixed a bug when closing the dataset and modified config files to make them easier to replicate --- configs/exps/alvaro/best-configs-all.yaml | 4 +- configs/exps/alvaro/split-ads-cats.yaml | 53 ++++++++++++----------- ocpmodels/common/flags.py | 2 +- ocpmodels/trainers/base_trainer.py | 4 +- ocpmodels/trainers/single_trainer.py | 6 ++- 5 files changed, 38 insertions(+), 31 deletions(-) diff --git a/configs/exps/alvaro/best-configs-all.yaml b/configs/exps/alvaro/best-configs-all.yaml index 0c673cdef0..106a6b263d 100644 --- a/configs/exps/alvaro/best-configs-all.yaml +++ b/configs/exps/alvaro/best-configs-all.yaml @@ -1,3 +1,5 @@ +# DON'T MODIFY THIS + job: mem: 32GB cpus: 4 @@ -18,7 +20,7 @@ default: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true - config: faenet-is2re-10k + config: faenet-is2re-all note: 'best-config-??' # Insert what model you're running if running one by one. frame_averaging: 2D fa_frames: se3-random diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index ac818e6a1c..95147bf30c 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -1,4 +1,4 @@ -# RIGHT NOW, THIS IS THE SAME AS BEST-CONFIGS-ALL. +# MODIFY THIS ONE FOR RUNS job: mem: 32GB @@ -20,29 +20,32 @@ default: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true + config: faenet-is2re-all + note: 'best-config-all-lowest-energy' # Insert what model you're running if running one by one. + frame_averaging: 2D + fa_frames: se3-random + model: + mp_type: updownscale + phys_embeds: False + tag_hidden_channels: 32 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: False + graph_norm: True + hidden_channels: 352 + num_filters: 448 + num_gaussians: 99 + num_interactions: 6 + second_layer_MLP: True + skip_co: concat + optim: + lr_initial: 0.0019 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 runs: - - config: faenet-is2re-all - note: 'best-config-all-disconected' # Insert what model you're running if running one by one. - is_disconnected: True - frame_averaging: 2D - fa_frames: se3-random - model: - mp_type: updownscale - phys_embeds: False - tag_hidden_channels: 32 - pg_hidden_channels: 64 - energy_head: weighted-av-final-embeds - complex_mp: False - graph_norm: True - hidden_channels: 352 - num_filters: 448 - num_gaussians: 99 - num_interactions: 6 - second_layer_MLP: True - skip_co: concat - optim: - lr_initial: 0.0019 - scheduler: LinearWarmupCosineAnnealingLR - max_epochs: 20 - eval_every: 0.4 + - is_disconnected: True + lowest_energy_only: True + + - lowest_energy_only: True diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index ed66ac1f12..da5d105743 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -297,7 +297,7 @@ def add_core_args(self): "--lowest_energy_only", type=bool, default=False, - help="Makes trainer use the lowest energy data point for every (catalyst, adsorbate, cell) tuple." + help="Makes trainer use the lowest energy data point for every (catalyst, adsorbate, cell) tuple. 
ONLY USE WITH ALL DATASET" ) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 8facfea93e..f5ee07970e 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -244,14 +244,12 @@ def load_datasets(self): self.config["task"]["dataset"] )(ds_conf, transform=transform) - import ipdb - ipdb.set_trace() - if self.config["lowest_energy_only"]: with open('/network/scratch/a/alvaro.carbonero/lowest_energy.pkl', 'rb') as fp: good_indices = pickle.load(fp) good_indices = list(good_indices) + self.real_dataset = self.datasets["train"] self.datasets["train"] = Subset(self.datasets["train"], good_indices) shuffle = False diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index bd4f32d380..2ff82da9dd 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -468,7 +468,11 @@ def end_of_training( # Close datasets if debug_batches < 0: for ds in self.datasets.values(): - ds.close_db() + try: + ds.close_db() + except: + assert self.config["lowest_energy_only"] == True + self.real_dataset.close_db() def model_forward(self, batch_list, mode="train"): # Distinguish frame averaging from base case. From ab814b5b71f5566a4f0f05555e4d315c771c89f2 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 3 Jul 2023 15:38:11 -0400 Subject: [PATCH 017/131] Added graph splitter. Still need to reindex edges to not cause errors later. --- configs/models/disconnected.yaml | 271 +++++++++++++++++++++++++++++++ ocpmodels/common/utils.py | 3 + ocpmodels/models/disconnected.py | 92 +++++++++++ ocpmodels/models/faenet.py | 3 + 4 files changed, 369 insertions(+) create mode 100644 configs/models/disconnected.yaml create mode 100644 ocpmodels/models/disconnected.py diff --git a/configs/models/disconnected.yaml b/configs/models/disconnected.yaml new file mode 100644 index 0000000000..2b1319d717 --- /dev/null +++ b/configs/models/disconnected.yaml @@ -0,0 +1,271 @@ +default: + model: + name: disconnected + act: swish + hidden_channels: 128 + num_filters: 100 + num_interactions: 3 + num_gaussians: 100 + cutoff: 6.0 + use_pbc: True + regress_forces: False + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + # faenet new features + skip_co: False # output skip connections {False, "add", "concat"} + second_layer_MLP: False # in EmbeddingBlock + complex_mp: False + edge_embed_type: rij # {'rij','all_rij','sh', 'all'}) + mp_type: base # {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env'} + graph_norm: False # bool + att_heads: 1 # int + force_decoder_type: "mlp" # can be {"" or "simple"} | only used if regress_forces is True + force_decoder_model_config: + simple: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + mlp: + hidden_channels: 256 + norm: batch1d # batch1d, layer or null + res: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + res_updown: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + lr_initial: 0.001 + warmup_factor: 0.2 + max_epochs: 20 + energy_grad_coefficient: 10 + force_coefficient: 30 + energy_coefficient: 1 + + frame_averaging: False # 2D, 3D, da, False + fa_frames: False # can be {None, 
full, random, det, e3, e3-random, e3-det} + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + 100k: + model: + hidden_channels: 256 + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 18000 + - 27000 + - 37000 + warmup_steps: 6000 + max_epochs: 20 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +# For 2 GPUs + +s2ef: + default: + model: + num_interactions: 4 + hidden_channels: 750 + num_gaussians: 200 + num_filters: 256 + regress_forces: "direct" + force_coefficient: 30 + energy_grad_coefficient: 10 + optim: + batch_size: 96 + eval_batch_size: 96 + warmup_factor: 0.2 + lr_gamma: 0.1 + lr_initial: 0.0001 + max_epochs: 15 + warmup_steps: 30000 + lr_milestones: + - 55000 + - 75000 + - 10000 + + 200k: {} + + # 1 gpus + 2M: + model: + num_interactions: 5 + hidden_channels: 1024 + num_gaussians: 200 + num_filters: 256 + optim: + batch_size: 192 + eval_batch_size: 192 + + 20M: {} + + all: {} + +qm9: + default: + model: + act: swish + att_heads: 1 + complex_mp: true + cutoff: 6.0 + edge_embed_type: all_rij + energy_head: '' + graph_norm: true + graph_rewiring: null + hidden_channels: 400 + max_num_neighbors: 30 + mp_type: updownscale_base + num_filters: 480 + num_gaussians: 100 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: false + phys_hidden_channels: 0 + regress_forces: '' + second_layer_MLP: true + skip_co: true + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 64 + es_min_abs_change: 1.0e-06 + es_patience: 20 + es_warmup_epochs: 600 + eval_batch_size: 64 + factor: 0.9 + lr_initial: 0.0003 + loss_energy: mse + lr_gamma: 0.1 + lr_initial: 0.001 + max_epochs: 1500 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + patience: 15 + scheduler: ReduceLROnPlateau + threshold: 0.0001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + 10k: {} + all: {} + +qm7x: + default: + model: # SOTA settings + act: swish + att_heads: 1 + complex_mp: true + cutoff: 5.0 + edge_embed_type: all_rij + energy_head: false + force_decoder_model_config: + mlp: + hidden_channels: 256 + norm: batch1d + res: + hidden_channels: 128 + norm: batch1d + res_updown: + hidden_channels: 128 + norm: layer + simple: + hidden_channels: 128 + norm: batch1d + force_decoder_type: res_updown + graph_norm: false + hidden_channels: 500 + max_num_neighbors: 40 + mp_type: updownscale_base + num_filters: 400 + num_gaussians: 50 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: true + phys_hidden_channels: 0 + regress_forces: direct_with_gradient_target + second_layer_MLP: true + skip_co: false + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 100 + energy_grad_coefficient: 5 + eval_batch_size: 100 + eval_every: 0.34 + factor: 0.75 + force_coefficient: 75 + 
loss_energy: mae + loss_force: mse + lr_gamma: 0.1 + lr_initial: 0.000193 + max_steps: 4000000 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + scheduler: ReduceLROnPlateau + threshold: 0.001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + all: {} + 1k: {} + +qm9: + default: + model: + use_pbc: False + all: {} + 10k: {} diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index dfa48f51d4..78fc86ec87 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1248,6 +1248,9 @@ def get_pbc_distances( return_offsets=False, return_distance_vec=False, ): + import ipdb + ipdb.set_trace() + row, col = edge_index distance_vectors = pos[row] - pos[col] diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py new file mode 100644 index 0000000000..c010900696 --- /dev/null +++ b/ocpmodels/models/disconnected.py @@ -0,0 +1,92 @@ +import torch +from torch.nn import Linear +from torch_scatter import scatter + +from ocpmodels.models.faenet import FAENet as conFAENet +from ocpmodels.models.faenet import OutputBlock as conOutputBlock +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import conditional_grad + +from torch_geometric.data import Batch + +def graph_splitter(graph): + tags = graph.tags + edge_index = graph.edge_index + pos = graph.pos + atomic_numbers = graph.atomic_numbers + batch = graph.batch + cell = graph.cell + cell_offsets = graph.cell_offsets + + adsorbate_v_mask = (tags == 2) + catalyst_v_mask = (tags == 1) + (tags == 0) + + adsorbate_e_mask = (tags[edge_index][0] == 2) * (tags[edge_index][1] == 2) + catalyst_e_mask = ( + ((tags[edge_index][0] == 1) + (tags[edge_index][0] == 0)) + * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) + ) + + ads_neighbors = scatter(adsorbate_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") + cat_neighbors = graph.neighbors - ads_neighbors + + adsorbate = Batch( + edge_index = edge_index[:, adsorbate_e_mask], + pos = pos[adsorbate_v_mask, :], + atomic_numbers = atomic_numbers[adsorbate_v_mask], + batch = batch[adsorbate_v_mask], + cell = cell, + cell_offsets = cell_offsets[adsorbate_e_mask, :], + tags = tags[adsorbate_v_mask], + neighbors = ads_neighbors, + mode="adsorbate" + ) + catalyst = Batch( + edge_index = edge_index[:, catalyst_e_mask], + pos = pos[catalyst_v_mask, :], + atomic_numbers = atomic_numbers[catalyst_v_mask], + batch = batch[adsorbate_v_mask], + cell = cell, + cell_offsets = cell_offsets[adsorbate_e_mask, :], + tags = tags[catalyst_v_mask], + neighbors = cat_neighbors, + mode="catalyst" + ) + + return adsorbate, catalyst + +class discOutputBlock(conOutputBlock): + def __init__(self, energy_head, hidden_channels, act): + super(discOutputBlock, self).__init__( + energy_head, hidden_channels, act + ) + + assert self.energy_head == "weighted-av-final-embeds" + del self.lin2 + + self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) + +@registry.register_model("disconnected") +class discFAENet(conFAENet): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + assert self.energy_head == "weighted-av-final-embeds" + del self.output_block + + hidden_channels = kwargs["hidden_channels"] + + self.output_block = discOutputBlock( + self.energy_head, hidden_channels, self.act + ) + + self.lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) + self.lin2 = Linear(hidden_channels // 2, 1) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + adsorbate, catalyst = 
graph_splitter(data) + + test = super().energy_forward(data) + ads_pred = super().energy_forward(adsorbate) + cat_pred = super().energy_forward(catalyst) diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index 17fd05754d..c71190954a 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -600,6 +600,9 @@ def forces_forward(self, preds): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): # Rewire the graph + import ipdb + ipdb.set_trace() + z = data.atomic_numbers.long() pos = data.pos batch = data.batch From 540a4857122261427fb11d21c3c8137b5172b392 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 3 Jul 2023 17:48:19 -0400 Subject: [PATCH 018/131] Implemented: getting separate predictions for ads, cats. Their outputs get concatenated, and then passed through a 2-layer MLP to return a single value. Testing still needed. --- ocpmodels/common/utils.py | 3 --- ocpmodels/models/disconnected.py | 38 ++++++++++++++++++++++++++------ ocpmodels/models/faenet.py | 2 -- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 78fc86ec87..dfa48f51d4 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1248,9 +1248,6 @@ def get_pbc_distances( return_offsets=False, return_distance_vec=False, ): - import ipdb - ipdb.set_trace() - row, col = edge_index distance_vectors = pos[row] - pos[col] diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py index c010900696..77e0afe01c 100644 --- a/ocpmodels/models/disconnected.py +++ b/ocpmodels/models/disconnected.py @@ -18,6 +18,7 @@ def graph_splitter(graph): cell = graph.cell cell_offsets = graph.cell_offsets + # Make masks to filter most data we need adsorbate_v_mask = (tags == 2) catalyst_v_mask = (tags == 1) + (tags == 0) @@ -27,11 +28,26 @@ def graph_splitter(graph): * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) ) + # Recalculate neighbors ads_neighbors = scatter(adsorbate_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") - cat_neighbors = graph.neighbors - ads_neighbors + cat_neighbors = scatter(catalyst_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") + # Reindex the edge indices. 
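+    # (After masking, edge_index still refers to positions in the full graph; the
+    #  assoc tensors below map each kept atom's old index to its new row, so
+    #  indexing them with the filtered edge_index rewrites both ends of every
+    #  surviving edge in the subgraph's own numbering.)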
+ device = graph.edge_index.device + natoms = graph.natoms.sum().item() + + ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) + cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) + + ads_assoc[adsorbate_v_mask] = torch.arange(adsorbate_v_mask.sum(), device = device) + cat_assoc[catalyst_v_mask] = torch.arange(catalyst_v_mask.sum(), device = device) + + ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] + cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] + + # Create the batches adsorbate = Batch( - edge_index = edge_index[:, adsorbate_e_mask], + edge_index = ads_edge_index, pos = pos[adsorbate_v_mask, :], atomic_numbers = atomic_numbers[adsorbate_v_mask], batch = batch[adsorbate_v_mask], @@ -42,12 +58,12 @@ def graph_splitter(graph): mode="adsorbate" ) catalyst = Batch( - edge_index = edge_index[:, catalyst_e_mask], + edge_index = cat_edge_index, pos = pos[catalyst_v_mask, :], atomic_numbers = atomic_numbers[catalyst_v_mask], - batch = batch[adsorbate_v_mask], + batch = batch[catalyst_v_mask], cell = cell, - cell_offsets = cell_offsets[adsorbate_e_mask, :], + cell_offsets = cell_offsets[catalyst_e_mask, :], tags = tags[catalyst_v_mask], neighbors = cat_neighbors, mode="catalyst" @@ -86,7 +102,15 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): adsorbate, catalyst = graph_splitter(data) - - test = super().energy_forward(data) + ads_pred = super().energy_forward(adsorbate) cat_pred = super().energy_forward(catalyst) + + ads_energy = ads_pred["energy"] + cat_energy = cat_pred["energy"] + + system_energy = torch.cat((ads_energy, cat_energy), dim = 1) + system_energy = self.lin1(system_energy) + system_energy = self.lin2(system_energy) + + return system_energy diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index c71190954a..c54f9d7422 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -600,8 +600,6 @@ def forces_forward(self, preds): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): # Rewire the graph - import ipdb - ipdb.set_trace() z = data.atomic_numbers.long() pos = data.pos From 59928cf49b6b870e64d35f80aea2ff039261d5c6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 5 Jul 2023 10:26:49 -0400 Subject: [PATCH 019/131] Middle point before implementing a needed modification to make skip_co=concat feature work. --- configs/exps/alvaro/split-ads-cats.yaml | 10 +++------- ocpmodels/models/disconnected.py | 4 +++- ocpmodels/models/faenet.py | 3 +++ 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 95147bf30c..740886fd60 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -15,13 +15,12 @@ default: graph_rewiring: remove-tag-0 model: edge_embed_type: all_rij - wandb_tags: 'best-config' optim: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true - config: faenet-is2re-all - note: 'best-config-all-lowest-energy' # Insert what model you're running if running one by one. + config: disconnected-is2re-10k + wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. 
frame_averaging: 2D fa_frames: se3-random model: @@ -45,7 +44,4 @@ default: eval_every: 0.4 runs: - - is_disconnected: True - lowest_energy_only: True - - - lowest_energy_only: True + - wandb_tags: 'best-config-disc_model-10k' diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py index 77e0afe01c..488ec76fea 100644 --- a/ocpmodels/models/disconnected.py +++ b/ocpmodels/models/disconnected.py @@ -113,4 +113,6 @@ def energy_forward(self, data): system_energy = self.lin1(system_energy) system_energy = self.lin2(system_energy) - return system_energy + ads_pred["energy"] = system_energy + + return ads_pred diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index c54f9d7422..d1717774c9 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -663,6 +663,7 @@ def energy_forward(self, data): h = h + interaction(h, edge_index, e) # Atom skip-co + if self.skip_co == "concat_atom": energy_skip_co.append(h) h = self.act(self.mlp_skip_co(torch.cat(energy_skip_co, dim=1))) @@ -671,6 +672,8 @@ def energy_forward(self, data): # Skip-connection energy_skip_co.append(energy) + import ipdb + ipdb.set_trace() if self.skip_co == "concat": energy = self.mlp_skip_co(torch.cat(energy_skip_co, dim=1)) elif self.skip_co == "add": From 1e6346fee2c603ccd6d71bd571f5e70d6caa9d62 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 5 Jul 2023 12:02:20 -0400 Subject: [PATCH 020/131] Skip_co correctly implemented --- configs/exps/alvaro/split-ads-cats.yaml | 6 ++++-- ocpmodels/models/disconnected.py | 8 ++++++++ ocpmodels/models/faenet.py | 2 -- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 740886fd60..b7a663541a 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -19,7 +19,7 @@ default: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true - config: disconnected-is2re-10k + config: disconnected-is2re-all wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. frame_averaging: 2D fa_frames: se3-random @@ -44,4 +44,6 @@ default: eval_every: 0.4 runs: - - wandb_tags: 'best-config-disc_model-10k' + - notes: 'best-config-discmodel-all' + + diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/disconnected.py index 488ec76fea..f354e12313 100644 --- a/ocpmodels/models/disconnected.py +++ b/ocpmodels/models/disconnected.py @@ -99,6 +99,14 @@ def __init__(self, **kwargs): self.lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) self.lin2 = Linear(hidden_channels // 2, 1) + if self.skip_co in {"concat", "concat_atom"}: + assert self.skip_co == "concat" # We can implement the other one later. 
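+            # Each output_block call now returns a hidden_channels // 2 vector per
+            # graph instead of a scalar energy, so the concat skip layer takes
+            # (num_interactions + 1) * hidden_channels // 2 inputs and keeps the
+            # hidden_channels // 2 width expected by the lin1 / lin2 combination
+            # in energy_forward.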
+ del self.mlp_skip_co + self.mlp_skip_co = Linear( + (kwargs["hidden_channels"] // 2) * (kwargs["num_interactions"] + 1), + kwargs["hidden_channels"] // 2 + ) + @conditional_grad(torch.enable_grad()) def energy_forward(self, data): adsorbate, catalyst = graph_splitter(data) diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index d1717774c9..061cd5faec 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -672,8 +672,6 @@ def energy_forward(self, data): # Skip-connection energy_skip_co.append(energy) - import ipdb - ipdb.set_trace() if self.skip_co == "concat": energy = self.mlp_skip_co(torch.cat(energy_skip_co, dim=1)) elif self.skip_co == "add": From 49cb00cae25c16a127f8c4df6b0f27c48f274684 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 5 Jul 2023 16:07:00 -0400 Subject: [PATCH 021/131] Set it so using the disconnected model turns on the is_disconnected flag on --- ocpmodels/trainers/base_trainer.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index f5ee07970e..3034945b7a 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -158,6 +158,11 @@ def __init__(self, **kwargs): model_regresses_forces=self.config["model"].get("regress_forces", ""), ) + if self.config["model_name"] == "disconnected": + if not self.config["is_disconnected"]: + print("\n\nWhen using the disconnected model, the flag 'is_disconnected' should be used! The flag has been turned on.") + self.config["is_disconnected"] = True + def load(self): self.load_seed_from_config() self.load_logger() From c9d3c844981a223c1b2eb470add1171a15828f6c Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 7 Jul 2023 15:47:50 -0400 Subject: [PATCH 022/131] Changed the name of disconnected model to dependent as to make psace for other types of disconnected models --- configs/exps/alvaro/split-ads-cats.yaml | 6 +- .../{disconnected.yaml => dependent.yaml} | 2 +- ocpmodels/datasets/independent_dataset.py | 4 + ocpmodels/datasets/lmdb_dataset.py | 1 - .../models/{disconnected.py => dependent.py} | 81 +++++++++++-------- 5 files changed, 56 insertions(+), 38 deletions(-) rename configs/models/{disconnected.yaml => dependent.yaml} (99%) create mode 100644 ocpmodels/datasets/independent_dataset.py rename ocpmodels/models/{disconnected.py => dependent.py} (65%) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index b7a663541a..9381674254 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -19,7 +19,7 @@ default: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true - config: disconnected-is2re-all + config: faenet-is2re-all wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. 
frame_averaging: 2D fa_frames: se3-random @@ -44,6 +44,8 @@ default: eval_every: 0.4 runs: - - notes: 'best-config-discmodel-all' + - Notes: 'best-config-discmodel-all' + config: disconnected-is2re-all + is_disconnected: True diff --git a/configs/models/disconnected.yaml b/configs/models/dependent.yaml similarity index 99% rename from configs/models/disconnected.yaml rename to configs/models/dependent.yaml index 2b1319d717..10fa4600db 100644 --- a/configs/models/disconnected.yaml +++ b/configs/models/dependent.yaml @@ -1,6 +1,6 @@ default: model: - name: disconnected + name: dependent act: swish hidden_channels: 128 num_filters: 100 diff --git a/ocpmodels/datasets/independent_dataset.py b/ocpmodels/datasets/independent_dataset.py new file mode 100644 index 0000000000..402986800e --- /dev/null +++ b/ocpmodels/datasets/independent_dataset.py @@ -0,0 +1,4 @@ +from ocpmodels.datasets.lmdb_dataset import LmdbDataset + +class IndependentDataset(LmdbDataset): + pass diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 2eaef01200..d19d267689 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -156,7 +156,6 @@ def __init__(self, config, transform=None): stacklevel=3, ) - def data_list_collater(data_list, otf_graph=False): batch = Batch.from_data_list(data_list) diff --git a/ocpmodels/models/disconnected.py b/ocpmodels/models/dependent.py similarity index 65% rename from ocpmodels/models/disconnected.py rename to ocpmodels/models/dependent.py index f354e12313..a1b16340db 100644 --- a/ocpmodels/models/disconnected.py +++ b/ocpmodels/models/dependent.py @@ -2,13 +2,14 @@ from torch.nn import Linear from torch_scatter import scatter -from ocpmodels.models.faenet import FAENet as conFAENet +from ocpmodels.models.faenet import FAENet from ocpmodels.models.faenet import OutputBlock as conOutputBlock from ocpmodels.common.registry import registry from ocpmodels.common.utils import conditional_grad from torch_geometric.data import Batch +# Graph splitter should become a transform once model 3 is done. def graph_splitter(graph): tags = graph.tags edge_index = graph.edge_index @@ -77,50 +78,62 @@ def __init__(self, energy_head, hidden_channels, act): energy_head, hidden_channels, act ) - assert self.energy_head == "weighted-av-final-embeds" del self.lin2 - self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) -@registry.register_model("disconnected") -class discFAENet(conFAENet): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - assert self.energy_head == "weighted-av-final-embeds" - del self.output_block + self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) + self.sys_lin2 = Linear(hidden_channels // 2, 1) - hidden_channels = kwargs["hidden_channels"] + def tags_saver(self, tags): + self.current_tags = tags - self.output_block = discOutputBlock( - self.energy_head, hidden_channels, self.act - ) + def forward(self, h, edge_index, edge_weight, batch, alpha): + if self.energy_head == "weighted-av-final-embeds": # Right now, this is the only available option. + alpha = self.w_lin(h) - self.lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) - self.lin2 = Linear(hidden_channels // 2, 1) + elif self.energy_head == "graclus": + h, batch = self.graclus(h, edge_index, edge_weight, batch) - if self.skip_co in {"concat", "concat_atom"}: - assert self.skip_co == "concat" # We can implement the other one later. 
- del self.mlp_skip_co - self.mlp_skip_co = Linear( - (kwargs["hidden_channels"] // 2) * (kwargs["num_interactions"] + 1), - kwargs["hidden_channels"] // 2 + elif self.energy_head in {"pooling", "random"}: + h, batch, pooling_loss = self.hierarchical_pooling( + h, edge_index, edge_weight, batch ) - @conditional_grad(torch.enable_grad()) - def energy_forward(self, data): - adsorbate, catalyst = graph_splitter(data) + # MLP + h = self.lin1(h) + h = self.lin2(self.act(h)) + + if self.energy_head in { + "weighted-av-initial-embeds", + "weighted-av-final-embeds", + }: + h = h * alpha + + ads = self.current_tags == 2 + cat = ~ads + + ads_out = scatter(h, batch * ads, dim = 0, reduce = "add") + cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") + system = torch.cat([ads_out, cat_out], dim = 1) + + system = self.sys_lin1(system) + energy = self.sys_lin2(system) - ads_pred = super().energy_forward(adsorbate) - cat_pred = super().energy_forward(catalyst) + return energy - ads_energy = ads_pred["energy"] - cat_energy = cat_pred["energy"] +@registry.register_model("dependent") +class depFAENet(FAENet): + def __init__(self, **kwargs): + super().__init__(**kwargs) - system_energy = torch.cat((ads_energy, cat_energy), dim = 1) - system_energy = self.lin1(system_energy) - system_energy = self.lin2(system_energy) + del self.output_block + self.output_block = discOutputBlock( + self.energy_head, kwargs["hidden_channels"], self.act + ) - ads_pred["energy"] = system_energy + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + self.output_block.tags_saver(data.tags) + pred = super().energy_forward(data) - return ads_pred + return pred From 34e232c277c02d457f46485c477a91de06080f3b Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 7 Jul 2023 17:15:57 -0400 Subject: [PATCH 023/131] Created a new dataset file where graphs will be split before transforms are applied. 
--- ocpmodels/datasets/independent_dataset.py | 4 - ocpmodels/datasets/lmdb_dataset.py | 4 + ocpmodels/datasets/separate_dataset.py | 118 ++++++++++++++++++++++ ocpmodels/models/dependent.py | 63 ------------ ocpmodels/trainers/base_trainer.py | 14 ++- 5 files changed, 133 insertions(+), 70 deletions(-) delete mode 100644 ocpmodels/datasets/independent_dataset.py create mode 100644 ocpmodels/datasets/separate_dataset.py diff --git a/ocpmodels/datasets/independent_dataset.py b/ocpmodels/datasets/independent_dataset.py deleted file mode 100644 index 402986800e..0000000000 --- a/ocpmodels/datasets/independent_dataset.py +++ /dev/null @@ -1,4 +0,0 @@ -from ocpmodels.datasets.lmdb_dataset import LmdbDataset - -class IndependentDataset(LmdbDataset): - pass diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index d19d267689..7ff09b94db 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -156,7 +156,11 @@ def __init__(self, config, transform=None): stacklevel=3, ) + def data_list_collater(data_list, otf_graph=False): + import ipdb + ipdb.set_trace() + batch = Batch.from_data_list(data_list) if ( diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py new file mode 100644 index 0000000000..089a96ad1c --- /dev/null +++ b/ocpmodels/datasets/separate_dataset.py @@ -0,0 +1,118 @@ +import bisect +import logging +import pickle +import time +from pathlib import Path + +import lmdb +import numpy as np +import torch + +from ocpmodels.datasets.lmdb_dataset import LmdbDataset +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import pyg2_data_transform + +def graph_splitter(graph): + tags = graph.tags + edge_index = graph.edge_index + pos = graph.pos + atomic_numbers = graph.atomic_numbers + batch = graph.batch + cell = graph.cell + cell_offsets = graph.cell_offsets + + # Make masks to filter most data we need + adsorbate_v_mask = (tags == 2) + catalyst_v_mask = (tags == 1) + (tags == 0) + + adsorbate_e_mask = (tags[edge_index][0] == 2) * (tags[edge_index][1] == 2) + catalyst_e_mask = ( + ((tags[edge_index][0] == 1) + (tags[edge_index][0] == 0)) + * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) + ) + + # Recalculate neighbors + ads_neighbors = scatter(adsorbate_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") + cat_neighbors = scatter(catalyst_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") + + # Reindex the edge indices. 
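+    # (same old-index -> new-index remapping as in the model-side graph_splitter above)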
+ device = graph.edge_index.device + natoms = graph.natoms.sum().item() + + ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) + cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) + + ads_assoc[adsorbate_v_mask] = torch.arange(adsorbate_v_mask.sum(), device = device) + cat_assoc[catalyst_v_mask] = torch.arange(catalyst_v_mask.sum(), device = device) + + ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] + cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] + + # Create the batches + adsorbate = Batch( + edge_index = ads_edge_index, + pos = pos[adsorbate_v_mask, :], + atomic_numbers = atomic_numbers[adsorbate_v_mask], + batch = batch[adsorbate_v_mask], + cell = cell, + cell_offsets = cell_offsets[adsorbate_e_mask, :], + tags = tags[adsorbate_v_mask], + neighbors = ads_neighbors, + mode="adsorbate" + ) + catalyst = Batch( + edge_index = cat_edge_index, + pos = pos[catalyst_v_mask, :], + atomic_numbers = atomic_numbers[catalyst_v_mask], + batch = batch[catalyst_v_mask], + cell = cell, + cell_offsets = cell_offsets[catalyst_e_mask, :], + tags = tags[catalyst_v_mask], + neighbors = cat_neighbors, + mode="catalyst" + ) + + return adsorbate, catalyst + +@registry.register_dataset("separate") +class SeparateLmdbDataset(LmdbDataset): + def __getitem__(self, idx): + t0 = time.time_ns() + if not self.path.is_file(): + # Figure out which db this should be indexed from. + db_idx = bisect.bisect(self._keylen_cumulative, idx) + # Extract index of element within that db. + el_idx = idx + if db_idx != 0: + el_idx = idx - self._keylen_cumulative[db_idx - 1] + assert el_idx >= 0 + + # Return features. + datapoint_pickled = ( + self.envs[db_idx] + .begin() + .get(f"{self._keys[db_idx][el_idx]}".encode("ascii")) + ) + data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) + data_object.id = f"{db_idx}_{el_idx}" + else: + datapoint_pickled = self.env.begin().get(self._keys[idx]) + data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) + + import ipdb + ipdb.set_trace() + + t1 = time.time_ns() + if self.transform is not None: + data_object = self.transform(data_object) + t2 = time.time_ns() + + load_time = (t1 - t0) * 1e-9 # time in s + transform_time = (t2 - t1) * 1e-9 # time in s + total_get_time = (t2 - t0) * 1e-9 # time in s + + data_object.load_time = load_time + data_object.transform_time = transform_time + data_object.total_get_time = total_get_time + + return data_object diff --git a/ocpmodels/models/dependent.py b/ocpmodels/models/dependent.py index a1b16340db..e36ae0d18e 100644 --- a/ocpmodels/models/dependent.py +++ b/ocpmodels/models/dependent.py @@ -9,69 +9,6 @@ from torch_geometric.data import Batch -# Graph splitter should become a transform once model 3 is done. 
-def graph_splitter(graph): - tags = graph.tags - edge_index = graph.edge_index - pos = graph.pos - atomic_numbers = graph.atomic_numbers - batch = graph.batch - cell = graph.cell - cell_offsets = graph.cell_offsets - - # Make masks to filter most data we need - adsorbate_v_mask = (tags == 2) - catalyst_v_mask = (tags == 1) + (tags == 0) - - adsorbate_e_mask = (tags[edge_index][0] == 2) * (tags[edge_index][1] == 2) - catalyst_e_mask = ( - ((tags[edge_index][0] == 1) + (tags[edge_index][0] == 0)) - * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) - ) - - # Recalculate neighbors - ads_neighbors = scatter(adsorbate_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") - cat_neighbors = scatter(catalyst_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") - - # Reindex the edge indices. - device = graph.edge_index.device - natoms = graph.natoms.sum().item() - - ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) - cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) - - ads_assoc[adsorbate_v_mask] = torch.arange(adsorbate_v_mask.sum(), device = device) - cat_assoc[catalyst_v_mask] = torch.arange(catalyst_v_mask.sum(), device = device) - - ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] - cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] - - # Create the batches - adsorbate = Batch( - edge_index = ads_edge_index, - pos = pos[adsorbate_v_mask, :], - atomic_numbers = atomic_numbers[adsorbate_v_mask], - batch = batch[adsorbate_v_mask], - cell = cell, - cell_offsets = cell_offsets[adsorbate_e_mask, :], - tags = tags[adsorbate_v_mask], - neighbors = ads_neighbors, - mode="adsorbate" - ) - catalyst = Batch( - edge_index = cat_edge_index, - pos = pos[catalyst_v_mask, :], - atomic_numbers = atomic_numbers[catalyst_v_mask], - batch = batch[catalyst_v_mask], - cell = cell, - cell_offsets = cell_offsets[catalyst_e_mask, :], - tags = tags[catalyst_v_mask], - neighbors = cat_neighbors, - mode="catalyst" - ) - - return adsorbate, catalyst - class discOutputBlock(conOutputBlock): def __init__(self, energy_head, hidden_channels, act): super(discOutputBlock, self).__init__( diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 3034945b7a..cc71810723 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -245,9 +245,17 @@ def load_datasets(self): if split == "default_val": continue - self.datasets[split] = registry.get_dataset_class( - self.config["task"]["dataset"] - )(ds_conf, transform=transform) + import ipdb + ipdb.set_trace() + + if self.config["model_name"] in ["dependent", "independent"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. + self.datasets[split] = registry.get_dataset_class( + "separate" + )(ds_conf, transform=transform) + else: + self.datasets[split] = registry.get_dataset_class( + self.config["task"]["dataset"] + )(ds_conf, transform=transform) if self.config["lowest_energy_only"]: with open('/network/scratch/a/alvaro.carbonero/lowest_energy.pkl', 'rb') as fp: From a997c04b5a0adb2d229aa7bbfd5dc5660f650dc0 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 7 Jul 2023 17:58:39 -0400 Subject: [PATCH 024/131] Implemented graph splitter. 
Currently debugging modification to data_list_collater --- ocpmodels/datasets/lmdb_dataset.py | 5 ++- ocpmodels/datasets/separate_dataset.py | 61 ++++++++++++++++---------- ocpmodels/trainers/base_trainer.py | 3 -- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 7ff09b94db..4fbfff9eba 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -158,8 +158,9 @@ def __init__(self, config, transform=None): def data_list_collater(data_list, otf_graph=False): - import ipdb - ipdb.set_trace() + if data_list[0] is tuple: + graphs = [system[0] for system in data_list] + [system[1] for system in data_list] + batch = Batch.from_data_list(graphs) batch = Batch.from_data_list(data_list) diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py index 089a96ad1c..a65af98468 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/separate_dataset.py @@ -7,19 +7,27 @@ import lmdb import numpy as np import torch +from torch_geometric.data import Data from ocpmodels.datasets.lmdb_dataset import LmdbDataset from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform def graph_splitter(graph): - tags = graph.tags edge_index = graph.edge_index pos = graph.pos - atomic_numbers = graph.atomic_numbers - batch = graph.batch cell = graph.cell + atomic_numbers = graph.atomic_numbers + natoms = graph.natoms cell_offsets = graph.cell_offsets + force = graph.force + distances = graph.distances + fixed = graph.fixed + tags = graph.tags + y_init = graph.y_init + y_relaxed = graph.y_relaxed + pos_relaxed = graph.pos_relaxed + id = graph.id # Make masks to filter most data we need adsorbate_v_mask = (tags == 2) @@ -31,13 +39,8 @@ def graph_splitter(graph): * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) ) - # Recalculate neighbors - ads_neighbors = scatter(adsorbate_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") - cat_neighbors = scatter(catalyst_e_mask.long(), batch[edge_index[0]], dim = 0, reduce = "add") - # Reindex the edge indices. 
device = graph.edge_index.device - natoms = graph.natoms.sum().item() ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) @@ -49,26 +52,34 @@ def graph_splitter(graph): cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] # Create the batches - adsorbate = Batch( + adsorbate = Data( edge_index = ads_edge_index, pos = pos[adsorbate_v_mask, :], - atomic_numbers = atomic_numbers[adsorbate_v_mask], - batch = batch[adsorbate_v_mask], cell = cell, + atomic_numbers = atomic_numbers[adsorbate_v_mask], + natoms = adsorbate_v_mask.sum().item(), cell_offsets = cell_offsets[adsorbate_e_mask, :], + force = force[adsorbate_v_mask, :], tags = tags[adsorbate_v_mask], - neighbors = ads_neighbors, + y_init = y_init, + y_relaxed = y_relaxed, + pos_relaxed = pos_relaxed[adsorbate_v_mask, :], + id = id, mode="adsorbate" ) - catalyst = Batch( + catalyst = Data( edge_index = cat_edge_index, pos = pos[catalyst_v_mask, :], - atomic_numbers = atomic_numbers[catalyst_v_mask], - batch = batch[catalyst_v_mask], cell = cell, + atomic_numbers = atomic_numbers[catalyst_v_mask], + natoms = catalyst_v_mask.sum().item(), cell_offsets = cell_offsets[catalyst_e_mask, :], + force = force[catalyst_v_mask, :], tags = tags[catalyst_v_mask], - neighbors = cat_neighbors, + y_init = y_init, + y_relaxed = y_relaxed, + pos_relaxed = pos_relaxed[catalyst_v_mask, :], + id = id, mode="catalyst" ) @@ -99,20 +110,24 @@ def __getitem__(self, idx): datapoint_pickled = self.env.begin().get(self._keys[idx]) data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) - import ipdb - ipdb.set_trace() + adsorbate, catalyst = graph_splitter(data_object) t1 = time.time_ns() if self.transform is not None: - data_object = self.transform(data_object) + adsorbate = self.transform(adsorbate) + catalyst = self.transform(catalyst) t2 = time.time_ns() load_time = (t1 - t0) * 1e-9 # time in s transform_time = (t2 - t1) * 1e-9 # time in s total_get_time = (t2 - t0) * 1e-9 # time in s - data_object.load_time = load_time - data_object.transform_time = transform_time - data_object.total_get_time = total_get_time + adsorbate.load_time = load_time + adsorbate.transform_time = transform_time + adsorbate.total_get_time = total_get_time + + catalyst.load_time = load_time + catalyst.transform_time = transform_time + catalyst.total_get_time = total_get_time - return data_object + return (adsorbate, catalyst) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index cc71810723..a40ffcd995 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -245,9 +245,6 @@ def load_datasets(self): if split == "default_val": continue - import ipdb - ipdb.set_trace() - if self.config["model_name"] in ["dependent", "independent"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. self.datasets[split] = registry.get_dataset_class( "separate" From 39fe297dac815e709aa3a609a73dd0d626c40f51 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 10 Jul 2023 09:21:31 -0400 Subject: [PATCH 025/131] Now that the new dataset is implemented, we correct so the dependent model doesn't use it, and we start writing the independent model. 
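
The collater change below assumes a fixed ordering: when the dataset yields (adsorbate, catalyst) tuples, all adsorbate graphs are collated first and all catalyst graphs second, so a downstream model can recover the two halves by splitting the collated list in the middle. A minimal sketch of that convention, using made-up toy graphs rather than real OC20 data:

    import torch
    from torch_geometric.data import Batch, Data

    def toy_graph(n_atoms):
        # hypothetical stand-in for a real Data object: n_atoms isolated atoms, no edges
        return Data(pos=torch.rand(n_atoms, 3), edge_index=torch.empty(2, 0, dtype=torch.long))

    data_list = [(toy_graph(2), toy_graph(30)), (toy_graph(3), toy_graph(28))]  # two (ads, cat) pairs
    graphs = [system[0] for system in data_list] + [system[1] for system in data_list]
    batch = Batch.from_data_list(graphs)
    # graphs[:len(data_list)] are the adsorbates and graphs[len(data_list):] the catalysts,
    # which is the split the independent model is expected to undo at forward time.
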
--- ocpmodels/datasets/lmdb_dataset.py | 6 +++--- ocpmodels/models/independent.py | 1 + ocpmodels/trainers/base_trainer.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 ocpmodels/models/independent.py diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 4fbfff9eba..7aa97dca2f 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -158,11 +158,11 @@ def __init__(self, config, transform=None): def data_list_collater(data_list, otf_graph=False): - if data_list[0] is tuple: + if type(data_list[0]) is tuple: graphs = [system[0] for system in data_list] + [system[1] for system in data_list] batch = Batch.from_data_list(graphs) - - batch = Batch.from_data_list(data_list) + else: + batch = Batch.from_data_list(data_list) if ( not otf_graph diff --git a/ocpmodels/models/independent.py b/ocpmodels/models/independent.py new file mode 100644 index 0000000000..28af769e1d --- /dev/null +++ b/ocpmodels/models/independent.py @@ -0,0 +1 @@ +from ocpmodels.common.registry import registry diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index a40ffcd995..e57455e7a0 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -245,7 +245,7 @@ def load_datasets(self): if split == "default_val": continue - if self.config["model_name"] in ["dependent", "independent"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. + if self.config["model_name"] in ["independent"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. self.datasets[split] = registry.get_dataset_class( "separate" )(ds_conf, transform=transform) From f2f89ef952a7f9ee3b8de57116f64d517aea38fc Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 11 Jul 2023 18:06:00 -0400 Subject: [PATCH 026/131] Renamed some models, started debugging a simplified version of model 3 --- configs/exps/alvaro/split-ads-cats.yaml | 9 +- .../models/{dependent.yaml => depfaenet.yaml} | 2 +- configs/models/indfaenet.yaml | 271 ++++++++++++++++++ debug.py | 223 ++++++++++++++ main.py | 2 +- ocpmodels/datasets/lmdb_dataset.py | 9 +- ocpmodels/datasets/separate_dataset.py | 4 +- .../models/{dependent.py => depfaenet.py} | 10 +- ocpmodels/models/faenet.py | 2 +- ocpmodels/models/independent.py | 1 - ocpmodels/models/indfaenet.py | 62 ++++ ocpmodels/trainers/base_trainer.py | 2 +- 12 files changed, 579 insertions(+), 18 deletions(-) rename configs/models/{dependent.yaml => depfaenet.yaml} (99%) create mode 100644 configs/models/indfaenet.yaml create mode 100644 debug.py rename ocpmodels/models/{dependent.py => depfaenet.py} (83%) delete mode 100644 ocpmodels/models/independent.py create mode 100644 ocpmodels/models/indfaenet.py diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 9381674254..6b1ee75204 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -19,13 +19,12 @@ default: batch_size: 256 eval_batch_size: 256 cp_data_to_tmpdir: true - config: faenet-is2re-all wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. frame_averaging: 2D fa_frames: se3-random model: mp_type: updownscale - phys_embeds: False + phys_embeds: True # I CHANGED THIS! 
tag_hidden_channels: 32 pg_hidden_channels: 64 energy_head: weighted-av-final-embeds @@ -44,8 +43,8 @@ default: eval_every: 0.4 runs: - - Notes: 'best-config-discmodel-all' - config: disconnected-is2re-all - is_disconnected: True + - config: depfaenet-is2re-10k + + - config: depfaenet-is2re-all diff --git a/configs/models/dependent.yaml b/configs/models/depfaenet.yaml similarity index 99% rename from configs/models/dependent.yaml rename to configs/models/depfaenet.yaml index 10fa4600db..852ebc3bfd 100644 --- a/configs/models/dependent.yaml +++ b/configs/models/depfaenet.yaml @@ -1,6 +1,6 @@ default: model: - name: dependent + name: depfaenet act: swish hidden_channels: 128 num_filters: 100 diff --git a/configs/models/indfaenet.yaml b/configs/models/indfaenet.yaml new file mode 100644 index 0000000000..acfb22166f --- /dev/null +++ b/configs/models/indfaenet.yaml @@ -0,0 +1,271 @@ +default: + model: + name: indfaenet + act: swish + hidden_channels: 128 + num_filters: 100 + num_interactions: 3 + num_gaussians: 100 + cutoff: 6.0 + use_pbc: True + regress_forces: False + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + # faenet new features + skip_co: False # output skip connections {False, "add", "concat"} + second_layer_MLP: False # in EmbeddingBlock + complex_mp: False + edge_embed_type: rij # {'rij','all_rij','sh', 'all'}) + mp_type: base # {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env'} + graph_norm: False # bool + att_heads: 1 # int + force_decoder_type: "mlp" # can be {"" or "simple"} | only used if regress_forces is True + force_decoder_model_config: + simple: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + mlp: + hidden_channels: 256 + norm: batch1d # batch1d, layer or null + res: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + res_updown: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + lr_initial: 0.001 + warmup_factor: 0.2 + max_epochs: 20 + energy_grad_coefficient: 10 + force_coefficient: 30 + energy_coefficient: 1 + + frame_averaging: False # 2D, 3D, da, False + fa_frames: False # can be {None, full, random, det, e3, e3-random, e3-det} + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ 10k: + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + 100k: + model: + hidden_channels: 256 + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 18000 + - 27000 + - 37000 + warmup_steps: 6000 + max_epochs: 20 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +# For 2 GPUs + +s2ef: + default: + model: + num_interactions: 4 + hidden_channels: 750 + num_gaussians: 200 + num_filters: 256 + regress_forces: "direct" + force_coefficient: 30 + energy_grad_coefficient: 10 + optim: + batch_size: 96 + eval_batch_size: 96 + warmup_factor: 0.2 + lr_gamma: 0.1 + lr_initial: 0.0001 + max_epochs: 15 + warmup_steps: 30000 + lr_milestones: + - 55000 + - 75000 + - 10000 + + 200k: {} + + # 1 gpus + 2M: + model: + num_interactions: 5 + hidden_channels: 1024 + num_gaussians: 200 + num_filters: 256 + optim: + batch_size: 192 + eval_batch_size: 192 + + 20M: {} + + all: {} + +qm9: + default: + model: + act: swish + att_heads: 1 + complex_mp: true + cutoff: 6.0 + edge_embed_type: all_rij + energy_head: '' + graph_norm: true + graph_rewiring: null + hidden_channels: 400 + max_num_neighbors: 30 + mp_type: updownscale_base + num_filters: 480 + num_gaussians: 100 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: false + phys_hidden_channels: 0 + regress_forces: '' + second_layer_MLP: true + skip_co: true + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 64 + es_min_abs_change: 1.0e-06 + es_patience: 20 + es_warmup_epochs: 600 + eval_batch_size: 64 + factor: 0.9 + lr_initial: 0.0003 + loss_energy: mse + lr_gamma: 0.1 + lr_initial: 0.001 + max_epochs: 1500 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + patience: 15 + scheduler: ReduceLROnPlateau + threshold: 0.0001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + 10k: {} + all: {} + +qm7x: + default: + model: # SOTA settings + act: swish + att_heads: 1 + complex_mp: true + cutoff: 5.0 + edge_embed_type: all_rij + energy_head: false + force_decoder_model_config: + mlp: + hidden_channels: 256 + norm: batch1d + res: + hidden_channels: 128 + norm: batch1d + res_updown: + hidden_channels: 128 + norm: layer + simple: + hidden_channels: 128 + norm: batch1d + force_decoder_type: res_updown + graph_norm: false + hidden_channels: 500 + max_num_neighbors: 40 + mp_type: updownscale_base + num_filters: 400 + num_gaussians: 50 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: true + phys_hidden_channels: 0 + regress_forces: direct_with_gradient_target + second_layer_MLP: true + skip_co: false + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 100 + energy_grad_coefficient: 5 + eval_batch_size: 100 + eval_every: 0.34 + factor: 0.75 + force_coefficient: 75 + loss_energy: mae + loss_force: mse + lr_gamma: 0.1 + lr_initial: 0.000193 + max_steps: 4000000 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + scheduler: ReduceLROnPlateau + threshold: 0.001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + all: {} + 1k: {} + +qm9: + default: + model: + use_pbc: False 
+ all: {} + 10k: {} diff --git a/debug.py b/debug.py new file mode 100644 index 0000000000..95bea31f26 --- /dev/null +++ b/debug.py @@ -0,0 +1,223 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. + +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. +""" + +import logging +import os +import time +import traceback +import sys +import torch +from yaml import dump + +from ocpmodels.common import dist_utils +from ocpmodels.common.flags import flags +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + JOB_ID, + auto_note, + build_config, + merge_dicts, + move_lmdb_data_to_slurm_tmpdir, + resolve, + setup_imports, + setup_logging, + update_from_sbatch_py_vars, + set_min_hidden_channels, +) +from ocpmodels.common.orion_utils import ( + continue_orion_exp, + load_orion_exp, + sample_orion_hparams, +) +from ocpmodels.trainers import BaseTrainer + +# os.environ["CUDA_LAUNCH_BLOCKING"] = "1" +torch.multiprocessing.set_sharing_strategy("file_system") + + +def print_warnings(): + warnings = [ + "`max_num_neighbors` is set to 40. This should be tuned per model.", + "`tag_specific_weights` is not handled for " + + "`regress_forces: direct_with_gradient_target` in compute_loss()", + ] + print("\n" + "-" * 80 + "\n") + print("🛑 OCP-DR-Lab Warnings (nota benes):") + for warning in warnings: + print(f" • {warning}") + print("Remove warnings when they are fixed in the code/configs.") + print("\n" + "-" * 80 + "\n") + + +def wrap_up(args, start_time, error=None, signal=None, trainer=None): + total_time = time.time() - start_time + logging.info(f"Total time taken: {total_time}") + if trainer and trainer.logger is not None: + trainer.logger.log({"Total time": total_time}) + + if args.distributed: + print( + "\nWaiting for all processes to finish with dist_utils.cleanup()...", + end="", + ) + dist_utils.cleanup() + print("Done!") + + if "interactive" not in os.popen(f"squeue -hj {JOB_ID}").read(): + print("\nSelf-canceling SLURM job in 32s", JOB_ID) + os.popen(f"sleep 32 && scancel {JOB_ID}") + + if trainer and trainer.logger: + trainer.logger.finish(error or signal) + + +if __name__ == "__main__": + error = signal = orion_exp = orion_trial = trainer = None + orion_race_condition = False + hparams = {} + + setup_logging() + + parser = flags.get_parser() + args, override_args = parser.parse_known_args() + args = update_from_sbatch_py_vars(args) + if args.logdir: + args.logdir = resolve(args.logdir) + + # -- Build config + + args.wandb_name = "alvaro-carbonero-math" + args.wandb_project = "ocp-alvaro" + + args.test_ri = True + args.mode = "train" + args.graph_rewiring = "remove-tag-0" + args.cp_data_to_tmpdir = True + args.config = "indfaenet-is2re-10k" + args.frame_averaging = "2D" + args.fa_frames = "se3-random" + + args.config = "indfaenet-is2re-10k" + + trainer_config = build_config(args, override_args) + + if dist_utils.is_master(): + trainer_config = move_lmdb_data_to_slurm_tmpdir(trainer_config) + dist_utils.synchronize() + + trainer_config["dataset"] = dist_utils.broadcast_from_master( + trainer_config["dataset"] + ) + + trainer_config["model"]["edge_embed_type"] = "all_rij" + trainer_config["model"]["mp_type"] = "updownscale" + trainer_config["model"]["phys_Embeds"] = False + trainer_config["model"]["tag_hidden_channels"] = 32 + trainer_config["model"]["pg_hidden_channels"] = 64 + trainer_config["model"]["energy_head"] = "weighted-av-final-embeds" + trainer_config["model"]["complex_mp"] = False 
+ trainer_config["model"]["graph_norm"] = True + trainer_config["model"]['hidden_channels'] = 352 + trainer_config["model"]["num_filters"] = 448 + trainer_config["model"]["num_gaussians"] = 99 + trainer_config["model"]["num_interactions"] = 6 + trainer_config["model"]["second_layer_MLP"] = True + trainer_config["model"]["skip_co"] = "concat" + + trainer_config["optim"]["batch_sizes"] = 256 + trainer_config["optim"]["eval_batch_sizes"] = 256 + trainer_config["optim"]["lr_initial"] = 0.0019 + trainer_config["optim"]["scheduler"] = "LinearWarmupCosineAnnealingLR" + trainer_config["optim"]["max_epochs"] = 20 + trainer_config["optim"]["eval_every"] = 0.4 + + # -- Initial setup + + setup_imports() + print("\n🚩 All things imported.\n") + start_time = time.time() + + try: + # -- Orion + + if args.orion_exp_config_path and dist_utils.is_master(): + orion_exp = load_orion_exp(args) + hparams, orion_trial = sample_orion_hparams(orion_exp, trainer_config) + + if hparams.get("orion_race_condition"): + logging.warning("\n\n ⛔️ Orion race condition. Stopping here.\n\n") + wrap_up(args, start_time, error, signal) + sys.exit() + + hparams = dist_utils.broadcast_from_master(hparams) + if hparams: + print("\n💎 Received hyper-parameters from Orion:") + print(dump(hparams), end="\n") + trainer_config = merge_dicts(trainer_config, hparams) + + # -- Setup trainer + + trainer_config = continue_orion_exp(trainer_config) + trainer_config = auto_note(trainer_config) + trainer_config = set_min_hidden_channels(trainer_config) + + try: + cls = registry.get_trainer_class(trainer_config["trainer"]) + trainer: BaseTrainer = cls(**trainer_config) + except Exception as e: + traceback.print_exc() + logging.warning(f"\n💀 Error in trainer initialization: {e}\n") + signal = "trainer_init_error" + + if signal is None: + task = registry.get_task_class(trainer_config["mode"])(trainer_config) + task.setup(trainer) + print_warnings() + + # -- Start Training + + signal = task.run() + + # -- End of training + + # handle job preemption / time limit + if signal == "SIGTERM": + print("\nJob was preempted. Wrapping up...\n") + if trainer: + trainer.close_datasets() + + dist_utils.synchronize() + + objective = dist_utils.broadcast_from_master( + trainer.objective if trainer else None + ) + + if orion_exp is not None: + if objective is None: + if signal == "loss_is_nan": + objective = 1e12 + print("Received NaN objective from worker. Setting to 1e12.") + if signal == "trainer_init_error": + objective = 1e12 + print( + "Received trainer_init_error from worker.", + "Setting objective to 1e12.", + ) + if objective is not None: + orion_exp.observe( + orion_trial, + [{"type": "objective", "name": "energy_mae", "value": objective}], + ) + else: + print("Received None objective from worker. 
Skipping observation.") + + except Exception: + error = True + print(traceback.format_exc()) + + finally: + wrap_up(args, start_time, error, signal, trainer=trainer) diff --git a/main.py b/main.py index e9dacfb137..f1519dd1c8 100644 --- a/main.py +++ b/main.py @@ -89,7 +89,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.logdir = resolve(args.logdir) # -- Build config - + trainer_config = build_config(args, override_args) if dist_utils.is_master(): diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 7aa97dca2f..4eff3317c8 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -157,10 +157,17 @@ def __init__(self, config, transform=None): ) -def data_list_collater(data_list, otf_graph=False): +def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ever used if type(data_list[0]) is tuple: graphs = [system[0] for system in data_list] + [system[1] for system in data_list] batch = Batch.from_data_list(graphs) + for i in range(len(batch)): + if batch[i].neighbors.shape[0] == 0: + batch[i].neighbors = torch.tensor( + [0], + device = batch[i].neighbors.device, + dtype = torch.int64 + ) else: batch = Batch.from_data_list(data_list) diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py index a65af98468..7017f94b50 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/separate_dataset.py @@ -51,7 +51,7 @@ def graph_splitter(graph): ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] - # Create the batches + # Create the graphs adsorbate = Data( edge_index = ads_edge_index, pos = pos[adsorbate_v_mask, :], @@ -86,7 +86,7 @@ def graph_splitter(graph): return adsorbate, catalyst @registry.register_dataset("separate") -class SeparateLmdbDataset(LmdbDataset): +class SeparateLmdbDataset(LmdbDataset): # Check that the dataset works as intended, with an specific example. def __getitem__(self, idx): t0 = time.time_ns() if not self.path.is_file(): diff --git a/ocpmodels/models/dependent.py b/ocpmodels/models/depfaenet.py similarity index 83% rename from ocpmodels/models/dependent.py rename to ocpmodels/models/depfaenet.py index e36ae0d18e..05bf8399fc 100644 --- a/ocpmodels/models/dependent.py +++ b/ocpmodels/models/depfaenet.py @@ -15,7 +15,7 @@ def __init__(self, energy_head, hidden_channels, act): energy_head, hidden_channels, act ) - del self.lin2 + del self.lin2 # This is probably not necessary. Check out if you can delete it. self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) @@ -50,20 +50,20 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): cat = ~ads ads_out = scatter(h, batch * ads, dim = 0, reduce = "add") - cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") - system = torch.cat([ads_out, cat_out], dim = 1) + cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") # Try to make an MLP differnt for each of the adsorbates and catalyst. + system = torch.cat([ads_out, cat_out], dim = 1) # To implement the comment above, you can implement another flag to be used for this model. 
system = self.sys_lin1(system) energy = self.sys_lin2(system) return energy -@registry.register_model("dependent") +@registry.register_model("depfaenet") class depFAENet(FAENet): def __init__(self, **kwargs): super().__init__(**kwargs) - del self.output_block + del self.output_block # Probably you don't have to delete it, just redefine it. But double check. self.output_block = discOutputBlock( self.energy_head, kwargs["hidden_channels"], self.act ) diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index 061cd5faec..d451007b2c 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -585,7 +585,7 @@ def __init__(self, **kwargs): ) # Skip co - if self.skip_co == "concat": + if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) elif self.skip_co == "concat_atom": self.mlp_skip_co = Linear( diff --git a/ocpmodels/models/independent.py b/ocpmodels/models/independent.py deleted file mode 100644 index 28af769e1d..0000000000 --- a/ocpmodels/models/independent.py +++ /dev/null @@ -1 +0,0 @@ -from ocpmodels.common.registry import registry diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py new file mode 100644 index 0000000000..6bd2e9614a --- /dev/null +++ b/ocpmodels/models/indfaenet.py @@ -0,0 +1,62 @@ +import torch +from torch import nn + +from ocpmodels.models.faenet import FAENet +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.registry import registry + +from torch_geometric.data import Batch + +@registry.register_model("indfaenet") +class indFAENet(BaseModel): # Change to make it inherit from base model. + def __init__(self, **kwargs): + super(indFAENet, self).__init__() + + self.regress_forces = kwargs["regress_forces"] + + self.ads_model = FAENet(**kwargs) + self.cat_model = FAENet(**kwargs) + # To do this, you can create a new input to FAENet so that + # it makes it predict a vector, where the default is normal FAENet. + + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + batch_size = len(data) // 2 + + adsorbates = Batch.from_data_list(data[:batch_size]) + catalysts = Batch.from_data_list(data[batch_size:]) + + # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. + # We do so only on adsorbates, because it is reasonable to assume catalysts have at least one edge. + num_adsorbates = len(adsorbates) + # Find indices of adsorbates without edges: + edgeless = [i for i in range(num_adsorbates) if adsorbates[i].neighbors.shape[0] == 0] + if len(edgeless) > 0: + # Since most adsorbates have an edge, we pop those values specifically from range(num_adsorbates) + mask = list(range(num_adsorbates)) + num_popped = 0 # We can do this since edgeless is already sorted + for unwanted in edgeless: + mask.pop(unwanted-num_popped) + num_popped += 1 + + # Now, we create the new neighbors. 
+ new_nbrs = torch.zeros( + num_adsorbates, + dtype = torch.int64, + device = adsorbates.neighbors.device, + ) + new_nbrs[mask] = adsorbates.neighbors + adsorbates.neighbors = new_nbrs + + # We make predictions for each + pred_ads = self.ads_model(adsorbates, mode) + pred_cat = self.cat_model(catalysts, mode) + + # We combine predictions and return them + pred_system = { + "energy" : (pred_ads["energy"] + pred_cat["energy"]) / 2, + "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], + "hidden_state" : torch.cat([pred_ads["hidden_state"], pred_cat["hidden_state"]], dim = 0) + } + + return pred_system diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index e57455e7a0..5eb766c673 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -245,7 +245,7 @@ def load_datasets(self): if split == "default_val": continue - if self.config["model_name"] in ["independent"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. + if self.config["model_name"] in ["indfaenet"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. self.datasets[split] = registry.get_dataset_class( "separate" )(ds_conf, transform=transform) From 785497636aa9ef93188fd3fb08b567e5d026642f Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 11 Jul 2023 21:04:15 -0400 Subject: [PATCH 027/131] Created debug file to debug from an interactive node --- debug.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/debug.py b/debug.py index 95bea31f26..78f97aeecf 100644 --- a/debug.py +++ b/debug.py @@ -92,17 +92,14 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - args.test_ri = True args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "indfaenet-is2re-10k" + args.config = "depfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" - args.config = "indfaenet-is2re-10k" - trainer_config = build_config(args, override_args) if dist_utils.is_master(): @@ -115,7 +112,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["edge_embed_type"] = "all_rij" trainer_config["model"]["mp_type"] = "updownscale" - trainer_config["model"]["phys_Embeds"] = False + trainer_config["model"]["phys_embeds"] = True trainer_config["model"]["tag_hidden_channels"] = 32 trainer_config["model"]["pg_hidden_channels"] = 64 trainer_config["model"]["energy_head"] = "weighted-av-final-embeds" From 6435dccf869313977afdcf0149e42418a840972a Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 12 Jul 2023 16:35:34 -0400 Subject: [PATCH 028/131] Finished implementing basic version of model indfaenet --- configs/exps/alvaro/split-ads-cats.yaml | 13 ++---- debug.py | 2 +- ocpmodels/models/depfaenet.py | 4 +- ocpmodels/trainers/base_trainer.py | 20 +++++++-- ocpmodels/trainers/single_trainer.py | 56 +++++++++++++++---------- 5 files changed, 56 insertions(+), 39 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 6b1ee75204..5e6d91d527 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -13,18 +13,14 @@ default: test_ri: True mode: train graph_rewiring: remove-tag-0 - model: - edge_embed_type: all_rij optim: - batch_size: 256 - eval_batch_size: 256 cp_data_to_tmpdir: true 
wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. frame_averaging: 2D fa_frames: se3-random model: mp_type: updownscale - phys_embeds: True # I CHANGED THIS! + phys_embeds: True tag_hidden_channels: 32 pg_hidden_channels: 64 energy_head: weighted-av-final-embeds @@ -36,15 +32,14 @@ default: num_interactions: 6 second_layer_MLP: True skip_co: concat + edge_embed_type: rij optim: lr_initial: 0.0019 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 + batch_size: 256 + eval_batch_size: 256 runs: - config: depfaenet-is2re-10k - - - config: depfaenet-is2re-all - - diff --git a/debug.py b/debug.py index 78f97aeecf..20b04a9364 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "depfaenet-is2re-10k" + args.config = "indfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index 05bf8399fc..c3840ceba1 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -15,7 +15,6 @@ def __init__(self, energy_head, hidden_channels, act): energy_head, hidden_channels, act ) - del self.lin2 # This is probably not necessary. Check out if you can delete it. self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) @@ -50,7 +49,7 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): cat = ~ads ads_out = scatter(h, batch * ads, dim = 0, reduce = "add") - cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") # Try to make an MLP differnt for each of the adsorbates and catalyst. + cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") # Try to make an MLP different for each of the adsorbates and catalyst. system = torch.cat([ads_out, cat_out], dim = 1) # To implement the comment above, you can implement another flag to be used for this model. system = self.sys_lin1(system) @@ -63,7 +62,6 @@ class depFAENet(FAENet): def __init__(self, **kwargs): super().__init__(**kwargs) - del self.output_block # Probably you don't have to delete it, just redefine it. But double check. self.output_block = discOutputBlock( self.energy_head, kwargs["hidden_channels"], self.act ) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 5eb766c673..c4866262ce 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -158,11 +158,24 @@ def __init__(self, **kwargs): model_regresses_forces=self.config["model"].get("regress_forces", ""), ) - if self.config["model_name"] == "disconnected": + # Here's the models whose edges are removed as a transform + transform_models = ["depfaenet"] + if self.config["is_disconnected"]: + print("\n\nHeads up: cat-ads edges being removed!") + + if self.config["model_name"] in edge_transform_models: if not self.config["is_disconnected"]: - print("\n\nWhen using the disconnected model, the flag 'is_disconnected' should be used! The flag has been turned on.") + print(f"\n\nWhen using {self.config['model_name']},", + "the flag 'is_disconnected' should be used! 
The flag has been turned on.\n") self.config["is_disconnected"] = True + # Here's the models whose graphs are disconnected in the dataset + dataset_models = ["indfaenet"] + self.separate_dataset = False + if self.config["model_name"] in dataset_models: + self.separate_dataset = True + print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") + def load(self): self.load_seed_from_config() self.load_logger() @@ -226,6 +239,7 @@ def get_dataloader(self, dataset, sampler): pin_memory=True, batch_sampler=sampler, ) + return loader def load_datasets(self): @@ -245,7 +259,7 @@ def load_datasets(self): if split == "default_val": continue - if self.config["model_name"] in ["indfaenet"]: # DEPENDENT SHOULDN'T BE ON THIS LIST. IT'S FOR DEBUGGIN. + if self.config["model_name"] in ["indfaenet"]: self.datasets[split] = registry.get_dataset_class( "separate" )(ds_conf, transform=transform) diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 2ff82da9dd..7fe8943ecb 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -196,7 +196,7 @@ def predict(self, loader, per_image=True, results_file=None, disable_tqdm=False) return predictions def train(self, disable_eval_tqdm=True, debug_batches=-1): - n_train = len(self.loaders["train"]) + n_train = self.config["optim"]["batch_size"] epoch_int = 0 eval_every = self.config["optim"].get("eval_every", n_train) or n_train if eval_every < 1: @@ -449,8 +449,9 @@ def end_of_training( batch = next(iter(self.loaders[self.config["dataset"]["default_val"]])) self.model_forward(batch) self.logger.log({"Batch time": time.time() - start_time}) + self.logger.log( - {"Model run time": model_run_time / len(self.loaders["train"])} + {"Model run time": model_run_time / self.config["optim"]["batch_size"]} ) if log_epoch_times: self.logger.log({"Epoch time": np.mean(epoch_times)}) @@ -550,15 +551,18 @@ def compute_loss(self, preds, batch_list): loss = {"total_loss": []} # Energy loss - energy_target = torch.cat( - [ - batch.y_relaxed.to(self.device) - if self.task_name == "is2re" - else batch.y.to(self.device) - for batch in batch_list - ], - dim=0, - ) + if not self.separate_dataset: + energy_target = torch.cat( + [ + batch.y_relaxed.to(self.device) + if self.task_name == "is2re" + else batch.y.to(self.device) + for batch in batch_list + ], + dim=0, + ) + else: + energy_target = batch_list[0].y_relaxed[:preds["energy"].shape[0]].to(self.device) if self.normalizer.get("normalize_labels", False): hofs = None @@ -658,18 +662,24 @@ def compute_metrics( [batch.natoms.to(self.device) for batch in batch_list], dim=0 ) - target = { - "energy": torch.cat( - [ - batch.y_relaxed.to(self.device) - if self.task_name == "is2re" - else batch.y.to(self.device) - for batch in batch_list - ], - dim=0, - ), - "natoms": natoms, - } + if not self.separate_dataset: + target = { + "energy": torch.cat( + [ + batch.y_relaxed.to(self.device) + if self.task_name == "is2re" + else batch.y.to(self.device) + for batch in batch_list + ], + dim=0, + ), + "natoms": natoms, + } + else: + target = { + "energy": batch_list[0].y_relaxed[:preds["energy"].shape[0]].to(self.device), + "natoms": natoms, + } if self.config["model"].get("regress_forces", False): target["forces"] = torch.cat( From 6a5f3d87a6221a14b13237cba625b33792f83ac2 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 12 Jul 2023 16:49:18 -0400 Subject: [PATCH 029/131] Fixed small typo --- configs/exps/alvaro/split-ads-cats.yaml | 4 +++- 
ocpmodels/trainers/base_trainer.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 5e6d91d527..9eec135fa2 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -42,4 +42,6 @@ default: eval_batch_size: 256 runs: - - config: depfaenet-is2re-10k + - config: indfaenet-is2re-10k + + - config: indfaenet-is2re-all diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index c4866262ce..8664f5e4a7 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -163,7 +163,7 @@ def __init__(self, **kwargs): if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") - if self.config["model_name"] in edge_transform_models: + if self.config["model_name"] in transform_models: if not self.config["is_disconnected"]: print(f"\n\nWhen using {self.config['model_name']},", "the flag 'is_disconnected' should be used! The flag has been turned on.\n") From 0985b839a3f6794a574e2b1dea9a22b1cd5de0a7 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 12 Jul 2023 17:43:36 -0400 Subject: [PATCH 030/131] fixing bug --- configs/exps/alvaro/split-ads-cats.yaml | 2 -- ocpmodels/common/utils.py | 7 ++++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 9eec135fa2..624e02fb8d 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -43,5 +43,3 @@ default: runs: - config: indfaenet-is2re-10k - - - config: indfaenet-is2re-all diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index dfa48f51d4..322e57a86b 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1254,7 +1254,12 @@ def get_pbc_distances( # correct for pbc neighbors = neighbors.to(cell.device) - cell = torch.repeat_interleave(cell, neighbors, dim=0) + try: + cell = torch.repeat_interleave(cell, neighbors, dim=0) + except: + print("cell", cell.shape) + print("neighbors", neighbors.shape) + cell = torch.repeat_interleave(cell, neighbors, dim=0) offsets = cell_offsets.float().view(-1, 1, 3).bmm(cell.float()).view(-1, 3) distance_vectors += offsets From 50687f57b8b44fbd433a077da28e94d913c18dde Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 13 Jul 2023 13:12:26 -0400 Subject: [PATCH 031/131] Bug of model 3 has been fixed. 
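
The failure traced back to systems whose adsorbate (or, rarely, catalyst) subgraph has no edges: such a graph contributes an empty `neighbors` tensor, so per-graph tensors like `cell` no longer line up inside `get_pbc_distances`. A toy illustration of the mismatch and of the zero-padding of `neighbors` applied below (tensor values invented for the example):

    import torch

    cell = torch.eye(3).repeat(3, 1, 1)    # 3 graphs in the batch -> 3 unit cells
    neighbors = torch.tensor([14, 9])      # the edgeless graph lost its entry
    # torch.repeat_interleave(cell, neighbors, dim=0) would fail here, since `neighbors`
    # must have one entry per cell along dim 0 (2 != 3)
    neighbors = torch.tensor([14, 0, 9])   # pad a 0 for the edgeless graph
    expanded = torch.repeat_interleave(cell, neighbors, dim=0)  # shape (23, 3, 3), aligned again
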
--- configs/exps/alvaro/split-ads-cats.yaml | 4 ++- debug.py | 4 +-- ocpmodels/common/utils.py | 7 +--- ocpmodels/datasets/separate_dataset.py | 7 ++-- ocpmodels/models/indfaenet.py | 28 +++++++++++---- ocpmodels/trainers/single_trainer.py | 6 ++-- slurm-3384656.out | 47 +++++++++++++++++++++++++ 7 files changed, 80 insertions(+), 23 deletions(-) create mode 100644 slurm-3384656.out diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 624e02fb8d..154e8cca2e 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -32,7 +32,7 @@ default: num_interactions: 6 second_layer_MLP: True skip_co: concat - edge_embed_type: rij + edge_embed_type: all_rij optim: lr_initial: 0.0019 scheduler: LinearWarmupCosineAnnealingLR @@ -43,3 +43,5 @@ default: runs: - config: indfaenet-is2re-10k + + - config: indfaenet-is2re-all diff --git a/debug.py b/debug.py index 20b04a9364..60d959565a 100644 --- a/debug.py +++ b/debug.py @@ -125,8 +125,8 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - trainer_config["optim"]["batch_sizes"] = 256 - trainer_config["optim"]["eval_batch_sizes"] = 256 + #trainer_config["optim"]["batch_sizes"] = 256 + #trainer_config["optim"]["eval_batch_sizes"] = 256 trainer_config["optim"]["lr_initial"] = 0.0019 trainer_config["optim"]["scheduler"] = "LinearWarmupCosineAnnealingLR" trainer_config["optim"]["max_epochs"] = 20 diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 322e57a86b..dfa48f51d4 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1254,12 +1254,7 @@ def get_pbc_distances( # correct for pbc neighbors = neighbors.to(cell.device) - try: - cell = torch.repeat_interleave(cell, neighbors, dim=0) - except: - print("cell", cell.shape) - print("neighbors", neighbors.shape) - cell = torch.repeat_interleave(cell, neighbors, dim=0) + cell = torch.repeat_interleave(cell, neighbors, dim=0) offsets = cell_offsets.float().view(-1, 1, 3).bmm(cell.float()).view(-1, 3) distance_vectors += offsets diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py index 7017f94b50..35564630a1 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/separate_dataset.py @@ -31,13 +31,10 @@ def graph_splitter(graph): # Make masks to filter most data we need adsorbate_v_mask = (tags == 2) - catalyst_v_mask = (tags == 1) + (tags == 0) + catalyst_v_mask = ~adsorbate_v_mask adsorbate_e_mask = (tags[edge_index][0] == 2) * (tags[edge_index][1] == 2) - catalyst_e_mask = ( - ((tags[edge_index][0] == 1) + (tags[edge_index][0] == 0)) - * ((tags[edge_index][1] == 1) + (tags[edge_index][1] == 0)) - ) + catalyst_e_mask = (tags[edge_index][0] != 2) * (tags[edge_index][1] != 2) # Reindex the edge indices. device = graph.edge_index.device diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 6bd2e9614a..8022c0eb8d 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -26,19 +26,16 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION catalysts = Batch.from_data_list(data[batch_size:]) # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. - # We do so only on adsorbates, because it is reasonable to assume catalysts have at least one edge. 
num_adsorbates = len(adsorbates) # Find indices of adsorbates without edges: - edgeless = [i for i in range(num_adsorbates) if adsorbates[i].neighbors.shape[0] == 0] - if len(edgeless) > 0: + edgeless_ads = [i for i in range(num_adsorbates) if adsorbates[i].neighbors.shape[0] == 0] + if len(edgeless_ads) > 0: # Since most adsorbates have an edge, we pop those values specifically from range(num_adsorbates) mask = list(range(num_adsorbates)) num_popped = 0 # We can do this since edgeless is already sorted - for unwanted in edgeless: + for unwanted in edgeless_ads: mask.pop(unwanted-num_popped) num_popped += 1 - - # Now, we create the new neighbors. new_nbrs = torch.zeros( num_adsorbates, dtype = torch.int64, @@ -47,6 +44,25 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION new_nbrs[mask] = adsorbates.neighbors adsorbates.neighbors = new_nbrs + # Now for catalysts + num_catalysts = len(catalysts) + edgeless_cats = [i for i in range(num_catalysts) if catalysts[i].neighbors.shape[0] == 0] + if len(edgeless_cats) > 0: + mask = list(range(num_catalysts)) + num_popped = 0 + for unwanted in edgeless_cats: + mask.pop(unwanted-num_popped) + num_popped += 1 + + # Now, we create the new neighbors. + new_nbrs = torch.zeros( + num_catalysts, + dtype = torch.int64, + device = catalysts.neighbors.device, + ) + new_nbrs[mask] = catalysts.neighbors + catalysts.neighbors = new_nbrs + # We make predictions for each pred_ads = self.ads_model(adsorbates, mode) pred_cat = self.cat_model(catalysts, mode) diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 7fe8943ecb..d979f9c4e9 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -196,7 +196,7 @@ def predict(self, loader, per_image=True, results_file=None, disable_tqdm=False) return predictions def train(self, disable_eval_tqdm=True, debug_batches=-1): - n_train = self.config["optim"]["batch_size"] + n_train = len(self.loaders["train"]) epoch_int = 0 eval_every = self.config["optim"].get("eval_every", n_train) or n_train if eval_every < 1: @@ -449,9 +449,9 @@ def end_of_training( batch = next(iter(self.loaders[self.config["dataset"]["default_val"]])) self.model_forward(batch) self.logger.log({"Batch time": time.time() - start_time}) - + self.logger.log( - {"Model run time": model_run_time / self.config["optim"]["batch_size"]} + {"Model run time": model_run_time / len(self.loaders["train"])} ) if log_epoch_times: self.logger.log({"Epoch time": np.mean(epoch_times)}) diff --git a/slurm-3384656.out b/slurm-3384656.out new file mode 100644 index 0000000000..cfc0f84248 --- /dev/null +++ b/slurm-3384656.out @@ -0,0 +1,47 @@ + +======== GPU REPORT ======== + +==============NVSMI LOG============== + +Timestamp : Wed Jul 12 18:02:38 2023 +Driver Version : 515.65.01 +CUDA Version : 11.7 + +Attached GPUs : 1 +GPU 00000000:89:00.0 + Accounting Mode : Enabled + Accounting Mode Buffer Size : 4000 + Accounted Processes + Process ID : 13872 + GPU Utilization : 4 % + Memory Utilization : 2 % + Max memory usage : 6181 MiB + Time : 1437430 ms + Is Running : 0 + Process ID : 34608 + GPU Utilization : 4 % + Memory Utilization : 1 % + Max memory usage : 6181 MiB + Time : 865845 ms + Is Running : 0 + +Wed Jul 12 18:02:38 2023 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 515.65.01 Driver Version: 515.65.01 CUDA Version: 11.7 | +|-------------------------------+----------------------+----------------------+ +| GPU Name 
Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 Quadro RTX 8000 On | 00000000:89:00.0 Off | Off | +| 33% 28C P8 27W / 260W | 1MiB / 49152MiB | 0% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ + ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| +| No running processes found | ++-----------------------------------------------------------------------------+ From b5a24149f39f49c0276dcdfff30e90cee736e1ee Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 13 Jul 2023 14:22:41 -0400 Subject: [PATCH 032/131] Rewrote a line to make it nicer. --- ocpmodels/models/indfaenet.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 8022c0eb8d..a7ed53f85c 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -7,6 +7,8 @@ from torch_geometric.data import Batch + + @registry.register_model("indfaenet") class indFAENet(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): @@ -28,7 +30,11 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. num_adsorbates = len(adsorbates) # Find indices of adsorbates without edges: - edgeless_ads = [i for i in range(num_adsorbates) if adsorbates[i].neighbors.shape[0] == 0] + edgeless_ads = [ + i for i + in range(num_adsorbates) + if adsorbates[i].neighbors.shape[0] == 0 + ] if len(edgeless_ads) > 0: # Since most adsorbates have an edge, we pop those values specifically from range(num_adsorbates) mask = list(range(num_adsorbates)) From ef44257b9cb1eb4ed163e181fbe57cc54985aa44 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 14 Jul 2023 15:17:43 -0400 Subject: [PATCH 033/131] Finished implementing indfaenet --- configs/exps/alvaro/split-ads-cats.yaml | 2 +- ocpmodels/models/faenet.py | 21 ++++++++++++++++----- ocpmodels/models/indfaenet.py | 22 +++++++++++++++++++--- ocpmodels/trainers/base_trainer.py | 15 ++++++++------- ocpmodels/trainers/single_trainer.py | 17 +++++++++-------- 5 files changed, 53 insertions(+), 24 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 154e8cca2e..ac5903daaa 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -44,4 +44,4 @@ default: runs: - config: indfaenet-is2re-10k - - config: indfaenet-is2re-all +# - config: indfaenet-is2re-all diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index d451007b2c..f8e7947651 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -397,13 +397,18 @@ def message(self, x_j, W, local_env=None): class OutputBlock(nn.Module): - def __init__(self, energy_head, hidden_channels, act): + def __init__( + self, energy_head, hidden_channels, act, model_name = "faenet" + ): super().__init__() self.energy_head = energy_head self.act = act self.lin1 = Linear(hidden_channels, hidden_channels // 2) - self.lin2 = Linear(hidden_channels // 2, 1) + if model_name == "faenet": + self.lin2 = 
Linear(hidden_channels // 2, 1) + elif model_name == "indfaenet": + self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) # weighted average & pooling if self.energy_head in {"pooling", "random"}: @@ -527,6 +532,7 @@ def __init__(self, **kwargs): "one-supernode-per-atom-type", "one-supernode-per-atom-type-dist", } + # Gaussian Basis self.distance_expansion = GaussianSmearing( 0.0, self.cutoff, kwargs["num_gaussians"] @@ -565,7 +571,7 @@ def __init__(self, **kwargs): # Output block self.output_block = OutputBlock( - self.energy_head, kwargs["hidden_channels"], self.act + self.energy_head, kwargs["hidden_channels"], self.act, kwargs["model_name"] ) # Energy head @@ -586,7 +592,13 @@ def __init__(self, **kwargs): # Skip co if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough - self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) + if kwargs["model_name"] == "faenet": + self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) + elif kwargs["model_name"] == "indfaenet": + self.mlp_skip_co = Linear( + (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, + kwargs["hidden_channels"] // 2 + ) elif self.skip_co == "concat_atom": self.mlp_skip_co = Linear( ((kwargs["num_interactions"] + 1) * kwargs["hidden_channels"]), @@ -663,7 +675,6 @@ def energy_forward(self, data): h = h + interaction(h, edge_index, e) # Atom skip-co - if self.skip_co == "concat_atom": energy_skip_co.append(h) h = self.act(self.mlp_skip_co(torch.cat(energy_skip_co, dim=1))) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index a7ed53f85c..0b5603e848 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -1,14 +1,15 @@ import torch from torch import nn +from torch.nn import Linear from ocpmodels.models.faenet import FAENet +from ocpmodels.models.faenet import OutputBlock from ocpmodels.models.base_model import BaseModel from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish from torch_geometric.data import Batch - - @registry.register_model("indfaenet") class indFAENet(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): @@ -18,6 +19,13 @@ def __init__(self, **kwargs): self.ads_model = FAENet(**kwargs) self.cat_model = FAENet(**kwargs) + + self.act = ( + getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish + ) + + self.lin1 = Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2) + self.lin2 = Linear(kwargs["hidden_channels"] // 2, 1) # To do this, you can create a new input to FAENet so that # it makes it predict a vector, where the default is normal FAENet. 
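
For reference, the shape bookkeeping this head assumes: each sub-FAENet's OutputBlock now emits a hidden_channels // 2 vector per graph, the two vectors are concatenated back to hidden_channels, and lin1/lin2 reduce that to one energy. A small sketch with invented sizes (illustrative only; F.silu stands in for the swish activation):

    import torch
    import torch.nn.functional as F
    from torch.nn import Linear

    hidden_channels, n_graphs = 352, 256   # assumed values, not read from the repo
    ads_vec = torch.rand(n_graphs, hidden_channels // 2)   # adsorbate FAENet output
    cat_vec = torch.rand(n_graphs, hidden_channels // 2)   # catalyst FAENet output
    lin1 = Linear(hidden_channels, hidden_channels // 2)
    lin2 = Linear(hidden_channels // 2, 1)
    energy = lin2(F.silu(lin1(torch.cat([ads_vec, cat_vec], dim=1))))  # shape (n_graphs, 1)
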
@@ -73,9 +81,17 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION pred_ads = self.ads_model(adsorbates, mode) pred_cat = self.cat_model(catalysts, mode) + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] + + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.lin1(system_energy) + system_energy = self.act(system_energy) + system_energy = self.lin2(system_energy) + # We combine predictions and return them pred_system = { - "energy" : (pred_ads["energy"] + pred_cat["energy"]) / 2, + "energy" : system_energy, "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], "hidden_state" : torch.cat([pred_ads["hidden_state"], pred_cat["hidden_state"]], dim = 0) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 8664f5e4a7..520ccbc3b5 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -151,18 +151,11 @@ def __init__(self, **kwargs): "to stop the training after the next validation\n", ) (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) - self.load() - - self.evaluator = Evaluator( - task=self.task_name, - model_regresses_forces=self.config["model"].get("regress_forces", ""), - ) # Here's the models whose edges are removed as a transform transform_models = ["depfaenet"] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") - if self.config["model_name"] in transform_models: if not self.config["is_disconnected"]: print(f"\n\nWhen using {self.config['model_name']},", @@ -176,6 +169,13 @@ def __init__(self, **kwargs): self.separate_dataset = True print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") + self.load() + + self.evaluator = Evaluator( + task = self.task_name, + model_regresses_forces = self.config["model"].get("regress_forces", ""), + ) + def load(self): self.load_seed_from_config() self.load_logger() @@ -397,6 +397,7 @@ def load_model(self): "task_name": self.task_name, }, **self.config["model"], + "model_name": self.config["model_name"], } self.model = registry.get_model_class(self.config["model_name"])( diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index d979f9c4e9..869eeb2571 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -457,14 +457,15 @@ def end_of_training( self.logger.log({"Epoch time": np.mean(epoch_times)}) # Check respect of symmetries - if self.test_ri and not is_test_env: - symmetry = self.test_model_symmetries(debug_batches=debug_batches) - if symmetry == "SIGTERM": - return "SIGTERM" - if self.logger: - self.logger.log(symmetry) - if not self.silent: - print(symmetry) + if not self.separate_dataset: + if self.test_ri and not is_test_env: + symmetry = self.test_model_symmetries(debug_batches=debug_batches) + if symmetry == "SIGTERM": + return "SIGTERM" + if self.logger: + self.logger.log(symmetry) + if not self.silent: + print(symmetry) # Close datasets if debug_batches < 0: From a37986c2b7afd935bf179e0e7cb593bbe03f5172 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 17 Jul 2023 09:57:41 -0400 Subject: [PATCH 034/131] Implemented disconnected_mlp in indfaenet --- configs/exps/alvaro/split-ads-cats.yaml | 8 +++++++- debug.py | 3 ++- ocpmodels/models/depfaenet.py | 25 +++++++++++++++++++------ ocpmodels/models/faenet.py | 2 +- ocpmodels/models/indfaenet.py | 11 
++++++++++- 5 files changed, 39 insertions(+), 10 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index ac5903daaa..af053b9b54 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -42,6 +42,12 @@ default: eval_batch_size: 256 runs: + - config: depfaenet-is2re-all + is_disconnected: True + disconnected_mlp: True + - config: indfaenet-is2re-10k + disconnected_mlp: False -# - config: indfaenet-is2re-all + - config: indfaenet-is2re-all + disconnected_mlp: False diff --git a/debug.py b/debug.py index 60d959565a..157c61db89 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "indfaenet-is2re-10k" + args.config = "depfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" @@ -124,6 +124,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" + trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 #trainer_config["optim"]["eval_batch_sizes"] = 256 diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index c3840ceba1..d7576a5961 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -10,13 +10,18 @@ from torch_geometric.data import Batch class discOutputBlock(conOutputBlock): - def __init__(self, energy_head, hidden_channels, act): + def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): super(discOutputBlock, self).__init__( energy_head, hidden_channels, act ) self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) + self.disconnected_mlp = disconnected_mlp + if self.disconnected_mlp: + self.ads_lin = Linear(hidden_channels // 2, hidden_channels // 2) + self.cat_lin = Linear(hidden_channels // 2, hidden_channels // 2) + self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) self.sys_lin2 = Linear(hidden_channels // 2, 1) @@ -49,12 +54,17 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): cat = ~ads ads_out = scatter(h, batch * ads, dim = 0, reduce = "add") - cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") # Try to make an MLP different for each of the adsorbates and catalyst. - system = torch.cat([ads_out, cat_out], dim = 1) # To implement the comment above, you can implement another flag to be used for this model. 
- + cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") + + if self.disconnected_mlp: + ads_out = self.ads_lin(ads_out) + cat_out = self.cat_lin(cat_out) + + system = torch.cat([ads_out, cat_out], dim = 1) + system = self.sys_lin1(system) energy = self.sys_lin2(system) - + return energy @registry.register_model("depfaenet") @@ -62,8 +72,11 @@ class depFAENet(FAENet): def __init__(self, **kwargs): super().__init__(**kwargs) + if "disconnected_mlp" not in kwargs: + kwargs["disconnected_mlp"] = False + self.output_block = discOutputBlock( - self.energy_head, kwargs["hidden_channels"], self.act + self.energy_head, kwargs["hidden_channels"], self.act, kwargs["disconnected_mlp"] ) @conditional_grad(torch.enable_grad()) diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index f8e7947651..afdd9e158a 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -592,7 +592,7 @@ def __init__(self, **kwargs): # Skip co if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough - if kwargs["model_name"] == "faenet": + if kwargs["model_name"] in ["faenet", "depfaenet"]: self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) elif kwargs["model_name"] == "indfaenet": self.mlp_skip_co = Linear( diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 0b5603e848..a5b638aa47 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -26,6 +26,11 @@ def __init__(self, **kwargs): self.lin1 = Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2) self.lin2 = Linear(kwargs["hidden_channels"] // 2, 1) + + self.disconnected_mlp = kwarsg["disconnected_mlp"] if "disconnected_mlp" in kwargs else False + if self.disconnected_mlp: + self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) + self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) # To do this, you can create a new input to FAENet so that # it makes it predict a vector, where the default is normal FAENet. 
@@ -44,7 +49,8 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION if adsorbates[i].neighbors.shape[0] == 0 ] if len(edgeless_ads) > 0: - # Since most adsorbates have an edge, we pop those values specifically from range(num_adsorbates) + # Since most adsorbates have an edge, + # we pop those values specifically from range(num_adsorbates) mask = list(range(num_adsorbates)) num_popped = 0 # We can do this since edgeless is already sorted for unwanted in edgeless_ads: @@ -83,6 +89,9 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION ads_energy = pred_ads["energy"] cat_energy = pred_cat["energy"] + if self.disconnected_mlp: + ads_energy = self.ads_lin(ads_energy) + cat_energy = self.cat_lin(cat_energy) system_energy = torch.cat([ads_energy, cat_energy], dim = 1) system_energy = self.lin1(system_energy) From fd264183673c5a819fffbd0c2199c2217554c293 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 19 Jul 2023 12:10:46 -0400 Subject: [PATCH 035/131] Cleaned up some code, and set it ready for some hyperparameter tweaking --- configs/exps/alvaro/hyper-tweak.yaml | 55 +++++++++++++++++++++++++ configs/exps/alvaro/split-ads-cats.yaml | 15 ++++--- debug.py | 4 +- ocpmodels/models/depfaenet.py | 6 +-- ocpmodels/models/indfaenet.py | 4 +- ocpmodels/tasks/task.py | 2 +- slurm-3384656.out | 47 --------------------- 7 files changed, 72 insertions(+), 61 deletions(-) create mode 100644 configs/exps/alvaro/hyper-tweak.yaml delete mode 100644 slurm-3384656.out diff --git a/configs/exps/alvaro/hyper-tweak.yaml b/configs/exps/alvaro/hyper-tweak.yaml new file mode 100644 index 0000000000..982f6aad29 --- /dev/null +++ b/configs/exps/alvaro/hyper-tweak.yaml @@ -0,0 +1,55 @@ +# MODIFY THIS ONE FOR RUNS + +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + optim: + cp_data_to_tmpdir: true + wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. 
+ frame_averaging: 2D + fa_frames: se3-random + model: + mp_type: updownscale + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: False + graph_norm: True + hidden_channels: 352 + num_filters: 448 + num_gaussians: 99 + num_interactions: 6 + second_layer_MLP: True + skip_co: concat + edge_embed_type: all_rij + optim: + #lr_initial: 0.0019 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 + batch_size: 256 + eval_batch_size: 256 + +runs: + - config: indfaenet-is2re-all + optim: + lr_initial: 0.0019 + + - config: indfaenet-is2re-all + optim: + lr_initial: 0.001 + + - config: indfaenet-is2re-all + optim: + lr_initial: 0.0005 diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index af053b9b54..9bfc40f26b 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -42,12 +42,15 @@ default: eval_batch_size: 256 runs: - - config: depfaenet-is2re-all - is_disconnected: True - disconnected_mlp: True - - config: indfaenet-is2re-10k - disconnected_mlp: False + model: + disconnected_mlp: True - config: indfaenet-is2re-all - disconnected_mlp: False + model: + disconnected_mlp: True + + - config: depfaenet-is2re-all + is_disconnected: True + model: + disconnected_mlp: True diff --git a/debug.py b/debug.py index 157c61db89..3b3d6fd7ba 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "depfaenet-is2re-10k" + args.config = "indfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" @@ -124,7 +124,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - trainer_config["model"]["disconnected_mlp"] = True + #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 #trainer_config["optim"]["eval_batch_sizes"] = 256 diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index d7576a5961..d3c0f0ed9c 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -72,11 +72,9 @@ class depFAENet(FAENet): def __init__(self, **kwargs): super().__init__(**kwargs) - if "disconnected_mlp" not in kwargs: - kwargs["disconnected_mlp"] = False - + self.disconnected_mlp = kwargs.get("disconnected_mlp", False) self.output_block = discOutputBlock( - self.energy_head, kwargs["hidden_channels"], self.act, kwargs["disconnected_mlp"] + self.energy_head, kwargs["hidden_channels"], self.act, self.disconnected_mlp ) @conditional_grad(torch.enable_grad()) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index a5b638aa47..fd6327b105 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -27,7 +27,9 @@ def __init__(self, **kwargs): self.lin1 = Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2) self.lin2 = Linear(kwargs["hidden_channels"] // 2, 1) - self.disconnected_mlp = kwarsg["disconnected_mlp"] if "disconnected_mlp" in kwargs else False + import ipdb + ipdb.set_trace() + self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) 
self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index caac6b253e..e229152212 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - self.trainer.measure_inference_time(loops=loops) + #self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( diff --git a/slurm-3384656.out b/slurm-3384656.out deleted file mode 100644 index cfc0f84248..0000000000 --- a/slurm-3384656.out +++ /dev/null @@ -1,47 +0,0 @@ - -======== GPU REPORT ======== - -==============NVSMI LOG============== - -Timestamp : Wed Jul 12 18:02:38 2023 -Driver Version : 515.65.01 -CUDA Version : 11.7 - -Attached GPUs : 1 -GPU 00000000:89:00.0 - Accounting Mode : Enabled - Accounting Mode Buffer Size : 4000 - Accounted Processes - Process ID : 13872 - GPU Utilization : 4 % - Memory Utilization : 2 % - Max memory usage : 6181 MiB - Time : 1437430 ms - Is Running : 0 - Process ID : 34608 - GPU Utilization : 4 % - Memory Utilization : 1 % - Max memory usage : 6181 MiB - Time : 865845 ms - Is Running : 0 - -Wed Jul 12 18:02:38 2023 -+-----------------------------------------------------------------------------+ -| NVIDIA-SMI 515.65.01 Driver Version: 515.65.01 CUDA Version: 11.7 | -|-------------------------------+----------------------+----------------------+ -| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | -| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | -| | | MIG M. | -|===============================+======================+======================| -| 0 Quadro RTX 8000 On | 00000000:89:00.0 Off | Off | -| 33% 28C P8 27W / 260W | 1MiB / 49152MiB | 0% Default | -| | | N/A | -+-------------------------------+----------------------+----------------------+ - -+-----------------------------------------------------------------------------+ -| Processes: | -| GPU GI CI PID Type Process name GPU Memory | -| ID ID Usage | -|=============================================================================| -| No running processes found | -+-----------------------------------------------------------------------------+ From 745351f8eecb539ef917f70127eba650f356f36b Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 19 Jul 2023 16:01:53 -0400 Subject: [PATCH 036/131] Deleted a wild point trace. 
--- ocpmodels/models/indfaenet.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index fd6327b105..25d6732ab1 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -27,8 +27,6 @@ def __init__(self, **kwargs): self.lin1 = Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2) self.lin2 = Linear(kwargs["hidden_channels"] // 2, 1) - import ipdb - ipdb.set_trace() self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) From 3020a46220dcf74ed5692d890fb840523609cda7 Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 26 Jul 2023 17:29:07 -0400 Subject: [PATCH 037/131] Implemented a first version of the transformer --- configs/exps/alvaro/split-ads-cats.yaml | 13 ++------- debug.py | 1 + ocpmodels/models/indfaenet.py | 39 +++++++++++++++++++------ 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 9bfc40f26b..45b67ccd0a 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -1,4 +1,4 @@ -# MODIFY THIS ONE FOR RUNS +\# MODIFY THIS ONE FOR RUNS job: mem: 32GB @@ -44,13 +44,4 @@ default: runs: - config: indfaenet-is2re-10k model: - disconnected_mlp: True - - - config: indfaenet-is2re-all - model: - disconnected_mlp: True - - - config: depfaenet-is2re-all - is_disconnected: True - model: - disconnected_mlp: True + transformer_out: True diff --git a/debug.py b/debug.py index 3b3d6fd7ba..6b502c958c 100644 --- a/debug.py +++ b/debug.py @@ -124,6 +124,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" + trainer_config["model"]["transformer_out"] = False #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 25d6732ab1..c556e3fc72 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -1,6 +1,6 @@ import torch from torch import nn -from torch.nn import Linear +from torch.nn import Linear, Transformer from ocpmodels.models.faenet import FAENet from ocpmodels.models.faenet import OutputBlock @@ -24,15 +24,29 @@ def __init__(self, **kwargs): getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish ) - self.lin1 = Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2) - self.lin2 = Linear(kwargs["hidden_channels"] // 2, 1) - self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) - # To do this, you can create a new input to FAENet so that - # it makes it predict a vector, where the default is normal FAENet. 
+ + self.transformer_out = kwargs.get("transformer_out", False) + if self.transformer_out: + self.combination = Transformer( + d_model = kwargs["hidden_channels"], + nhead = 2, + num_encoder_layers = 2, + num_decoder_layers = 2, + dim_feedforward = kwargs["hidden_channels"], + batch_first = True + ) + self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"])) + self.transformer_lin = Linear(kwargs["hidden_channels"], 1) + else: + self.combination = nn.Sequential( + Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + self.act, + Linear(kwargs["hidden_channels"] // 2, 1) + ) def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! batch_size = len(data) // 2 @@ -94,9 +108,16 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION cat_energy = self.cat_lin(cat_energy) system_energy = torch.cat([ads_energy, cat_energy], dim = 1) - system_energy = self.lin1(system_energy) - system_energy = self.act(system_energy) - system_energy = self.lin2(system_energy) + if self.transformer_out: + batch_size = system_energy.shape[0] + + fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).squeeze(1) + system_energy = system_energy.squeeze(1) + + system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + system_energy = self.transformer_lin(system_energy) + else: + system_energy = self.combination(system_energy) # We combine predictions and return them pred_system = { From c6179184cc4147408386e9a4e5dec57cdc70b6aa Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 26 Jul 2023 18:25:39 -0400 Subject: [PATCH 038/131] Changed implementation to a better one --- configs/exps/alvaro/split-ads-cats.yaml | 2 +- debug.py | 2 +- ocpmodels/models/indfaenet.py | 22 ++++++++++++++-------- ocpmodels/tasks/task.py | 2 +- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/split-ads-cats.yaml index 45b67ccd0a..3cc1ca55b3 100644 --- a/configs/exps/alvaro/split-ads-cats.yaml +++ b/configs/exps/alvaro/split-ads-cats.yaml @@ -1,4 +1,4 @@ -\# MODIFY THIS ONE FOR RUNS +# MODIFY THIS ONE FOR RUNS job: mem: 32GB diff --git a/debug.py b/debug.py index 6b502c958c..96925bcdac 100644 --- a/debug.py +++ b/debug.py @@ -124,7 +124,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - trainer_config["model"]["transformer_out"] = False + trainer_config["model"]["transformer_out"] = True #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index c556e3fc72..9dd9f79cca 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -32,15 +32,15 @@ def __init__(self, **kwargs): self.transformer_out = kwargs.get("transformer_out", False) if self.transformer_out: self.combination = Transformer( - d_model = kwargs["hidden_channels"], + d_model = kwargs["hidden_channels"] // 2, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2, dim_feedforward = kwargs["hidden_channels"], batch_first = True ) - self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"])) - self.transformer_lin = Linear(kwargs["hidden_channels"], 1) + self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) + self.transformer_lin = 
Linear(kwargs["hidden_channels"] // 2, 1) else: self.combination = nn.Sequential( Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), @@ -107,16 +107,22 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION ads_energy = self.ads_lin(ads_energy) cat_energy = self.cat_lin(cat_energy) - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) if self.transformer_out: - batch_size = system_energy.shape[0] + batch_size = ads_energy.shape[0] + + fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + system_energy = torch.cat( + [ + ads_energy.unsqueeze(1), + cat_energy.unsqueeze(1) + ], + dim = 1 + ) - fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).squeeze(1) - system_energy = system_energy.squeeze(1) - system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) system_energy = self.transformer_lin(system_energy) else: + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) system_energy = self.combination(system_energy) # We combine predictions and return them diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index e229152212..caac6b253e 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - #self.trainer.measure_inference_time(loops=loops) + self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From 684327d2c2dff32afc02440ae4b521c750bfbb34 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 27 Jul 2023 11:34:54 -0400 Subject: [PATCH 039/131] Added positional encoding to allow the model to distinguish between adsorbate and catalysts inputs --- ocpmodels/models/indfaenet.py | 30 +++++++++++++++++++++++++++++- ocpmodels/tasks/task.py | 2 +- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 9dd9f79cca..c46769c12b 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -1,4 +1,4 @@ -import torch +import torch, math from torch import nn from torch.nn import Linear, Transformer @@ -10,6 +10,27 @@ from torch_geometric.data import Batch +# Implementation of positional encoding obtained from Harvard's annotated transformer's guide +class PositionalEncoding(nn.Module): + def __init__(self, d_model, dropout = 0.1, max_len = 5): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p = dropout) + + # Compute the positional encodings once in log space. + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.register_buffer("pe", pe) + + def forward(self, x): + x = x + self.pe[:, : x.size(1)].requires_grad_(False) + return self.dropout(x) + @registry.register_model("indfaenet") class indFAENet(BaseModel): # Change to make it inherit from base model. 
def __init__(self, **kwargs): @@ -39,6 +60,11 @@ def __init__(self, **kwargs): dim_feedforward = kwargs["hidden_channels"], batch_first = True ) + self.positional_encoding = PositionalEncoding( + kwargs["hidden_channels"] // 2, + dropout = 0.1, + max_len = 5, + ) self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) else: @@ -118,6 +144,8 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION ], dim = 1 ) + + system_energy = self.positional_encoding(system_energy) system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) system_energy = self.transformer_lin(system_energy) diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index caac6b253e..e229152212 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - self.trainer.measure_inference_time(loops=loops) + #self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From 9c83bec23d6400f021801eddd29a0c4292c6bdf4 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 27 Jul 2023 11:42:01 -0400 Subject: [PATCH 040/131] Forgot to renable inference time calculation --- ocpmodels/tasks/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index e229152212..caac6b253e 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - #self.trainer.measure_inference_time(loops=loops) + self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From 31ec5885f201a5d94a526be79c01f68adbb92024 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 27 Jul 2023 12:16:46 -0400 Subject: [PATCH 041/131] Modified training files. 
--- .../{split-ads-cats.yaml => 10k-training.yaml} | 0 .../{hyper-tweak.yaml => all-training.yaml} | 16 ++++------------ 2 files changed, 4 insertions(+), 12 deletions(-) rename configs/exps/alvaro/{split-ads-cats.yaml => 10k-training.yaml} (100%) rename configs/exps/alvaro/{hyper-tweak.yaml => all-training.yaml} (80%) diff --git a/configs/exps/alvaro/split-ads-cats.yaml b/configs/exps/alvaro/10k-training.yaml similarity index 100% rename from configs/exps/alvaro/split-ads-cats.yaml rename to configs/exps/alvaro/10k-training.yaml diff --git a/configs/exps/alvaro/hyper-tweak.yaml b/configs/exps/alvaro/all-training.yaml similarity index 80% rename from configs/exps/alvaro/hyper-tweak.yaml rename to configs/exps/alvaro/all-training.yaml index 982f6aad29..936650c88e 100644 --- a/configs/exps/alvaro/hyper-tweak.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - #lr_initial: 0.0019 + lr_initial: 0.001 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -42,14 +42,6 @@ default: eval_batch_size: 256 runs: - - config: indfaenet-is2re-all - optim: - lr_initial: 0.0019 - - - config: indfaenet-is2re-all - optim: - lr_initial: 0.001 - - - config: indfaenet-is2re-all - optim: - lr_initial: 0.0005 + - config: indfaenet-is2re-10k + model: + transformer_out: True From 9a715461ff6d9fc6f40130295595cec60f7834a3 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 28 Jul 2023 09:47:57 -0400 Subject: [PATCH 042/131] fixed typo in experiments folder --- configs/exps/alvaro/all-training.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 936650c88e..88446f3552 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,6 +42,6 @@ default: eval_batch_size: 256 runs: - - config: indfaenet-is2re-10k + - config: indfaenet-is2re-all model: transformer_out: True From acf2d8a60d86ee09252c16096ce602733b784648 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 28 Jul 2023 11:42:10 -0400 Subject: [PATCH 043/131] Started implementing transformer interaction faenet, init mostly done --- ocpmodels/models/tifaenet.py | 149 +++++++++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 ocpmodels/models/tifaenet.py diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py new file mode 100644 index 0000000000..a3e00bcba5 --- /dev/null +++ b/ocpmodels/models/tifaenet.py @@ -0,0 +1,149 @@ +import torch +from torch import nn + +from ocpmodels.models.faenet import ( + GaussianSmearing, + EmbeddingBlock, + InteractionBlock, + OutputBlock +) +from ocpmodels.common.registry import registry +from ocpmodels.models.base_model import BaseModel + +class TransformerInteraction(nn.Module): + def __init__(self, placeholder): + pass + + def forward(self, inputs): + pass + +@registry.register_model("tifaenet") +class TIFaenet(BaseModel) + def __init__(self, **kwargs): + super(TIFaenet, self).__init__() + + self.cutoff = kwargs["cutoff"] + self.energy_head = kwargs["energy_head"] + self.regress_forces = kwargs["regress_forces"] + self.use_pbc = kwargs["use_pbc"] + self.max_num_neighbors = kwargs["max_num_neighbors"] + self.edge_embed_type = kwargs["edge_embed_type"] + self.skip_co = kwargs["skip_co"] + if kwargs["mp_type"] == "sfarinet": + kwargs["num_filters"] = kwargs["hidden_channels"] + + self.act = ( + getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" 
else swish + ) + self.use_positional_embeds = kwargs["graph_rewiring"] in { + "one-supernode-per-graph", + "one-supernode-per-atom-type", + "one-supernode-per-atom-type-dist", + } + + # Gaussian Basis + self.distance_expansion = GaussianSmearing( + 0.0, self.cutoff, kwargs["num_gaussians"] + ) + + # Embedding block + self.embed_block = EmbeddingBlock( + kwargs["num_gaussians"], + kwargs["num_filters"], + kwargs["hidden_channels"], + kwargs["tag_hidden_channels"], + kwargs["pg_hidden_channels"], + kwargs["phys_hidden_channels"], + kwargs["phys_embeds"], + kwargs["graph_rewiring"], + self.act, + kwargs["second_layer_MLP"], + kwargs["edge_embed_type"], + ) + + # Interaction block + self.interaction_blocks_ads = nn.ModuleList( + [ + InteractionBlock( + kwargs["hidden_channels"], + kwargs["num_filters"], + self.act, + kwargs["mp_type"], + kwargs["complex_mp"], + kwargs["att_heads"], + kwargs["graph_norm"], + ) + for _ in range(kwargs["num_interactions"]) + ] + ) + self.interaction_blocks_cat = nn.ModuleList( + [ + InteractionBlock( + kwargs["hidden_channels"], + kwargs["num_filters"], + self.act, + kwargs["mp_type"], + kwargs["complex_mp"], + kwargs["att_heads"], + kwargs["graph_norm"], + ) + for _ in range(kwargs["num_interactions"]) + ] + ) + + # Transformer Interaction + self.transformer_blocks_ads = nn.ModuleList( + [ + TransformerInteraction( + placeholder = 3.14159265 + ) + ] + ) + + # Output blocks + self.output_block_ads = OutputBlock( + self.energy_head, kwargs["hidden_channels"], self.act, kwargs["model_name"] + ) + self.output_block_cat = OutputBlock( + self.energy_head, kwargs["hidden_channels"], self.act, kwargs["model_name"] + ) + + # Energy head + if self.energy_head == "weighted-av-initial-embeds": + self.w_lin_ads = Linear(kwargs["hidden_channels"], 1) + self.w_lin_cat = Linear(kwargs["hidden_channels"], 1) + + # Skip co + if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough + if kwargs["model_name"] in {"faenet", "depfaenet"}: + self.mlp_skip_co_ads = Linear( + kwargs["num_interactions"] + 1, + 1 + ) + self.mlp_skip_co_cat = Linear( + kwargs["num_interactions"] + 1, + 1 + ) + elif kwargs["model_name"] == "indfaenet": + # self.mlp_skip_co_ads = Linear( + (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, + kwargs["hidden_channels"] // 2 + ) + self.mlp_skip_co_cat = Linear( + (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, + kwargs["hidden_channels"] // 2 + ) + + elif self.skip_co == "concat_atom": + self.mlp_skip_co = Linear( + ((kwargs["num_interactions"] + 1) * kwargs["hidden_channels"]), + kwargs["hidden_channels"], + ) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + + + @conditional_grad(torch.enable_grad()) + def forces_forward(self, preds): + pass From 5aead8a28bc0a65e2c56c5c6650b43a3c9956370 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 28 Jul 2023 12:24:18 -0400 Subject: [PATCH 044/131] Did good progress on the implementation of energy forward, modified base trainer to make it easier to implement models like tifaenet/indfaenet. Now you just have to add it to a list in the initialization method. 
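
Note on the registration mechanism mentioned above: as the base_trainer diff below shows, the models whose adsorbate/catalyst graphs are split inside the dataset are listed in self.dataset_models, and both __init__ and load_datasets check that list. A minimal sketch of what registering another such model would take ("mynewfaenet" is a made-up name, not a model in this repo):

    # ocpmodels/trainers/base_trainer.py -- sketch only, hypothetical model name
    self.dataset_models = ["indfaenet", "tifaenet", "mynewfaenet"]
    # any model in this list gets the "separate" dataset class and
    # self.separate_dataset = True, so ads/cats are split before transforms
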
--- ocpmodels/models/tifaenet.py | 84 ++++++++++++++++++++++++++++-- ocpmodels/trainers/base_trainer.py | 6 +-- 2 files changed, 84 insertions(+), 6 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index a3e00bcba5..833f6d48c7 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -42,7 +42,7 @@ def __init__(self, **kwargs): } # Gaussian Basis - self.distance_expansion = GaussianSmearing( + self.distance_expansion = GaussianSmearing( 0.0, self.cutoff, kwargs["num_gaussians"] ) @@ -125,7 +125,7 @@ def __init__(self, **kwargs): 1 ) elif kwargs["model_name"] == "indfaenet": - # self.mlp_skip_co_ads = Linear( + self.mlp_skip_co_ads = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 ) @@ -142,7 +142,85 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - + batch_size = len(data) // 2 + + adsorbates = Batch.from_data_list(data[:batch_size]) + catalysts = Batch.from_data_list(data[batch_size:]) + + # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. + adsorbates = self.neighbor_fixer(adsorbates) + catalysts = self.neighbor_fixer(catalysts) + + # Graph rewiring + ads_rewiring = graph_rewiring(adsorbates) + edge_index_ads, edge_weight_ads, rel_pos_ads, edge_attr_ads = ads_rewiring + + cat_rewiring = graph_rewiring(catalysts) + edge_index_cat, edge_weight_cat, rel_pos_cat, edge_attr_cat = cat_rewiring + + @conditional_grad(torch.enable_grad()) + def graph_rewiring(self, data) + z = data.atomic_numbers.long() + pos = data.pos + batch = data.batch + + # Use periodic boundary conditions + if self.use_pbc: + assert z.dim() == 1 and z.dtype == torch.long + + out = get_pbc_distances( + pos, + data.edge_index, + data.cell, + data.cell_offsets, + data.neighbors, + return_distance_vec=True, + ) + + edge_index = out["edge_index"] + edge_weight = out["distances"] + rel_pos = out["distance_vec"] + edge_attr = self.distance_expansion(edge_weight) + else: + edge_index = radius_graph( + pos, + r=self.cutoff, + batch=batch, + max_num_neighbors=self.max_num_neighbors, + ) + # edge_index = data.edge_index + row, col = edge_index + rel_pos = pos[row] - pos[col] + edge_weight = rel_pos.norm(dim=-1) + edge_attr = self.distance_expansion(edge_weight) + + return (edge_index, edge_weight, rel_pos, edge_attr) + + def neighbor_fixer(self, data): + num_graphs = len(data) + # Find indices of adsorbates without edges: + edgeless = [ + i for i + in range(num_graphs) + if data[i].neighbors.shape[0] == 0 + ] + if len(edgeless) > 0: + # Since most adsorbates have an edge, + # we pop those values specifically from range(num_adsorbates) + mask = list(range(num_graphs)) + num_popped = 0 # We can do this since edgeless is already sorted + for unwanted in edgeless: + mask.pop(unwanted-num_popped) + num_popped += 1 + new_nbrs = torch.zeros( + num_graphs, + dtype = torch.int64, + device = data.neighbors.device, + ) + new_nbrs[mask] = data.neighbors + data.neighbors = new_nbrs + + return data @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 520ccbc3b5..4d01149c20 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -163,9 +163,9 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - dataset_models = 
["indfaenet"] + self.dataset_models = ["indfaenet", "tifaenet"] self.separate_dataset = False - if self.config["model_name"] in dataset_models: + if self.config["model_name"] in self.dataset_models: self.separate_dataset = True print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") @@ -259,7 +259,7 @@ def load_datasets(self): if split == "default_val": continue - if self.config["model_name"] in ["indfaenet"]: + if self.config["model_name"] in self.dataset_models: self.datasets[split] = registry.get_dataset_class( "separate" )(ds_conf, transform=transform) From 189f7ce7253df2599f891e37d7a21af03a3b0684 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 28 Jul 2023 13:10:58 -0400 Subject: [PATCH 045/131] Implemented everything up to the actual transformer block. --- ocpmodels/models/tifaenet.py | 167 ++++++++++++++++++++++++++++++++++- 1 file changed, 164 insertions(+), 3 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 833f6d48c7..c084d60d00 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -42,12 +42,28 @@ def __init__(self, **kwargs): } # Gaussian Basis - self.distance_expansion = GaussianSmearing( + self.distance_expansion_ads = GaussianSmearing( + 0.0, self.cutoff, kwargs["num_gaussians"] + ) + self.distance_expansion_cat = GaussianSmearing( 0.0, self.cutoff, kwargs["num_gaussians"] ) # Embedding block - self.embed_block = EmbeddingBlock( + self.embed_block_ads = EmbeddingBlock( + kwargs["num_gaussians"], + kwargs["num_filters"], + kwargs["hidden_channels"], + kwargs["tag_hidden_channels"], + kwargs["pg_hidden_channels"], + kwargs["phys_hidden_channels"], + kwargs["phys_embeds"], + kwargs["graph_rewiring"], + self.act, + kwargs["second_layer_MLP"], + kwargs["edge_embed_type"], + ) + self.embed_block_cat = EmbeddingBlock( kwargs["num_gaussians"], kwargs["num_filters"], kwargs["hidden_channels"], @@ -92,7 +108,7 @@ def __init__(self, **kwargs): ) # Transformer Interaction - self.transformer_blocks_ads = nn.ModuleList( + self.transformer_interactions = nn.ModuleList( [ TransformerInteraction( placeholder = 3.14159265 @@ -140,6 +156,30 @@ def __init__(self, **kwargs): kwargs["hidden_channels"], ) + self.transformer_out = kwargs.get("transformer_out", False) + if self.transformer_out: + self.combination = Transformer( + d_model = kwargs["hidden_channels"] // 2, + nhead = 2, + num_encoder_layers = 2, + num_decoder_layers = 2, + dim_feedforward = kwargs["hidden_channels"], + batch_first = True + ) + self.positional_encoding = PositionalEncoding( + kwargs["hidden_channels"] // 2, + dropout = 0.1, + max_len = 5, + ) + self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) + self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) + else: + self.combination = nn.Sequential( + Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + self.act, + Linear(kwargs["hidden_channels"] // 2, 1) + ) + @conditional_grad(torch.enable_grad()) def energy_forward(self, data): batch_size = len(data) // 2 @@ -147,6 +187,9 @@ def energy_forward(self, data): adsorbates = Batch.from_data_list(data[:batch_size]) catalysts = Batch.from_data_list(data[batch_size:]) + batch_ads = adsorbates.batch + batch_cat = catalysts.batch + # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. 
adsorbates = self.neighbor_fixer(adsorbates) catalysts = self.neighbor_fixer(catalysts) @@ -158,6 +201,124 @@ def energy_forward(self, data): cat_rewiring = graph_rewiring(catalysts) edge_index_cat, edge_weight_cat, rel_pos_cat, edge_attr_cat = cat_rewiring + # Embedding + h_ads, e_ads = embedding( + edge_weight_ads, rel_pos_ads, edge_attr_ads, adsorbates.tags + ) + h_cat, e_cat = embedding( + edge_weight_cat, rel_pos_cat, edge_attr_cat, catalysts.tags + ) + + # Compute atom weights for late energy head + if self.energy_head == "weighted-av-initial-embeds": + alpha_ads = self.w_lin_ads(h_ads) + alpha_cat = self.w_lin_cat(h_cat) + else: + alpha_ads = None + alpha_cat = None + + # Interaction and transformer blocks + energy_skip_co_ads = [] + energy_skip_co_cat = [] + for ( + interaction_ads, + interaction_cat, + transformer_interaction + ) in zip( + self.interaction_blocks_ads, + self.interaction_blocks_cat, + self.transformer_blocks + ): + if self.skip_co == "concat_atom": + energy_skip_co_ads.append(h_ads) + energy_skip_co_cat.append(h_cat) + elif self.skip_co: + energy_skip_co_ads.append( + self.ouput_block_ads( + h_ads, edge_index_ads, edge_weight_ads, batch_ads, alpha_ads + ) + ) + energy_skip_co_cat.append( + self.output_block_cat( + h_cat, edge_index_cat, edge_weight_cat, batch_ads, alpha_cat + ) + ) + intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads) + intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) + + inter_ads, inter_cat = transformer_interaction(intra_ads, intra_cat) + h_ads = h_ads + inter_ads + h_cat = h_cat + inter_cat + + # Atom skip-co + if self.skip_co == "concat_atom": + energy_skip_co_ads.append(h_ads) + energy_skip_co_cat.append(h_cat) + + h_ads = self.act(self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim = 1))) + h_cat = self.act(self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim = 1))) + + energy_ads = self.output_block_ads( + h_ads, edge_index_ads, edge_weight_ads, batch_ads, alpha_ads + ) + energy_cat = self.output_block_cat( + h_cat, edge_index_cat, edge_weight_cat, batch_cat, alpha_cat + ) + + # Skip-connection + energy_skip_co_ads.append(energy_ads) + energy_skip_co_cat.append(energy_cat) + if self.skip_co == "concat" + energy_ads = self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim = 1)) + energy_cat = self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim = 1)) + elif self.skip_co == "add": + energy_ads = sum(energy_skip_co_ads) + energy_cat = sum(energy_skip_co_cat) + + # Combining hidden representations + if self.transformer_out: + batch_size = energy_ads.shape[0] + + fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + system_energy = torch.cat( + [ + energy_ads.unsqueeze(1), + energy_cat.unsqueeze(1) + ], + dim = 1 + ) + + system_energy = self.positional_encoding(system_energy) + + system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + system_energy = self.transformer_lin(system_energy) + else: + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) + + # We combine predictions and return them + pred_system = { + "energy" : system_energy, + "pooling_loss" : None, # This might break something. 
+ "hidden_state" : torch.cat(energy_ads, energy_cat, dim = 0) + } + + return pred_system + + @conditional_grad(torch.enable_grad()) + def embedding(self, edge_weight, rel_pos, edge_attr, tags): + # Normalize and squash to [0,1] for gaussian basis + rel_pos_normalized = None + if self.edge_embed_type in {"sh", "all_rij", "all"}: + rel_pos_normalized = (rel_pos / edge_weight.view(-1, 1) + 1) / 2.0 + + pooling_loss = None # deal with pooling loss + + # Embedding block + h, e = self.embed_block(z, rel_pos, edge_attr, tags, rel_pos_normalized) + + return h, e + @conditional_grad(torch.enable_grad()) def graph_rewiring(self, data) z = data.atomic_numbers.long() From e8af33f00074523fd907349fd75d3ae1d2736c5e Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 30 Jul 2023 00:34:37 -0400 Subject: [PATCH 046/131] Finished implementation of transformer interactions, and will redo training on transformer combination to see if I can reduce the number of parameters --- configs/exps/alvaro/10k-training.yaml | 6 + configs/exps/alvaro/all-training.yaml | 6 + configs/models/tifaenet.yaml | 271 ++++++++++++++++++++++++++ debug.py | 2 +- ocpmodels/models/faenet.py | 2 +- ocpmodels/models/indfaenet.py | 6 +- ocpmodels/models/tifaenet.py | 109 ++++++++--- 7 files changed, 370 insertions(+), 32 deletions(-) create mode 100644 configs/models/tifaenet.yaml diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 3cc1ca55b3..f09fc4ddc0 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -42,6 +42,12 @@ default: eval_batch_size: 256 runs: + - config: tifaenet-is2re-10k + + - config: tifaenet-is2re-10k + model: + transformer_out: True + - config: indfaenet-is2re-10k model: transformer_out: True diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 88446f3552..22f992df72 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,6 +42,12 @@ default: eval_batch_size: 256 runs: + - config: tifaenet-is2re-all + + - config: tifaenet-is2re-all + model: + transformer_out: True + - config: indfaenet-is2re-all model: transformer_out: True diff --git a/configs/models/tifaenet.yaml b/configs/models/tifaenet.yaml new file mode 100644 index 0000000000..d4125a0568 --- /dev/null +++ b/configs/models/tifaenet.yaml @@ -0,0 +1,271 @@ +default: + model: + name: tifaenet + act: swish + hidden_channels: 128 + num_filters: 100 + num_interactions: 3 + num_gaussians: 100 + cutoff: 6.0 + use_pbc: True + regress_forces: False + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + # faenet new features + skip_co: False # output skip connections {False, "add", "concat"} + second_layer_MLP: False # in EmbeddingBlock + complex_mp: False + edge_embed_type: rij # {'rij','all_rij','sh', 'all'}) + mp_type: base # {'base', 'simple', 'updownscale', 'att', 'base_with_att', 'local_env'} + graph_norm: False # bool + att_heads: 1 # int + force_decoder_type: "mlp" # can be {"" or "simple"} | only used if regress_forces is True + force_decoder_model_config: + simple: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + mlp: + hidden_channels: 256 + norm: batch1d # batch1d, layer or null + res: + hidden_channels: 128 + norm: batch1d # 
batch1d, layer or null + res_updown: + hidden_channels: 128 + norm: batch1d # batch1d, layer or null + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + lr_initial: 0.001 + warmup_factor: 0.2 + max_epochs: 20 + energy_grad_coefficient: 10 + force_coefficient: 30 + energy_coefficient: 1 + + frame_averaging: False # 2D, 3D, da, False + fa_frames: False # can be {None, full, random, det, e3, e3-random, e3-det} + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + 100k: + model: + hidden_channels: 256 + optim: + lr_initial: 0.005 + lr_milestones: # epochs at which lr_initial <- lr_initial * lr_gamma + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + max_epochs: 20 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 18000 + - 27000 + - 37000 + warmup_steps: 6000 + max_epochs: 20 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +# For 2 GPUs + +s2ef: + default: + model: + num_interactions: 4 + hidden_channels: 750 + num_gaussians: 200 + num_filters: 256 + regress_forces: "direct" + force_coefficient: 30 + energy_grad_coefficient: 10 + optim: + batch_size: 96 + eval_batch_size: 96 + warmup_factor: 0.2 + lr_gamma: 0.1 + lr_initial: 0.0001 + max_epochs: 15 + warmup_steps: 30000 + lr_milestones: + - 55000 + - 75000 + - 10000 + + 200k: {} + + # 1 gpus + 2M: + model: + num_interactions: 5 + hidden_channels: 1024 + num_gaussians: 200 + num_filters: 256 + optim: + batch_size: 192 + eval_batch_size: 192 + + 20M: {} + + all: {} + +qm9: + default: + model: + act: swish + att_heads: 1 + complex_mp: true + cutoff: 6.0 + edge_embed_type: all_rij + energy_head: '' + graph_norm: true + graph_rewiring: null + hidden_channels: 400 + max_num_neighbors: 30 + mp_type: updownscale_base + num_filters: 480 + num_gaussians: 100 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: false + phys_hidden_channels: 0 + regress_forces: '' + second_layer_MLP: true + skip_co: true + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 64 + es_min_abs_change: 1.0e-06 + es_patience: 20 + es_warmup_epochs: 600 + eval_batch_size: 64 + factor: 0.9 + lr_initial: 0.0003 + loss_energy: mse + lr_gamma: 0.1 + lr_initial: 0.001 + max_epochs: 1500 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + patience: 15 + scheduler: ReduceLROnPlateau + threshold: 0.0001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + 10k: {} + all: {} + +qm7x: + default: + model: # SOTA settings + act: swish + att_heads: 1 + complex_mp: true + cutoff: 5.0 + edge_embed_type: all_rij + energy_head: false + force_decoder_model_config: + mlp: + hidden_channels: 256 + norm: batch1d + res: + hidden_channels: 128 + norm: batch1d + res_updown: + hidden_channels: 128 + norm: layer + simple: + hidden_channels: 128 + norm: batch1d + force_decoder_type: res_updown + graph_norm: false + hidden_channels: 500 + max_num_neighbors: 40 + mp_type: updownscale_base + num_filters: 400 + 
num_gaussians: 50 + num_interactions: 5 + otf_graph: false + pg_hidden_channels: 32 + phys_embeds: true + phys_hidden_channels: 0 + regress_forces: direct_with_gradient_target + second_layer_MLP: true + skip_co: false + tag_hidden_channels: 0 + use_pbc: false + + optim: + batch_size: 100 + energy_grad_coefficient: 5 + eval_batch_size: 100 + eval_every: 0.34 + factor: 0.75 + force_coefficient: 75 + loss_energy: mae + loss_force: mse + lr_gamma: 0.1 + lr_initial: 0.000193 + max_steps: 4000000 + min_lr: 1.0e-06 + mode: min + optimizer: AdamW + scheduler: ReduceLROnPlateau + threshold: 0.001 + threshold_mode: abs + verbose: true + warmup_factor: 0.2 + warmup_steps: 3000 + + all: {} + 1k: {} + +qm9: + default: + model: + use_pbc: False + all: {} + 10k: {} diff --git a/debug.py b/debug.py index 96925bcdac..0253b39084 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "indfaenet-is2re-10k" + args.config = "tifaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index afdd9e158a..8cfadb6db4 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -407,7 +407,7 @@ def __init__( self.lin1 = Linear(hidden_channels, hidden_channels // 2) if model_name == "faenet": self.lin2 = Linear(hidden_channels // 2, 1) - elif model_name == "indfaenet": + elif model_name in {"indfaenet", "tifaenet"}: self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) # weighted average & pooling diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index c46769c12b..5325841575 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -54,9 +54,9 @@ def __init__(self, **kwargs): if self.transformer_out: self.combination = Transformer( d_model = kwargs["hidden_channels"] // 2, - nhead = 2, - num_encoder_layers = 2, - num_decoder_layers = 2, + nhead = 1, + num_encoder_layers = 1, + num_decoder_layers = 1, dim_feedforward = kwargs["hidden_channels"], batch_first = True ) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index c084d60d00..48db284776 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -1,5 +1,10 @@ import torch +import math from torch import nn +from torch.nn import Linear, Transformer, Softmax + +from torch_geometric.data import Batch +from torch_geometric.nn import radius_graph from ocpmodels.models.faenet import ( GaussianSmearing, @@ -7,18 +12,52 @@ InteractionBlock, OutputBlock ) +from ocpmodels.models.indfaenet import PositionalEncoding from ocpmodels.common.registry import registry from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.utils import conditional_grad, get_pbc_distances +from ocpmodels.models.utils.activations import swish class TransformerInteraction(nn.Module): - def __init__(self, placeholder): - pass + def __init__(self, d_model): + super(TransformerInteraction, self).__init__() - def forward(self, inputs): - pass + self.queries_ads = Linear(d_model, d_model) + self.keys_ads = Linear(d_model, d_model) + self.values_ads = Linear(d_model, d_model) + + self.queries_cat = Linear(d_model, d_model) + self.keys_cat = Linear(d_model, d_model) + self.values_cat = Linear(d_model, d_model) + + self.softmax = Softmax(dim = 1) + + def forward(self, h_ads, h_cat): + queries_ads = self.queries_ads(h_ads) 
+ keys_ads = self.keys_ads(h_ads) + values_ads = self.values_ads(h_ads) + + queries_cat = self.queries_cat(h_cat) + keys_cat = self.keys_cat(h_cat) + values_cat = self.values_cat(h_cat) + + d_model = queries_ads.shape[1] + + scalars_ads = self.softmax( + torch.matmul(queries_ads, torch.transpose(keys_cat, 0, 1)) / math.sqrt(d_model) + ) + scalars_cat = self.softmax( + torch.matmul(queries_cat, torch.transpose(keys_ads, 0, 1)) / math.sqrt(d_model) + ) + + h_ads = h_ads + torch.matmul(scalars_ads, values_cat) + h_cat = h_cat + torch.matmul(scalars_cat, values_ads) + + return h_ads, h_cat + @registry.register_model("tifaenet") -class TIFaenet(BaseModel) +class TIFaenet(BaseModel): def __init__(self, **kwargs): super(TIFaenet, self).__init__() @@ -111,8 +150,9 @@ def __init__(self, **kwargs): self.transformer_interactions = nn.ModuleList( [ TransformerInteraction( - placeholder = 3.14159265 + d_model = kwargs["hidden_channels"], ) + for _ in range(kwargs["num_interactions"]) ] ) @@ -140,7 +180,7 @@ def __init__(self, **kwargs): kwargs["num_interactions"] + 1, 1 ) - elif kwargs["model_name"] == "indfaenet": + elif kwargs["model_name"] in {"indfaenet", "tifaenet"}: self.mlp_skip_co_ads = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 @@ -160,9 +200,9 @@ def __init__(self, **kwargs): if self.transformer_out: self.combination = Transformer( d_model = kwargs["hidden_channels"] // 2, - nhead = 2, - num_encoder_layers = 2, - num_decoder_layers = 2, + nhead = 1, + num_encoder_layers = 1, + num_decoder_layers = 1, dim_feedforward = kwargs["hidden_channels"], batch_first = True ) @@ -195,18 +235,28 @@ def energy_forward(self, data): catalysts = self.neighbor_fixer(catalysts) # Graph rewiring - ads_rewiring = graph_rewiring(adsorbates) + ads_rewiring = self.graph_rewiring(adsorbates) edge_index_ads, edge_weight_ads, rel_pos_ads, edge_attr_ads = ads_rewiring - cat_rewiring = graph_rewiring(catalysts) + cat_rewiring = self.graph_rewiring(catalysts) edge_index_cat, edge_weight_cat, rel_pos_cat, edge_attr_cat = cat_rewiring # Embedding - h_ads, e_ads = embedding( - edge_weight_ads, rel_pos_ads, edge_attr_ads, adsorbates.tags + h_ads, e_ads = self.embedding( + adsorbates.atomic_numbers.long(), + edge_weight_ads, + rel_pos_ads, + edge_attr_ads, + adsorbates.tags, + self.embed_block_ads ) - h_cat, e_cat = embedding( - edge_weight_cat, rel_pos_cat, edge_attr_cat, catalysts.tags + h_cat, e_cat = self.embedding( + catalysts.atomic_numbers.long(), + edge_weight_cat, + rel_pos_cat, + edge_attr_cat, + catalysts.tags, + self.embed_block_cat ) # Compute atom weights for late energy head @@ -227,20 +277,20 @@ def energy_forward(self, data): ) in zip( self.interaction_blocks_ads, self.interaction_blocks_cat, - self.transformer_blocks + self.transformer_interactions, ): if self.skip_co == "concat_atom": energy_skip_co_ads.append(h_ads) energy_skip_co_cat.append(h_cat) elif self.skip_co: energy_skip_co_ads.append( - self.ouput_block_ads( + self.output_block_ads( h_ads, edge_index_ads, edge_weight_ads, batch_ads, alpha_ads ) ) energy_skip_co_cat.append( self.output_block_cat( - h_cat, edge_index_cat, edge_weight_cat, batch_ads, alpha_cat + h_cat, edge_index_cat, edge_weight_cat, batch_cat, alpha_cat ) ) intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads) @@ -268,7 +318,7 @@ def energy_forward(self, data): # Skip-connection energy_skip_co_ads.append(energy_ads) energy_skip_co_cat.append(energy_cat) - if self.skip_co == "concat" + if self.skip_co == "concat": 
energy_ads = self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim = 1)) energy_cat = self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim = 1)) elif self.skip_co == "add": @@ -293,20 +343,20 @@ def energy_forward(self, data): system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) system_energy = self.transformer_lin(system_energy) else: - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([energy_ads, energy_cat], dim = 1) system_energy = self.combination(system_energy) # We combine predictions and return them pred_system = { "energy" : system_energy, "pooling_loss" : None, # This might break something. - "hidden_state" : torch.cat(energy_ads, energy_cat, dim = 0) + "hidden_state" : torch.cat([energy_ads, energy_cat], dim = 1) } return pred_system @conditional_grad(torch.enable_grad()) - def embedding(self, edge_weight, rel_pos, edge_attr, tags): + def embedding(self, z, edge_weight, rel_pos, edge_attr, tags, embed_func): # Normalize and squash to [0,1] for gaussian basis rel_pos_normalized = None if self.edge_embed_type in {"sh", "all_rij", "all"}: @@ -315,16 +365,21 @@ def embedding(self, edge_weight, rel_pos, edge_attr, tags): pooling_loss = None # deal with pooling loss # Embedding block - h, e = self.embed_block(z, rel_pos, edge_attr, tags, rel_pos_normalized) + h, e = embed_func(z, rel_pos, edge_attr, tags, rel_pos_normalized) return h, e @conditional_grad(torch.enable_grad()) - def graph_rewiring(self, data) + def graph_rewiring(self, data): z = data.atomic_numbers.long() pos = data.pos batch = data.batch + mode = data.mode[0] + if mode == "adsorbate": + distance_expansion = self.distance_expansion_ads + else: + distance_expansion = self.distance_expansion_cat # Use periodic boundary conditions if self.use_pbc: assert z.dim() == 1 and z.dtype == torch.long @@ -341,7 +396,7 @@ def graph_rewiring(self, data) edge_index = out["edge_index"] edge_weight = out["distances"] rel_pos = out["distance_vec"] - edge_attr = self.distance_expansion(edge_weight) + edge_attr = distance_expansion(edge_weight) else: edge_index = radius_graph( pos, @@ -353,7 +408,7 @@ def graph_rewiring(self, data) row, col = edge_index rel_pos = pos[row] - pos[col] edge_weight = rel_pos.norm(dim=-1) - edge_attr = self.distance_expansion(edge_weight) + edge_attr = distance_expansion(edge_weight) return (edge_index, edge_weight, rel_pos, edge_attr) From dc7d717e20a6974d3ad5176f5ac640aa53687cc6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 30 Jul 2023 09:45:04 -0400 Subject: [PATCH 047/131] Changed initial learning rate for all dataset training --- configs/exps/alvaro/all-training.yaml | 6 +----- ocpmodels/models/tifaenet.py | 2 ++ 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 22f992df72..36dcffcfc0 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.001 + lr_initial: 0.00075 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -47,7 +47,3 @@ runs: - config: tifaenet-is2re-all model: transformer_out: True - - - config: indfaenet-is2re-all - model: - transformer_out: True diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 48db284776..f6be9db0ba 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -53,6 +53,8 @@ def forward(self, h_ads, h_cat): 
h_ads = h_ads + torch.matmul(scalars_ads, values_cat) h_cat = h_cat + torch.matmul(scalars_cat, values_ads) + # WHAT ABOUT NORMALIZING!! + return h_ads, h_cat From 2d45a3821b762a25d8cc2b8da61889100a2d2c65 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 30 Jul 2023 10:10:18 -0400 Subject: [PATCH 048/131] Made transformer larger --- ocpmodels/models/indfaenet.py | 6 +++--- ocpmodels/models/tifaenet.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 5325841575..c46769c12b 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -54,9 +54,9 @@ def __init__(self, **kwargs): if self.transformer_out: self.combination = Transformer( d_model = kwargs["hidden_channels"] // 2, - nhead = 1, - num_encoder_layers = 1, - num_decoder_layers = 1, + nhead = 2, + num_encoder_layers = 2, + num_decoder_layers = 2, dim_feedforward = kwargs["hidden_channels"], batch_first = True ) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index f6be9db0ba..be29b4b5a4 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -202,9 +202,9 @@ def __init__(self, **kwargs): if self.transformer_out: self.combination = Transformer( d_model = kwargs["hidden_channels"] // 2, - nhead = 1, - num_encoder_layers = 1, - num_decoder_layers = 1, + nhead = 2, + num_encoder_layers = 2, + num_decoder_layers = 2, dim_feedforward = kwargs["hidden_channels"], batch_first = True ) From f5883a71ec90ee8de613b2b30a388bd10bc07a19 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 30 Jul 2023 10:12:38 -0400 Subject: [PATCH 049/131] Readied an experiment --- configs/exps/alvaro/10k-training.yaml | 6 ------ configs/exps/alvaro/all-training.yaml | 2 -- 2 files changed, 8 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index f09fc4ddc0..000fdb82de 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -43,11 +43,5 @@ default: runs: - config: tifaenet-is2re-10k - - - config: tifaenet-is2re-10k - model: - transformer_out: True - - - config: indfaenet-is2re-10k model: transformer_out: True diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 36dcffcfc0..1bdb08113b 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,8 +42,6 @@ default: eval_batch_size: 256 runs: - - config: tifaenet-is2re-all - - config: tifaenet-is2re-all model: transformer_out: True From 7a223adeff3dcc112265b1cf6f18fc2962984ca7 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 31 Jul 2023 11:53:48 -0400 Subject: [PATCH 050/131] Started implementing a second type of transformer-like block. 
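
The rough idea for this second block, as a sketch only (none of this is the code
committed here; the class name and the use of torch.nn.MultiheadAttention are
illustrative): treat the adsorbate and catalyst node embeddings of one system as two
sequences and let each attend to the other, with a residual update.

    import torch
    import torch.nn as nn

    class CrossGraphAttention(nn.Module):
        # One attention module per direction: ads <- cat and cat <- ads.
        # d_model must be divisible by nhead.
        def __init__(self, d_model, nhead=2):
            super().__init__()
            self.ads_from_cat = nn.MultiheadAttention(d_model, nhead, batch_first=True)
            self.cat_from_ads = nn.MultiheadAttention(d_model, nhead, batch_first=True)

        def forward(self, h_ads, h_cat):
            # h_ads: (n_ads, d), h_cat: (n_cat, d), a single system (no batching yet).
            a, c = h_ads.unsqueeze(0), h_cat.unsqueeze(0)
            upd_ads, _ = self.ads_from_cat(a, c, c)  # ads queries, cat keys/values
            upd_cat, _ = self.cat_from_ads(c, a, a)  # cat queries, ads keys/values
            return h_ads + upd_ads.squeeze(0), h_cat + upd_cat.squeeze(0)

How to run this over a whole batch without a Python loop is the open question; the
following patches deal with exactly that.
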
--- ocpmodels/models/tifaenet.py | 50 ++++++++++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 5 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index be29b4b5a4..bfdcb29730 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -19,9 +19,39 @@ from ocpmodels.models.utils.activations import swish class TransformerInteraction(nn.Module): - def __init__(self, d_model): + def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2): super(TransformerInteraction, self).__init__() + self.transformer_ads = Transformer( + d_model = d_model, + nhead = nhead, + num_encoder_layers = num_encoder_layers, + num_decoder_layers = num_decoder_layers, + dim_feedforward = d_model, + batch_first = True + ) + + self.transformer_ads = Transformer( + d_model = d_model, + nhead = nhead, + num_encoder_layers = num_encoder_layers, + num_decoder_layers = num_decoder_layers, + dim_feedforward = d_model, + batch_first = True + ) + + def forward(self, h_ads, h_cat): + import ipdb + ipdb.set_trace() + + + return h_ads, h_cat + + +class AttentionInteraction(nn.Module): + def __init__(self, d_model): + super(AttentionInteraction, self).__init__() + self.queries_ads = Linear(d_model, d_model) self.keys_ads = Linear(d_model, d_model) self.values_ads = Linear(d_model, d_model) @@ -53,7 +83,8 @@ def forward(self, h_ads, h_cat): h_ads = h_ads + torch.matmul(scalars_ads, values_cat) h_cat = h_cat + torch.matmul(scalars_cat, values_ads) - # WHAT ABOUT NORMALIZING!! + h_ads = nn.functional.normalize(h_ads) + h_cat = nn.functional.normalize(h_cat) return h_ads, h_cat @@ -149,9 +180,18 @@ def __init__(self, **kwargs): ) # Transformer Interaction - self.transformer_interactions = nn.ModuleList( + inter_interaction_type = kwargs.get("tifaenet_mode", None) + assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer" + assert inter_interaction_type in {"attention", "transformer"}, "Using an invalid tifaenet_mode. Options: attention, transformer" + if inter_interaction_type == "transformer": + inter_interaction_type = TransformerInteraction + + elif: inter_interaction_type == "attention": + inter_interaction_type = AttentionInteraction + + self.inter_interactions = nn.ModuleList( [ - TransformerInteraction( + inter_interaction_type( d_model = kwargs["hidden_channels"], ) for _ in range(kwargs["num_interactions"]) @@ -275,7 +315,7 @@ def energy_forward(self, data): for ( interaction_ads, interaction_cat, - transformer_interaction + inter_interaction ) in zip( self.interaction_blocks_ads, self.interaction_blocks_cat, From e7cee7a7a2c86abb6c76ff22b22ec1db244572a3 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 1 Aug 2023 17:15:56 -0400 Subject: [PATCH 051/131] Finished correcting a HUGE bug on attention based models. 
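
For the record, the bug: attention was computed over the whole batch at once, so the
atoms of one system attended to the atoms of every other system in the batch. The fix
below restricts attention to pairs within the same system by looping over graphs. A
loop-free alternative is to mask the score matrix by batch index, roughly like this
(a sketch, not the committed code; it assumes every adsorbate atom has at least one
catalyst atom in its system):

    import math
    import torch

    def masked_cross_attention(q_ads, k_cat, v_cat, batch_ads, batch_cat):
        # q_ads: (N_ads, d); k_cat, v_cat: (N_cat, d)
        # batch_ads / batch_cat map each atom to the index of its system.
        d = q_ads.shape[1]
        scores = q_ads @ k_cat.T / math.sqrt(d)                    # (N_ads, N_cat)
        same_system = batch_ads.unsqueeze(1) == batch_cat.unsqueeze(0)
        scores = scores.masked_fill(~same_system, float("-inf"))   # block-diagonal mask
        return torch.softmax(scores, dim=1) @ v_cat                # (N_ads, d)

This avoids the loop at the price of a dense N_ads x N_cat score matrix, which is what
the later sparse-matrix experiments try to get around.
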
--- configs/exps/alvaro/10k-training.yaml | 7 ++- configs/exps/alvaro/all-training.yaml | 3 +- debug.py | 1 + ocpmodels/datasets/separate_dataset.py | 20 ++++++- ocpmodels/models/tifaenet.py | 64 ++++++++++++++--------- ocpmodels/preprocessing/graph_rewiring.py | 5 ++ 6 files changed, 72 insertions(+), 28 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 000fdb82de..0952e396d0 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.0019 + lr_initial: 0.0005 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -44,4 +44,9 @@ default: runs: - config: tifaenet-is2re-10k model: + tifaenet_mode: "attention" + + - config: tifaenet-is2re-10k + model: + tifaenet_mode: "attention" transformer_out: True diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 1bdb08113b..f4531ccd6b 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.00075 + lr_initial: 0.0005 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -44,4 +44,5 @@ default: runs: - config: tifaenet-is2re-all model: + tifaenet_mode: "attention" transformer_out: True diff --git a/debug.py b/debug.py index 0253b39084..773c77ac00 100644 --- a/debug.py +++ b/debug.py @@ -125,6 +125,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" trainer_config["model"]["transformer_out"] = True + trainer_config["model"]["tifaenet_mode"] = "attention" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py index 35564630a1..097031628c 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/separate_dataset.py @@ -42,11 +42,17 @@ def graph_splitter(graph): ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) - ads_assoc[adsorbate_v_mask] = torch.arange(adsorbate_v_mask.sum(), device = device) - cat_assoc[catalyst_v_mask] = torch.arange(catalyst_v_mask.sum(), device = device) + ads_natoms = adsorbate_v_mask.sum() + cat_natoms = catalyst_v_mask.sum() + + ads_assoc[adsorbate_v_mask] = torch.arange(ads_natoms, device = device) + cat_assoc[catalyst_v_mask] = torch.arange(cat_natoms, device = device) ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] + + # This is for attention related stuff. 
+ dummy = torch.zeros(ads_natoms, 1) # Create the graphs adsorbate = Data( @@ -62,8 +68,14 @@ def graph_splitter(graph): y_relaxed = y_relaxed, pos_relaxed = pos_relaxed[adsorbate_v_mask, :], id = id, + h = dummy, + query = dummy, + key = dummy, + value = dummy, mode="adsorbate" ) + + dummy = torch.zeros(cat_natoms, 1) catalyst = Data( edge_index = cat_edge_index, pos = pos[catalyst_v_mask, :], @@ -77,6 +89,10 @@ def graph_splitter(graph): y_relaxed = y_relaxed, pos_relaxed = pos_relaxed[catalyst_v_mask, :], id = id, + h = dummy, + query = dummy, + key = dummy, + value = dummy, mode="catalyst" ) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index bfdcb29730..8cf44a0807 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -62,31 +62,46 @@ def __init__(self, d_model): self.softmax = Softmax(dim = 1) - def forward(self, h_ads, h_cat): - queries_ads = self.queries_ads(h_ads) - keys_ads = self.keys_ads(h_ads) - values_ads = self.values_ads(h_ads) + def forward(self, adsorbates, catalysts): + d_model = adsorbates.h.shape[1] + batch_size = max(adsorbates.batch).item() + 1 + + h_ads = adsorbates.h + adsorbates.query = self.queries_ads(h_ads) + adsorbates.key = self.keys_ads(h_ads) + adsorbates.value = self.values_ads(h_ads) + + h_cat = catalysts.h + catalysts.query = self.queries_cat(h_cat) + catalysts.key = self.keys_cat(h_cat) + catalysts.value = self.values_cat(h_cat) + + new_h_ads = [] + new_h_cat = [] + for i in range(batch_size): # How can I avoid a for loop? + scalars_ads = self.softmax( + torch.matmul(adsorbates[i].query, catalysts[i].key.T) / math.sqrt(d_model) + ) + scalars_cat = self.softmax( + torch.matmul(catalysts[i].query, adsorbates[i].key.T) / math.sqrt(d_model) + ) - queries_cat = self.queries_cat(h_cat) - keys_cat = self.keys_cat(h_cat) - values_cat = self.values_cat(h_cat) + new_h_ads.append(torch.matmul(scalars_ads, catalysts[i].value)) + new_h_cat.append(torch.matmul(scalars_cat, adsorbates[i].value)) - d_model = queries_ads.shape[1] + _, idx = adsorbates.batch.sort(stable=True) + new_h_ads = torch.concat(new_h_ads, dim = 0)[torch.argsort(idx)] # Inverse of permutation - scalars_ads = self.softmax( - torch.matmul(queries_ads, torch.transpose(keys_cat, 0, 1)) / math.sqrt(d_model) - ) - scalars_cat = self.softmax( - torch.matmul(queries_cat, torch.transpose(keys_ads, 0, 1)) / math.sqrt(d_model) - ) + _, idx = catalysts.batch.sort(stable=True) + new_h_cat = torch.concat(new_h_cat, dim = 0)[torch.argsort(idx)] - h_ads = h_ads + torch.matmul(scalars_ads, values_cat) - h_cat = h_cat + torch.matmul(scalars_cat, values_ads) + new_h_ads = h_ads + new_h_ads + new_h_cat = h_cat + new_h_cat - h_ads = nn.functional.normalize(h_ads) - h_cat = nn.functional.normalize(h_cat) + new_h_ads = nn.functional.normalize(new_h_ads) + new_h_cat = nn.functional.normalize(new_h_cat) - return h_ads, h_cat + return new_h_ads, new_h_cat @registry.register_model("tifaenet") @@ -186,7 +201,7 @@ def __init__(self, **kwargs): if inter_interaction_type == "transformer": inter_interaction_type = TransformerInteraction - elif: inter_interaction_type == "attention": + elif inter_interaction_type == "attention": inter_interaction_type = AttentionInteraction self.inter_interactions = nn.ModuleList( @@ -319,7 +334,7 @@ def energy_forward(self, data): ) in zip( self.interaction_blocks_ads, self.interaction_blocks_cat, - self.transformer_interactions, + self.inter_interactions, ): if self.skip_co == "concat_atom": energy_skip_co_ads.append(h_ads) @@ -338,9 +353,10 
@@ def energy_forward(self, data): intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads) intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) - inter_ads, inter_cat = transformer_interaction(intra_ads, intra_cat) - h_ads = h_ads + inter_ads - h_cat = h_cat + inter_cat + adsorbates.h = intra_ads + catalysts.h = intra_cat + + h_ads, h_cat = inter_interaction(adsorbates, catalysts) # Atom skip-co if self.skip_co == "concat_atom": diff --git a/ocpmodels/preprocessing/graph_rewiring.py b/ocpmodels/preprocessing/graph_rewiring.py index 2f3b103a6c..b9115e9077 100644 --- a/ocpmodels/preprocessing/graph_rewiring.py +++ b/ocpmodels/preprocessing/graph_rewiring.py @@ -36,6 +36,11 @@ def remove_tag0_nodes(data): data.tags = data.tags[non_sub] if hasattr(data, "pos_relaxed"): data.pos_relaxed = data.pos_relaxed[non_sub, :] + if hasattr(data, "query"): + data.h = data.h[non_sub, :] + data.query = data.query[non_sub, :] + data.key = data.key[non_sub, :] + data.value = data.value[non_sub, :] # per-edge tensors data.edge_index = data.edge_index[:, neither_is_sub] From 181ee534aafdd6cdcb1f07d3510e7b107efb7d2d Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 1 Aug 2023 17:18:41 -0400 Subject: [PATCH 052/131] Changed training file. --- configs/exps/alvaro/all-training.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index f4531ccd6b..d8bf9d2b40 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,6 +42,10 @@ default: eval_batch_size: 256 runs: + - config: tifaenet-is2re-all + model: + tifaenet_mode: "attention" + - config: tifaenet-is2re-all model: tifaenet_mode: "attention" From c0cec2a418d7f1ab49275cf6dcd8ac645ea298e0 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 4 Aug 2023 16:23:46 -0400 Subject: [PATCH 053/131] Improved the implementation of tifaenet to use sparse matrices. It still isn't good. --- ocpmodels/models/tifaenet.py | 136 ++++++++++++++++++++++++++--------- 1 file changed, 101 insertions(+), 35 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 8cf44a0807..a989041ef1 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -6,6 +6,10 @@ from torch_geometric.data import Batch from torch_geometric.nn import radius_graph +from torch_sparse import SparseTensor, spspmm +from torch_sparse import transpose as transpose_sparse +from scipy import sparse + from ocpmodels.models.faenet import ( GaussianSmearing, EmbeddingBlock, @@ -62,38 +66,50 @@ def __init__(self, d_model): self.softmax = Softmax(dim = 1) - def forward(self, adsorbates, catalysts): - d_model = adsorbates.h.shape[1] - batch_size = max(adsorbates.batch).item() + 1 - - h_ads = adsorbates.h - adsorbates.query = self.queries_ads(h_ads) - adsorbates.key = self.keys_ads(h_ads) - adsorbates.value = self.values_ads(h_ads) - - h_cat = catalysts.h - catalysts.query = self.queries_cat(h_cat) - catalysts.key = self.keys_cat(h_cat) - catalysts.value = self.values_cat(h_cat) - - new_h_ads = [] - new_h_cat = [] - for i in range(batch_size): # How can I avoid a for loop? 
- scalars_ads = self.softmax( - torch.matmul(adsorbates[i].query, catalysts[i].key.T) / math.sqrt(d_model) - ) - scalars_cat = self.softmax( - torch.matmul(catalysts[i].query, adsorbates[i].key.T) / math.sqrt(d_model) - ) - - new_h_ads.append(torch.matmul(scalars_ads, catalysts[i].value)) - new_h_cat.append(torch.matmul(scalars_cat, adsorbates[i].value)) - - _, idx = adsorbates.batch.sort(stable=True) - new_h_ads = torch.concat(new_h_ads, dim = 0)[torch.argsort(idx)] # Inverse of permutation + def forward(self, + h_ads, h_cat, + index_ads, index_cat, + batch_size + ): + d_model = h_ads.shape[1] + natoms_ads = h_ads.shape[0] + natoms_cat = h_cat.shape[0] + + # Create matrices with values + query_ads = self.queries_ads(h_ads) + key_ads = self.keys_ads(h_ads) + value_ads = self.values_ads(h_ads) + + query_cat = self.queries_cat(h_cat) + key_cat = self.keys_cat(h_cat) + value_cat = self.values_cat(h_cat) + + key_cat_T_index, key_cat_T_val = transpose_sparse( + index_cat, key_cat.view(-1), + natoms_cat, d_model * batch_size + ) + key_ads_T_index, key_ads_T_val = transpose_sparse( + index_ads, key_ads.view(-1), + natoms_ads, d_model * batch_size + ) - _, idx = catalysts.batch.sort(stable=True) - new_h_cat = torch.concat(new_h_cat, dim = 0)[torch.argsort(idx)] + index_att_ads, attention_ads = spspmm( + index_ads, query_ads.view(-1), + key_cat_T_index, key_cat_T_val, + natoms_ads, d_model * batch_size, natoms_cat + ) + attention_ads = SparseTensor(row=index_att_ads[0], col=index_att_ads[1], value=attention_ads).to_dense() + attention_ads = self.softmax(attention_ads / math.sqrt(d_model)) + new_h_ads = torch.matmul(attention_ads, value_cat) + + index_att_cat, attention_cat = spspmm( + index_cat, query_cat.view(-1), + key_ads_T_index, key_ads_T_val, + natoms_cat, d_model * batch_size, natoms_ads + ) + attention_cat = SparseTensor(row=index_att_cat[0], col=index_att_cat[1], value=attention_cat).to_dense() + attention_cat = self.softmax(attention_cat / math.sqrt(d_model)) + new_h_cat = torch.matmul(attention_cat, value_ads) new_h_ads = h_ads + new_h_ads new_h_cat = h_cat + new_h_cat @@ -118,6 +134,7 @@ def __init__(self, **kwargs): self.skip_co = kwargs["skip_co"] if kwargs["mp_type"] == "sfarinet": kwargs["num_filters"] = kwargs["hidden_channels"] + self.hidden_channels = kwargs["hidden_channels"] self.act = ( getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish @@ -325,6 +342,54 @@ def energy_forward(self, data): alpha_cat = None # Interaction and transformer blocks + + # Start by setting up the sparse matrices in scipy + natoms_ads = h_ads.shape[0] + natoms_cat = h_cat.shape[0] + + dummy_ads = torch.arange(natoms_ads * self.hidden_channels).numpy() + dummy_cat = torch.ones(natoms_cat * self.hidden_channels).numpy() + + crowd_indices_ads = torch.arange( + start = 0, end = (natoms_ads + 1)*self.hidden_channels, step = self.hidden_channels, + ).numpy() + crowd_indices_cat = torch.arange( + start = 0, end = (natoms_cat + 1)*self.hidden_channels, step = self.hidden_channels, + ).numpy() + + raw_col_indices = [ + [torch.arange(self.hidden_channels) + (10*j)] * i + for i, j + in zip(adsorbates.natoms, range(batch_size)) + ] + col_indices = [] + for graph in raw_col_indices: + col_indices += graph + col_indices_ads = torch.concat(col_indices).numpy() + + raw_col_indices = [ + [torch.arange(self.hidden_channels) + (10*j)] * i + for i, j + in zip(catalysts.natoms, range(batch_size)) + ] + col_indices = [] + for graph in raw_col_indices: + col_indices += graph + col_indices_cat = 
torch.concat(col_indices).numpy() + + sparse_ads = sparse.csr_array( + (dummy_ads, col_indices_ads, crowd_indices_ads), shape=(natoms_ads, dummy_ads.shape[0]) + ).tocoo() + row_ads, col_ads = torch.from_numpy(sparse_ads.row), torch.from_numpy(sparse_ads.col) + index_ads = torch.concat([row_ads.view(1, -1), col_ads.view(1, -1)], dim=0).long().to(h_ads.device) + + sparse_cat = sparse.csr_array( + (dummy_cat, col_indices_cat, crowd_indices_cat), shape=(natoms_cat, dummy_cat.shape[0]) + ).tocoo() + row_cat, col_cat = torch.from_numpy(sparse_cat.row), torch.from_numpy(sparse_cat.col) + index_cat = torch.concat([row_cat.view(1, -1), col_cat.view(1, -1)], dim=0).long().to(h_ads.device) + + # Now we do interactions. energy_skip_co_ads = [] energy_skip_co_cat = [] for ( @@ -353,10 +418,11 @@ def energy_forward(self, data): intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads) intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) - adsorbates.h = intra_ads - catalysts.h = intra_cat - - h_ads, h_cat = inter_interaction(adsorbates, catalysts) + h_ads, h_cat = inter_interaction( + intra_ads, intra_cat, + index_ads, index_cat, + batch_size + ) # Atom skip-co if self.skip_co == "concat_atom": From 1120d031c3b0cd1ace06f924c12a1e8693062843 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 7 Aug 2023 16:51:57 -0400 Subject: [PATCH 054/131] Implemented attention tifaenet through sparse matrix. Now changing backend implementation of tifaenet so it uses heterogeneous graphs --- debug.py | 2 +- ocpmodels/datasets/heterogeneous.py | 28 ++++++ ocpmodels/datasets/separate_dataset.py | 16 +--- ocpmodels/models/tifaenet.py | 118 +++++++++++++++---------- ocpmodels/trainers/base_trainer.py | 20 ++++- ocpmodels/trainers/single_trainer.py | 6 +- 6 files changed, 119 insertions(+), 71 deletions(-) create mode 100644 ocpmodels/datasets/heterogeneous.py diff --git a/debug.py b/debug.py index 773c77ac00..ddaa199daa 100644 --- a/debug.py +++ b/debug.py @@ -125,7 +125,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" trainer_config["model"]["transformer_out"] = True - trainer_config["model"]["tifaenet_mode"] = "attention" + trainer_config["model"]["tifaenet_mode"] = "gat" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/datasets/heterogeneous.py b/ocpmodels/datasets/heterogeneous.py new file mode 100644 index 0000000000..0481b7572a --- /dev/null +++ b/ocpmodels/datasets/heterogeneous.py @@ -0,0 +1,28 @@ +import torch + +from ocpmodels.datasets.lmdb_dataset import LmdbDataset +from ocpmodels.common.registry import registry + +from torch_geometric.data import HeteroData + +@registry.register_dataset("heterogeneous") +class HeterogeneousDataset(LmdbDataset): + def __getitem__(self, idx): + adsorbate, catalyst = super(HeterogeneousDataset, self).__getitem__(self, idx) + + reaction = HeteroData() + for graph in [adsorbate, catalyst]: + mode = graph.mode + for key in graph.keys(): + if key == "edge_index": + continue + reaction[mode][key] = graph[key] + + reaction[mode, "is_close", mode].edge_index = graph.edge_index + + sender = torch.arange(0, adsorbate.natoms, 1/catalyst.natoms) + receiver = torch.arange(0.0, catalyst.natoms).repeat(adsorbate.natoms) + + reaction["adsorbate", "is_disc", "catalyst"].edge_index = torch.stack([sender. 
reciver]) + + return reaction diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/separate_dataset.py index 097031628c..0fe49a3bd9 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/separate_dataset.py @@ -51,16 +51,13 @@ def graph_splitter(graph): ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] - # This is for attention related stuff. - dummy = torch.zeros(ads_natoms, 1) - # Create the graphs adsorbate = Data( edge_index = ads_edge_index, pos = pos[adsorbate_v_mask, :], cell = cell, atomic_numbers = atomic_numbers[adsorbate_v_mask], - natoms = adsorbate_v_mask.sum().item(), + natoms = ads_natoms, cell_offsets = cell_offsets[adsorbate_e_mask, :], force = force[adsorbate_v_mask, :], tags = tags[adsorbate_v_mask], @@ -68,20 +65,15 @@ def graph_splitter(graph): y_relaxed = y_relaxed, pos_relaxed = pos_relaxed[adsorbate_v_mask, :], id = id, - h = dummy, - query = dummy, - key = dummy, - value = dummy, mode="adsorbate" ) - dummy = torch.zeros(cat_natoms, 1) catalyst = Data( edge_index = cat_edge_index, pos = pos[catalyst_v_mask, :], cell = cell, atomic_numbers = atomic_numbers[catalyst_v_mask], - natoms = catalyst_v_mask.sum().item(), + natoms = cat_natoms, cell_offsets = cell_offsets[catalyst_e_mask, :], force = force[catalyst_v_mask, :], tags = tags[catalyst_v_mask], @@ -89,10 +81,6 @@ def graph_splitter(graph): y_relaxed = y_relaxed, pos_relaxed = pos_relaxed[catalyst_v_mask, :], id = id, - h = dummy, - query = dummy, - key = dummy, - value = dummy, mode="catalyst" ) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index a989041ef1..08357d9002 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -4,7 +4,7 @@ from torch.nn import Linear, Transformer, Softmax from torch_geometric.data import Batch -from torch_geometric.nn import radius_graph +from torch_geometric.nn import radius_graph, GATConv from torch_sparse import SparseTensor, spspmm from torch_sparse import transpose as transpose_sparse @@ -22,6 +22,16 @@ from ocpmodels.common.utils import conditional_grad, get_pbc_distances from ocpmodels.models.utils.activations import swish +class GATInteraction(nn.Module): + def __init__(self, d_model, dropout=0.1): + super(GATInteraction, self).__init__() + + self.interaction = GATConv( + in_channels = d_model, + out_channels = d_model, + ) + def forward(self, ) + class TransformerInteraction(nn.Module): def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2): super(TransformerInteraction, self).__init__() @@ -213,14 +223,18 @@ def __init__(self, **kwargs): # Transformer Interaction inter_interaction_type = kwargs.get("tifaenet_mode", None) + self.inter_interaction_type = inter_interaction_type assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer" - assert inter_interaction_type in {"attention", "transformer"}, "Using an invalid tifaenet_mode. Options: attention, transformer" + assert inter_interaction_type in {"attention", "transformer", "gat"}, "Using an invalid tifaenet_mode. 
Options: attention, transformer, gat" if inter_interaction_type == "transformer": inter_interaction_type = TransformerInteraction elif inter_interaction_type == "attention": inter_interaction_type = AttentionInteraction + elif inter_interaction_type == "gat": + inter_interaction_type = GATInteraction + self.inter_interactions = nn.ModuleList( [ inter_interaction_type( @@ -342,52 +356,59 @@ def energy_forward(self, data): alpha_cat = None # Interaction and transformer blocks - - # Start by setting up the sparse matrices in scipy - natoms_ads = h_ads.shape[0] - natoms_cat = h_cat.shape[0] - dummy_ads = torch.arange(natoms_ads * self.hidden_channels).numpy() - dummy_cat = torch.ones(natoms_cat * self.hidden_channels).numpy() - - crowd_indices_ads = torch.arange( - start = 0, end = (natoms_ads + 1)*self.hidden_channels, step = self.hidden_channels, - ).numpy() - crowd_indices_cat = torch.arange( - start = 0, end = (natoms_cat + 1)*self.hidden_channels, step = self.hidden_channels, - ).numpy() - - raw_col_indices = [ - [torch.arange(self.hidden_channels) + (10*j)] * i - for i, j - in zip(adsorbates.natoms, range(batch_size)) - ] - col_indices = [] - for graph in raw_col_indices: - col_indices += graph - col_indices_ads = torch.concat(col_indices).numpy() - - raw_col_indices = [ - [torch.arange(self.hidden_channels) + (10*j)] * i - for i, j - in zip(catalysts.natoms, range(batch_size)) - ] - col_indices = [] - for graph in raw_col_indices: - col_indices += graph - col_indices_cat = torch.concat(col_indices).numpy() - - sparse_ads = sparse.csr_array( - (dummy_ads, col_indices_ads, crowd_indices_ads), shape=(natoms_ads, dummy_ads.shape[0]) - ).tocoo() - row_ads, col_ads = torch.from_numpy(sparse_ads.row), torch.from_numpy(sparse_ads.col) - index_ads = torch.concat([row_ads.view(1, -1), col_ads.view(1, -1)], dim=0).long().to(h_ads.device) - - sparse_cat = sparse.csr_array( - (dummy_cat, col_indices_cat, crowd_indices_cat), shape=(natoms_cat, dummy_cat.shape[0]) - ).tocoo() - row_cat, col_cat = torch.from_numpy(sparse_cat.row), torch.from_numpy(sparse_cat.col) - index_cat = torch.concat([row_cat.view(1, -1), col_cat.view(1, -1)], dim=0).long().to(h_ads.device) + if self.inter_interaction_type == "attention": + # Start by setting up the sparse matrices in scipy + natoms_ads = h_ads.shape[0] + natoms_cat = h_cat.shape[0] + + dummy_ads = torch.arange(natoms_ads * self.hidden_channels).numpy() + dummy_cat = torch.ones(natoms_cat * self.hidden_channels).numpy() + + crowd_indices_ads = torch.arange( + start = 0, end = (natoms_ads + 1)*self.hidden_channels, step = self.hidden_channels, + ).numpy() + crowd_indices_cat = torch.arange( + start = 0, end = (natoms_cat + 1)*self.hidden_channels, step = self.hidden_channels, + ).numpy() + + raw_col_indices = [ + [torch.arange(self.hidden_channels) + (10*j)] * i + for i, j + in zip(adsorbates.natoms, range(batch_size)) + ] + col_indices = [] + for graph in raw_col_indices: + col_indices += graph + col_indices_ads = torch.concat(col_indices).numpy() + + raw_col_indices = [ + [torch.arange(self.hidden_channels) + (10*j)] * i + for i, j + in zip(catalysts.natoms, range(batch_size)) + ] + col_indices = [] + for graph in raw_col_indices: + col_indices += graph + col_indices_cat = torch.concat(col_indices).numpy() + + sparse_ads = sparse.csr_array( + (dummy_ads, col_indices_ads, crowd_indices_ads), shape=(natoms_ads, dummy_ads.shape[0]) + ).tocoo() + row_ads, col_ads = torch.from_numpy(sparse_ads.row), torch.from_numpy(sparse_ads.col) + index_ads = 
torch.concat([row_ads.view(1, -1), col_ads.view(1, -1)], dim=0).long().to(h_ads.device) + + sparse_cat = sparse.csr_array( + (dummy_cat, col_indices_cat, crowd_indices_cat), shape=(natoms_cat, dummy_cat.shape[0]) + ).tocoo() + row_cat, col_cat = torch.from_numpy(sparse_cat.row), torch.from_numpy(sparse_cat.col) + index_cat = torch.concat([row_cat.view(1, -1), col_cat.view(1, -1)], dim=0).long().to(h_ads.device) + + extra_parameters = [index_ads, index_cat, batch_size] + elif self.inter_interaction_type == "gat": + import ipdb + ipdb.set_trace() + inter_edge_weight = [] # Now we do interactions. energy_skip_co_ads = [] @@ -420,8 +441,7 @@ def energy_forward(self, data): h_ads, h_cat = inter_interaction( intra_ads, intra_cat, - index_ads, index_cat, - batch_size + *extra_parameters ) # Atom skip-co diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 4d01149c20..7820cb794b 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -163,14 +163,20 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.dataset_models = ["indfaenet", "tifaenet"] + self.separate_models = ["indfaenet"] + self.heterogeneous_models = ["tifaenet"] + self.data_mode = "normal" self.separate_dataset = False + if self.config["model_name"] in self.dataset_models: - self.separate_dataset = True + self.data_mode = "separate" print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") - self.load() + elif self.config["model_name"] in self.heterogeneous_models: + self.data_mode = "heterogeneous" + print("\n\nHeads up: using heterogeneous dataset, so ads/cats are stored separately in a het graph.\n") + self.load() self.evaluator = Evaluator( task = self.task_name, model_regresses_forces = self.config["model"].get("regress_forces", ""), @@ -259,10 +265,16 @@ def load_datasets(self): if split == "default_val": continue - if self.config["model_name"] in self.dataset_models: + if self.data_mode == "separate": self.datasets[split] = registry.get_dataset_class( "separate" )(ds_conf, transform=transform) + + elif: self.data_mode == "heterogeneous": + self.datasets[split] = registry.get_dataset_class( + "heterogeneous" + )(ds_conf, transform=transform) + else: self.datasets[split] = registry.get_dataset_class( self.config["task"]["dataset"] diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 869eeb2571..2afcd47510 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -457,7 +457,7 @@ def end_of_training( self.logger.log({"Epoch time": np.mean(epoch_times)}) # Check respect of symmetries - if not self.separate_dataset: + if self.data_mode == "normal": if self.test_ri and not is_test_env: symmetry = self.test_model_symmetries(debug_batches=debug_batches) if symmetry == "SIGTERM": @@ -552,7 +552,7 @@ def compute_loss(self, preds, batch_list): loss = {"total_loss": []} # Energy loss - if not self.separate_dataset: + if self.data_mode != "normal": energy_target = torch.cat( [ batch.y_relaxed.to(self.device) @@ -663,7 +663,7 @@ def compute_metrics( [batch.natoms.to(self.device) for batch in batch_list], dim=0 ) - if not self.separate_dataset: + if self.data_mode != "normal": target = { "energy": torch.cat( [ From 95a415c4f85c575bd501da745217089c6f1c4966 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 10 Aug 2023 09:06:54 -0400 Subject: [PATCH 055/131] Finished 
implementing heterogeneous graphs dataset. Most of the changes are simple changes in syntax throughout the code as heterogeneous graphs do not have the same syntax as homogeneous graphs in pytorch geometric. --- configs/exps/alvaro/10k-training.yaml | 7 +- debug.py | 2 +- ocpmodels/datasets/heterogeneous.py | 15 ++- ocpmodels/datasets/lmdb_dataset.py | 23 +++- ocpmodels/models/base_model.py | 11 +- ocpmodels/models/tifaenet.py | 151 +++++++++++--------------- ocpmodels/trainers/base_trainer.py | 5 +- ocpmodels/trainers/single_trainer.py | 75 ++++++++++--- 8 files changed, 169 insertions(+), 120 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 0952e396d0..de918b578f 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -44,9 +44,4 @@ default: runs: - config: tifaenet-is2re-10k model: - tifaenet_mode: "attention" - - - config: tifaenet-is2re-10k - model: - tifaenet_mode: "attention" - transformer_out: True + tifaenet_mode: "gat" diff --git a/debug.py b/debug.py index ddaa199daa..7dbe332006 100644 --- a/debug.py +++ b/debug.py @@ -124,7 +124,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - trainer_config["model"]["transformer_out"] = True + trainer_config["model"]["transformer_out"] = False trainer_config["model"]["tifaenet_mode"] = "gat" #trainer_config["model"]["disconnected_mlp"] = True diff --git a/ocpmodels/datasets/heterogeneous.py b/ocpmodels/datasets/heterogeneous.py index 0481b7572a..fdc2a33a35 100644 --- a/ocpmodels/datasets/heterogeneous.py +++ b/ocpmodels/datasets/heterogeneous.py @@ -1,28 +1,27 @@ import torch -from ocpmodels.datasets.lmdb_dataset import LmdbDataset +from ocpmodels.datasets.separate_dataset import SeparateLmdbDataset from ocpmodels.common.registry import registry from torch_geometric.data import HeteroData @registry.register_dataset("heterogeneous") -class HeterogeneousDataset(LmdbDataset): +class HeterogeneousDataset(SeparateLmdbDataset): def __getitem__(self, idx): - adsorbate, catalyst = super(HeterogeneousDataset, self).__getitem__(self, idx) + adsorbate, catalyst = super().__getitem__(idx) reaction = HeteroData() for graph in [adsorbate, catalyst]: mode = graph.mode - for key in graph.keys(): + for key in graph.keys: if key == "edge_index": continue reaction[mode][key] = graph[key] reaction[mode, "is_close", mode].edge_index = graph.edge_index - sender = torch.arange(0, adsorbate.natoms, 1/catalyst.natoms) - receiver = torch.arange(0.0, catalyst.natoms).repeat(adsorbate.natoms) - - reaction["adsorbate", "is_disc", "catalyst"].edge_index = torch.stack([sender. 
reciver]) + sender = torch.repeat_interleave(torch.arange(adsorbate.natoms.item()), catalyst.natoms.item()) + receiver = torch.arange(0, catalyst.natoms.item()).repeat(adsorbate.natoms.item()) + reaction["adsorbate", "is_disc", "catalyst"].edge_index = torch.stack([sender, receiver]) return reaction diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 4eff3317c8..4f1b54804c 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -16,7 +16,7 @@ import numpy as np import torch from torch.utils.data import Dataset -from torch_geometric.data import Batch +from torch_geometric.data import Batch, HeteroData from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform @@ -187,4 +187,25 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve "LMDB does not contain edge index information, set otf_graph=True" ) + elif ( + not otf_graph + and hasattr(data_list[0]["adsorbate", "is_close", "adsorbate"], "edge_index") + ): + # First, fix the neighborhood dimension. + n_neighbors_ads = [] + n_neighbors_cat = [] + for i, data in enumerate(data_list): + n_index_ads = data["adsorbate", "is_close", "adsorbate"].edge_index + n_index_cat = data["catalyst", "is_close", "catalyst"].edge_index + n_neighbors_ads.append(n_index_ads[1, :].shape[0]) + n_neighbors_cat.append(n_index_cat[1, :].shape[0]) + batch["adsorbate"].neighbors = torch.tensor(n_neighbors_ads) + batch["catalyst"].neighbors = torch.tensor(n_neighbors_cat) + + # Then, fix the edge index between ads and cats. + sender, receiver = batch["is_disc"].edge_index + ads_to_cat = torch.stack([sender, receiver + batch["adsorbate"].num_nodes]) + cat_to_ads = torch.stack([ads_to_cat[1], ads_to_cat[0]]) + batch["is_disc"].edge_index = torch.concat([ads_to_cat, cat_to_ads], dim = 1) + return batch diff --git a/ocpmodels/models/base_model.py b/ocpmodels/models/base_model.py index 240cc4a7b0..b9e3e9e40f 100644 --- a/ocpmodels/models/base_model.py +++ b/ocpmodels/models/base_model.py @@ -44,7 +44,11 @@ def forward(self, data, mode="train"): # energy gradient w.r.t. 
positions will be computed if mode == "train" or self.regress_forces == "from_energy": - data.pos.requires_grad_(True) + try: + data.pos.requires_grad_(True) + except: + data["adsorbate"].pos.requires_grad_(True) + data["catalyst"].pos.requires_grad_(True) # predict energy preds = self.energy_forward(data) @@ -63,7 +67,10 @@ def forward(self, data, mode="train"): grad_forces = forces else: # compute forces from energy gradient - grad_forces = self.forces_as_energy_grad(data.pos, preds["energy"]) + try: + grad_forces = self.forces_as_energy_grad(data.pos, preds["energy"]) + except: + grad_forces = self.forces_as_energy_grad(data["adsorbate"].pos) if self.regress_forces == "from_energy": # predicted forces are the energy gradient diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 08357d9002..945e35408b 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -29,8 +29,15 @@ def __init__(self, d_model, dropout=0.1): self.interaction = GATConv( in_channels = d_model, out_channels = d_model, + num_layers = 1, + dropout = dropout ) - def forward(self, ) + def forward(self, h_ads, h_cat, bipartite_edges): + separation_point = h_ads.shape[0] + combined = torch.concat([h_ads, h_cat], dim = 0) + combined = self.interaction(combined, bipartite_edges) + + return combined[:separation_point], combined[separation_point:] class TransformerInteraction(nn.Module): def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2): @@ -54,7 +61,7 @@ def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layer batch_first = True ) - def forward(self, h_ads, h_cat): + def forward(self, h_ads, h_cat, ads_to_cat, cat_to_ads): import ipdb ipdb.set_trace() @@ -108,7 +115,9 @@ def forward(self, key_cat_T_index, key_cat_T_val, natoms_ads, d_model * batch_size, natoms_cat ) - attention_ads = SparseTensor(row=index_att_ads[0], col=index_att_ads[1], value=attention_ads).to_dense() + attention_ads = SparseTensor( + row=index_att_ads[0], col=index_att_ads[1], value=attention_ads + ).to_dense() attention_ads = self.softmax(attention_ads / math.sqrt(d_model)) new_h_ads = torch.matmul(attention_ads, value_cat) @@ -117,7 +126,9 @@ def forward(self, key_ads_T_index, key_ads_T_val, natoms_cat, d_model * batch_size, natoms_ads ) - attention_cat = SparseTensor(row=index_att_cat[0], col=index_att_cat[1], value=attention_cat).to_dense() + attention_cat = SparseTensor( + row=index_att_cat[0], col=index_att_cat[1], value=attention_cat + ).to_dense() attention_cat = self.softmax(attention_cat / math.sqrt(d_model)) new_h_cat = torch.matmul(attention_cat, value_ads) @@ -310,40 +321,31 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - batch_size = len(data) // 2 - - adsorbates = Batch.from_data_list(data[:batch_size]) - catalysts = Batch.from_data_list(data[batch_size:]) - - batch_ads = adsorbates.batch - batch_cat = catalysts.batch + batch_size = len(data) - # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. 
- adsorbates = self.neighbor_fixer(adsorbates) - catalysts = self.neighbor_fixer(catalysts) + batch_ads = data["adsorbate"]["batch"] + batch_cat = data["catalyst"]["batch"] # Graph rewiring - ads_rewiring = self.graph_rewiring(adsorbates) + ads_rewiring, cat_rewiring = self.graph_rewiring(data, batch_ads, batch_cat) edge_index_ads, edge_weight_ads, rel_pos_ads, edge_attr_ads = ads_rewiring - - cat_rewiring = self.graph_rewiring(catalysts) edge_index_cat, edge_weight_cat, rel_pos_cat, edge_attr_cat = cat_rewiring # Embedding h_ads, e_ads = self.embedding( - adsorbates.atomic_numbers.long(), + data["adsorbate"].atomic_numbers.long(), edge_weight_ads, rel_pos_ads, edge_attr_ads, - adsorbates.tags, + data["adsorbate"].tags, self.embed_block_ads ) h_cat, e_cat = self.embedding( - catalysts.atomic_numbers.long(), + data["catalyst"].atomic_numbers.long(), edge_weight_cat, rel_pos_cat, edge_attr_cat, - catalysts.tags, + data["catalyst"].tags, self.embed_block_cat ) @@ -406,9 +408,8 @@ def energy_forward(self, data): extra_parameters = [index_ads, index_cat, batch_size] elif self.inter_interaction_type == "gat": - import ipdb - ipdb.set_trace() - inter_edge_weight = [] + extra_parameters = [data["is_disc"].edge_index] + # Fix edges between graphs # Now we do interactions. energy_skip_co_ads = [] @@ -514,73 +515,53 @@ def embedding(self, z, edge_weight, rel_pos, edge_attr, tags, embed_func): return h, e @conditional_grad(torch.enable_grad()) - def graph_rewiring(self, data): - z = data.atomic_numbers.long() - pos = data.pos - batch = data.batch - - mode = data.mode[0] - if mode == "adsorbate": - distance_expansion = self.distance_expansion_ads - else: - distance_expansion = self.distance_expansion_cat + def graph_rewiring(self, data, batch_ads, batch_cat): + z = data["adsorbate"].atomic_numbers.long() + # Use periodic boundary conditions + results = [] if self.use_pbc: assert z.dim() == 1 and z.dtype == torch.long - out = get_pbc_distances( - pos, - data.edge_index, - data.cell, - data.cell_offsets, - data.neighbors, - return_distance_vec=True, - ) + for mode in ["adsorbate", "catalyst"]: + out = get_pbc_distances( + data[mode].pos, + data[mode, "is_close", mode].edge_index, + data[mode].cell, + data[mode].cell_offsets, + data[mode].neighbors, + return_distance_vec=True, + ) - edge_index = out["edge_index"] - edge_weight = out["distances"] - rel_pos = out["distance_vec"] - edge_attr = distance_expansion(edge_weight) + edge_index = out["edge_index"] + edge_weight = out["distances"] + rel_pos = out["distance_vec"] + if mode == "adsorbate": + distance_expansion = self.distance_expansion_ads + else: + distance_expansion = self.distance_expansion_cat + edge_attr = distance_expansion(edge_weight) + results.append([edge_index, edge_weight, rel_pos, edge_attr]) else: - edge_index = radius_graph( - pos, - r=self.cutoff, - batch=batch, - max_num_neighbors=self.max_num_neighbors, - ) - # edge_index = data.edge_index - row, col = edge_index - rel_pos = pos[row] - pos[col] - edge_weight = rel_pos.norm(dim=-1) - edge_attr = distance_expansion(edge_weight) - - return (edge_index, edge_weight, rel_pos, edge_attr) - - def neighbor_fixer(self, data): - num_graphs = len(data) - # Find indices of adsorbates without edges: - edgeless = [ - i for i - in range(num_graphs) - if data[i].neighbors.shape[0] == 0 - ] - if len(edgeless) > 0: - # Since most adsorbates have an edge, - # we pop those values specifically from range(num_adsorbates) - mask = list(range(num_graphs)) - num_popped = 0 # We can do this since edgeless is 
already sorted - for unwanted in edgeless: - mask.pop(unwanted-num_popped) - num_popped += 1 - new_nbrs = torch.zeros( - num_graphs, - dtype = torch.int64, - device = data.neighbors.device, - ) - new_nbrs[mask] = data.neighbors - data.neighbors = new_nbrs - - return data + for mode in ["adsorbate", "catalyst"]: + edge_index = radius_graph( + data[mode].pos, + r=self.cutoff, + batch=batch_ads if mode == "adsorbate" else batch_cat, + max_num_neighbors=self.max_num_neighbors, + ) + # edge_index = data.edge_index + row, col = edge_index + rel_pos = data[mode].pos[row] - data[mode].pos[col] + edge_weight = rel_pos.norm(dim=-1) + if mode == "adsorbate": + distance_expansion = self.distance_expansion_ads + else: + distance_expansion = self.distance_expansion_cat + edge_attr = distance_expansion(edge_weight) + results.append([edge_index, edge_weight, rel_pos, edge_attr]) + + return results @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 7820cb794b..6e66b73190 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -168,7 +168,7 @@ def __init__(self, **kwargs): self.data_mode = "normal" self.separate_dataset = False - if self.config["model_name"] in self.dataset_models: + if self.config["model_name"] in self.separate_models: self.data_mode = "separate" print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") @@ -270,7 +270,7 @@ def load_datasets(self): "separate" )(ds_conf, transform=transform) - elif: self.data_mode == "heterogeneous": + elif self.data_mode == "heterogeneous": self.datasets[split] = registry.get_dataset_class( "heterogeneous" )(ds_conf, transform=transform) @@ -1102,6 +1102,7 @@ def measure_inference_time(self, loops=1): with timer.next("forward"): _ = self.model_forward(b, mode="inference") + # divide times by batch size mean, std = timer.prepare_for_logging( map_funcs={ diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 2afcd47510..40bbffb475 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -479,16 +479,34 @@ def end_of_training( def model_forward(self, batch_list, mode="train"): # Distinguish frame averaging from base case. 
if self.config["frame_averaging"] and self.config["frame_averaging"] != "DA": - original_pos = batch_list[0].pos - if self.task_name in OCP_TASKS: - original_cell = batch_list[0].cell + if self.data_mode == "heterogeneous": + original_pos_ads = batch_list[0]["adsorbate"].pos + original_pos_cat = batch_list[0]["catalyst"].pos + if self.task_name in OCP_TASKS: + original_cell = batch_list[0]["catalyst"].cell + else: + original_pos = batch_list[0].pos + if self.task_name in OCP_TASKS: + original_cell = batch_list[0].cell e_all, p_all, f_all, gt_all = [], [], [], [] # Compute model prediction for each frame - for i in range(len(batch_list[0].fa_pos)): - batch_list[0].pos = batch_list[0].fa_pos[i] - if self.task_name in OCP_TASKS: - batch_list[0].cell = batch_list[0].fa_cell[i] + if self.data_mode == "heterogeneous": + fa_pos_length = len(batch_list[0]["adsorbate"].fa_pos) + else: + fa_pos_length = len(batch_list[0].fa_pos) + + for i in range(fa_pos_length): + if self.data_mode == "heterogeneous": + batch_list[0]["adsorbate"].pos = batch_list[0]["adsorbate"].fa_pos[i] + batch_list[0]["catalyst"].pos = batch_list[0]["catalyst"].fa_pos[i] + if self.task_name in OCP_TASKS: + batch_list[0]["adsorbate"].cell = batch_list[0]["adsorbate"].fa_cell[i] + batch_list[0]["catalyst"].cell = batch_list[0]["catalyst"].fa_cell[i] + else: + batch_list[0].pos = batch_list[0].fa_pos[i] + if self.task_name in OCP_TASKS: + batch_list[0].cell = batch_list[0].fa_cell[i] # forward pass preds = self.model(deepcopy(batch_list), mode=mode) @@ -528,9 +546,16 @@ def model_forward(self, batch_list, mode="train"): ) gt_all.append(g_grad_target) - batch_list[0].pos = original_pos - if self.task_name in OCP_TASKS: - batch_list[0].cell = original_cell + if self.data_mode == "heterogeneous": + batch_list[0]["adsorbate"].pos = original_pos_ads + batch_list[0]["catalyst"].pos = original_pos_cat + if self.task_name in OCP_TASKS: + batch_list[0]["adsorbate"].cell = original_cell + batch_list[0]["catalyst"].cell = original_cell + else: + batch_list[0].pos = original_pos + if self.task_name in OCP_TASKS: + batch_list[0].cell = original_cell # Average predictions over frames preds["energy"] = sum(e_all) / len(e_all) @@ -552,7 +577,18 @@ def compute_loss(self, preds, batch_list): loss = {"total_loss": []} # Energy loss - if self.data_mode != "normal": + if self.data_mode == "heterogeneous": + energy_target = torch.cat( + [ + batch["adsorbate"].y_relaxed.to(self.device) + if self.task_name == "is2re" + else batch["adsorbate"].y.to(self.device) + for batch in batch_list + ], + dim=0 + ) + + elif self.data_mode != "normal": energy_target = torch.cat( [ batch.y_relaxed.to(self.device) @@ -659,11 +695,20 @@ def compute_loss(self, preds, batch_list): def compute_metrics( self, preds: Dict, batch_list: List[Data], evaluator: Evaluator, metrics={} ): - natoms = torch.cat( - [batch.natoms.to(self.device) for batch in batch_list], dim=0 - ) + if self.data_mode == "heterogeneous": + natoms = (batch_list[0]["adsorbate"].natoms.to(self.device) + + batch_list[0]["catalyst"].natoms.to(self.device)) + else: + natoms = torch.cat( + [batch.natoms.to(self.device) for batch in batch_list], dim=0 + ) - if self.data_mode != "normal": + if self.data_mode == "heterogeneous": + target = { + "energy": batch_list[0]["adsorbate"].y_relaxed.to(self.device), + "natoms": natoms, + } + elif self.data_mode != "normal": target = { "energy": torch.cat( [ From 98a049fbe72ed23d35f291f73fa171c37c648b8f Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 10 Aug 2023 15:04:59 
-0400 Subject: [PATCH 056/131] Code worked fine. Implemented the requirement of specifying the version of GAT. Training yaml files ready for submission. --- configs/exps/alvaro/10k-training.yaml | 1 + configs/exps/alvaro/all-training.yaml | 4 ++-- ocpmodels/models/tifaenet.py | 19 +++++++++++++++---- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index de918b578f..6979b8d49e 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -45,3 +45,4 @@ runs: - config: tifaenet-is2re-10k model: tifaenet_mode: "gat" + tifaenet_gat_mode: "v2" diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index d8bf9d2b40..3785e41e8f 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -45,8 +45,8 @@ runs: - config: tifaenet-is2re-all model: tifaenet_mode: "attention" + tifaenet_gat_mode: "v1" - config: tifaenet-is2re-all model: - tifaenet_mode: "attention" - transformer_out: True + tifaenet_gat_mode: "v2" diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 945e35408b..712de3e5a4 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -23,9 +23,16 @@ from ocpmodels.models.utils.activations import swish class GATInteraction(nn.Module): - def __init__(self, d_model, dropout=0.1): + def __init__(self, d_model, version, dropout=0.1): super(GATInteraction, self).__init__() + if version == "v1": + version = False + elif version == "v2": + version = True + else: + raise ValueError(f"Invalid GAT version. Received {version}, available: v1, v2.") + self.interaction = GATConv( in_channels = d_model, out_channels = d_model, @@ -242,15 +249,19 @@ def __init__(self, **kwargs): elif inter_interaction_type == "attention": inter_interaction_type = AttentionInteraction + inter_interaction_parameters = [kwargs["hidden_channels"]] elif inter_interaction_type == "gat": + assert hasattr(kwargs, "gat_mode"), "When using GAT mode, a version needs to be specified. Options: v1, v2". inter_interaction_type = GATInteraction + inter_interaction_parameters = [ + kwargs["hidden_channels"], + kwargs["gat_mode"] + ] self.inter_interactions = nn.ModuleList( [ - inter_interaction_type( - d_model = kwargs["hidden_channels"], - ) + inter_interaction_type(*inter_interaction_parameters) for _ in range(kwargs["num_interactions"]) ] ) From 6dc745b75ad80f8c0246dd7c32e38ecfc9b5c987 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 10 Aug 2023 17:25:54 -0400 Subject: [PATCH 057/131] fixed small typo --- ocpmodels/models/tifaenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 712de3e5a4..292b4914ee 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -252,7 +252,7 @@ def __init__(self, **kwargs): inter_interaction_parameters = [kwargs["hidden_channels"]] elif inter_interaction_type == "gat": - assert hasattr(kwargs, "gat_mode"), "When using GAT mode, a version needs to be specified. Options: v1, v2". + assert hasattr(kwargs, "gat_mode"), "When using GAT mode, a version needs to be specified. Options: v1, v2." 
inter_interaction_type = GATInteraction inter_interaction_parameters = [ kwargs["hidden_channels"], From 558d701b4a53799198e947378ce98a6f68a52d56 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 10 Aug 2023 17:37:09 -0400 Subject: [PATCH 058/131] fixed a mistake in coding --- configs/exps/alvaro/all-training.yaml | 3 ++- debug.py | 1 + ocpmodels/models/tifaenet.py | 8 ++++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 3785e41e8f..e9e6567252 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -44,9 +44,10 @@ default: runs: - config: tifaenet-is2re-all model: - tifaenet_mode: "attention" + tifaenet_mode: "gat" tifaenet_gat_mode: "v1" - config: tifaenet-is2re-all model: + tifaenet_mode: "gat" tifaenet_gat_mode: "v2" diff --git a/debug.py b/debug.py index 7dbe332006..9130adc391 100644 --- a/debug.py +++ b/debug.py @@ -126,6 +126,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["skip_co"] = "concat" trainer_config["model"]["transformer_out"] = False trainer_config["model"]["tifaenet_mode"] = "gat" + trainer_config["model"]["tifaenet_gat_mode"] = "v1" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 292b4914ee..ec26e08024 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -240,6 +240,10 @@ def __init__(self, **kwargs): ) # Transformer Interaction + + import ipdb + ipdb.set_trace() + inter_interaction_type = kwargs.get("tifaenet_mode", None) self.inter_interaction_type = inter_interaction_type assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer" @@ -252,11 +256,11 @@ def __init__(self, **kwargs): inter_interaction_parameters = [kwargs["hidden_channels"]] elif inter_interaction_type == "gat": - assert hasattr(kwargs, "gat_mode"), "When using GAT mode, a version needs to be specified. Options: v1, v2." + assert "tifaenet_gat_mode" in kwargs, "When using GAT mode, a version needs to be specified. Options: v1, v2." inter_interaction_type = GATInteraction inter_interaction_parameters = [ kwargs["hidden_channels"], - kwargs["gat_mode"] + kwargs["tifaenet_gat_mode"] ] self.inter_interactions = nn.ModuleList( From 8ccb813728c8408bc3d7516432af4b7ddc1183a8 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 10 Aug 2023 17:48:37 -0400 Subject: [PATCH 059/131] Forgot a set_trace --- ocpmodels/models/tifaenet.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index ec26e08024..cae4e7545a 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -241,9 +241,6 @@ def __init__(self, **kwargs): # Transformer Interaction - import ipdb - ipdb.set_trace() - inter_interaction_type = kwargs.get("tifaenet_mode", None) self.inter_interaction_type = inter_interaction_type assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer" From 0ca9573dc3316672b354281d4063d53e0aa93fe1 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 11 Aug 2023 15:59:16 -0400 Subject: [PATCH 060/131] Fixed mistake where version 2 wasn't being used. Added attention heads to the GAT layer. 
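
A standalone sketch of a multi-head GAT pass over the bipartite ads <-> cat edges
(torch_geometric; sizes and the head count are illustrative, and the model adds
normalization plus a residual on top). With concat=False the heads are averaged, so
the output width stays at d_model and a residual update still lines up; with the
default concat=True the output would be heads * d_model.

    import torch
    from torch_geometric.nn import GATv2Conv

    d_model = 64
    conv = GATv2Conv(d_model, d_model, heads=3, concat=False, dropout=0.1)

    h_ads = torch.randn(4, d_model)    # adsorbate atoms of one system
    h_cat = torch.randn(10, d_model)   # catalyst atoms of the same system
    x = torch.cat([h_ads, h_cat], dim=0)

    # Fully bipartite ads <-> cat edges in both directions, catalyst indices offset
    # by the number of adsorbate atoms (same convention as the "is_disc" edges
    # built in the collater).
    src = torch.repeat_interleave(torch.arange(4), 10)
    dst = torch.arange(10).repeat(4) + 4
    edge_index = torch.cat([torch.stack([src, dst]), torch.stack([dst, src])], dim=1)

    out = conv(x, edge_index)          # (14, d_model): heads averaged, not concatenated
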
--- configs/exps/alvaro/10k-training.yaml | 5 ++++ configs/exps/alvaro/all-training.yaml | 6 ++--- ocpmodels/models/tifaenet.py | 36 ++++++++++++++++----------- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 6979b8d49e..79915904c8 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -46,3 +46,8 @@ runs: model: tifaenet_mode: "gat" tifaenet_gat_mode: "v2" + + - config: tifaenet-is2re-10k + model: + tifaenet_mode: "gat" + tifaenet_gat_mode: "v1" diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index e9e6567252..7e78aa998b 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -45,9 +45,9 @@ runs: - config: tifaenet-is2re-all model: tifaenet_mode: "gat" - tifaenet_gat_mode: "v1" + tifaenet_gat_mode: "v2" - config: tifaenet-is2re-all - model: + mode: tifaenet_mode: "gat" - tifaenet_gat_mode: "v2" + tifaenet_gat_mode: "v1" diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index cae4e7545a..be01d65e42 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -4,7 +4,7 @@ from torch.nn import Linear, Transformer, Softmax from torch_geometric.data import Batch -from torch_geometric.nn import radius_graph, GATConv +from torch_geometric.nn import radius_graph, GATConv, GATv2Conv from torch_sparse import SparseTensor, spspmm from torch_sparse import transpose as transpose_sparse @@ -26,25 +26,33 @@ class GATInteraction(nn.Module): def __init__(self, d_model, version, dropout=0.1): super(GATInteraction, self).__init__() - if version == "v1": - version = False - elif version == "v2": - version = True - else: + if version not in {"v1", "v2"}: raise ValueError(f"Invalid GAT version. 
Received {version}, available: v1, v2.") - self.interaction = GATConv( - in_channels = d_model, - out_channels = d_model, - num_layers = 1, - dropout = dropout - ) + if version == "v1": + self.interaction = GATConv( + in_channels = d_model, + out_channels = d_model, + heads = 3 + dropout = dropout + ) + else: + self.interaction = GATv2Conv( + in_channels = d_model, + out_channels = d_model, + head = 3 + dropout = dropout + ) def forward(self, h_ads, h_cat, bipartite_edges): - separation_point = h_ads.shape[0] + separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges) - return combined[:separation_point], combined[separation_point:] + ads, cat = combined[:separation_pt], combined[separation_pt:] + ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) + ads, cat = ads + h_ads, cat + h_cat + + return ads, cat class TransformerInteraction(nn.Module): def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2): From 5e1485c4df3767358732cabdbba2feb0916dc052 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 11 Aug 2023 16:17:51 -0400 Subject: [PATCH 061/131] Small typo --- ocpmodels/models/tifaenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index be01d65e42..0a8cb71507 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -33,7 +33,7 @@ def __init__(self, d_model, version, dropout=0.1): self.interaction = GATConv( in_channels = d_model, out_channels = d_model, - heads = 3 + heads = 3, dropout = dropout ) else: From b7cfdf2178d18c48ce937fdab63341057dcf78eb Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 11 Aug 2023 17:02:03 -0400 Subject: [PATCH 062/131] Fixed another typo --- ocpmodels/models/tifaenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 0a8cb71507..abfb91d882 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -40,7 +40,7 @@ def __init__(self, d_model, version, dropout=0.1): self.interaction = GATv2Conv( in_channels = d_model, out_channels = d_model, - head = 3 + head = 3, dropout = dropout ) def forward(self, h_ads, h_cat, bipartite_edges): From a249d35c5ce9e0b6b7c7d40e5bb2ecd7dab2c082 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 11 Aug 2023 17:25:52 -0400 Subject: [PATCH 063/131] fixing some last mistakes --- debug.py | 2 +- ocpmodels/models/tifaenet.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/debug.py b/debug.py index 9130adc391..fc96aa314c 100644 --- a/debug.py +++ b/debug.py @@ -126,7 +126,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["skip_co"] = "concat" trainer_config["model"]["transformer_out"] = False trainer_config["model"]["tifaenet_mode"] = "gat" - trainer_config["model"]["tifaenet_gat_mode"] = "v1" + trainer_config["model"]["tifaenet_gat_mode"] = "v2" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index abfb91d882..b3b69d12e5 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -34,6 +34,7 @@ def __init__(self, d_model, version, dropout=0.1): in_channels = d_model, out_channels = d_model, heads = 3, + concat = False, dropout = dropout ) else: @@ -41,9 +42,12 @@ def __init__(self, d_model, version, 
dropout=0.1): in_channels = d_model, out_channels = d_model, head = 3, + concat = False, dropout = dropout ) def forward(self, h_ads, h_cat, bipartite_edges): + import ipdb + ipdb.set_trace() separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges) From eb3f71d0f6c5deb0500e0e532d2b923e655be76f Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 11 Aug 2023 17:27:51 -0400 Subject: [PATCH 064/131] Forgot an ipdb set trace. --- ocpmodels/models/tifaenet.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index b3b69d12e5..28c0416f5d 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -46,8 +46,6 @@ def __init__(self, d_model, version, dropout=0.1): dropout = dropout ) def forward(self, h_ads, h_cat, bipartite_edges): - import ipdb - ipdb.set_trace() separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges) From e2ab7ac239947ef0d885fb2092efbc747c5f49d4 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 14 Aug 2023 18:00:22 -0400 Subject: [PATCH 065/131] Testing the addition of edge weights to the graph model --- configs/exps/alvaro/all-training.yaml | 2 +- ocpmodels/datasets/heterogeneous.py | 10 +++++++--- ocpmodels/datasets/lmdb_dataset.py | 5 +++++ ocpmodels/models/tifaenet.py | 14 ++++++++++---- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 7e78aa998b..abb55ca978 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -48,6 +48,6 @@ runs: tifaenet_gat_mode: "v2" - config: tifaenet-is2re-all - mode: + model: tifaenet_mode: "gat" tifaenet_gat_mode: "v1" diff --git a/ocpmodels/datasets/heterogeneous.py b/ocpmodels/datasets/heterogeneous.py index fdc2a33a35..2a192eafb0 100644 --- a/ocpmodels/datasets/heterogeneous.py +++ b/ocpmodels/datasets/heterogeneous.py @@ -20,8 +20,12 @@ def __getitem__(self, idx): reaction[mode, "is_close", mode].edge_index = graph.edge_index - sender = torch.repeat_interleave(torch.arange(adsorbate.natoms.item()), catalyst.natoms.item()) - receiver = torch.arange(0, catalyst.natoms.item()).repeat(adsorbate.natoms.item()) - reaction["adsorbate", "is_disc", "catalyst"].edge_index = torch.stack([sender, receiver]) + sender = torch.repeat_interleave(torch.arange(catalyst.natoms.item()), adsorbate.natoms.item()) + receiver = torch.arange(0, adsorbate.natoms.item()).repeat(catalyst.natoms.item()) + reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack([sender, receiver]) + reaction["catalyst", "is_disc", "adsorbate"].edge_weight = torch.repeat_interleave( + reaction["catalyst"].pos[:, 2], + adsorbate.natoms.item(), + ) return reaction diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 4f1b54804c..673cf0b2dc 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -208,4 +208,9 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve cat_to_ads = torch.stack([ads_to_cat[1], ads_to_cat[0]]) batch["is_disc"].edge_index = torch.concat([ads_to_cat, cat_to_ads], dim = 1) + batch["is_disc"].edge_weight = torch.concat( + [batch["is_disc"].edge_weight, -batch["is_disc"].edge_weight], + dim = 0 + ) + return batch diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 
28c0416f5d..0a505474e1 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -35,6 +35,7 @@ def __init__(self, d_model, version, dropout=0.1): out_channels = d_model, heads = 3, concat = False, + edge_dim = 1, dropout = dropout ) else: @@ -43,12 +44,17 @@ def __init__(self, d_model, version, dropout=0.1): out_channels = d_model, head = 3, concat = False, + edge_dim = 1, dropout = dropout ) - def forward(self, h_ads, h_cat, bipartite_edges): + def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): + + import ipdb + ipdb.set_trace() + separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) - combined = self.interaction(combined, bipartite_edges) + combined = self.interaction(combined, bipartite_dges, bipartite_weights) ads, cat = combined[:separation_pt], combined[separation_pt:] ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) @@ -253,7 +259,7 @@ def __init__(self, **kwargs): inter_interaction_type = kwargs.get("tifaenet_mode", None) self.inter_interaction_type = inter_interaction_type - assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer" + assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer, gat" assert inter_interaction_type in {"attention", "transformer", "gat"}, "Using an invalid tifaenet_mode. Options: attention, transformer, gat" if inter_interaction_type == "transformer": inter_interaction_type = TransformerInteraction @@ -430,7 +436,7 @@ def energy_forward(self, data): extra_parameters = [index_ads, index_cat, batch_size] elif self.inter_interaction_type == "gat": - extra_parameters = [data["is_disc"].edge_index] + extra_parameters = [data["is_disc"].edge_index, data["is_disc"].edge_weight] # Fix edges between graphs # Now we do interactions. 
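To make the mechanism in the patch above easier to follow: every catalyst atom is connected to every adsorbate atom, the catalyst atom's z-coordinate travels as a scalar edge feature (negated for the reverse direction in the collater), and the concatenated node set is passed through a GATConv with edge_dim=1. A rough, self-contained sketch with toy sizes and simplified node indexing, not the repository's exact code:

import torch
from torch_geometric.nn import GATConv

n_ads, n_cat, d = 4, 10, 8                  # toy graph sizes and hidden width
h_ads = torch.randn(n_ads, d)               # adsorbate node embeddings
h_cat = torch.randn(n_cat, d)               # catalyst node embeddings
z_cat = torch.rand(n_cat) * 20.0            # toy catalyst z-coordinates

# Complete bipartite edge set: each catalyst atom paired with each adsorbate atom,
# carrying the catalyst atom's z-coordinate as the edge weight.
sender = torch.repeat_interleave(torch.arange(n_cat), n_ads)
receiver = torch.arange(n_ads).repeat(n_cat)
edge_weight = torch.repeat_interleave(z_cat, n_ads)

# Adsorbate nodes occupy indices [0, n_ads), catalyst nodes are offset by n_ads.
cat_to_ads = torch.stack([sender + n_ads, receiver])
ads_to_cat = torch.stack([receiver, sender + n_ads])
edge_index = torch.cat([cat_to_ads, ads_to_cat], dim=1)
edge_attr = torch.cat([edge_weight, -edge_weight]).unsqueeze(-1)  # [2E, 1]

gat = GATConv(in_channels=d, out_channels=d, heads=3, concat=False, edge_dim=1)
combined = torch.cat([h_ads, h_cat], dim=0)
out = gat(combined, edge_index, edge_attr)
h_ads_new, h_cat_new = out[:n_ads], out[n_ads:]   # split back into the two subgraphs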
From 886e1c215d6c1852ec6494cd0ca997af3f01a3a5 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 14 Aug 2023 22:17:36 -0400 Subject: [PATCH 066/131] forgot to remove a set trace --- ocpmodels/models/tifaenet.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 0a505474e1..ae2fc1b664 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -48,10 +48,6 @@ def __init__(self, d_model, version, dropout=0.1): dropout = dropout ) def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): - - import ipdb - ipdb.set_trace() - separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_dges, bipartite_weights) From 2ae470baffbc6f5636c27e485d7fc5fbc1612f46 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 14 Aug 2023 23:17:10 -0400 Subject: [PATCH 067/131] fixed typo --- ocpmodels/models/tifaenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index ae2fc1b664..0a47b5d7b0 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -50,7 +50,7 @@ def __init__(self, d_model, version, dropout=0.1): def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) - combined = self.interaction(combined, bipartite_dges, bipartite_weights) + combined = self.interaction(combined, bipartite_edges, bipartite_weights) ads, cat = combined[:separation_pt], combined[separation_pt:] ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) From 50f794d3d5436d010842d50b6d4db2ba3390858c Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 15 Aug 2023 11:52:55 -0400 Subject: [PATCH 068/131] Better disconnected edge weights implemented --- ocpmodels/models/faenet.py | 1 - ocpmodels/models/tifaenet.py | 20 +++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index 8cfadb6db4..d57dc6da23 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -153,7 +153,6 @@ def forward( self, z, rel_pos, edge_attr, tag=None, normalised_rel_pos=None, subnodes=None ): # --- Edge embedding -- - if self.edge_embed_type == "rij": e = self.lin_e1(rel_pos) elif self.edge_embed_type == "all_rij": diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 0a47b5d7b0..200bfddc94 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -23,7 +23,7 @@ from ocpmodels.models.utils.activations import swish class GATInteraction(nn.Module): - def __init__(self, d_model, version, dropout=0.1): + def __init__(self, d_model, version, edge_dim, dropout=0.1): super(GATInteraction, self).__init__() if version not in {"v1", "v2"}: @@ -35,7 +35,7 @@ def __init__(self, d_model, version, dropout=0.1): out_channels = d_model, heads = 3, concat = False, - edge_dim = 1, + edge_dim = edge_dim, dropout = dropout ) else: @@ -44,7 +44,7 @@ def __init__(self, d_model, version, dropout=0.1): out_channels = d_model, head = 3, concat = False, - edge_dim = 1, + edge_dim = edge_dim, dropout = dropout ) def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): @@ -192,6 +192,9 @@ def __init__(self, **kwargs): self.distance_expansion_cat = GaussianSmearing( 0.0, self.cutoff, kwargs["num_gaussians"] ) + self.distance_expansion_disc = GaussianSmearing( + 0.0, 25.0, 
kwargs["num_gaussians"] + ) # Embedding block self.embed_block_ads = EmbeddingBlock( @@ -220,6 +223,7 @@ def __init__(self, **kwargs): kwargs["second_layer_MLP"], kwargs["edge_embed_type"], ) + self.disc_edge_embed = Linear(kwargs["num_gaussians"], kwargs["num_filters"] // 2) # Interaction block self.interaction_blocks_ads = nn.ModuleList( @@ -269,7 +273,8 @@ def __init__(self, **kwargs): inter_interaction_type = GATInteraction inter_interaction_parameters = [ kwargs["hidden_channels"], - kwargs["tifaenet_gat_mode"] + kwargs["tifaenet_gat_mode"], + kwargs["num_filters"] // 2 ] self.inter_interactions = nn.ModuleList( @@ -432,7 +437,12 @@ def energy_forward(self, data): extra_parameters = [index_ads, index_cat, batch_size] elif self.inter_interaction_type == "gat": - extra_parameters = [data["is_disc"].edge_index, data["is_disc"].edge_weight] + edge_weights = self.distance_expansion_disc(data["is_disc"].edge_weight) + edge_weights = self.disc_edge_embed(edge_weights) + extra_parameters = [ + data["is_disc"].edge_index, + edge_weights, + ] # Fix edges between graphs # Now we do interactions. From 9d1959027459ae7062d31ef27f6d6939bdd81adf Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 09:05:49 -0400 Subject: [PATCH 069/131] Last update before cleaning up code base. --- ocpmodels/models/tifaenet.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 200bfddc94..d2ccc447d2 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -47,6 +47,7 @@ def __init__(self, d_model, version, edge_dim, dropout=0.1): edge_dim = edge_dim, dropout = dropout ) + def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) From b5bdd3c5f513798ac3321a4d12ca561e26397d84 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 09:07:39 -0400 Subject: [PATCH 070/131] Deleted sparse matrix implementation, which never ran fast enough to even train in 15 hours. 
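For reference, the edge-weight refinement from the previous patch expands each bipartite edge's scalar z-distance into radial-basis features and projects them to the GAT's edge dimension (num_filters // 2). A self-contained approximation of that idea; the real code uses the repository's GaussianSmearing with a 0.0 to 25.0 range at this point in the series (narrowed to 20.0 in the next patch), and the sizes below are illustrative:

import torch
from torch import nn

class GaussianSmearing(nn.Module):
    """Expand a scalar distance into num_gaussians RBF features (standard SchNet-style trick)."""
    def __init__(self, start=0.0, stop=25.0, num_gaussians=100):
        super().__init__()
        offset = torch.linspace(start, stop, num_gaussians)
        self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2
        self.register_buffer("offset", offset)

    def forward(self, dist):
        diff = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * diff.pow(2))

num_gaussians, num_filters = 100, 128
expand = GaussianSmearing(0.0, 25.0, num_gaussians)
embed = nn.Linear(num_gaussians, num_filters // 2)

z_weights = torch.rand(40) * 25.0            # toy bipartite edge distances
edge_attr = embed(expand(z_weights))          # [40, num_filters // 2] edge features for the GAT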
--- ocpmodels/models/tifaenet.py | 102 ----------------------------------- 1 file changed, 102 deletions(-) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index d2ccc447d2..f794607a9e 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -59,108 +59,6 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): return ads, cat -class TransformerInteraction(nn.Module): - def __init__(self, d_model, nhead = 2, num_encoder_layers = 2, num_decoder_layers = 2): - super(TransformerInteraction, self).__init__() - - self.transformer_ads = Transformer( - d_model = d_model, - nhead = nhead, - num_encoder_layers = num_encoder_layers, - num_decoder_layers = num_decoder_layers, - dim_feedforward = d_model, - batch_first = True - ) - - self.transformer_ads = Transformer( - d_model = d_model, - nhead = nhead, - num_encoder_layers = num_encoder_layers, - num_decoder_layers = num_decoder_layers, - dim_feedforward = d_model, - batch_first = True - ) - - def forward(self, h_ads, h_cat, ads_to_cat, cat_to_ads): - import ipdb - ipdb.set_trace() - - - return h_ads, h_cat - - -class AttentionInteraction(nn.Module): - def __init__(self, d_model): - super(AttentionInteraction, self).__init__() - - self.queries_ads = Linear(d_model, d_model) - self.keys_ads = Linear(d_model, d_model) - self.values_ads = Linear(d_model, d_model) - - self.queries_cat = Linear(d_model, d_model) - self.keys_cat = Linear(d_model, d_model) - self.values_cat = Linear(d_model, d_model) - - self.softmax = Softmax(dim = 1) - - def forward(self, - h_ads, h_cat, - index_ads, index_cat, - batch_size - ): - d_model = h_ads.shape[1] - natoms_ads = h_ads.shape[0] - natoms_cat = h_cat.shape[0] - - # Create matrices with values - query_ads = self.queries_ads(h_ads) - key_ads = self.keys_ads(h_ads) - value_ads = self.values_ads(h_ads) - - query_cat = self.queries_cat(h_cat) - key_cat = self.keys_cat(h_cat) - value_cat = self.values_cat(h_cat) - - key_cat_T_index, key_cat_T_val = transpose_sparse( - index_cat, key_cat.view(-1), - natoms_cat, d_model * batch_size - ) - key_ads_T_index, key_ads_T_val = transpose_sparse( - index_ads, key_ads.view(-1), - natoms_ads, d_model * batch_size - ) - - index_att_ads, attention_ads = spspmm( - index_ads, query_ads.view(-1), - key_cat_T_index, key_cat_T_val, - natoms_ads, d_model * batch_size, natoms_cat - ) - attention_ads = SparseTensor( - row=index_att_ads[0], col=index_att_ads[1], value=attention_ads - ).to_dense() - attention_ads = self.softmax(attention_ads / math.sqrt(d_model)) - new_h_ads = torch.matmul(attention_ads, value_cat) - - index_att_cat, attention_cat = spspmm( - index_cat, query_cat.view(-1), - key_ads_T_index, key_ads_T_val, - natoms_cat, d_model * batch_size, natoms_ads - ) - attention_cat = SparseTensor( - row=index_att_cat[0], col=index_att_cat[1], value=attention_cat - ).to_dense() - attention_cat = self.softmax(attention_cat / math.sqrt(d_model)) - new_h_cat = torch.matmul(attention_cat, value_ads) - - new_h_ads = h_ads + new_h_ads - new_h_cat = h_cat + new_h_cat - - new_h_ads = nn.functional.normalize(new_h_ads) - new_h_cat = nn.functional.normalize(new_h_cat) - - return new_h_ads, new_h_cat - - @registry.register_model("tifaenet") class TIFaenet(BaseModel): def __init__(self, **kwargs): From b614b719ccb4ef04e720a266a0bf593a890f5f99 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 09:30:33 -0400 Subject: [PATCH 071/131] Changed named of TIFaenet to AFaenet. 
ALSO I changed the largest z-index from 25.0 to 20.0 --- .../models/{tifaenet.yaml => afaenet.yaml} | 0 ocpmodels/models/tifaenet.py | 103 +++--------------- ocpmodels/trainers/base_trainer.py | 2 +- 3 files changed, 18 insertions(+), 87 deletions(-) rename configs/models/{tifaenet.yaml => afaenet.yaml} (100%) diff --git a/configs/models/tifaenet.yaml b/configs/models/afaenet.yaml similarity index 100% rename from configs/models/tifaenet.yaml rename to configs/models/afaenet.yaml diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index f794607a9e..46401fc254 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -59,8 +59,8 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): return ads, cat -@registry.register_model("tifaenet") -class TIFaenet(BaseModel): +@registry.register_model("afaenet") +class AFaenet(BaseModel): def __init__(self, **kwargs): super(TIFaenet, self).__init__() @@ -92,8 +92,9 @@ def __init__(self, **kwargs): 0.0, self.cutoff, kwargs["num_gaussians"] ) self.distance_expansion_disc = GaussianSmearing( - 0.0, 25.0, kwargs["num_gaussians"] + 0.0, 20.0, kwargs["num_gaussians"] ) + # Set the second parameter as the highest possible z-axis value # Embedding block self.embed_block_ads = EmbeddingBlock( @@ -154,31 +155,14 @@ def __init__(self, **kwargs): ] ) - # Transformer Interaction - - inter_interaction_type = kwargs.get("tifaenet_mode", None) - self.inter_interaction_type = inter_interaction_type - assert inter_interaction_type is not None, "When using TIFaenet, tifaenet_mode is needed. Options: attention, transformer, gat" - assert inter_interaction_type in {"attention", "transformer", "gat"}, "Using an invalid tifaenet_mode. Options: attention, transformer, gat" - if inter_interaction_type == "transformer": - inter_interaction_type = TransformerInteraction - - elif inter_interaction_type == "attention": - inter_interaction_type = AttentionInteraction - inter_interaction_parameters = [kwargs["hidden_channels"]] - - elif inter_interaction_type == "gat": - assert "tifaenet_gat_mode" in kwargs, "When using GAT mode, a version needs to be specified. Options: v1, v2." 
- inter_interaction_type = GATInteraction - inter_interaction_parameters = [ - kwargs["hidden_channels"], - kwargs["tifaenet_gat_mode"], - kwargs["num_filters"] // 2 - ] - + # Inter Interaction self.inter_interactions = nn.ModuleList( [ - inter_interaction_type(*inter_interaction_parameters) + GATInteraction( + kwargs["hidden_channels"], + kwargs["tifaenet_gat_mode"], + kwargs["num_filters"] // 2, + ) for _ in range(kwargs["num_interactions"]) ] ) @@ -285,64 +269,9 @@ def energy_forward(self, data): alpha_ads = None alpha_cat = None - # Interaction and transformer blocks - - if self.inter_interaction_type == "attention": - # Start by setting up the sparse matrices in scipy - natoms_ads = h_ads.shape[0] - natoms_cat = h_cat.shape[0] - - dummy_ads = torch.arange(natoms_ads * self.hidden_channels).numpy() - dummy_cat = torch.ones(natoms_cat * self.hidden_channels).numpy() - - crowd_indices_ads = torch.arange( - start = 0, end = (natoms_ads + 1)*self.hidden_channels, step = self.hidden_channels, - ).numpy() - crowd_indices_cat = torch.arange( - start = 0, end = (natoms_cat + 1)*self.hidden_channels, step = self.hidden_channels, - ).numpy() - - raw_col_indices = [ - [torch.arange(self.hidden_channels) + (10*j)] * i - for i, j - in zip(adsorbates.natoms, range(batch_size)) - ] - col_indices = [] - for graph in raw_col_indices: - col_indices += graph - col_indices_ads = torch.concat(col_indices).numpy() - - raw_col_indices = [ - [torch.arange(self.hidden_channels) + (10*j)] * i - for i, j - in zip(catalysts.natoms, range(batch_size)) - ] - col_indices = [] - for graph in raw_col_indices: - col_indices += graph - col_indices_cat = torch.concat(col_indices).numpy() - - sparse_ads = sparse.csr_array( - (dummy_ads, col_indices_ads, crowd_indices_ads), shape=(natoms_ads, dummy_ads.shape[0]) - ).tocoo() - row_ads, col_ads = torch.from_numpy(sparse_ads.row), torch.from_numpy(sparse_ads.col) - index_ads = torch.concat([row_ads.view(1, -1), col_ads.view(1, -1)], dim=0).long().to(h_ads.device) - - sparse_cat = sparse.csr_array( - (dummy_cat, col_indices_cat, crowd_indices_cat), shape=(natoms_cat, dummy_cat.shape[0]) - ).tocoo() - row_cat, col_cat = torch.from_numpy(sparse_cat.row), torch.from_numpy(sparse_cat.col) - index_cat = torch.concat([row_cat.view(1, -1), col_cat.view(1, -1)], dim=0).long().to(h_ads.device) - - extra_parameters = [index_ads, index_cat, batch_size] - elif self.inter_interaction_type == "gat": - edge_weights = self.distance_expansion_disc(data["is_disc"].edge_weight) - edge_weights = self.disc_edge_embed(edge_weights) - extra_parameters = [ - data["is_disc"].edge_index, - edge_weights, - ] - # Fix edges between graphs + # Edge embeddings of the complete bipartite graph. + edge_weights = self.distance_expansion_disc(data["is_disc"].edge_weight) + edge_weights = self.disc_edge_embed(edge_weights) # Now we do interactions. 
energy_skip_co_ads = [] @@ -374,8 +303,10 @@ def energy_forward(self, data): intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) h_ads, h_cat = inter_interaction( - intra_ads, intra_cat, - *extra_parameters + intra_ads, + intra_cat, + data["is_disc"].edge_index, + edge_weights, ) # Atom skip-co diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 6e66b73190..08761d5b2e 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -164,7 +164,7 @@ def __init__(self, **kwargs): # Here's the models whose graphs are disconnected in the dataset self.separate_models = ["indfaenet"] - self.heterogeneous_models = ["tifaenet"] + self.heterogeneous_models = ["afaenet"] self.data_mode = "normal" self.separate_dataset = False From c6062989615b3d83a6c3d75b8eb67926887607a1 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 09:37:07 -0400 Subject: [PATCH 072/131] Cleaned up the config files --- configs/exps/alvaro/10k-training.yaml | 12 ++--- configs/exps/alvaro/all-training.yaml | 10 ++-- configs/exps/alvaro/default-config.yaml | 16 ------- configs/exps/alvaro/faenet-orion.yaml | 62 ------------------------- ocpmodels/models/tifaenet.py | 5 +- 5 files changed, 12 insertions(+), 93 deletions(-) delete mode 100644 configs/exps/alvaro/default-config.yaml delete mode 100644 configs/exps/alvaro/faenet-orion.yaml diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 79915904c8..23926c71a5 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.0005 + lr_initial: 0.001 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -42,12 +42,10 @@ default: eval_batch_size: 256 runs: - - config: tifaenet-is2re-10k + - config: afaenet-is2re-10k model: - tifaenet_mode: "gat" - tifaenet_gat_mode: "v2" + afaenet_gat_mode: "v1" - - config: tifaenet-is2re-10k + - config: afaenet-is2re-10k model: - tifaenet_mode: "gat" - tifaenet_gat_mode: "v1" + afaenet_gat_mode: "v2" diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index abb55ca978..f49ae0e0df 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,12 +42,10 @@ default: eval_batch_size: 256 runs: - - config: tifaenet-is2re-all + - config: afaenet-is2re-all model: - tifaenet_mode: "gat" - tifaenet_gat_mode: "v2" + afaenet_gat_mode: "v1" - - config: tifaenet-is2re-all + - config: afaenet-is2re-all model: - tifaenet_mode: "gat" - tifaenet_gat_mode: "v1" + afaenet_gat_mode: "v2" diff --git a/configs/exps/alvaro/default-config.yaml b/configs/exps/alvaro/default-config.yaml deleted file mode 100644 index c04e1ee533..0000000000 --- a/configs/exps/alvaro/default-config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -job: - mem: 32GB - cpus: 4 - gres: gpu:rtx8000:1 - partition: long - -default: - test_ri: True - mode: train - wandb_name: alvaro-carbonero-math # DO NOT USE THIS CONFIG FILE. IT'S BAD! 
- wandb_project: ocp-alvaro - -runs: - # Run 1 - - config: faenet-is2re-10k - note: 'default-with-train-mode' diff --git a/configs/exps/alvaro/faenet-orion.yaml b/configs/exps/alvaro/faenet-orion.yaml deleted file mode 100644 index 229d8bcc0b..0000000000 --- a/configs/exps/alvaro/faenet-orion.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# more epochs, larger batch size, explore faenet: larger model & skip-co & mlp_rij -job: - mem: 8GB - cpus: 4 - gres: gpu:1 - time: 30:00 - partition: main - # code_loc: /home/mila/s/schmidtv/ocp-project/ocp-drlab - # env: ocp-a100 - -default: - wandb_project: ocp-alvaro - config: faenet-is2re-10k - mode: train - test_ri: true - wandb_tags: is2re-10k, orion - cp_data_to_tmpdir: false - graph_rewiring: remove-tag-0 - log_train_every: 20 - optim: - warmup_steps: 100 - # parameters EMA - ema_decay: 0.999 - decay_steps: max_steps - scheduler: LinearWarmupCosineAnnealingLR - batch_size: 64 - note: - model: name, num_gaussians, hidden_channels, num_filters, num_interactions, phys_embeds, pg_hidden_channels, phys_hidden_channels, tag_hidden_channels, energy_head, edge_embed_type, mp_type, graph_norm - optim: batch_size, lr_initial - _root_: frame_averaging, fa_frames - orion_mult_factor: - value: 32 - targets: hidden_channels, num_filters, pg_hidden_channels, phys_hidden_channels, tag_hidden_channels - -orion: - # Remember to change the experiment name if you change anything in the search space - n_jobs: 20 - - unique_exp_name: faenet-is2re-10k-v1.3.0 - - space: - optim/max_epochs: fidelity(20, 100, base=4) - optim/lr_initial: loguniform(1e-4, 5e-3, precision=2) - model/graph_norm: choices([True, False]) - model/edge_embed_type: choices(["rij", "all_rij", "sh", "all"]) - model/energy_head: choices(["", "weighted-av-final-embeds", "weighted-av-initial-embeds"]) - model/hidden_channels: uniform(4, 16, discrete=True) - model/mp_type: choices(["simple", "base", "sfarinet", "updownscale", "updownscale_base", "base_with_att", "att", "local_env", "updown_local_env"]) - model/num_filters: uniform(1, 16, discrete=True) - model/num_gaussians: uniform(20, 150, discrete=True) - model/num_interactions: uniform(1, 7, discrete=True) - model/pg_hidden_channels: uniform(0, 2, discrete=True) - model/phys_embeds: choices([True, False]) - model/phys_hidden_channels: uniform(0, 2, discrete=True) - model/tag_hidden_channels: uniform(0, 2, discrete=True) - frame_averaging: choices(["", "2D", "3D", "DA"]) - fa_frames: choices(["", "random", "det", "all", "se3-all", "se3-random", "se3-det", "multiple", "se3-multiple"]) - algorithms: - asha: - seed: 123 - num_rungs: 5 - num_brackets: 1 diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/tifaenet.py index 46401fc254..a71b3281d0 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/tifaenet.py @@ -62,7 +62,7 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): @registry.register_model("afaenet") class AFaenet(BaseModel): def __init__(self, **kwargs): - super(TIFaenet, self).__init__() + super(AFaenet, self).__init__() self.cutoff = kwargs["cutoff"] self.energy_head = kwargs["energy_head"] @@ -155,12 +155,13 @@ def __init__(self, **kwargs): ] ) + assert "afaenet_gat_mode" in kwargs, "Faenet version needs to be specified. 
Options: v1, v2" # Inter Interaction self.inter_interactions = nn.ModuleList( [ GATInteraction( kwargs["hidden_channels"], - kwargs["tifaenet_gat_mode"], + kwargs["afaenet_gat_mode"], kwargs["num_filters"] // 2, ) for _ in range(kwargs["num_interactions"]) From 86d70798b49b2d01087368362705947b2b14a206 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 10:08:45 -0400 Subject: [PATCH 073/131] Combined personalized datasets into one --- ocpmodels/datasets/heterogeneous.py | 31 ------------------- ...{separate_dataset.py => other_datasets.py} | 29 +++++++++++++++-- 2 files changed, 26 insertions(+), 34 deletions(-) delete mode 100644 ocpmodels/datasets/heterogeneous.py rename ocpmodels/datasets/{separate_dataset.py => other_datasets.py} (80%) diff --git a/ocpmodels/datasets/heterogeneous.py b/ocpmodels/datasets/heterogeneous.py deleted file mode 100644 index 2a192eafb0..0000000000 --- a/ocpmodels/datasets/heterogeneous.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch - -from ocpmodels.datasets.separate_dataset import SeparateLmdbDataset -from ocpmodels.common.registry import registry - -from torch_geometric.data import HeteroData - -@registry.register_dataset("heterogeneous") -class HeterogeneousDataset(SeparateLmdbDataset): - def __getitem__(self, idx): - adsorbate, catalyst = super().__getitem__(idx) - - reaction = HeteroData() - for graph in [adsorbate, catalyst]: - mode = graph.mode - for key in graph.keys: - if key == "edge_index": - continue - reaction[mode][key] = graph[key] - - reaction[mode, "is_close", mode].edge_index = graph.edge_index - - sender = torch.repeat_interleave(torch.arange(catalyst.natoms.item()), adsorbate.natoms.item()) - receiver = torch.arange(0, adsorbate.natoms.item()).repeat(catalyst.natoms.item()) - reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack([sender, receiver]) - reaction["catalyst", "is_disc", "adsorbate"].edge_weight = torch.repeat_interleave( - reaction["catalyst"].pos[:, 2], - adsorbate.natoms.item(), - ) - - return reaction diff --git a/ocpmodels/datasets/separate_dataset.py b/ocpmodels/datasets/other_datasets.py similarity index 80% rename from ocpmodels/datasets/separate_dataset.py rename to ocpmodels/datasets/other_datasets.py index 0fe49a3bd9..2e58480994 100644 --- a/ocpmodels/datasets/separate_dataset.py +++ b/ocpmodels/datasets/other_datasets.py @@ -4,10 +4,8 @@ import time from pathlib import Path -import lmdb -import numpy as np import torch -from torch_geometric.data import Data +from torch_geometric.data import Data, HeteroData from ocpmodels.datasets.lmdb_dataset import LmdbDataset from ocpmodels.common.registry import registry @@ -132,3 +130,28 @@ def __getitem__(self, idx): catalyst.total_get_time = total_get_time return (adsorbate, catalyst) + +@registry.register_dataset("heterogeneous") +class HeterogeneousDataset(SeparateLmdbDataset): + def __getitem__(self, idx): + adsorbate, catalyst = super().__getitem__(idx) + + reaction = HeteroData() + for graph in [adsorbate, catalyst]: + mode = graph.mode + for key in graph.keys: + if key == "edge_index": + continue + reaction[mode][key] = graph[key] + + reaction[mode, "is_close", mode].edge_index = graph.edge_index + + sender = torch.repeat_interleave(torch.arange(catalyst.natoms.item()), adsorbate.natoms.item()) + receiver = torch.arange(0, adsorbate.natoms.item()).repeat(catalyst.natoms.item()) + reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack([sender, receiver]) + reaction["catalyst", "is_disc", "adsorbate"].edge_weight = 
torch.repeat_interleave( + reaction["catalyst"].pos[:, 2], + adsorbate.natoms.item(), + ) + + return reaction From 00b4f9f48d69d585466e306b3c7886e3d93fe0bb Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 17 Aug 2023 11:28:06 -0400 Subject: [PATCH 074/131] Changed the name of tifaenet to afaenet --- configs/models/afaenet.yaml | 2 +- debug.py | 6 ++---- ocpmodels/models/{tifaenet.py => afaenet.py} | 2 +- ocpmodels/models/faenet.py | 2 +- ocpmodels/trainers/base_trainer.py | 1 + 5 files changed, 6 insertions(+), 7 deletions(-) rename ocpmodels/models/{tifaenet.py => afaenet.py} (99%) diff --git a/configs/models/afaenet.yaml b/configs/models/afaenet.yaml index d4125a0568..bb24eabf95 100644 --- a/configs/models/afaenet.yaml +++ b/configs/models/afaenet.yaml @@ -1,6 +1,6 @@ default: model: - name: tifaenet + name: afaenet act: swish hidden_channels: 128 num_filters: 100 diff --git a/debug.py b/debug.py index fc96aa314c..1b9e72b80d 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "tifaenet-is2re-10k" + args.config = "afaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" @@ -125,8 +125,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" trainer_config["model"]["transformer_out"] = False - trainer_config["model"]["tifaenet_mode"] = "gat" - trainer_config["model"]["tifaenet_gat_mode"] = "v2" + trainer_config["model"]["afaenet_gat_mode"] = "v1" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 @@ -161,7 +160,6 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config = merge_dicts(trainer_config, hparams) # -- Setup trainer - trainer_config = continue_orion_exp(trainer_config) trainer_config = auto_note(trainer_config) trainer_config = set_min_hidden_channels(trainer_config) diff --git a/ocpmodels/models/tifaenet.py b/ocpmodels/models/afaenet.py similarity index 99% rename from ocpmodels/models/tifaenet.py rename to ocpmodels/models/afaenet.py index a71b3281d0..a3e231ac37 100644 --- a/ocpmodels/models/tifaenet.py +++ b/ocpmodels/models/afaenet.py @@ -192,7 +192,7 @@ def __init__(self, **kwargs): kwargs["num_interactions"] + 1, 1 ) - elif kwargs["model_name"] in {"indfaenet", "tifaenet"}: + elif kwargs["model_name"] in {"indfaenet", "afaenet"}: self.mlp_skip_co_ads = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index d57dc6da23..c47a0c9d77 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -406,7 +406,7 @@ def __init__( self.lin1 = Linear(hidden_channels, hidden_channels // 2) if model_name == "faenet": self.lin2 = Linear(hidden_channels // 2, 1) - elif model_name in {"indfaenet", "tifaenet"}: + elif model_name in {"indfaenet", "afaenet"}: # These are models that output more than one scalar. 
self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) # weighted average & pooling diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 08761d5b2e..b8273cf6c4 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -51,6 +51,7 @@ class BaseTrainer(ABC): def __init__(self, **kwargs): run_dir = kwargs["run_dir"] + model_name = kwargs["model"].pop( "name", kwargs.get("model_name", "Unknown - base_trainer issue") ) From 629853937216b4a8588c03d36997e28f5603b45d Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 18 Aug 2023 17:01:55 -0400 Subject: [PATCH 075/131] Data base cleaned, making sure results still make sense --- configs/exps/alvaro/all-training.yaml | 12 +++++++++--- debug.py | 2 +- ocpmodels/models/afaenet.py | 26 ++++++++------------------ ocpmodels/models/faenet.py | 2 +- 4 files changed, 19 insertions(+), 23 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index f49ae0e0df..fffb6a7640 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.0005 + lr_initial: 0.001 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 @@ -46,6 +46,12 @@ runs: model: afaenet_gat_mode: "v1" - - config: afaenet-is2re-all + - config: depfaenet-is2re-all + + - config: indfaenet-is2re-all + + - config: faenet-is2re-all + + - config: faenet-is2re-all model: - afaenet_gat_mode: "v2" + is_disconnected: True diff --git a/debug.py b/debug.py index 1b9e72b80d..c993cc0d6a 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "afaenet-is2re-10k" + args.config = "depfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index a3e231ac37..37c6dd0d01 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -183,24 +183,14 @@ def __init__(self, **kwargs): # Skip co if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough - if kwargs["model_name"] in {"faenet", "depfaenet"}: - self.mlp_skip_co_ads = Linear( - kwargs["num_interactions"] + 1, - 1 - ) - self.mlp_skip_co_cat = Linear( - kwargs["num_interactions"] + 1, - 1 - ) - elif kwargs["model_name"] in {"indfaenet", "afaenet"}: - self.mlp_skip_co_ads = Linear( - (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, - kwargs["hidden_channels"] // 2 - ) - self.mlp_skip_co_cat = Linear( - (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, - kwargs["hidden_channels"] // 2 - ) + self.mlp_skip_co_ads = Linear( + (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, + kwargs["hidden_channels"] // 2 + ) + self.mlp_skip_co_cat = Linear( + (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, + kwargs["hidden_channels"] // 2 + ) elif self.skip_co == "concat_atom": self.mlp_skip_co = Linear( diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index c47a0c9d77..433eac94db 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -590,7 +590,7 @@ def __init__(self, **kwargs): ) # Skip co - if self.skip_co == "concat": # for the implementation of independent faenet, make sure 
the input is large enough + if self.skip_co == "concat": if kwargs["model_name"] in ["faenet", "depfaenet"]: self.mlp_skip_co = Linear((kwargs["num_interactions"] + 1), 1) elif kwargs["model_name"] == "indfaenet": From d1df105bfdec2868e90e7384649ef828cd4275c6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 21 Aug 2023 13:27:44 -0400 Subject: [PATCH 076/131] Fixed and improved the implementation of indfaenet --- configs/exps/alvaro/10k-training.yaml | 4 +-- debug.py | 2 +- ocpmodels/common/data_parallel.py | 6 ++++ ocpmodels/datasets/lmdb_dataset.py | 50 +++++++++++++++++++------- ocpmodels/models/base_model.py | 10 ++++-- ocpmodels/models/indfaenet.py | 51 +++------------------------ ocpmodels/trainers/single_trainer.py | 31 +++++++++++++--- 7 files changed, 82 insertions(+), 72 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 23926c71a5..f42049c5c1 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -46,6 +46,4 @@ runs: model: afaenet_gat_mode: "v1" - - config: afaenet-is2re-10k - model: - afaenet_gat_mode: "v2" + - config: indfaenet-is2re-10k diff --git a/debug.py b/debug.py index c993cc0d6a..1b9e72b80d 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "depfaenet-is2re-10k" + args.config = "afaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/common/data_parallel.py b/ocpmodels/common/data_parallel.py index 6f0ceca86b..f5edd3bbe6 100644 --- a/ocpmodels/common/data_parallel.py +++ b/ocpmodels/common/data_parallel.py @@ -13,6 +13,7 @@ import numpy as np import torch from torch.utils.data import BatchSampler, DistributedSampler, Sampler +from torch_geometric.data import Data from ocpmodels.common import dist_utils from ocpmodels.datasets import data_list_collater @@ -53,6 +54,11 @@ def forward(self, batch_list, **kwargs): return self.module(batch_list[0], **kwargs) if len(self.device_ids) == 1: + if type(batch_list[0]) is list: + return self.module([ + batch_list[0][0].to(f"cuda:{self.device_ids[0]}"), + batch_list[0][1].to(f"cuda:{self.device_ids[0]}") + ], **kwargs) return self.module(batch_list[0].to(f"cuda:{self.device_ids[0]}"), **kwargs) for t in chain(self.module.parameters(), self.module.buffers()): diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 673cf0b2dc..efc13ba7f5 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -16,7 +16,7 @@ import numpy as np import torch from torch.utils.data import Dataset -from torch_geometric.data import Batch, HeteroData +from torch_geometric.data import Batch, HeteroData, Data from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform @@ -157,20 +157,27 @@ def __init__(self, config, transform=None): ) +# In this function, we combine a list of samples into a batch. Notice that we first create the batch, then we fix +# the neighbor problem: that some elements in the batch don't have edges, which pytorch geometric doesn't handle well +# and which leads to errors in the forward step. 
def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ever used - if type(data_list[0]) is tuple: - graphs = [system[0] for system in data_list] + [system[1] for system in data_list] - batch = Batch.from_data_list(graphs) - for i in range(len(batch)): - if batch[i].neighbors.shape[0] == 0: - batch[i].neighbors = torch.tensor( - [0], - device = batch[i].neighbors.device, - dtype = torch.int64 - ) + # FIRST, MAKE BATCH + + if ( # This is for indfaenet + type(data_list[0]) is tuple + and type(data_list[0][0]) is Data + ): + adsorbates = [system[0] for system in data_list] + catalysts = [system[1] for system in data_list] + + ads_batch = Batch.from_data_list(adsorbates) + cat_batch = Batch.from_data_list(catalysts) else: batch = Batch.from_data_list(data_list) + + # THEN, FIX NEIGHBOR PROBLEM + if ( not otf_graph and hasattr(data_list[0], "edge_index") @@ -187,9 +194,26 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve "LMDB does not contain edge index information, set otf_graph=True" ) - elif ( + elif ( # This is for indfaenet + not otf_graph + and type(data_list[0]) is tuple + and type(data_list[0][0]) is Data + ): + batches = [ads_batch, cat_batch] + lists = [adsorbates, catalysts] + for batch, list_type in zip(batches, lists): + n_neighbors = [] + for i, data in enumerate(list_type): + n_index = data.edge_index[1, :] + n_neighbors.append(n_index.shape[0]) + batch.neighbors = torch.tensor(n_neighbors) + + return batches + + + elif ( # This is for afaenet not otf_graph - and hasattr(data_list[0]["adsorbate", "is_close", "adsorbate"], "edge_index") + and type(data_list[0]) is HeteroData ): # First, fix the neighborhood dimension. n_neighbors_ads = [] diff --git a/ocpmodels/models/base_model.py b/ocpmodels/models/base_model.py index b9e3e9e40f..695f74c6d9 100644 --- a/ocpmodels/models/base_model.py +++ b/ocpmodels/models/base_model.py @@ -9,6 +9,7 @@ import torch import torch.nn as nn from torch_geometric.nn import radius_graph +from torch_geometric.data import HeteroData from ocpmodels.common.utils import ( compute_neighbors, @@ -44,11 +45,14 @@ def forward(self, data, mode="train"): # energy gradient w.r.t. positions will be computed if mode == "train" or self.regress_forces == "from_energy": - try: - data.pos.requires_grad_(True) - except: + if type(data) is list: + data[0].pos.requires_grad_(True) + data[1].pos.requires_grad_(True) + elif type(data[0]) is HeteroData: data["adsorbate"].pos.requires_grad_(True) data["catalyst"].pos.requires_grad_(True) + else: + data.pos.requires_grad_(True) # predict energy preds = self.energy_forward(data) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index c46769c12b..ca39e5c72d 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -75,53 +75,10 @@ def __init__(self, **kwargs): ) def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! - batch_size = len(data) // 2 - - adsorbates = Batch.from_data_list(data[:batch_size]) - catalysts = Batch.from_data_list(data[batch_size:]) - - # Fixing neighbor's dimensions. This error happens when an adsorbate has 0 edges. 
- num_adsorbates = len(adsorbates) - # Find indices of adsorbates without edges: - edgeless_ads = [ - i for i - in range(num_adsorbates) - if adsorbates[i].neighbors.shape[0] == 0 - ] - if len(edgeless_ads) > 0: - # Since most adsorbates have an edge, - # we pop those values specifically from range(num_adsorbates) - mask = list(range(num_adsorbates)) - num_popped = 0 # We can do this since edgeless is already sorted - for unwanted in edgeless_ads: - mask.pop(unwanted-num_popped) - num_popped += 1 - new_nbrs = torch.zeros( - num_adsorbates, - dtype = torch.int64, - device = adsorbates.neighbors.device, - ) - new_nbrs[mask] = adsorbates.neighbors - adsorbates.neighbors = new_nbrs - - # Now for catalysts - num_catalysts = len(catalysts) - edgeless_cats = [i for i in range(num_catalysts) if catalysts[i].neighbors.shape[0] == 0] - if len(edgeless_cats) > 0: - mask = list(range(num_catalysts)) - num_popped = 0 - for unwanted in edgeless_cats: - mask.pop(unwanted-num_popped) - num_popped += 1 - - # Now, we create the new neighbors. - new_nbrs = torch.zeros( - num_catalysts, - dtype = torch.int64, - device = catalysts.neighbors.device, - ) - new_nbrs[mask] = catalysts.neighbors - catalysts.neighbors = new_nbrs + import ipdb + ipdb.set_trace() + adsorbates = data[0] + catalysts = data[1] # We make predictions for each pred_ads = self.ads_model(adsorbates, mode) diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 40bbffb475..120c4acc2c 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -482,20 +482,29 @@ def model_forward(self, batch_list, mode="train"): if self.data_mode == "heterogeneous": original_pos_ads = batch_list[0]["adsorbate"].pos original_pos_cat = batch_list[0]["catalyst"].pos + if self.task_name in OCP_TASKS: original_cell = batch_list[0]["catalyst"].cell + + fa_pos_length = len(batch_list[0]["adsorbate"].fa_pos) + elif self.data_mode == "separate": + original_pos_ads = batch_list[0][0].pos + original_pos_cat = batch_list[0][1].pos + + if self.task_name in OCP_TASKS: + original_cell = batch_list[0][1].cell + + fa_pos_length = len(batch_list[0][0].fa_pos) else: original_pos = batch_list[0].pos + if self.task_name in OCP_TASKS: original_cell = batch_list[0].cell - e_all, p_all, f_all, gt_all = [], [], [], [] - # Compute model prediction for each frame - if self.data_mode == "heterogeneous": - fa_pos_length = len(batch_list[0]["adsorbate"].fa_pos) - else: fa_pos_length = len(batch_list[0].fa_pos) + e_all, p_all, f_all, gt_all = [], [], [], [] + # Compute model prediction for each frame for i in range(fa_pos_length): if self.data_mode == "heterogeneous": batch_list[0]["adsorbate"].pos = batch_list[0]["adsorbate"].fa_pos[i] @@ -503,6 +512,12 @@ def model_forward(self, batch_list, mode="train"): if self.task_name in OCP_TASKS: batch_list[0]["adsorbate"].cell = batch_list[0]["adsorbate"].fa_cell[i] batch_list[0]["catalyst"].cell = batch_list[0]["catalyst"].fa_cell[i] + elif self.data_mode == "separate": + batch_list[0][0].pos = batch_list[0][0].fa_pos[i] + batch_list[0][1].pos = batch_list[0][1].fa_pos[i] + if self.task_name in OCP_TASKS: + batch_list[0][0].cell = batch_list[0][0].fa_cell[i] + batch_list[0][1].cell = batch_list[0][1].fa_cell[i] else: batch_list[0].pos = batch_list[0].fa_pos[i] if self.task_name in OCP_TASKS: @@ -552,6 +567,12 @@ def model_forward(self, batch_list, mode="train"): if self.task_name in OCP_TASKS: batch_list[0]["adsorbate"].cell = original_cell batch_list[0]["catalyst"].cell = 
original_cell + elif self.data_mode == "separate": + batch_list[0][0].pos = original_pos_ads + batch_list[0][1].pos = original_pos_cat + if self.task_name in OCP_TASKS: + batch_list[0][0].cell = original_cell + batch_list[0][1].cell = original_cell else: batch_list[0].pos = original_pos if self.task_name in OCP_TASKS: From 4b9d648d825b5b4db4a014515c700aee541aa0fa Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 21 Aug 2023 13:28:56 -0400 Subject: [PATCH 077/131] Added other experiments to make sure they still work --- configs/exps/alvaro/10k-training.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index f42049c5c1..2e5f7ce7b7 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -47,3 +47,7 @@ runs: afaenet_gat_mode: "v1" - config: indfaenet-is2re-10k + + - config: depfaenet-is2re-10k + + - config: faenet-is2re-10k From 89c9f6d8b4c906c75347676992247894168ebad6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 21 Aug 2023 15:35:35 -0400 Subject: [PATCH 078/131] Forgot to delete a set trace --- ocpmodels/models/indfaenet.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index ca39e5c72d..64a83cfa2f 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -75,8 +75,6 @@ def __init__(self, **kwargs): ) def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! - import ipdb - ipdb.set_trace() adsorbates = data[0] catalysts = data[1] From c8444f64859f369a59cdb7c142a72eeab7bc6c79 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 21 Aug 2023 17:53:59 -0400 Subject: [PATCH 079/131] Fixed some errors caused when using indfaenet --- ocpmodels/trainers/single_trainer.py | 37 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 120c4acc2c..8a87ea3b78 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -608,19 +608,19 @@ def compute_loss(self, preds, batch_list): ], dim=0 ) - - elif self.data_mode != "normal": + + elif self.data_mode == "separate": + energy_target = batch_list[0][0].y_relaxed.to(self.device) + else: energy_target = torch.cat( [ - batch.y_relaxed.to(self.device) + batch[0][0].y_relaxed.to(self.device) if self.task_name == "is2re" else batch.y.to(self.device) for batch in batch_list ], dim=0, ) - else: - energy_target = batch_list[0].y_relaxed[:preds["energy"].shape[0]].to(self.device) if self.normalizer.get("normalize_labels", False): hofs = None @@ -719,17 +719,22 @@ def compute_metrics( if self.data_mode == "heterogeneous": natoms = (batch_list[0]["adsorbate"].natoms.to(self.device) + batch_list[0]["catalyst"].natoms.to(self.device)) - else: - natoms = torch.cat( - [batch.natoms.to(self.device) for batch in batch_list], dim=0 - ) - - if self.data_mode == "heterogeneous": target = { "energy": batch_list[0]["adsorbate"].y_relaxed.to(self.device), "natoms": natoms, } - elif self.data_mode != "normal": + + elif self.data_mode == "separate": + natoms = batch_list[0][0].natoms.to(self.device) + batch_list[0][1].natoms.to(self.device) + target = { + "energy": batch_list[0][0].y_relaxed.to(self.device), + "natoms": natoms, + } + + else: + natoms = torch.cat( + [batch.natoms.to(self.device) for batch in batch_list], dim=0 + ) target = { "energy": torch.cat( [ @@ -738,15 
+743,11 @@ def compute_metrics( else batch.y.to(self.device) for batch in batch_list ], - dim=0, + dim = 0, ), "natoms": natoms, } - else: - target = { - "energy": batch_list[0].y_relaxed[:preds["energy"].shape[0]].to(self.device), - "natoms": natoms, - } + if self.config["model"].get("regress_forces", False): target["forces"] = torch.cat( From 2c93b919ab5ef86e15f9f57ebaa9d1145ea14b8a Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 22 Aug 2023 10:36:39 -0400 Subject: [PATCH 080/131] Fixed a small typo that affected the normal data mode --- configs/exps/alvaro/all-training.yaml | 6 ------ debug.py | 2 +- ocpmodels/tasks/task.py | 2 +- ocpmodels/trainers/single_trainer.py | 2 +- 4 files changed, 3 insertions(+), 9 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index fffb6a7640..10008d7eeb 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,14 +42,8 @@ default: eval_batch_size: 256 runs: - - config: afaenet-is2re-all - model: - afaenet_gat_mode: "v1" - - config: depfaenet-is2re-all - - config: indfaenet-is2re-all - - config: faenet-is2re-all - config: faenet-is2re-all diff --git a/debug.py b/debug.py index 1b9e72b80d..657ddb69f1 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "afaenet-is2re-10k" + args.config = "faenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index caac6b253e..e229152212 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - self.trainer.measure_inference_time(loops=loops) + #self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index 8a87ea3b78..b6f55fe9fa 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -614,7 +614,7 @@ def compute_loss(self, preds, batch_list): else: energy_target = torch.cat( [ - batch[0][0].y_relaxed.to(self.device) + batch.y_relaxed.to(self.device) if self.task_name == "is2re" else batch.y.to(self.device) for batch in batch_list From 4b161e1a9006d77154a7063f33259e34a037dc4b Mon Sep 17 00:00:00 2001 From: alvaro Date: Wed, 23 Aug 2023 11:48:01 -0400 Subject: [PATCH 081/131] Forgot to uncomment the task part. 
I also didn't use the is_disconnected flag correctly in the testing part --- configs/exps/alvaro/all-training.yaml | 3 +-- ocpmodels/tasks/task.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 10008d7eeb..dc1e5598a7 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -47,5 +47,4 @@ runs: - config: faenet-is2re-all - config: faenet-is2re-all - model: - is_disconnected: True + is_disconnected: True diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index e229152212..caac6b253e 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - #self.trainer.measure_inference_time(loops=loops) + self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From 8b461eded0f9b899cd937bf5109fa1ab2cab15c2 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 3 Sep 2023 17:38:11 -0400 Subject: [PATCH 082/131] Comments added, removed a point trace. --- configs/exps/alvaro/10k-training.yaml | 10 +--------- configs/exps/alvaro/all-training.yaml | 7 +------ debug.py | 6 +++--- ocpmodels/datasets/other_datasets.py | 7 +++++++ ocpmodels/models/afaenet.py | 10 ++++++++-- ocpmodels/models/depfaenet.py | 6 ++++++ ocpmodels/models/indfaenet.py | 3 ++- 7 files changed, 28 insertions(+), 21 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 2e5f7ce7b7..5742f60ba2 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -42,12 +42,4 @@ default: eval_batch_size: 256 runs: - - config: afaenet-is2re-10k - model: - afaenet_gat_mode: "v1" - - - config: indfaenet-is2re-10k - - - config: depfaenet-is2re-10k - - - config: faenet-is2re-10k + - config: gemnet-is2re-10k diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index dc1e5598a7..0b11b39b2d 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,9 +42,4 @@ default: eval_batch_size: 256 runs: - - config: depfaenet-is2re-all - - - config: faenet-is2re-all - - - config: faenet-is2re-all - is_disconnected: True + - config: gemnet-is2re-all diff --git a/debug.py b/debug.py index 657ddb69f1..1ca5198477 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "faenet-is2re-10k" + args.config = "schnet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" @@ -124,8 +124,8 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - trainer_config["model"]["transformer_out"] = False - trainer_config["model"]["afaenet_gat_mode"] = "v1" + #trainer_config["model"]["transformer_out"] = False + #trainer_config["model"]["afaenet_gat_mode"] = "v1" #trainer_config["model"]["disconnected_mlp"] = True #trainer_config["optim"]["batch_sizes"] = 256 diff --git a/ocpmodels/datasets/other_datasets.py b/ocpmodels/datasets/other_datasets.py index 2e58480994..ed068e77c5 100644 --- 
a/ocpmodels/datasets/other_datasets.py +++ b/ocpmodels/datasets/other_datasets.py @@ -11,6 +11,8 @@ from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform +# This is a function that receives an adsorbate/catalyst system and returns +# each of these parts separately. def graph_splitter(graph): edge_index = graph.edge_index pos = graph.pos @@ -84,6 +86,7 @@ def graph_splitter(graph): return adsorbate, catalyst +# This dataset class sends back a tuple with the adsorbate and catalyst. @registry.register_dataset("separate") class SeparateLmdbDataset(LmdbDataset): # Check that the dataset works as intended, with an specific example. def __getitem__(self, idx): @@ -109,6 +112,7 @@ def __getitem__(self, idx): datapoint_pickled = self.env.begin().get(self._keys[idx]) data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) + # We separate the graphs adsorbate, catalyst = graph_splitter(data_object) t1 = time.time_ns() @@ -134,8 +138,10 @@ def __getitem__(self, idx): @registry.register_dataset("heterogeneous") class HeterogeneousDataset(SeparateLmdbDataset): def __getitem__(self, idx): + # We start by separating the adsorbate and catalyst adsorbate, catalyst = super().__getitem__(idx) + # We save each into the heterogeneous graph reaction = HeteroData() for graph in [adsorbate, catalyst]: mode = graph.mode @@ -146,6 +152,7 @@ def __getitem__(self, idx): reaction[mode, "is_close", mode].edge_index = graph.edge_index + # We create the edges between both parts of the graph. sender = torch.repeat_interleave(torch.arange(catalyst.natoms.item()), adsorbate.natoms.item()) receiver = torch.arange(0, adsorbate.natoms.item()).repeat(catalyst.natoms.item()) reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack([sender, receiver]) diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index 37c6dd0d01..7db377643e 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -29,6 +29,7 @@ def __init__(self, d_model, version, edge_dim, dropout=0.1): if version not in {"v1", "v2"}: raise ValueError(f"Invalid GAT version. Received {version}, available: v1, v2.") + # Not quite sure what is the impact of increasing or decreasing the number of heads if version == "v1": self.interaction = GATConv( in_channels = d_model, @@ -49,13 +50,16 @@ def __init__(self, d_model, version, edge_dim, dropout=0.1): ) def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): + # We first do the message passing separation_pt = h_ads.shape[0] combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges, bipartite_weights) + # Then we normalize and add residual connections ads, cat = combined[:separation_pt], combined[separation_pt:] ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) ads, cat = ads + h_ads, cat + h_cat + # QUESTION: Should normalization happen before separating them? return ads, cat @@ -155,7 +159,7 @@ def __init__(self, **kwargs): ] ) - assert "afaenet_gat_mode" in kwargs, "Faenet version needs to be specified. Options: v1, v2" + assert "afaenet_gat_mode" in kwargs, "GAT version needs to be specified. 
Options: v1, v2" # Inter Interaction self.inter_interactions = nn.ModuleList( [ @@ -225,7 +229,6 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): batch_size = len(data) - batch_ads = data["adsorbate"]["batch"] batch_cat = data["catalyst"]["batch"] @@ -290,15 +293,18 @@ def energy_forward(self, data): h_cat, edge_index_cat, edge_weight_cat, batch_cat, alpha_cat ) ) + # First we do intra interaction intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads) intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) + # Then we do inter interaction h_ads, h_cat = inter_interaction( intra_ads, intra_cat, data["is_disc"].edge_index, edge_weights, ) + # QUESTION: Can we do both simultaneously? # Atom skip-co if self.skip_co == "concat_atom": diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index d3c0f0ed9c..d4138c26b4 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -15,6 +15,7 @@ def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): energy_head, hidden_channels, act ) + # We modify the last output linear function to make the output a vector self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) self.disconnected_mlp = disconnected_mlp @@ -22,6 +23,7 @@ def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): self.ads_lin = Linear(hidden_channels // 2, hidden_channels // 2) self.cat_lin = Linear(hidden_channels // 2, hidden_channels // 2) + # Combines the hidden representation of each to a scalar. self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) self.sys_lin2 = Linear(hidden_channels // 2, 1) @@ -50,6 +52,7 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): }: h = h * alpha + # We pool separately and then we concatenate. ads = self.current_tags == 2 cat = ~ads @@ -62,6 +65,7 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): system = torch.cat([ads_out, cat_out], dim = 1) + # Finally, we predict a number. system = self.sys_lin1(system) energy = self.sys_lin2(system) @@ -72,6 +76,7 @@ class depFAENet(FAENet): def __init__(self, **kwargs): super().__init__(**kwargs) + # We replace the old output block by the new output block self.disconnected_mlp = kwargs.get("disconnected_mlp", False) self.output_block = discOutputBlock( self.energy_head, kwargs["hidden_channels"], self.act, self.disconnected_mlp @@ -79,6 +84,7 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): + # We need to save the tags so this step is necessary. 
self.output_block.tags_saver(data.tags) pred = super().energy_forward(data) diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 64a83cfa2f..b54d64a0c5 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -88,6 +88,7 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION ads_energy = self.ads_lin(ads_energy) cat_energy = self.cat_lin(cat_energy) + # We combine predictions if self.transformer_out: batch_size = ads_energy.shape[0] @@ -108,7 +109,7 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION system_energy = torch.cat([ads_energy, cat_energy], dim = 1) system_energy = self.combination(system_energy) - # We combine predictions and return them + # We return them pred_system = { "energy" : system_energy, "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None From ce00cc93e434c86809c1e9721e942dafd91ac004 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 3 Sep 2023 17:48:17 -0400 Subject: [PATCH 083/131] modified a training file --- configs/exps/alvaro/10k-training.yaml | 4 +++- configs/exps/alvaro/all-training.yaml | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/configs/exps/alvaro/10k-training.yaml b/configs/exps/alvaro/10k-training.yaml index 5742f60ba2..471ded838d 100644 --- a/configs/exps/alvaro/10k-training.yaml +++ b/configs/exps/alvaro/10k-training.yaml @@ -42,4 +42,6 @@ default: eval_batch_size: 256 runs: - - config: gemnet-is2re-10k + - config: schnet-is2re-10k + + - config: gemnet_oc-is2re-10k diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 0b11b39b2d..08c4436e01 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,4 +42,6 @@ default: eval_batch_size: 256 runs: - - config: gemnet-is2re-all + - config: gemnet_oc-is2re-all + + - config: schnet-is2re-all From f92b8f5f5d45f7314fe4b59bcb3efb0327cb4736 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 4 Sep 2023 00:22:38 -0400 Subject: [PATCH 084/131] Implemented all disconnected schnet models except for aschnet --- configs/exps/alvaro/all-training.yaml | 7 +- configs/models/depschnet.yaml | 225 ++++++++++++++++ configs/models/indschnet.yaml | 225 ++++++++++++++++ debug.py | 2 +- ocpmodels/models/aschnet.py | 370 ++++++++++++++++++++++++++ ocpmodels/models/depschnet.py | 46 ++++ ocpmodels/models/indschnet.py | 116 ++++++++ ocpmodels/models/schnet.py | 11 +- ocpmodels/trainers/base_trainer.py | 4 +- 9 files changed, 1000 insertions(+), 6 deletions(-) create mode 100644 configs/models/depschnet.yaml create mode 100644 configs/models/indschnet.yaml create mode 100644 ocpmodels/models/aschnet.py create mode 100644 ocpmodels/models/depschnet.py create mode 100644 ocpmodels/models/indschnet.py diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 08c4436e01..de01dbfffa 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,6 +42,11 @@ default: eval_batch_size: 256 runs: - - config: gemnet_oc-is2re-all + - config: schnet-is2re-all - config: schnet-is2re-all + is_disconnected: True + + - config: depschnet-is2re-all + + - config: indschnet-is2re-all diff --git a/configs/models/depschnet.yaml b/configs/models/depschnet.yaml new file mode 100644 index 0000000000..65fcb15037 --- /dev/null +++ b/configs/models/depschnet.yaml @@ -0,0 +1,225 @@ +default: + model: + name: depschnet + num_filters: 128 + 
num_gaussians: 100 + hidden_channels: 256 + num_interactions: 3 + cutoff: 6.0 + use_pbc: True + regress_forces: False + readout: add + atomref: null + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + model: + hidden_channels: 256 + num_interactions: 3 + optim: + lr_initial: 0.005 + max_epochs: 20 + lr_milestones: + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + batch_size: 256 + eval_batch_size: 256 + + 100k: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.0005 + max_epochs: 25 + lr_milestones: + - 15625 + - 31250 + - 46875 + warmup_steps: 9375 + batch_size: 256 + eval_batch_size: 256 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.001 + max_epochs: 17 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 5394 + batch_size: 256 + eval_batch_size: 256 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 192 + eval_batch_size: 192 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 52083 + - 83333 + - 104166 + warmup_steps: 31250 + max_epochs: 15 + force_coefficient: 100 + + 200k: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 3 + num_gaussians: 200 + optim: + batch_size: 128 + eval_batch_size: 128 + num_workers: 16 + lr_initial: 0.0005 + lr_gamma: 0.1 + lr_milestones: + - 7812 + - 12500 + - 15625 + warmup_steps: 4687 + max_epochs: 30 + force_coefficient: 100 + + 2M: {} + + 20M: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 48. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 24 + eval_batch_size: 24 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 86805 + - 138888 + - 173611 + warmup_steps: 52083 + max_epochs: 30 + force_coefficient: 50 + + all: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
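      # Worked example of the note above (an illustration; it assumes the milestones are
      # counted in optimizer steps): with the is2re "all" settings earlier in this file
      # (~460k training systems, global batch size 256), one epoch is roughly
      # 460000 / 256 ~= 1800 steps, so milestones of 17981 / 26972 / 35963 land near
      # epochs 10 / 15 / 20. Doubling the global batch size halves the steps per epoch,
      # so the milestones and warmup_steps should be halved to keep the same schedule
      # in epochs.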
+ batch_size: 20 + eval_batch_size: 20 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 313907 + - 523179 + - 732451 + warmup_steps: 209271 + max_epochs: 15 + force_coefficient: 30 + +qm9: + default: + model: + hidden_channels: 128 + num_gaussians: 100 + num_filters: 128 + num_interactions: 6 + cutoff: 5.0 + optim: + batch_size: 1024 + lr_initial: 0.001 + max_epochs: 1000 + decay_steps: 125000 + decay_rate: 0.01 + ema_decay: 0.999 + lr_gamma: 0.25 + lr_milestones: + - 17981 + - 26972 + - 35963 + - 52000 + - 100000 + warmup_steps: 1000 + + 10k: {} + all: {} + +qm7x: + default: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 128 + lr_initial: 0.001 + max_epochs: 25 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 15000 + + all: {} + 1k: {} diff --git a/configs/models/indschnet.yaml b/configs/models/indschnet.yaml new file mode 100644 index 0000000000..d8acf62ba2 --- /dev/null +++ b/configs/models/indschnet.yaml @@ -0,0 +1,225 @@ +default: + model: + name: indschnet + num_filters: 128 + num_gaussians: 100 + hidden_channels: 256 + num_interactions: 3 + cutoff: 6.0 + use_pbc: True + regress_forces: False + readout: add + atomref: null + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + model: + hidden_channels: 256 + num_interactions: 3 + optim: + lr_initial: 0.005 + max_epochs: 20 + lr_milestones: + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + batch_size: 256 + eval_batch_size: 256 + + 100k: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.0005 + max_epochs: 25 + lr_milestones: + - 15625 + - 31250 + - 46875 + warmup_steps: 9375 + batch_size: 256 + eval_batch_size: 256 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.001 + max_epochs: 17 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 5394 + batch_size: 256 + eval_batch_size: 256 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ batch_size: 192 + eval_batch_size: 192 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 52083 + - 83333 + - 104166 + warmup_steps: 31250 + max_epochs: 15 + force_coefficient: 100 + + 200k: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 3 + num_gaussians: 200 + optim: + batch_size: 128 + eval_batch_size: 128 + num_workers: 16 + lr_initial: 0.0005 + lr_gamma: 0.1 + lr_milestones: + - 7812 + - 12500 + - 15625 + warmup_steps: 4687 + max_epochs: 30 + force_coefficient: 100 + + 2M: {} + + 20M: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 48. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 24 + eval_batch_size: 24 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 86805 + - 138888 + - 173611 + warmup_steps: 52083 + max_epochs: 30 + force_coefficient: 50 + + all: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 20 + eval_batch_size: 20 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 313907 + - 523179 + - 732451 + warmup_steps: 209271 + max_epochs: 15 + force_coefficient: 30 + +qm9: + default: + model: + hidden_channels: 128 + num_gaussians: 100 + num_filters: 128 + num_interactions: 6 + cutoff: 5.0 + optim: + batch_size: 1024 + lr_initial: 0.001 + max_epochs: 1000 + decay_steps: 125000 + decay_rate: 0.01 + ema_decay: 0.999 + lr_gamma: 0.25 + lr_milestones: + - 17981 + - 26972 + - 35963 + - 52000 + - 100000 + warmup_steps: 1000 + + 10k: {} + all: {} + +qm7x: + default: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 128 + lr_initial: 0.001 + max_epochs: 25 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 15000 + + all: {} + 1k: {} diff --git a/debug.py b/debug.py index 1ca5198477..282fc9c820 100644 --- a/debug.py +++ b/debug.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "schnet-is2re-10k" + args.config = "indschnet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py new file mode 100644 index 0000000000..f6daf85f4c --- /dev/null +++ b/ocpmodels/models/aschnet.py @@ -0,0 +1,370 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. + +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. 
+""" +from math import pi as PI + +import torch +import torch.nn.functional as F +from torch.nn import Embedding, Linear, ModuleList, Sequential +from torch_geometric.nn import MessagePassing, radius_graph +from torch_scatter import scatter + +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + conditional_grad, + get_pbc_distances, + radius_graph_pbc, +) +from ocpmodels.models.base_model import BaseModel +from ocpmodels.models.utils.pos_encodings import PositionalEncoding +from ocpmodels.modules.phys_embeddings import PhysEmbedding +from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling +from ocpmodels.models.schnet import ( + InteractionBlock, + CFConv, + GaussianSmearing, + ShiftedSoftplus, +) +from ocpmodels.models.afaenet import GATInteraction + +NUM_CLUSTERS = 20 +NUM_POOLING_LAYERS = 1 + +@registry.register_model("aschnet") +class ASchNet(BaseModel): + r"""The continuous-filter convolutional neural network SchNet from the + `"SchNet: A Continuous-filter Convolutional Neural Network for Modeling + Quantum Interactions" `_ paper that uses + the interactions blocks of the form + + .. math:: + \mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot + h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))), + + here :math:`h_{\mathbf{\Theta}}` denotes an MLP and + :math:`\mathbf{e}_{j,i}` denotes the interatomic distances between atoms. + + Args: + cutoff (float, optional): Cutoff distance for interatomic interactions. + (default: :obj:`10.0`) + use_pbc (bool, optional): Use of periodic boundary conditions. + (default: true) + otf_graph (bool, optional): Recompute radius graph. + (default: false) + max_num_neighbors (int, optional): The maximum number of neighbors to + collect for each node within the :attr:`cutoff` distance. + (default: :obj:`32`) + graph_rewiring (str, optional): Method used to create the graph, + among "", remove-tag-0, supernodes. + energy_head (str, optional): Method to compute energy prediction + from atom representations. + hidden_channels (int, optional): Hidden embedding size. + (default: :obj:`128`) + tag_hidden_channels (int, optional): Hidden tag embedding size. + (default: :obj:`32`) + pg_hidden_channels (int, optional): Hidden period and group embed size. + (default: obj:`32`) + phys_embed (bool, optional): Concat fixed physics-aware embeddings. + phys_hidden_channels (int, optional): Hidden size of learnable phys embed. + (default: obj:`32`) + num_filters (int, optional): The number of filters to use. + (default: :obj:`128`) + num_interactions (int, optional): The number of interaction blocks. + (default: :obj:`6`) + num_gaussians (int, optional): The number of gaussians :math:`\mu`. + (default: :obj:`50`) + readout (string, optional): Whether to apply :obj:`"add"` or + :obj:`"mean"` global aggregation. (default: :obj:`"add"`) + atomref (torch.Tensor, optional): The reference of single-atom + properties. + Expects a vector of shape :obj:`(max_atomic_number, )`. 
+ """ + + url = "http://www.quantum-machine.org/datasets/trained_schnet_models.zip" + + def __init__(self, **kwargs): + super().__init__() + + import ase + + self.use_pbc = kwargs["use_pbc"] + self.cutoff = kwargs["cutoff"] + self.otf_graph = kwargs["otf_graph"] + self.scale = None + self.regress_forces = kwargs["regress_forces"] + + self.num_filters = kwargs["num_filters"] + self.num_interactions = kwargs["num_interactions"] + self.num_gaussians = kwargs["num_gaussians"] + self.max_num_neighbors = kwargs["max_num_neighbors"] + self.readout = kwargs["readout"] + self.hidden_channels = kwargs["hidden_channels"] + self.tag_hidden_channels = kwargs["tag_hidden_channels"] + self.use_tag = self.tag_hidden_channels > 0 + self.pg_hidden_channels = kwargs["pg_hidden_channels"] + self.use_pg = self.pg_hidden_channels > 0 + self.phys_hidden_channels = kwargs["phys_hidden_channels"] + self.energy_head = kwargs["energy_head"] + self.use_phys_embeddings = kwargs["phys_embeds"] + self.use_mlp_phys = self.phys_hidden_channels > 0 and kwargs["phys_embeds"] + self.use_positional_embeds = kwargs["graph_rewiring"] in { + "one-supernode-per-graph", + "one-supernode-per-atom-type", + "one-supernode-per-atom-type-dist", + } + + self.register_buffer( + "initial_atomref", + torch.tensor(kwargs["atomref"]) if kwargs["atomref"] is not None else None, + ) + self.atomref = None + if kwargs["atomref"] is not None: + self.atomref = Embedding(100, 1) + self.atomref.weight.data.copy_(torch.tensor(kwargs["atomref"])) + + atomic_mass = torch.from_numpy(ase.data.atomic_masses) + # self.covalent_radii = torch.from_numpy(ase.data.covalent_radii) + # self.vdw_radii = torch.from_numpy(ase.data.vdw_radii) + self.register_buffer("atomic_mass", atomic_mass) + + if self.use_tag: + self.tag_embedding = Embedding(3, self.tag_hidden_channels) + + # Phys embeddings + self.phys_emb = PhysEmbedding(props=kwargs["phys_embeds"], pg=self.use_pg) + if self.use_mlp_phys: + self.phys_lin = Linear( + self.phys_emb.n_properties, self.phys_hidden_channels + ) + else: + self.phys_hidden_channels = self.phys_emb.n_properties + + # Period + group embeddings + if self.use_pg: + self.period_embedding = Embedding( + self.phys_emb.period_size, self.pg_hidden_channels + ) + self.group_embedding = Embedding( + self.phys_emb.group_size, self.pg_hidden_channels + ) + + assert ( + self.tag_hidden_channels + + 2 * self.pg_hidden_channels + + self.phys_hidden_channels + < self.hidden_channels + ) + + # Main embedding + self.embedding = Embedding( + 85, + self.hidden_channels + - self.tag_hidden_channels + - self.phys_hidden_channels + - 2 * self.pg_hidden_channels, + ) + + # Position encoding + if self.use_positional_embeds: + self.pe = PositionalEncoding(self.hidden_channels, 210) + + # Interaction block + self.distance_expansion = GaussianSmearing(0.0, self.cutoff, self.num_gaussians) + self.interactions = ModuleList() + for _ in range(self.num_interactions): + block = InteractionBlock( + self.hidden_channels, self.num_gaussians, self.num_filters, self.cutoff + ) + self.interactions.append(block) + + # Output block + self.lin1 = Linear(self.hidden_channels, self.hidden_channels // 2) + self.act = ShiftedSoftplus() + if kwargs["model_name"] == "schnet": + self.lin2 = Linear(self.hidden_channels // 2, 1) + elif kwargs["model_name"] in ["indschnet"]: + self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + + # weighted average & pooling + if self.energy_head in {"pooling", "random"}: + self.hierarchical_pooling = Hierarchical_Pooling( + 
self.hidden_channels, + self.act, + NUM_POOLING_LAYERS, + NUM_CLUSTERS, + self.energy_head, + ) + elif self.energy_head == "graclus": + self.graclus = Graclus(self.hidden_channels, self.act) + elif self.energy_head in { + "weighted-av-initial-embeds", + "weighted-av-final-embeds", + }: + self.w_lin = Linear(self.hidden_channels, 1) + + self.reset_parameters() + + def reset_parameters(self): + self.embedding.reset_parameters() + if self.use_mlp_phys: + torch.nn.init.xavier_uniform_(self.phys_lin.weight) + if self.use_tag: + self.tag_embedding.reset_parameters() + if self.use_pg: + self.period_embedding.reset_parameters() + self.group_embedding.reset_parameters() + if self.energy_head in {"weighted-av-init-embeds", "weighted-av-final-embeds"}: + self.w_lin.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.w_lin.weight) + for interaction in self.interactions: + interaction.reset_parameters() + torch.nn.init.xavier_uniform_(self.lin1.weight) + self.lin1.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.lin2.weight) + self.lin2.bias.data.fill_(0) + if self.atomref is not None: + self.atomref.weight.data.copy_(self.initial_atomref) + + def __repr__(self): + return ( + f"{self.__class__.__name__}(" + f"hidden_channels={self.hidden_channels}, " + f"tag_hidden_channels={self.tag_hidden_channels}, " + f"properties={self.phys_hidden_channels}, " + f"period_hidden_channels={self.pg_hidden_channels}, " + f"group_hidden_channels={self.pg_hidden_channels}, " + f"energy_head={self.energy_head}", + f"num_filters={self.num_filters}, " + f"num_interactions={self.num_interactions}, " + f"num_gaussians={self.num_gaussians}, " + f"cutoff={self.cutoff})", + ) + + @conditional_grad(torch.enable_grad()) + def forces_forward(self, preds): + return + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + """""" + # Re-compute on the fly the graph + if self.otf_graph: # STILL WRITING THIS PART!! 
+ edge_index_ads, cell_offsets_ads, neighbors_ads = radius_graph_pbc( + data, self.cutoff, 50 + ) + data.edge_index = edge_index + data.cell_offsets = cell_offsets + data.neighbors = neighbors + + # Rewire the graph + z = data.atomic_numbers.long() + pos = data.pos + batch = data.batch + + # Use periodic boundary conditions + if self.use_pbc: + assert z.dim() == 1 and z.dtype == torch.long + + out = get_pbc_distances( + pos, + data.edge_index, + data.cell, + data.cell_offsets, + data.neighbors, + ) + + edge_index = out["edge_index"] + edge_weight = out["distances"] + edge_attr = self.distance_expansion(edge_weight) + else: + edge_index = radius_graph( + pos, + r=self.cutoff, + batch=batch, + max_num_neighbors=self.max_num_neighbors, + ) + # edge_index = data.edge_index + row, col = edge_index + edge_weight = (pos[row] - pos[col]).norm(dim=-1) + edge_attr = self.distance_expansion(edge_weight) + + h = self.embedding(z) + + if self.use_tag: + assert data.tags is not None + h_tag = self.tag_embedding(data.tags) + h = torch.cat((h, h_tag), dim=1) + + if self.phys_emb.device != batch.device: + self.phys_emb = self.phys_emb.to(batch.device) + + if self.use_phys_embeddings: + h_phys = self.phys_emb.properties[z] + if self.use_mlp_phys: + h_phys = self.phys_lin(h_phys) + h = torch.cat((h, h_phys), dim=1) + + if self.use_pg: + # assert self.phys_emb.period is not None + h_period = self.period_embedding(self.phys_emb.period[z]) + h_group = self.group_embedding(self.phys_emb.group[z]) + h = torch.cat((h, h_period, h_group), dim=1) + + if self.use_positional_embeds: + idx_of_non_zero_val = (data.tags == 0).nonzero().T.squeeze(0) + h_pos = torch.zeros_like(h, device=h.device) + h_pos[idx_of_non_zero_val, :] = self.pe(data.subnodes).to( + device=h_pos.device + ) + h += h_pos + + if self.energy_head == "weighted-av-initial-embeds": + alpha = self.w_lin(h) + + for interaction in self.interactions: + h = h + interaction(h, edge_index, edge_weight, edge_attr) + + pooling_loss = None # deal with pooling loss + + if self.energy_head == "weighted-av-final-embeds": + alpha = self.w_lin(h) + + elif self.energy_head == "graclus": + h, batch = self.graclus(h, edge_index, edge_weight, batch) + + if self.energy_head in {"pooling", "random"}: + h, batch, pooling_loss = self.hierarchical_pooling( + h, edge_index, edge_weight, batch + ) + + # MLP + h = self.lin1(h) + h = self.act(h) + h = self.lin2(h) + + if self.energy_head in { + "weighted-av-initial-embeds", + "weighted-av-final-embeds", + }: + h = h * alpha + + if self.atomref is not None: + h = h + self.atomref(z) + + # Global pooling + out = self.scattering(h, batch) + + if self.scale is not None: + out = self.scale * out + + return { + "energy": out, + "pooling_loss": pooling_loss, + } + + @conditional_grad(torch.enable_grad()) + def scattering(self, h, batch): + return scatter(h, batch, dim=0, reduce=self.readout) diff --git a/ocpmodels/models/depschnet.py b/ocpmodels/models/depschnet.py new file mode 100644 index 0000000000..c1a4dd85ac --- /dev/null +++ b/ocpmodels/models/depschnet.py @@ -0,0 +1,46 @@ +import torch +from torch.nn import Linear +from torch_scatter import scatter + +from ocpmodels.models.schnet import SchNet +from ocpmodels.models.faenet import OutputBlock as conOutputBlock +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import conditional_grad + +from torch_geometric.data import Batch + +@registry.register_model("depschnet") +class depSchNet(SchNet): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # 
We replace the last linear transform to keep dimentionality + self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + + self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) + self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + # We need to save the tags so this step is necessary. + self.tags_saver(data.tags) + pred = super().energy_forward(data) + + return pred + + def tags_saver(self, tags): + self.current_tags = tags + + @conditional_grad(torch.enable_grad()) + def scattering(self, h, batch): + ads = self.current_tags == 2 + cat = ~ads + + ads_out = scatter(h, batch * ads, dim = 0, reduce = self.readout) + cat_out = scatter(h, batch * cat, dim = 0, reduce = self.readout) + + system = torch.cat([ads_out, cat_out], dim = 1) + system = self.sys_lin1(system) + system = self.sys_lin2(system) + + return system diff --git a/ocpmodels/models/indschnet.py b/ocpmodels/models/indschnet.py new file mode 100644 index 0000000000..0c638243f5 --- /dev/null +++ b/ocpmodels/models/indschnet.py @@ -0,0 +1,116 @@ +import torch, math +from torch import nn +from torch.nn import Linear, Transformer + +from ocpmodels.models.schnet import SchNet +from ocpmodels.models.faenet import OutputBlock +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish + +from torch_geometric.data import Batch + +# Implementation of positional encoding obtained from Harvard's annotated transformer's guide +class PositionalEncoding(nn.Module): + def __init__(self, d_model, dropout = 0.1, max_len = 5): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p = dropout) + + # Compute the positional encodings once in log space. + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.register_buffer("pe", pe) + + def forward(self, x): + x = x + self.pe[:, : x.size(1)].requires_grad_(False) + return self.dropout(x) + +@registry.register_model("indschnet") +class indSchNet(BaseModel): # Change to make it inherit from base model. 
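    # Note on the combiner below: when transformer_out is set, the two pooled embeddings
    # (adsorbate, catalyst) are stacked as a length-2 sequence and shifted by the standard
    # sinusoidal encoding computed by PositionalEncoding above,
    #   PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
    #   PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
    # before the nn.Transformer produces a single vector that transformer_lin maps to the
    # energy; max_len = 5 comfortably covers the length-2 sequence.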
+ def __init__(self, **kwargs): + super(indSchNet, self).__init__() + + self.regress_forces = kwargs["regress_forces"] + + self.ads_model = SchNet(**kwargs) + self.cat_model = SchNet(**kwargs) + + self.disconnected_mlp = kwargs.get("disconnected_mlp", False) + if self.disconnected_mlp: + self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) + self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) + + self.transformer_out = kwargs.get("transformer_out", False) + self.act = swish + if self.transformer_out: + self.combination = Transformer( + d_model = kwargs["hidden_channels"] // 2, + nhead = 2, + num_encoder_layers = 2, + num_decoder_layers = 2, + dim_feedforward = kwargs["hidden_channels"], + batch_first = True + ) + self.positional_encoding = PositionalEncoding( + kwargs["hidden_channels"] // 2, + dropout = 0.1, + max_len = 5, + ) + self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) + self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) + else: + self.combination = nn.Sequential( + Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + self.act, + Linear(kwargs["hidden_channels"] // 2, 1) + ) + + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + adsorbates = data[0] + catalysts = data[1] + + # We make predictions for each + pred_ads = self.ads_model(adsorbates, mode) + pred_cat = self.cat_model(catalysts, mode) + + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] + if self.disconnected_mlp: + ads_energy = self.ads_lin(ads_energy) + cat_energy = self.cat_lin(cat_energy) + + # We combine predictions + if self.transformer_out: + batch_size = ads_energy.shape[0] + + fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + system_energy = torch.cat( + [ + ads_energy.unsqueeze(1), + cat_energy.unsqueeze(1) + ], + dim = 1 + ) + + system_energy = self.positional_encoding(system_energy) + + system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + system_energy = self.transformer_lin(system_energy) + else: + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) + + # We return them + pred_system = { + "energy" : system_energy, + "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + } + + return pred_system diff --git a/ocpmodels/models/schnet.py b/ocpmodels/models/schnet.py index 063968473f..1bee6a657e 100644 --- a/ocpmodels/models/schnet.py +++ b/ocpmodels/models/schnet.py @@ -258,7 +258,10 @@ def __init__(self, **kwargs): # Output block self.lin1 = Linear(self.hidden_channels, self.hidden_channels // 2) self.act = ShiftedSoftplus() - self.lin2 = Linear(self.hidden_channels // 2, 1) + if kwargs["model_name"] == "schnet": + self.lin2 = Linear(self.hidden_channels // 2, 1) + elif kwargs["model_name"] in ["indschnet"]: + self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) # weighted average & pooling if self.energy_head in {"pooling", "random"}: @@ -427,7 +430,7 @@ def energy_forward(self, data): h = h + self.atomref(z) # Global pooling - out = scatter(h, batch, dim=0, reduce=self.readout) + out = self.scattering(h, batch) if self.scale is not None: out = self.scale * out @@ -436,3 +439,7 @@ def energy_forward(self, data): "energy": out, "pooling_loss": pooling_loss, } + + 
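    # The global pooling is factored into an overridable `scattering` hook (added below) so
    # that subclasses can swap the readout without re-implementing energy_forward; depSchNet
    # overrides it to pool adsorbate and catalyst atoms separately. A minimal illustration
    # (hypothetical subclass, assuming the SchNet class defined in this file):
    #
    #     class MeanReadoutSchNet(SchNet):
    #         def scattering(self, h, batch):
    #             # same call as the base class, but always with a mean readout
    #             return scatter(h, batch, dim=0, reduce="mean")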
@conditional_grad(torch.enable_grad()) + def scattering(self, h, batch): + return scatter(h, batch, dim=0, reduce=self.readout) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index b8273cf6c4..a7d5934499 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -154,7 +154,7 @@ def __init__(self, **kwargs): (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) # Here's the models whose edges are removed as a transform - transform_models = ["depfaenet"] + transform_models = ["depfaenet", "depschnet"] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") if self.config["model_name"] in transform_models: @@ -164,7 +164,7 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.separate_models = ["indfaenet"] + self.separate_models = ["indfaenet", "indschnet"] self.heterogeneous_models = ["afaenet"] self.data_mode = "normal" self.separate_dataset = False From e7c9d581c7fd352ffee6ee7b70dbe0f68b116b03 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 01:08:39 -0400 Subject: [PATCH 085/131] Finished implementing aschnet and also made some modifications to the way in which residual connections happen --- configs/exps/alvaro/all-training.yaml | 9 +- configs/exps/alvaro/schnet-config.yaml | 42 +++++ configs/models/aschnet.yaml | 225 +++++++++++++++++++++++ debug.py | 47 ++--- debug_faenet.py | 222 +++++++++++++++++++++++ ocpmodels/common/utils.py | 33 ++-- ocpmodels/models/afaenet.py | 9 +- ocpmodels/models/aschnet.py | 235 ++++++++++++++++++------- ocpmodels/trainers/base_trainer.py | 2 +- 9 files changed, 699 insertions(+), 125 deletions(-) create mode 100644 configs/exps/alvaro/schnet-config.yaml create mode 100644 configs/models/aschnet.yaml create mode 100644 debug_faenet.py diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index de01dbfffa..eaa665fc92 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -42,11 +42,4 @@ default: eval_batch_size: 256 runs: - - config: schnet-is2re-all - - - config: schnet-is2re-all - is_disconnected: True - - - config: depschnet-is2re-all - - - config: indschnet-is2re-all + - config: afaenet-is2re-all diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml new file mode 100644 index 0000000000..56f21cf7af --- /dev/null +++ b/configs/exps/alvaro/schnet-config.yaml @@ -0,0 +1,42 @@ +# MODIFY THIS ONE FOR RUNS + +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + tag_hidden_channels: 32 + pg_hidden_channels: 32 + phys_embeds: true + phys_hidden_channels: 0 + energy_head: false + num_targets: 1 + otf_graph: false + max_num_neighbors: 40 + hidden_channels: 142 + graph_rewiring: remove-tag-0 + optim: + batch_size: 32 + eval_batch_size: 32 + num_workers: 4 + max_epochs: 30 + es_patience: 5 + +runs: + - config: schnet-is2re-all + + - config: schnet-is2re-all + is_disconnected: True + + - config: depschnet-is2re-all + + - config: indschnet-is2re-all + + - config: aschnet-is2re-all + model: + gat_mode: v1 diff --git a/configs/models/aschnet.yaml b/configs/models/aschnet.yaml new file mode 100644 index 0000000000..23d8db1496 --- /dev/null +++ b/configs/models/aschnet.yaml @@ -0,0 +1,225 @@ +default: + model: + name: aschnet + 
num_filters: 128 + num_gaussians: 100 + hidden_channels: 256 + num_interactions: 3 + cutoff: 6.0 + use_pbc: True + regress_forces: False + readout: add + atomref: null + # drlab attributes: + tag_hidden_channels: 0 # 32 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 64 + eval_batch_size: 64 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + model: + hidden_channels: 256 + num_interactions: 3 + optim: + lr_initial: 0.005 + max_epochs: 20 + lr_milestones: + - 1562 + - 2343 + - 3125 + warmup_steps: 468 + batch_size: 256 + eval_batch_size: 256 + + 100k: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.0005 + max_epochs: 25 + lr_milestones: + - 15625 + - 31250 + - 46875 + warmup_steps: 9375 + batch_size: 256 + eval_batch_size: 256 + + all: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + lr_initial: 0.001 + max_epochs: 17 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 5394 + batch_size: 256 + eval_batch_size: 256 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 192 + eval_batch_size: 192 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 52083 + - 83333 + - 104166 + warmup_steps: 31250 + max_epochs: 15 + force_coefficient: 100 + + 200k: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 3 + num_gaussians: 200 + optim: + batch_size: 128 + eval_batch_size: 128 + num_workers: 16 + lr_initial: 0.0005 + lr_gamma: 0.1 + lr_milestones: + - 7812 + - 12500 + - 15625 + warmup_steps: 4687 + max_epochs: 30 + force_coefficient: 100 + + 2M: {} + + 20M: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 48. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 24 + eval_batch_size: 24 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 86805 + - 138888 + - 173611 + warmup_steps: 52083 + max_epochs: 30 + force_coefficient: 50 + + all: + model: + hidden_channels: 1024 + num_filters: 256 + num_interactions: 5 + num_gaussians: 200 + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ batch_size: 20 + eval_batch_size: 20 + num_workers: 16 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: + - 313907 + - 523179 + - 732451 + warmup_steps: 209271 + max_epochs: 15 + force_coefficient: 30 + +qm9: + default: + model: + hidden_channels: 128 + num_gaussians: 100 + num_filters: 128 + num_interactions: 6 + cutoff: 5.0 + optim: + batch_size: 1024 + lr_initial: 0.001 + max_epochs: 1000 + decay_steps: 125000 + decay_rate: 0.01 + ema_decay: 0.999 + lr_gamma: 0.25 + lr_milestones: + - 17981 + - 26972 + - 35963 + - 52000 + - 100000 + warmup_steps: 1000 + + 10k: {} + all: {} + +qm7x: + default: + model: + hidden_channels: 384 + num_interactions: 4 + optim: + batch_size: 128 + lr_initial: 0.001 + max_epochs: 25 + lr_gamma: 0.1 + lr_milestones: + - 17981 + - 26972 + - 35963 + warmup_steps: 15000 + + all: {} + 1k: {} diff --git a/debug.py b/debug.py index 282fc9c820..b5dda168e9 100644 --- a/debug.py +++ b/debug.py @@ -92,13 +92,17 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - args.test_ri = True - args.mode = "train" + args.tag_hidden_channels: 32 + args.pg_hidden_channels: 32 + args.phys_embeds = True + args.phys_hidden_channels = 0 + args.energy_head = False + args.num_targets = 1 + args.otf_graph = False + args.max_num_neighbors = 40 + args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.cp_data_to_tmpdir = True - args.config = "indschnet-is2re-10k" - args.frame_averaging = "2D" - args.fa_frames = "se3-random" + args.config = "aschnet-is2re-10k" trainer_config = build_config(args, override_args) @@ -110,30 +114,13 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["dataset"] ) - trainer_config["model"]["edge_embed_type"] = "all_rij" - trainer_config["model"]["mp_type"] = "updownscale" - trainer_config["model"]["phys_embeds"] = True - trainer_config["model"]["tag_hidden_channels"] = 32 - trainer_config["model"]["pg_hidden_channels"] = 64 - trainer_config["model"]["energy_head"] = "weighted-av-final-embeds" - trainer_config["model"]["complex_mp"] = False - trainer_config["model"]["graph_norm"] = True - trainer_config["model"]['hidden_channels'] = 352 - trainer_config["model"]["num_filters"] = 448 - trainer_config["model"]["num_gaussians"] = 99 - trainer_config["model"]["num_interactions"] = 6 - trainer_config["model"]["second_layer_MLP"] = True - trainer_config["model"]["skip_co"] = "concat" - #trainer_config["model"]["transformer_out"] = False - #trainer_config["model"]["afaenet_gat_mode"] = "v1" - #trainer_config["model"]["disconnected_mlp"] = True - - #trainer_config["optim"]["batch_sizes"] = 256 - #trainer_config["optim"]["eval_batch_sizes"] = 256 - trainer_config["optim"]["lr_initial"] = 0.0019 - trainer_config["optim"]["scheduler"] = "LinearWarmupCosineAnnealingLR" - trainer_config["optim"]["max_epochs"] = 20 - trainer_config["optim"]["eval_every"] = 0.4 + trainer_config["optim"]["batch_size"] = 32 + trainer_config["optim"]["eval_batch_size"] = 32 + #trainer_config["optim"]["num_workers"] = 4 + trainer_config["optim"]["max_epochs"] = 30 + trainer_config["optim"]["es_patience"] = 5 + + trainer_config["model"]["gat_mode"] = "v1" # -- Initial setup diff --git a/debug_faenet.py b/debug_faenet.py new file mode 100644 index 0000000000..c22cc2b1e4 --- /dev/null +++ b/debug_faenet.py @@ -0,0 +1,222 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. 
+ +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. +""" + +import logging +import os +import time +import traceback +import sys +import torch +from yaml import dump + +from ocpmodels.common import dist_utils +from ocpmodels.common.flags import flags +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + JOB_ID, + auto_note, + build_config, + merge_dicts, + move_lmdb_data_to_slurm_tmpdir, + resolve, + setup_imports, + setup_logging, + update_from_sbatch_py_vars, + set_min_hidden_channels, +) +from ocpmodels.common.orion_utils import ( + continue_orion_exp, + load_orion_exp, + sample_orion_hparams, +) +from ocpmodels.trainers import BaseTrainer + +# os.environ["CUDA_LAUNCH_BLOCKING"] = "1" +torch.multiprocessing.set_sharing_strategy("file_system") + + +def print_warnings(): + warnings = [ + "`max_num_neighbors` is set to 40. This should be tuned per model.", + "`tag_specific_weights` is not handled for " + + "`regress_forces: direct_with_gradient_target` in compute_loss()", + ] + print("\n" + "-" * 80 + "\n") + print("🛑 OCP-DR-Lab Warnings (nota benes):") + for warning in warnings: + print(f" • {warning}") + print("Remove warnings when they are fixed in the code/configs.") + print("\n" + "-" * 80 + "\n") + + +def wrap_up(args, start_time, error=None, signal=None, trainer=None): + total_time = time.time() - start_time + logging.info(f"Total time taken: {total_time}") + if trainer and trainer.logger is not None: + trainer.logger.log({"Total time": total_time}) + + if args.distributed: + print( + "\nWaiting for all processes to finish with dist_utils.cleanup()...", + end="", + ) + dist_utils.cleanup() + print("Done!") + + if "interactive" not in os.popen(f"squeue -hj {JOB_ID}").read(): + print("\nSelf-canceling SLURM job in 32s", JOB_ID) + os.popen(f"sleep 32 && scancel {JOB_ID}") + + if trainer and trainer.logger: + trainer.logger.finish(error or signal) + + +if __name__ == "__main__": + error = signal = orion_exp = orion_trial = trainer = None + orion_race_condition = False + hparams = {} + + setup_logging() + + parser = flags.get_parser() + args, override_args = parser.parse_known_args() + args = update_from_sbatch_py_vars(args) + if args.logdir: + args.logdir = resolve(args.logdir) + + # -- Build config + + args.wandb_name = "alvaro-carbonero-math" + args.wandb_project = "ocp-alvaro" + args.test_ri = True + args.mode = "train" + args.graph_rewiring = "remove-tag-0" + args.cp_data_to_tmpdir = True + args.config = "afaenet-is2re-10k" + args.frame_averaging = "2D" + args.fa_frames = "se3-random" + + trainer_config = build_config(args, override_args) + + if dist_utils.is_master(): + trainer_config = move_lmdb_data_to_slurm_tmpdir(trainer_config) + dist_utils.synchronize() + + trainer_config["dataset"] = dist_utils.broadcast_from_master( + trainer_config["dataset"] + ) + + trainer_config["model"]["edge_embed_type"] = "all_rij" + trainer_config["model"]["mp_type"] = "updownscale" + trainer_config["model"]["phys_embeds"] = True + trainer_config["model"]["tag_hidden_channels"] = 32 + trainer_config["model"]["pg_hidden_channels"] = 64 + trainer_config["model"]["energy_head"] = "weighted-av-final-embeds" + trainer_config["model"]["complex_mp"] = False + trainer_config["model"]["graph_norm"] = True + trainer_config["model"]['hidden_channels'] = 352 + trainer_config["model"]["num_filters"] = 448 + trainer_config["model"]["num_gaussians"] = 99 + trainer_config["model"]["num_interactions"] = 6 + 
trainer_config["model"]["second_layer_MLP"] = True + trainer_config["model"]["skip_co"] = "concat" + #trainer_config["model"]["transformer_out"] = False + trainer_config["model"]["afaenet_gat_mode"] = "v1" + #trainer_config["model"]["disconnected_mlp"] = True + + #trainer_config["optim"]["batch_sizes"] = 256 + #trainer_config["optim"]["eval_batch_sizes"] = 256 + trainer_config["optim"]["lr_initial"] = 0.0019 + trainer_config["optim"]["scheduler"] = "LinearWarmupCosineAnnealingLR" + trainer_config["optim"]["max_epochs"] = 20 + trainer_config["optim"]["eval_every"] = 0.4 + + # -- Initial setup + + setup_imports() + print("\n🚩 All things imported.\n") + start_time = time.time() + + try: + # -- Orion + + if args.orion_exp_config_path and dist_utils.is_master(): + orion_exp = load_orion_exp(args) + hparams, orion_trial = sample_orion_hparams(orion_exp, trainer_config) + + if hparams.get("orion_race_condition"): + logging.warning("\n\n ⛔️ Orion race condition. Stopping here.\n\n") + wrap_up(args, start_time, error, signal) + sys.exit() + + hparams = dist_utils.broadcast_from_master(hparams) + if hparams: + print("\n💎 Received hyper-parameters from Orion:") + print(dump(hparams), end="\n") + trainer_config = merge_dicts(trainer_config, hparams) + + # -- Setup trainer + trainer_config = continue_orion_exp(trainer_config) + trainer_config = auto_note(trainer_config) + trainer_config = set_min_hidden_channels(trainer_config) + + try: + cls = registry.get_trainer_class(trainer_config["trainer"]) + trainer: BaseTrainer = cls(**trainer_config) + except Exception as e: + traceback.print_exc() + logging.warning(f"\n💀 Error in trainer initialization: {e}\n") + signal = "trainer_init_error" + + if signal is None: + task = registry.get_task_class(trainer_config["mode"])(trainer_config) + task.setup(trainer) + print_warnings() + + # -- Start Training + + signal = task.run() + + # -- End of training + + # handle job preemption / time limit + if signal == "SIGTERM": + print("\nJob was preempted. Wrapping up...\n") + if trainer: + trainer.close_datasets() + + dist_utils.synchronize() + + objective = dist_utils.broadcast_from_master( + trainer.objective if trainer else None + ) + + if orion_exp is not None: + if objective is None: + if signal == "loss_is_nan": + objective = 1e12 + print("Received NaN objective from worker. Setting to 1e12.") + if signal == "trainer_init_error": + objective = 1e12 + print( + "Received trainer_init_error from worker.", + "Setting objective to 1e12.", + ) + if objective is not None: + orion_exp.observe( + orion_trial, + [{"type": "objective", "name": "energy_mae", "value": objective}], + ) + else: + print("Received None objective from worker. 
Skipping observation.") + + except Exception: + error = True + print(traceback.format_exc()) + + finally: + wrap_up(args, start_time, error, signal, trainer=trainer) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index dfa48f51d4..1fea18dec9 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1279,17 +1279,24 @@ def get_pbc_distances( return out - def radius_graph_pbc(data, radius, max_num_neighbors_threshold): - device = data.pos.device - batch_size = len(data.natoms) - - # position of the atoms atom_pos = data.pos + natoms = data.natoms + cell = data.cell + + return radius_graph_pbc_inputs( + atom_pos, natoms, cell, radius, max_num_neighbors_threshold + ) + +def radius_graph_pbc_inputs( + atom_pos, natoms, cell, radius, max_num_neighbors_threshold +): + device = atom_pos.device + batch_size = len(natoms) # Before computing the pairwise distances between atoms, first create a list # of atom indices to compare for the entire batch - num_atoms_per_image = data.natoms + num_atoms_per_image = natoms num_atoms_per_image_sqr = (num_atoms_per_image**2).long() # index offset between images @@ -1335,22 +1342,22 @@ def radius_graph_pbc(data, radius, max_num_neighbors_threshold): # Note that the unit cell volume V = a1 * (a2 x a3) and that # (a2 x a3) / V is also the reciprocal primitive vector # (crystallographer's definition). - cross_a2a3 = torch.cross(data.cell[:, 1], data.cell[:, 2], dim=-1) - cell_vol = torch.sum(data.cell[:, 0] * cross_a2a3, dim=-1, keepdim=True) + cross_a2a3 = torch.cross(cell[:, 1], cell[:, 2], dim=-1) + cell_vol = torch.sum(cell[:, 0] * cross_a2a3, dim=-1, keepdim=True) inv_min_dist_a1 = torch.norm(cross_a2a3 / cell_vol, p=2, dim=-1) rep_a1 = torch.ceil(radius * inv_min_dist_a1) - cross_a3a1 = torch.cross(data.cell[:, 2], data.cell[:, 0], dim=-1) + cross_a3a1 = torch.cross(cell[:, 2], cell[:, 0], dim=-1) inv_min_dist_a2 = torch.norm(cross_a3a1 / cell_vol, p=2, dim=-1) rep_a2 = torch.ceil(radius * inv_min_dist_a2) if radius >= 20: # Cutoff larger than the vacuum layer of 20A - cross_a1a2 = torch.cross(data.cell[:, 0], data.cell[:, 1], dim=-1) + cross_a1a2 = torch.cross(cell[:, 0], cell[:, 1], dim=-1) inv_min_dist_a3 = torch.norm(cross_a1a2 / cell_vol, p=2, dim=-1) rep_a3 = torch.ceil(radius * inv_min_dist_a3) else: - rep_a3 = data.cell.new_zeros(1) + rep_a3 = cell.new_zeros(1) # Take the max over all images for uniformity. This is essentially padding. 
# Note that this can significantly increase the number of computed distances # if the required repetitions are very different between images @@ -1371,7 +1378,7 @@ def radius_graph_pbc(data, radius, max_num_neighbors_threshold): unit_cell_batch = unit_cell.view(1, 3, num_cells).expand(batch_size, -1, -1) # Compute the x, y, z positional offsets for each cell in each image - data_cell = torch.transpose(data.cell, 1, 2) + data_cell = torch.transpose(cell, 1, 2) pbc_offsets = torch.bmm(data_cell, unit_cell_batch) pbc_offsets_per_atom = torch.repeat_interleave( pbc_offsets, num_atoms_per_image_sqr, dim=0 @@ -1403,7 +1410,7 @@ def radius_graph_pbc(data, radius, max_num_neighbors_threshold): atom_distance_sqr = torch.masked_select(atom_distance_sqr, mask) mask_num_neighbors, num_neighbors_image = get_max_neighbors_mask( - natoms=data.natoms, + natoms=natoms, index=index1, atom_distance=atom_distance_sqr, max_num_neighbors_threshold=max_num_neighbors_threshold, diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index 7db377643e..14f53268fc 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -55,9 +55,9 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges, bipartite_weights) - # Then we normalize and add residual connections + # Then we add residual connections ads, cat = combined[:separation_pt], combined[separation_pt:] - ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) + #ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) ads, cat = ads + h_ads, cat + h_cat # QUESTION: Should normalization happen before separating them? @@ -127,7 +127,7 @@ def __init__(self, **kwargs): kwargs["second_layer_MLP"], kwargs["edge_embed_type"], ) - self.disc_edge_embed = Linear(kwargs["num_gaussians"], kwargs["num_filters"] // 2) + self.disc_edge_embed = Linear(kwargs["num_gaussians"], kwargs["num_filters"]) # Interaction block self.interaction_blocks_ads = nn.ModuleList( @@ -166,7 +166,7 @@ def __init__(self, **kwargs): GATInteraction( kwargs["hidden_channels"], kwargs["afaenet_gat_mode"], - kwargs["num_filters"] // 2, + kwargs["num_filters"], ) for _ in range(kwargs["num_interactions"]) ] @@ -222,7 +222,6 @@ def __init__(self, **kwargs): else: self.combination = nn.Sequential( Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), - self.act, Linear(kwargs["hidden_channels"] // 2, 1) ) diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py index f6daf85f4c..a2584531be 100644 --- a/ocpmodels/models/aschnet.py +++ b/ocpmodels/models/aschnet.py @@ -9,6 +9,7 @@ import torch import torch.nn.functional as F from torch.nn import Embedding, Linear, ModuleList, Sequential +from torch import nn from torch_geometric.nn import MessagePassing, radius_graph from torch_scatter import scatter @@ -159,13 +160,26 @@ def __init__(self, **kwargs): ) # Main embedding - self.embedding = Embedding( + self.embedding_ads = Embedding( 85, self.hidden_channels - self.tag_hidden_channels - self.phys_hidden_channels - 2 * self.pg_hidden_channels, ) + self.embedding_cat = Embedding( + 85, + self.hidden_channels + - self.tag_hidden_channels + - self.phys_hidden_channels + - 2 * self.pg_hidden_channels, + ) + + # Gaussian basis and linear transformation of disc edges + self.distance_expansion_disc = GaussianSmearing( + 0.0, 20.0, self.num_gaussians + ) + self.disc_edge_embed = Linear(self.num_gaussians, 
self.num_filters) # Position encoding if self.use_positional_embeds: @@ -173,20 +187,35 @@ def __init__(self, **kwargs): # Interaction block self.distance_expansion = GaussianSmearing(0.0, self.cutoff, self.num_gaussians) - self.interactions = ModuleList() + + self.interactions_ads = ModuleList() for _ in range(self.num_interactions): block = InteractionBlock( self.hidden_channels, self.num_gaussians, self.num_filters, self.cutoff ) - self.interactions.append(block) + self.interactions_ads.append(block) + + self.interactions_cat = ModuleList() + for _ in range(self.num_interactions): + block = InteractionBlock( + self.hidden_channels, self.num_gaussians, self.num_filters, self.cutoff + ) + self.interactions_cat.append(block) + + self.interactions_disc = ModuleList() + assert "gat_mode" in kwargs, "GAT version needs to be specified. Options: v1, v2" + for _ in range(self.num_interactions): + block = GATInteraction( + self.hidden_channels, kwargs["gat_mode"], self.num_filters + ) + self.interactions_disc.append(block) # Output block - self.lin1 = Linear(self.hidden_channels, self.hidden_channels // 2) + self.lin1_ads = Linear(self.hidden_channels, self.hidden_channels // 2) + self.lin1_cat = Linear(self.hidden_channels, self.hidden_channels // 2) self.act = ShiftedSoftplus() - if kwargs["model_name"] == "schnet": - self.lin2 = Linear(self.hidden_channels // 2, 1) - elif kwargs["model_name"] in ["indschnet"]: - self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + self.lin2_ads = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + self.lin2_cat = Linear(self.hidden_channels // 2, self.hidden_channels // 2) # weighted average & pooling if self.energy_head in {"pooling", "random"}: @@ -205,10 +234,16 @@ def __init__(self, **kwargs): }: self.w_lin = Linear(self.hidden_channels, 1) + self.combination = nn.Sequential( + Linear(self.hidden_channels, self.hidden_channels // 2), + Linear(kwargs["hidden_channels"] // 2, 1) + ) + self.reset_parameters() def reset_parameters(self): - self.embedding.reset_parameters() + self.embedding_ads.reset_parameters() + self.embedding_cat.reset_parameters() if self.use_mlp_phys: torch.nn.init.xavier_uniform_(self.phys_lin.weight) if self.use_tag: @@ -219,12 +254,26 @@ def reset_parameters(self): if self.energy_head in {"weighted-av-init-embeds", "weighted-av-final-embeds"}: self.w_lin.bias.data.fill_(0) torch.nn.init.xavier_uniform_(self.w_lin.weight) - for interaction in self.interactions: - interaction.reset_parameters() - torch.nn.init.xavier_uniform_(self.lin1.weight) - self.lin1.bias.data.fill_(0) - torch.nn.init.xavier_uniform_(self.lin2.weight) - self.lin2.bias.data.fill_(0) + for ( + interaction_ads, + interaction_cat, + interaction_disc + ) in zip ( + self.interactions_ads, + self.interactions_cat, + self.interactions_disc + ): + interaction_ads.reset_parameters() + interaction_cat.reset_parameters() + #interaction_disc.reset_parameters() # need to implement this! 
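
The zip-loop above leaves interaction_disc.reset_parameters() commented out because the GAT-based bipartite block does not define that method yet. Below is a minimal sketch of what it could look like, assuming GATInteraction's only learnable submodule is the PyG attention convolution it stores in self.interaction (the attribute its forward, shown in the afaenet.py hunks above, already uses); any additional linear layers added later would need the same treatment:

    # Hypothetical GATInteraction.reset_parameters sketch. Assumes the wrapper
    # only holds a single PyG GATConv / GATv2Conv in self.interaction; both of
    # those classes expose reset_parameters() in PyTorch Geometric.
    def reset_parameters(self):
        self.interaction.reset_parameters()

With that in place, the commented-out call in the loop above can simply be re-enabled.
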
+ torch.nn.init.xavier_uniform_(self.lin1_ads.weight) + self.lin1_ads.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.lin2_ads.weight) + self.lin2_ads.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.lin1_cat.weight) + self.lin1_cat.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.lin2_cat.weight) + self.lin2_cat.bias.data.fill_(0) if self.atomref is not None: self.atomref.weight.data.copy_(self.initial_atomref) @@ -251,69 +300,62 @@ def forces_forward(self, preds): def energy_forward(self, data): """""" # Re-compute on the fly the graph - if self.otf_graph: # STILL WRITING THIS PART!! - edge_index_ads, cell_offsets_ads, neighbors_ads = radius_graph_pbc( - data, self.cutoff, 50 + if self.otf_graph: + edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( + data["adsorbate"].pos, + data["adsorbate"].natoms, + data["adsorbate"].cell, + self.cutoff, + 50, + ) + data["adsorbate", "is_close", "adsorbate"].edge_index = edge_index + data["adsorbate"].cell_offsets = cell_offsets + data["adsorbate"].neighbors = neighbors + + edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( + data["catalyst"].pos, + data["catalyst"].natoms, + data["catalyst"].cell, + self.cutoff, + 50, ) - data.edge_index = edge_index - data.cell_offsets = cell_offsets - data.neighbors = neighbors + data["catalyst", "is_close", "catalyst"].edge_index = edge_index + data["catalyst"].cell_offsets = cell_offsets + data["catalyst"].neighbors = neighbors # Rewire the graph - z = data.atomic_numbers.long() - pos = data.pos - batch = data.batch - # Use periodic boundary conditions - if self.use_pbc: - assert z.dim() == 1 and z.dtype == torch.long - - out = get_pbc_distances( - pos, - data.edge_index, - data.cell, - data.cell_offsets, - data.neighbors, - ) + ads_rewiring, cat_rewiring = self.graph_rewiring(data, ) + edge_index_ads, edge_weight_ads, edge_attr_ads = ads_rewiring + edge_index_cat, edge_weight_cat, edge_attr_cat = cat_rewiring - edge_index = out["edge_index"] - edge_weight = out["distances"] - edge_attr = self.distance_expansion(edge_weight) - else: - edge_index = radius_graph( - pos, - r=self.cutoff, - batch=batch, - max_num_neighbors=self.max_num_neighbors, - ) - # edge_index = data.edge_index - row, col = edge_index - edge_weight = (pos[row] - pos[col]).norm(dim=-1) - edge_attr = self.distance_expansion(edge_weight) + h_ads = self.embedding_ads(data["adsorbate"].atomic_numbers.long()) + h_cat = self.embedding_cat(data["catalyst"].atomic_numbers.long()) - h = self.embedding(z) + edge_weights_disc = self.distance_expansion_disc(data["is_disc"].edge_weight) + edge_weights_disc = self.disc_edge_embed(edge_weights_disc) - if self.use_tag: - assert data.tags is not None + if self.use_tag: # NOT IMPLEMENTED + assert data["adsorbate"].tags is not None h_tag = self.tag_embedding(data.tags) h = torch.cat((h, h_tag), dim=1) - if self.phys_emb.device != batch.device: - self.phys_emb = self.phys_emb.to(batch.device) + if self.phys_emb.device != data["adsorbate"].batch.device: # NOT IMPLEMENTED + self.phys_emb = self.phys_emb.to(data["adsorbate"].batch.device) - if self.use_phys_embeddings: + if self.use_phys_embeddings: # NOT IMPLEMENTED h_phys = self.phys_emb.properties[z] if self.use_mlp_phys: h_phys = self.phys_lin(h_phys) h = torch.cat((h, h_phys), dim=1) - if self.use_pg: + if self.use_pg: # NOT IMPLEMENTED # assert self.phys_emb.period is not None h_period = self.period_embedding(self.phys_emb.period[z]) h_group = self.group_embedding(self.phys_emb.group[z]) h = torch.cat((h, h_period, 
h_group), dim=1) - if self.use_positional_embeds: + if self.use_positional_embeds: # NOT IMPLEMENTED idx_of_non_zero_val = (data.tags == 0).nonzero().T.squeeze(0) h_pos = torch.zeros_like(h, device=h.device) h_pos[idx_of_non_zero_val, :] = self.pe(data.subnodes).to( @@ -324,47 +366,104 @@ def energy_forward(self, data): if self.energy_head == "weighted-av-initial-embeds": alpha = self.w_lin(h) - for interaction in self.interactions: - h = h + interaction(h, edge_index, edge_weight, edge_attr) + for ( + interaction_ads, + interaction_cat, + interaction_disc + ) in zip ( + self.interactions_ads, + self.interactions_cat, + self.interactions_disc + ): + h_ads = h_ads + interaction_ads(h_ads, edge_index_ads, edge_weight_ads, edge_attr_ads) + h_cat = h_cat + interaction_cat(h_cat, edge_index_cat, edge_weight_cat, edge_attr_cat) + h_ads, h_cat = interaction_disc( + h_ads, + h_cat, + data["is_disc"].edge_index, + edge_weights_disc + ) pooling_loss = None # deal with pooling loss - if self.energy_head == "weighted-av-final-embeds": + if self.energy_head == "weighted-av-final-embeds": # NOT IMPLEMENTED alpha = self.w_lin(h) elif self.energy_head == "graclus": - h, batch = self.graclus(h, edge_index, edge_weight, batch) + h, batch = self.graclus(h, edge_index, edge_weight, batch) # NOT IMPLEMENTED - if self.energy_head in {"pooling", "random"}: + if self.energy_head in {"pooling", "random"}: # NOT IMPLEMENTED h, batch, pooling_loss = self.hierarchical_pooling( h, edge_index, edge_weight, batch ) # MLP - h = self.lin1(h) - h = self.act(h) - h = self.lin2(h) + h_ads = self.lin1_ads(h_ads) + h_ads = self.act(h_ads) + h_ads = self.lin2_ads(h_ads) + + h_cat = self.lin1_cat(h_cat) + h_cat = self.act(h_cat) + h_cat = self.lin2_cat(h_cat) - if self.energy_head in { + if self.energy_head in { # NOT IMPLEMENTED "weighted-av-initial-embeds", "weighted-av-final-embeds", }: h = h * alpha - if self.atomref is not None: + if self.atomref is not None: # NOT IMPLEMENTED h = h + self.atomref(z) # Global pooling - out = self.scattering(h, batch) + out_ads = self.scattering(h_ads, data["adsorbate"].batch) + out_cat = self.scattering(h_cat, data["catalyst"].batch) if self.scale is not None: out = self.scale * out + system = torch.concat([out_ads, out_cat], dim = 1) + out = self.combination(system) + return { "energy": out, "pooling_loss": pooling_loss, } + @conditional_grad(torch.enable_grad()) + def graph_rewiring(self, data): + results = [] + + if self.use_pbc: + for mode in ["adsorbate", "catalyst"]: + out = get_pbc_distances( + data[mode].pos, + data[mode, "is_close", mode].edge_index, + data[mode].cell, + data[mode].cell_offsets, + data[mode].neighbors, + return_distance_vec = True + ) + + edge_index = out["edge_index"] + edge_weight = out["distances"] + edge_attr = self.distance_expansion(edge_weight) + results.append([edge_index, edge_weight, edge_attr]) + else: + for mode in ["adsorbate", "catalyst"]: + edge_index = radius_graph( + data[mode].pos, + r = self.cutoff, + batch =data[mode].batch, + max_num_neighbors = self.max_num_neighbors, + ) + row, col = edge_index + edge_weight = (pos[row] - pos[col]).norm(dim=-1) + edge_attr = self.distance_expansion(edge_weight) + results.append([edge_index, edge_weight, edge_attr]) + + return results + @conditional_grad(torch.enable_grad()) def scattering(self, h, batch): return scatter(h, batch, dim=0, reduce=self.readout) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index a7d5934499..53de22d1b3 100644 --- 
a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -165,7 +165,7 @@ def __init__(self, **kwargs): # Here's the models whose graphs are disconnected in the dataset self.separate_models = ["indfaenet", "indschnet"] - self.heterogeneous_models = ["afaenet"] + self.heterogeneous_models = ["afaenet", "aschnet"] self.data_mode = "normal" self.separate_dataset = False From 69f84d3150d23d307b9c2aa050254ded94eda0af Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 12:49:37 -0400 Subject: [PATCH 086/131] Modified config files and a couple other updates --- configs/exps/alvaro/schnet-config.yaml | 31 +++++++++++--------------- debug.py | 2 +- ocpmodels/models/afaenet.py | 8 ++++--- ocpmodels/models/aschnet.py | 12 +++++----- 4 files changed, 26 insertions(+), 27 deletions(-) diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index 56f21cf7af..c04b00054a 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -10,33 +10,28 @@ job: default: wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro - tag_hidden_channels: 32 - pg_hidden_channels: 32 - phys_embeds: true - phys_hidden_channels: 0 energy_head: false - num_targets: 1 - otf_graph: false - max_num_neighbors: 40 + num_targets: 15 hidden_channels: 142 graph_rewiring: remove-tag-0 + model: + otf_graph: false + max_num_neighbors: 40 optim: - batch_size: 32 - eval_batch_size: 32 num_workers: 4 - max_epochs: 30 - es_patience: 5 + max_epochs: 17 + warmup_factor: 0.2 runs: - config: schnet-is2re-all - - config: schnet-is2re-all - is_disconnected: True + #- config: schnet-is2re-all + # is_disconnected: True - - config: depschnet-is2re-all + #- config: depschnet-is2re-all - - config: indschnet-is2re-all + #- config: indschnet-is2re-all - - config: aschnet-is2re-all - model: - gat_mode: v1 + #- config: aschnet-is2re-all + # model: + # gat_mode: v1 diff --git a/debug.py b/debug.py index b5dda168e9..69a925ed3f 100644 --- a/debug.py +++ b/debug.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "aschnet-is2re-10k" + args.config = "schnet-is2re-10k" trainer_config = build_config(args, override_args) diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index 14f53268fc..df3664b9f9 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -55,10 +55,10 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): combined = torch.concat([h_ads, h_cat], dim = 0) combined = self.interaction(combined, bipartite_edges, bipartite_weights) - # Then we add residual connections + # We separate again and we return ads, cat = combined[:separation_pt], combined[separation_pt:] #ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) - ads, cat = ads + h_ads, cat + h_cat + #ads, cat = ads + h_ads, cat + h_cat # QUESTION: Should normalization happen before separating them? return ads, cat @@ -297,12 +297,14 @@ def energy_forward(self, data): intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat) # Then we do inter interaction - h_ads, h_cat = inter_interaction( + inter_ads, inter_cat = inter_interaction( intra_ads, intra_cat, data["is_disc"].edge_index, edge_weights, ) + h_ads = h_ads + inter_ads + h_cat = h_cat + inter_cat # QUESTION: Can we do both simultaneously? 
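
The energy_forward hunk above makes each layer sequential: the bipartite GAT consumes the intra-graph outputs, and only its result is added back to h_ads and h_cat. One possible answer to the "QUESTION: Can we do both simultaneously?" comment is a parallel variant that feeds the bipartite block the current embeddings and sums both updates into the residual. This is only an illustrative sketch reusing the names from the diff (e_ads is assumed to mirror e_cat), not what the patch implements:

    # Hypothetical parallel per-layer update inside energy_forward: intra-graph
    # and bipartite messages are both computed from the current embeddings,
    # then added to a shared residual.
    intra_ads = interaction_ads(h_ads, edge_index_ads, e_ads)
    intra_cat = interaction_cat(h_cat, edge_index_cat, e_cat)
    inter_ads, inter_cat = inter_interaction(
        h_ads, h_cat, data["is_disc"].edge_index, edge_weights
    )
    h_ads = h_ads + intra_ads + inter_ads
    h_cat = h_cat + intra_cat + inter_cat

Whether the sequential or the parallel form trains better is left open here; the later PATCH 090 hunk keeps the sequential form and only adds nn.functional.normalize on h_ads and h_cat after the residual.
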
# Atom skip-co diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py index a2584531be..1b17caa712 100644 --- a/ocpmodels/models/aschnet.py +++ b/ocpmodels/models/aschnet.py @@ -375,14 +375,16 @@ def energy_forward(self, data): self.interactions_cat, self.interactions_disc ): - h_ads = h_ads + interaction_ads(h_ads, edge_index_ads, edge_weight_ads, edge_attr_ads) - h_cat = h_cat + interaction_cat(h_cat, edge_index_cat, edge_weight_cat, edge_attr_cat) - h_ads, h_cat = interaction_disc( - h_ads, - h_cat, + intra_ads = interaction_ads(h_ads, edge_index_ads, edge_weight_ads, edge_attr_ads) + intra_cat = interaction_cat(h_cat, edge_index_cat, edge_weight_cat, edge_attr_cat) + inter_ads, inter_cat = interaction_disc( + intra_ads, + intra_cat, data["is_disc"].edge_index, edge_weights_disc ) + h_ads = h_ads + inter_ads + h_cat = h_cat + inter_cat pooling_loss = None # deal with pooling loss From bb7713cd90b88c67e0eab00a28a7fdfa4a9de0cc Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 12:52:37 -0400 Subject: [PATCH 087/131] small mistake in config file --- configs/exps/alvaro/schnet-config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index c04b00054a..5d2e716aaa 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -12,7 +12,6 @@ default: wandb_project: ocp-alvaro energy_head: false num_targets: 15 - hidden_channels: 142 graph_rewiring: remove-tag-0 model: otf_graph: false From f1b333571d3235daab734423dea050e1dcacd416 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 13:13:47 -0400 Subject: [PATCH 088/131] fixed a config file --- configs/exps/alvaro/all-training.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index eaa665fc92..52eecf418b 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -43,3 +43,5 @@ default: runs: - config: afaenet-is2re-all + model: + afaenet_gat_mode: v1 From fe398c6086753292939783bd2ac3d4a89798f3c5 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 13:59:57 -0400 Subject: [PATCH 089/131] prepared config files for jobs --- configs/exps/alvaro/all-training.yaml | 2 +- configs/exps/alvaro/schnet-config.yaml | 16 ++++++++-------- debug.py | 8 ++++---- ocpmodels/models/depschnet.py | 2 ++ ocpmodels/models/schnet.py | 6 +++--- 5 files changed, 18 insertions(+), 16 deletions(-) diff --git a/configs/exps/alvaro/all-training.yaml b/configs/exps/alvaro/all-training.yaml index 52eecf418b..f124a63c66 100644 --- a/configs/exps/alvaro/all-training.yaml +++ b/configs/exps/alvaro/all-training.yaml @@ -34,7 +34,7 @@ default: skip_co: concat edge_embed_type: all_rij optim: - lr_initial: 0.001 + lr_initial: 0.0005 scheduler: LinearWarmupCosineAnnealingLR max_epochs: 20 eval_every: 0.4 diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index 5d2e716aaa..b4d86f9a93 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -22,15 +22,15 @@ default: warmup_factor: 0.2 runs: - - config: schnet-is2re-all - #- config: schnet-is2re-all - # is_disconnected: True - #- config: depschnet-is2re-all + - config: schnet-is2re-all + is_disconnected: True + + - config: depschnet-is2re-all - #- config: indschnet-is2re-all + - config: indschnet-is2re-all - #- config: aschnet-is2re-all - # model: - # gat_mode: v1 + - 
config: aschnet-is2re-all + model: + gat_mode: v1 diff --git a/debug.py b/debug.py index 69a925ed3f..ec1c6f7e34 100644 --- a/debug.py +++ b/debug.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "schnet-is2re-10k" + args.config = "depschnet-is2re-10k" trainer_config = build_config(args, override_args) @@ -114,9 +114,9 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["dataset"] ) - trainer_config["optim"]["batch_size"] = 32 - trainer_config["optim"]["eval_batch_size"] = 32 - #trainer_config["optim"]["num_workers"] = 4 + trainer_config["optim"]["batch_size"] = 64 + trainer_config["optim"]["eval_batch_size"] = 64 + trainer_config["optim"]["lr_initial"] = 0.0005 trainer_config["optim"]["max_epochs"] = 30 trainer_config["optim"]["es_patience"] = 5 diff --git a/ocpmodels/models/depschnet.py b/ocpmodels/models/depschnet.py index c1a4dd85ac..8ddd77aa1c 100644 --- a/ocpmodels/models/depschnet.py +++ b/ocpmodels/models/depschnet.py @@ -16,6 +16,8 @@ def __init__(self, **kwargs): # We replace the last linear transform to keep dimentionality self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + torch.nn.init.xavier_uniform_(self.lin2.weight) + self.lin2.bias.data.fill_(0) self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) self.sys_lin2 = Linear(self.hidden_channels // 2, 1) diff --git a/ocpmodels/models/schnet.py b/ocpmodels/models/schnet.py index 1bee6a657e..60528d4681 100644 --- a/ocpmodels/models/schnet.py +++ b/ocpmodels/models/schnet.py @@ -258,10 +258,10 @@ def __init__(self, **kwargs): # Output block self.lin1 = Linear(self.hidden_channels, self.hidden_channels // 2) self.act = ShiftedSoftplus() - if kwargs["model_name"] == "schnet": - self.lin2 = Linear(self.hidden_channels // 2, 1) - elif kwargs["model_name"] in ["indschnet"]: + if kwargs["model_name"] in ["indschnet"]: self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + else: + self.lin2 = Linear(self.hidden_channels // 2, 1) # weighted average & pooling if self.energy_head in {"pooling", "random"}: From 3e48f9668ddeead5ec3872b064ea96d77efcb4e2 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 5 Sep 2023 18:12:42 -0400 Subject: [PATCH 090/131] modified afaenet slightly --- debug.py | 2 +- debug_schnet.py | 209 ++++++++++++++++++++++++++++++++++++ ocpmodels/models/afaenet.py | 10 +- 3 files changed, 216 insertions(+), 5 deletions(-) create mode 100644 debug_schnet.py diff --git a/debug.py b/debug.py index ec1c6f7e34..77b7433a47 100644 --- a/debug.py +++ b/debug.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "depschnet-is2re-10k" + args.config = "gemnet_oc-is2re-10k" trainer_config = build_config(args, override_args) diff --git a/debug_schnet.py b/debug_schnet.py new file mode 100644 index 0000000000..ec1c6f7e34 --- /dev/null +++ b/debug_schnet.py @@ -0,0 +1,209 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. + +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. 
+""" + +import logging +import os +import time +import traceback +import sys +import torch +from yaml import dump + +from ocpmodels.common import dist_utils +from ocpmodels.common.flags import flags +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + JOB_ID, + auto_note, + build_config, + merge_dicts, + move_lmdb_data_to_slurm_tmpdir, + resolve, + setup_imports, + setup_logging, + update_from_sbatch_py_vars, + set_min_hidden_channels, +) +from ocpmodels.common.orion_utils import ( + continue_orion_exp, + load_orion_exp, + sample_orion_hparams, +) +from ocpmodels.trainers import BaseTrainer + +# os.environ["CUDA_LAUNCH_BLOCKING"] = "1" +torch.multiprocessing.set_sharing_strategy("file_system") + + +def print_warnings(): + warnings = [ + "`max_num_neighbors` is set to 40. This should be tuned per model.", + "`tag_specific_weights` is not handled for " + + "`regress_forces: direct_with_gradient_target` in compute_loss()", + ] + print("\n" + "-" * 80 + "\n") + print("🛑 OCP-DR-Lab Warnings (nota benes):") + for warning in warnings: + print(f" • {warning}") + print("Remove warnings when they are fixed in the code/configs.") + print("\n" + "-" * 80 + "\n") + + +def wrap_up(args, start_time, error=None, signal=None, trainer=None): + total_time = time.time() - start_time + logging.info(f"Total time taken: {total_time}") + if trainer and trainer.logger is not None: + trainer.logger.log({"Total time": total_time}) + + if args.distributed: + print( + "\nWaiting for all processes to finish with dist_utils.cleanup()...", + end="", + ) + dist_utils.cleanup() + print("Done!") + + if "interactive" not in os.popen(f"squeue -hj {JOB_ID}").read(): + print("\nSelf-canceling SLURM job in 32s", JOB_ID) + os.popen(f"sleep 32 && scancel {JOB_ID}") + + if trainer and trainer.logger: + trainer.logger.finish(error or signal) + + +if __name__ == "__main__": + error = signal = orion_exp = orion_trial = trainer = None + orion_race_condition = False + hparams = {} + + setup_logging() + + parser = flags.get_parser() + args, override_args = parser.parse_known_args() + args = update_from_sbatch_py_vars(args) + if args.logdir: + args.logdir = resolve(args.logdir) + + # -- Build config + + args.wandb_name = "alvaro-carbonero-math" + args.wandb_project = "ocp-alvaro" + args.tag_hidden_channels: 32 + args.pg_hidden_channels: 32 + args.phys_embeds = True + args.phys_hidden_channels = 0 + args.energy_head = False + args.num_targets = 1 + args.otf_graph = False + args.max_num_neighbors = 40 + args.hidden_channels = 142 + args.graph_rewiring = "remove-tag-0" + args.config = "depschnet-is2re-10k" + + trainer_config = build_config(args, override_args) + + if dist_utils.is_master(): + trainer_config = move_lmdb_data_to_slurm_tmpdir(trainer_config) + dist_utils.synchronize() + + trainer_config["dataset"] = dist_utils.broadcast_from_master( + trainer_config["dataset"] + ) + + trainer_config["optim"]["batch_size"] = 64 + trainer_config["optim"]["eval_batch_size"] = 64 + trainer_config["optim"]["lr_initial"] = 0.0005 + trainer_config["optim"]["max_epochs"] = 30 + trainer_config["optim"]["es_patience"] = 5 + + trainer_config["model"]["gat_mode"] = "v1" + + # -- Initial setup + + setup_imports() + print("\n🚩 All things imported.\n") + start_time = time.time() + + try: + # -- Orion + + if args.orion_exp_config_path and dist_utils.is_master(): + orion_exp = load_orion_exp(args) + hparams, orion_trial = sample_orion_hparams(orion_exp, trainer_config) + + if hparams.get("orion_race_condition"): + 
logging.warning("\n\n ⛔️ Orion race condition. Stopping here.\n\n") + wrap_up(args, start_time, error, signal) + sys.exit() + + hparams = dist_utils.broadcast_from_master(hparams) + if hparams: + print("\n💎 Received hyper-parameters from Orion:") + print(dump(hparams), end="\n") + trainer_config = merge_dicts(trainer_config, hparams) + + # -- Setup trainer + trainer_config = continue_orion_exp(trainer_config) + trainer_config = auto_note(trainer_config) + trainer_config = set_min_hidden_channels(trainer_config) + + try: + cls = registry.get_trainer_class(trainer_config["trainer"]) + trainer: BaseTrainer = cls(**trainer_config) + except Exception as e: + traceback.print_exc() + logging.warning(f"\n💀 Error in trainer initialization: {e}\n") + signal = "trainer_init_error" + + if signal is None: + task = registry.get_task_class(trainer_config["mode"])(trainer_config) + task.setup(trainer) + print_warnings() + + # -- Start Training + + signal = task.run() + + # -- End of training + + # handle job preemption / time limit + if signal == "SIGTERM": + print("\nJob was preempted. Wrapping up...\n") + if trainer: + trainer.close_datasets() + + dist_utils.synchronize() + + objective = dist_utils.broadcast_from_master( + trainer.objective if trainer else None + ) + + if orion_exp is not None: + if objective is None: + if signal == "loss_is_nan": + objective = 1e12 + print("Received NaN objective from worker. Setting to 1e12.") + if signal == "trainer_init_error": + objective = 1e12 + print( + "Received trainer_init_error from worker.", + "Setting objective to 1e12.", + ) + if objective is not None: + orion_exp.observe( + orion_trial, + [{"type": "objective", "name": "energy_mae", "value": objective}], + ) + else: + print("Received None objective from worker. Skipping observation.") + + except Exception: + error = True + print(traceback.format_exc()) + + finally: + wrap_up(args, start_time, error, signal, trainer=trainer) diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index df3664b9f9..47e09800aa 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -57,9 +57,9 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): # We separate again and we return ads, cat = combined[:separation_pt], combined[separation_pt:] - #ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) - #ads, cat = ads + h_ads, cat + h_cat # QUESTION: Should normalization happen before separating them? + # ads, cat = nn.functional.normalize(ads), nn.functional.normalize(cat) + # ads, cat = ads + h_ads, cat + h_cat return ads, cat @@ -222,6 +222,7 @@ def __init__(self, **kwargs): else: self.combination = nn.Sequential( Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + nn.ReLU(), Linear(kwargs["hidden_channels"] // 2, 1) ) @@ -303,10 +304,11 @@ def energy_forward(self, data): data["is_disc"].edge_index, edge_weights, ) - h_ads = h_ads + inter_ads - h_cat = h_cat + inter_cat # QUESTION: Can we do both simultaneously? 
+ h_ads, h_cat = h_ads + inter_ads, h_cat + inter_cat + h_ads, h_cat = nn.functional.normalize(h_ads), nn.functional.normalize(h_cat) + # Atom skip-co if self.skip_co == "concat_atom": energy_skip_co_ads.append(h_ads) From 10d882f38c2e6b674d1234db59a711bbe1fd61e5 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 10 Sep 2023 21:06:21 -0400 Subject: [PATCH 091/131] Started implementing depgemnet --- configs/exps/alvaro/gemnet-config.yaml | 18 ++++++++ configs/exps/alvaro/schnet-config.yaml | 2 - debug.py | 19 ++++----- debug_schnet.py | 2 +- ocpmodels/common/utils.py | 1 + ocpmodels/models/gemnet_oc/depgemnet_oc.py | 48 ++++++++++++++++++++++ 6 files changed, 76 insertions(+), 14 deletions(-) create mode 100644 configs/exps/alvaro/gemnet-config.yaml create mode 100644 ocpmodels/models/gemnet_oc/depgemnet_oc.py diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml new file mode 100644 index 0000000000..37f7b7a322 --- /dev/null +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -0,0 +1,18 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + model: + tag_hidden_channels: 32 + pg_hidden_channels: 32 + phys_embeds: True + + +runs: + - config: gemnet_oc-is2re-all diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index b4d86f9a93..33ee55e429 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -1,5 +1,3 @@ -# MODIFY THIS ONE FOR RUNS - job: mem: 32GB cpus: 4 diff --git a/debug.py b/debug.py index 77b7433a47..8f06829d91 100644 --- a/debug.py +++ b/debug.py @@ -92,17 +92,15 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - args.tag_hidden_channels: 32 - args.pg_hidden_channels: 32 + args.config = "gemnet_oc-is2re-10k" + + args.tag_hidden_channels = 32 + args.pg_hidden_channels = 32 args.phys_embeds = True - args.phys_hidden_channels = 0 - args.energy_head = False - args.num_targets = 1 args.otf_graph = False args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "gemnet_oc-is2re-10k" trainer_config = build_config(args, override_args) @@ -114,13 +112,12 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["dataset"] ) - trainer_config["optim"]["batch_size"] = 64 - trainer_config["optim"]["eval_batch_size"] = 64 - trainer_config["optim"]["lr_initial"] = 0.0005 + trainer_config["optim"]["batch_size"] = 32 + trainer_config["optim"]["eval_batch_size"] = 32 trainer_config["optim"]["max_epochs"] = 30 - trainer_config["optim"]["es_patience"] = 5 + #trainer_config["optim"]["es_patience"] = 5 - trainer_config["model"]["gat_mode"] = "v1" + #trainer_config["model"]["gat_mode"] = "v1" # -- Initial setup diff --git a/debug_schnet.py b/debug_schnet.py index ec1c6f7e34..77b7433a47 100644 --- a/debug_schnet.py +++ b/debug_schnet.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "depschnet-is2re-10k" + args.config = "gemnet_oc-is2re-10k" trainer_config = build_config(args, override_args) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 1fea18dec9..0173298a2f 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py 
@@ -1437,6 +1437,7 @@ def get_max_neighbors_mask(natoms, index, atom_distance, max_num_neighbors_thres `max_num_neighbors_threshold` neighbors. Assumes that `index` is sorted. """ + device = natoms.device num_atoms = natoms.sum() diff --git a/ocpmodels/models/gemnet_oc/depgemnet_oc.py b/ocpmodels/models/gemnet_oc/depgemnet_oc.py new file mode 100644 index 0000000000..f48938f648 --- /dev/null +++ b/ocpmodels/models/gemnet_oc/depgemnet_oc.py @@ -0,0 +1,48 @@ +import torch +from torch.nn import Linear +from torch_scatter import scatter + +from ocpmodels.models.schnet import SchNet +from ocpmodels.models.faenet import OutputBlock as conOutputBlock +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import conditional_grad + +from torch_geometric.data import Batch + +@registry.register_model("depgemnet_oc") +class depGemNetOC(SchNet): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # We replace the last linear transform to keep dimentionality + self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) + torch.nn.init.xavier_uniform_(self.lin2.weight) + self.lin2.bias.data.fill_(0) + + self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) + self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + # We need to save the tags so this step is necessary. + self.tags_saver(data.tags) + pred = super().energy_forward(data) + + return pred + + def tags_saver(self, tags): + self.current_tags = tags + + @conditional_grad(torch.enable_grad()) + def scattering(self, h, batch): + ads = self.current_tags == 2 + cat = ~ads + + ads_out = scatter(h, batch * ads, dim = 0, reduce = self.readout) + cat_out = scatter(h, batch * cat, dim = 0, reduce = self.readout) + + system = torch.cat([ads_out, cat_out], dim = 1) + system = self.sys_lin1(system) + system = self.sys_lin2(system) + + return system From cbfd5455a64e08afe80a476cec56a2088cda7a9d Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 10 Sep 2023 21:07:18 -0400 Subject: [PATCH 092/131] modified a config file --- configs/exps/alvaro/gemnet-config.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index 37f7b7a322..299f684634 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -12,7 +12,14 @@ default: tag_hidden_channels: 32 pg_hidden_channels: 32 phys_embeds: True - + otf_graph: False + max_num_neighbors: 40 + hidden_channels: 142 + graph_rewiring: remove-0-tag + optim: + batch_size: 32 + eval_batch_size: 32 + max_epochs: 30 runs: - config: gemnet_oc-is2re-all From 5289e1c549f8368a29876a06277b6a68dc62dc88 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 10 Sep 2023 21:32:57 -0400 Subject: [PATCH 093/131] Corrected mistake in config file --- configs/exps/alvaro/gemnet-config.yaml | 1 + debug.py | 2 +- ocpmodels/models/gemnet_oc/gemnet_oc.py | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index 299f684634..953bbfb1e1 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -15,6 +15,7 @@ default: otf_graph: False max_num_neighbors: 40 hidden_channels: 142 + regress_forces: True graph_rewiring: remove-0-tag optim: batch_size: 32 diff --git a/debug.py b/debug.py index 8f06829d91..651d988389 100644 --- a/debug.py +++ 
b/debug.py @@ -117,7 +117,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["optim"]["max_epochs"] = 30 #trainer_config["optim"]["es_patience"] = 5 - #trainer_config["model"]["gat_mode"] = "v1" + trainer_config["model"]["regress_forces"] = False # -- Initial setup diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index da00442486..b253d2c7eb 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -1282,6 +1282,9 @@ def energy_forward(self, data): xs_E.append(x_E) xs_F.append(x_F) + import ipdb + ipdb.set_trace() + # Global output block for final predictions x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) if self.direct_forces: From ac60cdc314cd3a5d18e9409ad04e7177eaf26f95 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 10 Sep 2023 22:25:32 -0400 Subject: [PATCH 094/131] implementing more --- configs/models/depgemnet_oc.yaml | 102 +++++++++++++++++++++ ocpmodels/common/scaling/compat.py | 2 + ocpmodels/common/utils.py | 1 + ocpmodels/models/gemnet_oc/depgemnet_oc.py | 18 ++-- ocpmodels/models/gemnet_oc/gemnet_oc.py | 4 + 5 files changed, 116 insertions(+), 11 deletions(-) create mode 100644 configs/models/depgemnet_oc.yaml diff --git a/configs/models/depgemnet_oc.yaml b/configs/models/depgemnet_oc.yaml new file mode 100644 index 0000000000..d3fb79cabc --- /dev/null +++ b/configs/models/depgemnet_oc.yaml @@ -0,0 +1,102 @@ +default: + model: + name: depgemnet_oc + num_spherical: 7 + num_radial: 128 + num_blocks: 4 + emb_size_atom: 256 + emb_size_edge: 512 + emb_size_trip_in: 64 + emb_size_trip_out: 64 + emb_size_quad_in: 32 + emb_size_quad_out: 32 + emb_size_aint_in: 64 + emb_size_aint_out: 64 + emb_size_rbf: 16 + emb_size_cbf: 16 + emb_size_sbf: 32 + num_before_skip: 2 + num_after_skip: 2 + num_concat: 1 + num_atom: 3 + num_output_afteratom: 3 + cutoff: 12.0 + cutoff_qint: 12.0 + cutoff_aeaint: 12.0 + cutoff_aint: 12.0 + max_neighbors: 30 + max_neighbors_qint: 8 + max_neighbors_aeaint: 20 + max_neighbors_aint: 1000 + rbf: + name: gaussian + envelope: + name: polynomial + exponent: 5 + cbf: + name: spherical_harmonics + sbf: + name: legendre_outer + extensive: True + output_init: HeOrthogonal + activation: silu + scale_file: configs/models/scaling_factors/gemnet-oc.pt + + regress_forces: True + direct_forces: True + forces_coupled: False + + quad_interaction: True + atom_edge_interaction: True + edge_atom_interaction: True + atom_interaction: True + + num_atom_emb_layers: 2 + num_global_out_layers: 2 + qint_tags: [1, 2] + + # PhAST + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + + optim: + batch_size: 16 + eval_batch_size: 16 + load_balancing: atoms + eval_every: 5000 + num_workers: 2 + lr_initial: 5.e-4 + optimizer: AdamW + optimizer_params: {"amsgrad": True} + scheduler: ReduceLROnPlateau + mode: min + factor: 0.8 + patience: 3 + max_epochs: 80 + force_coefficient: 100 + energy_coefficient: 1 + ema_decay: 0.999 + clip_grad_norm: 10 + loss_energy: mae + loss_force: l2mae + weight_decay: 0 + +is2re: + default: + model: + regress_forces: False + num_targets: 1 + 10k: {} + all: {} + +s2ef: + default: + model: + num_targets: 1 + 200k: {} + 2M: {} + 20M: {} + all: {} diff --git a/ocpmodels/common/scaling/compat.py 
b/ocpmodels/common/scaling/compat.py index 56ef12e369..8d22085e55 100644 --- a/ocpmodels/common/scaling/compat.py +++ b/ocpmodels/common/scaling/compat.py @@ -50,6 +50,8 @@ def _load_scale_dict(scale_file: Optional[Union[str, ScaleDict]]): def load_scales_compat(module: nn.Module, scale_file: Optional[Union[str, ScaleDict]]): + import ipdb + ipdb.set_trace() scale_dict = _load_scale_dict(scale_file) if not scale_dict: return diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 0173298a2f..86fdf375da 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -759,6 +759,7 @@ def setup_imports(): # manual model imports importlib.import_module("ocpmodels.models.gemnet_oc.gemnet_oc") + importlib.import_module("ocpmodels.models.gemnet_oc.depgemnet_oc") experimental_folder = os.path.join(root_folder, "../experimental/") if os.path.exists(experimental_folder): diff --git a/ocpmodels/models/gemnet_oc/depgemnet_oc.py b/ocpmodels/models/gemnet_oc/depgemnet_oc.py index f48938f648..9b42e9da79 100644 --- a/ocpmodels/models/gemnet_oc/depgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/depgemnet_oc.py @@ -2,25 +2,21 @@ from torch.nn import Linear from torch_scatter import scatter -from ocpmodels.models.schnet import SchNet -from ocpmodels.models.faenet import OutputBlock as conOutputBlock +from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC from ocpmodels.common.registry import registry from ocpmodels.common.utils import conditional_grad from torch_geometric.data import Batch @registry.register_model("depgemnet_oc") -class depGemNetOC(SchNet): +class depGemNetOC(GemNetOC): def __init__(self, **kwargs): + import ipdb + ipdb.set_trace() + kwargs["num_targets"] = kwargs["emb_size_atom"] // 2 super().__init__(**kwargs) - - # We replace the last linear transform to keep dimentionality - self.lin2 = Linear(self.hidden_channels // 2, self.hidden_channels // 2) - torch.nn.init.xavier_uniform_(self.lin2.weight) - self.lin2.bias.data.fill_(0) - - self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) - self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + import ipdb + ipdb.set_trace() @conditional_grad(torch.enable_grad()) def energy_forward(self, data): diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index b253d2c7eb..60e61d7fc6 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -358,6 +358,7 @@ def __init__( for _ in range(num_global_out_layers) ] self.out_mlp_E = torch.nn.Sequential(*out_mlp_E) + self.out_energy = Dense(emb_size_atom, num_targets, bias=False, activation=None) if direct_forces: out_mlp_F = [ @@ -384,6 +385,9 @@ def __init__( if direct_forces: self.out_forces.reset_parameters(out_initializer) + import ipdb + ipdb.set_trace() + load_scales_compat(self, scale_file) def set_cutoffs(self, cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint): From f64a3ff46cafd8aafbe22e3a046d40e3aab0c4b5 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 11 Sep 2023 00:07:00 -0400 Subject: [PATCH 095/131] Finished implementing dep and ind gemnet --- configs/models/indgemnet_oc.yaml | 102 +++++++++++++++++++++ debug.py | 2 +- ocpmodels/common/scaling/compat.py | 2 - ocpmodels/common/utils.py | 1 + ocpmodels/models/gemnet_oc/depgemnet_oc.py | 28 ++++-- ocpmodels/models/gemnet_oc/gemnet_oc.py | 21 +++-- ocpmodels/models/gemnet_oc/indgemnet_oc.py | 57 ++++++++++++ ocpmodels/trainers/base_trainer.py | 4 +- 8 files changed, 194 insertions(+), 23 deletions(-) create 
mode 100644 configs/models/indgemnet_oc.yaml create mode 100644 ocpmodels/models/gemnet_oc/indgemnet_oc.py diff --git a/configs/models/indgemnet_oc.yaml b/configs/models/indgemnet_oc.yaml new file mode 100644 index 0000000000..8cf5c66d17 --- /dev/null +++ b/configs/models/indgemnet_oc.yaml @@ -0,0 +1,102 @@ +default: + model: + name: indgemnet_oc + num_spherical: 7 + num_radial: 128 + num_blocks: 4 + emb_size_atom: 256 + emb_size_edge: 512 + emb_size_trip_in: 64 + emb_size_trip_out: 64 + emb_size_quad_in: 32 + emb_size_quad_out: 32 + emb_size_aint_in: 64 + emb_size_aint_out: 64 + emb_size_rbf: 16 + emb_size_cbf: 16 + emb_size_sbf: 32 + num_before_skip: 2 + num_after_skip: 2 + num_concat: 1 + num_atom: 3 + num_output_afteratom: 3 + cutoff: 12.0 + cutoff_qint: 12.0 + cutoff_aeaint: 12.0 + cutoff_aint: 12.0 + max_neighbors: 30 + max_neighbors_qint: 8 + max_neighbors_aeaint: 20 + max_neighbors_aint: 1000 + rbf: + name: gaussian + envelope: + name: polynomial + exponent: 5 + cbf: + name: spherical_harmonics + sbf: + name: legendre_outer + extensive: True + output_init: HeOrthogonal + activation: silu + scale_file: configs/models/scaling_factors/gemnet-oc.pt + + regress_forces: True + direct_forces: True + forces_coupled: False + + quad_interaction: True + atom_edge_interaction: True + edge_atom_interaction: True + atom_interaction: True + + num_atom_emb_layers: 2 + num_global_out_layers: 2 + qint_tags: [1, 2] + + # PhAST + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + + optim: + batch_size: 16 + eval_batch_size: 16 + load_balancing: atoms + eval_every: 5000 + num_workers: 2 + lr_initial: 5.e-4 + optimizer: AdamW + optimizer_params: {"amsgrad": True} + scheduler: ReduceLROnPlateau + mode: min + factor: 0.8 + patience: 3 + max_epochs: 80 + force_coefficient: 100 + energy_coefficient: 1 + ema_decay: 0.999 + clip_grad_norm: 10 + loss_energy: mae + loss_force: l2mae + weight_decay: 0 + +is2re: + default: + model: + regress_forces: False + num_targets: 1 + 10k: {} + all: {} + +s2ef: + default: + model: + num_targets: 1 + 200k: {} + 2M: {} + 20M: {} + all: {} diff --git a/debug.py b/debug.py index 651d988389..ed47a7c531 100644 --- a/debug.py +++ b/debug.py @@ -92,7 +92,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - args.config = "gemnet_oc-is2re-10k" + args.config = "indgemnet_oc-is2re-10k" args.tag_hidden_channels = 32 args.pg_hidden_channels = 32 diff --git a/ocpmodels/common/scaling/compat.py b/ocpmodels/common/scaling/compat.py index 8d22085e55..56ef12e369 100644 --- a/ocpmodels/common/scaling/compat.py +++ b/ocpmodels/common/scaling/compat.py @@ -50,8 +50,6 @@ def _load_scale_dict(scale_file: Optional[Union[str, ScaleDict]]): def load_scales_compat(module: nn.Module, scale_file: Optional[Union[str, ScaleDict]]): - import ipdb - ipdb.set_trace() scale_dict = _load_scale_dict(scale_file) if not scale_dict: return diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 86fdf375da..0f29423c8f 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -760,6 +760,7 @@ def setup_imports(): # manual model imports importlib.import_module("ocpmodels.models.gemnet_oc.gemnet_oc") importlib.import_module("ocpmodels.models.gemnet_oc.depgemnet_oc") + 
importlib.import_module("ocpmodels.models.gemnet_oc.indgemnet_oc") experimental_folder = os.path.join(root_folder, "../experimental/") if os.path.exists(experimental_folder): diff --git a/ocpmodels/models/gemnet_oc/depgemnet_oc.py b/ocpmodels/models/gemnet_oc/depgemnet_oc.py index 9b42e9da79..f7000606d3 100644 --- a/ocpmodels/models/gemnet_oc/depgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/depgemnet_oc.py @@ -4,22 +4,28 @@ from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC from ocpmodels.common.registry import registry -from ocpmodels.common.utils import conditional_grad +from ocpmodels.common.utils import ( + conditional_grad, + scatter_det +) from torch_geometric.data import Batch @registry.register_model("depgemnet_oc") class depGemNetOC(GemNetOC): def __init__(self, **kwargs): - import ipdb - ipdb.set_trace() - kwargs["num_targets"] = kwargs["emb_size_atom"] // 2 + self.hidden_channels = kwargs["emb_size_atom"] + + kwargs["num_targets"] = self.hidden_channels // 2 super().__init__(**kwargs) - import ipdb - ipdb.set_trace() + + self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) + self.sys_lin2 = Linear(self.hidden_channels // 2, 1) @conditional_grad(torch.enable_grad()) def energy_forward(self, data): + import ipdb + ipdb.set_trace() # We need to save the tags so this step is necessary. self.tags_saver(data.tags) pred = super().energy_forward(data) @@ -30,12 +36,16 @@ def tags_saver(self, tags): self.current_tags = tags @conditional_grad(torch.enable_grad()) - def scattering(self, h, batch): + def scattering(self, E_t, batch, dim, dim_size, reduce="add"): ads = self.current_tags == 2 cat = ~ads - ads_out = scatter(h, batch * ads, dim = 0, reduce = self.readout) - cat_out = scatter(h, batch * cat, dim = 0, reduce = self.readout) + ads_out = scatter_det( + src=E_t, index=batch * ads, dim=dim, reduce=reduce + ) + cat_out = scatter_det( + src=E_t, index=batch * cat, dim=dim, reduce=reduce + ) system = torch.cat([ads_out, cat_out], dim = 1) system = self.sys_lin1(system) diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index 60e61d7fc6..6c25e993fc 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -385,9 +385,6 @@ def __init__( if direct_forces: self.out_forces.reset_parameters(out_initializer) - import ipdb - ipdb.set_trace() - load_scales_compat(self, scale_file) def set_cutoffs(self, cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint): @@ -1286,9 +1283,6 @@ def energy_forward(self, data): xs_E.append(x_E) xs_F.append(x_F) - import ipdb - ipdb.set_trace() - # Global output block for final predictions x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) if self.direct_forces: @@ -1299,12 +1293,13 @@ def energy_forward(self, data): F_st = self.out_forces(x_F.float()) nMolecules = torch.max(batch) + 1 + if self.extensive: - E_t = scatter_det( + E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="add" - ) # (nMolecules, num_targets) + ) # (nMolecules, num_targets) else: - E_t = scatter_det( + E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" ) # (nMolecules, num_targets) @@ -1318,6 +1313,14 @@ def energy_forward(self, data): "F_st": F_st, } + @conditional_grad(torch.enable_grad()) + def scattering(self, E_t, batch, dim, dim_size, reduce="add"): + E_t = scatter_det( + src=E_t, index=batch, dim=dim, dim_size=dim_size, reduce=reduce + ) + + return E_t + @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): diff 
--git a/ocpmodels/models/gemnet_oc/indgemnet_oc.py b/ocpmodels/models/gemnet_oc/indgemnet_oc.py new file mode 100644 index 0000000000..958154a485 --- /dev/null +++ b/ocpmodels/models/gemnet_oc/indgemnet_oc.py @@ -0,0 +1,57 @@ +import torch, math +from torch import nn +from torch.nn import Linear, Transformer + +from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC +from ocpmodels.models.faenet import OutputBlock +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish + +from torch_geometric.data import Batch + +@registry.register_model("indgemnet_oc") +class indGemNetOC(BaseModel): # Change to make it inherit from base model. + def __init__(self, **kwargs): + super().__init__() + + self.regress_forces = kwargs["regress_forces"] + + kwargs["num_targets"] = kwargs["emb_size_atom"] // 2 + + self.ads_model = GemNetOC(**kwargs) + self.cat_model = GemNetOC(**kwargs) + + self.act = swish + self.combination = nn.Sequential( + Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), + self.act, + Linear(kwargs["emb_size_atom"] // 2, 1) + ) + + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + import ipdb + ipdb.set_trace() + + adsorbates = data[0] + catalysts = data[1] + + # We make predictions for each + pred_ads = self.ads_model(adsorbates, mode) + pred_cat = self.cat_model(catalysts, mode) + + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] + + # We combine predictions + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) + + # We return them + pred_system = { + "energy" : system_energy, + "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + } + + return pred_system diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 53de22d1b3..6bfb0df41e 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -154,7 +154,7 @@ def __init__(self, **kwargs): (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) # Here's the models whose edges are removed as a transform - transform_models = ["depfaenet", "depschnet"] + transform_models = ["depfaenet", "depschnet", "depgemnet_oc"] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") if self.config["model_name"] in transform_models: @@ -164,7 +164,7 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.separate_models = ["indfaenet", "indschnet"] + self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc"] self.heterogeneous_models = ["afaenet", "aschnet"] self.data_mode = "normal" self.separate_dataset = False From b03446d634cb8601f130a50f99672b50411f9aaf Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 11 Sep 2023 00:07:52 -0400 Subject: [PATCH 096/131] readied config files --- configs/exps/alvaro/gemnet-config.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index 953bbfb1e1..e22d378f00 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -24,3 +24,7 @@ default: runs: - config: gemnet_oc-is2re-all + + - config: depgemnet_oc-is2re-all + + - config: indgemnet_oc-is2re-all From 
a54ff2df8d6e43c13d7d4e31d1bd1eac0e94f72e Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 11 Sep 2023 21:27:16 -0400 Subject: [PATCH 097/131] About to start incorporating agemnet_oc --- configs/exps/alvaro/faenet-training.yaml | 47 + configs/exps/alvaro/gemnet-config.yaml | 11 +- ocpmodels/models/gemnet_oc/agemnet_oc.py | 1369 ++++++++++++++++++++++ ocpmodels/trainers/base_trainer.py | 2 +- 4 files changed, 1425 insertions(+), 4 deletions(-) create mode 100644 configs/exps/alvaro/faenet-training.yaml create mode 100644 ocpmodels/models/gemnet_oc/agemnet_oc.py diff --git a/configs/exps/alvaro/faenet-training.yaml b/configs/exps/alvaro/faenet-training.yaml new file mode 100644 index 0000000000..f124a63c66 --- /dev/null +++ b/configs/exps/alvaro/faenet-training.yaml @@ -0,0 +1,47 @@ +# MODIFY THIS ONE FOR RUNS + +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + optim: + cp_data_to_tmpdir: true + wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. + frame_averaging: 2D + fa_frames: se3-random + model: + mp_type: updownscale + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: False + graph_norm: True + hidden_channels: 352 + num_filters: 448 + num_gaussians: 99 + num_interactions: 6 + second_layer_MLP: True + skip_co: concat + edge_embed_type: all_rij + optim: + lr_initial: 0.0005 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 + batch_size: 256 + eval_batch_size: 256 + +runs: + - config: afaenet-is2re-all + model: + afaenet_gat_mode: v1 diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index e22d378f00..beb18a48d4 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -23,8 +23,13 @@ default: max_epochs: 30 runs: - - config: gemnet_oc-is2re-all + #- config: gemnet_oc-is2re-all + + #- config: depgemnet_oc-is2re-all - - config: depgemnet_oc-is2re-all + #- config: indgemnet_oc-is2re-all + + - config: gemnet_oc-is2re-all + is_disconnected: True - - config: indgemnet_oc-is2re-all + - config: agemnet_oc-is2re-all diff --git a/ocpmodels/models/gemnet_oc/agemnet_oc.py b/ocpmodels/models/gemnet_oc/agemnet_oc.py new file mode 100644 index 0000000000..6c25e993fc --- /dev/null +++ b/ocpmodels/models/gemnet_oc/agemnet_oc.py @@ -0,0 +1,1369 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. 
+""" + +import logging +from typing import Optional + +import numpy as np +import torch +from torch_scatter import segment_coo + +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + conditional_grad, + get_max_neighbors_mask, + scatter_det, +) +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.scaling.compat import load_scales_compat + +from .initializers import get_initializer +from .interaction_indices import ( + get_quadruplets, + get_triplets, +) +from .layers.atom_update_block import OutputBlock +from .layers.base_layers import Dense, ResidualLayer +from .layers.efficient import BasisEmbedding +from .layers.embedding_block import AtomEmbedding, EdgeEmbedding +from .layers.force_scaler import ForceScaler +from .layers.interaction_block import InteractionBlock +from .layers.radial_basis import RadialBasis +from .layers.spherical_basis import CircularBasisLayer, SphericalBasisLayer +from .utils import ( + get_angle, + get_edge_id, + get_inner_idx, + inner_product_clamped, + mask_neighbors, + repeat_blocks, +) +from .interaction_indices import get_mixed_triplets + + +@registry.register_model("gemnet_oc") +class GemNetOC(BaseModel): + """ + Arguments + --------- + num_atoms (int): Unused argument + bond_feat_dim (int): Unused argument + num_targets: int + Number of prediction targets. + + num_spherical: int + Controls maximum frequency. + num_radial: int + Controls maximum frequency. + num_blocks: int + Number of building blocks to be stacked. + + emb_size_atom: int + Embedding size of the atoms. + emb_size_edge: int + Embedding size of the edges. + emb_size_trip_in: int + (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + emb_size_trip_out: int + (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + emb_size_quad_in: int + (Down-projected) embedding size of the quadruplet edge embeddings + before the bilinear layer. + emb_size_quad_out: int + (Down-projected) embedding size of the quadruplet edge embeddings + after the bilinear layer. + emb_size_aint_in: int + Embedding size in the atom interaction before the bilinear layer. + emb_size_aint_out: int + Embedding size in the atom interaction after the bilinear layer. + emb_size_rbf: int + Embedding size of the radial basis transformation. + emb_size_cbf: int + Embedding size of the circular basis transformation (one angle). + emb_size_sbf: int + Embedding size of the spherical basis transformation (two angles). + + num_before_skip: int + Number of residual blocks before the first skip connection. + num_after_skip: int + Number of residual blocks after the first skip connection. + num_concat: int + Number of residual blocks after the concatenation. + num_atom: int + Number of residual blocks in the atom embedding blocks. + num_output_afteratom: int + Number of residual blocks in the output blocks + after adding the atom embedding. + num_atom_emb_layers: int + Number of residual blocks for transforming atom embeddings. + num_global_out_layers: int + Number of final residual blocks before the output. + + regress_forces: bool + Whether to predict forces. Default: True + direct_forces: bool + If True predict forces based on aggregation of interatomic directions. + If False predict forces based on negative gradient of energy potential. + use_pbc: bool + Whether to use periodic boundary conditions. 
+ scale_backprop_forces: bool + Whether to scale up the energy and then scales down the forces + to prevent NaNs and infs in backpropagated forces. + + cutoff: float + Embedding cutoff for interatomic connections and embeddings in Angstrom. + cutoff_qint: float + Quadruplet interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + cutoff_aeaint: float + Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. + Optional. Uses cutoff per default. + cutoff_aint: float + Atom-to-atom interaction cutoff in Angstrom. + Optional. Uses maximum of all other cutoffs per default. + max_neighbors: int + Maximum number of neighbors for interatomic connections and embeddings. + max_neighbors_qint: int + Maximum number of quadruplet interactions per embedding. + Optional. Uses max_neighbors per default. + max_neighbors_aeaint: int + Maximum number of edge-to-atom and atom-to-edge interactions per embedding. + Optional. Uses max_neighbors per default. + max_neighbors_aint: int + Maximum number of atom-to-atom interactions per atom. + Optional. Uses maximum of all other neighbors per default. + + rbf: dict + Name and hyperparameters of the radial basis function. + rbf_spherical: dict + Name and hyperparameters of the radial basis function used as part of the + circular and spherical bases. + Optional. Uses rbf per default. + envelope: dict + Name and hyperparameters of the envelope function. + cbf: dict + Name and hyperparameters of the circular basis function. + sbf: dict + Name and hyperparameters of the spherical basis function. + extensive: bool + Whether the output should be extensive (proportional to the number of atoms) + forces_coupled: bool + If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. + output_init: str + Initialization method for the final dense layer. + activation: str + Name of the activation function. + scale_file: str + Path to the pytorch file containing the scaling factors. + + quad_interaction: bool + Whether to use quadruplet interactions (with dihedral angles) + atom_edge_interaction: bool + Whether to use atom-to-edge interactions + edge_atom_interaction: bool + Whether to use edge-to-atom interactions + atom_interaction: bool + Whether to use atom-to-atom interactions + + scale_basis: bool + Whether to use a scaling layer in the raw basis function for better + numerical stability. + qint_tags: list + Which atom tags to use quadruplet interactions for. + 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. 
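
A toy illustration of how this tag filter is applied later in get_graphs_and_indices in this file (made-up tensors, not from the repository; the real code reads data.tags and self.qint_tags):

    import torch

    tags = torch.tensor([0, 0, 1, 2])            # hypothetical per-atom tags
    edge_index = torch.tensor([[0, 2, 3],        # source atoms of the qint graph
                               [1, 3, 0]])       # target atoms of the qint graph
    qint_tags = torch.tensor([1, 2])             # keep surface/adsorbate edges only

    mask_s = (tags[edge_index[0]][..., None] == qint_tags).any(dim=-1)
    mask_t = (tags[edge_index[1]][..., None] == qint_tags).any(dim=-1)
    keep = mask_s | mask_t                       # tensor([False,  True,  True])
    edge_index = edge_index[:, keep]             # bulk-bulk edges are dropped
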
+ """ + + def __init__( + self, + num_atoms: Optional[int], + bond_feat_dim: int, + num_targets: int, + num_spherical: int, + num_radial: int, + num_blocks: int, + emb_size_atom: int, + emb_size_edge: int, + emb_size_trip_in: int, + emb_size_trip_out: int, + emb_size_quad_in: int, + emb_size_quad_out: int, + emb_size_aint_in: int, + emb_size_aint_out: int, + emb_size_rbf: int, + emb_size_cbf: int, + emb_size_sbf: int, + num_before_skip: int, + num_after_skip: int, + num_concat: int, + num_atom: int, + num_output_afteratom: int, + num_atom_emb_layers: int = 0, + num_global_out_layers: int = 2, + regress_forces: bool = True, + direct_forces: bool = False, + use_pbc: bool = True, + scale_backprop_forces: bool = False, + cutoff: float = 6.0, + cutoff_qint: Optional[float] = None, + cutoff_aeaint: Optional[float] = None, + cutoff_aint: Optional[float] = None, + max_neighbors: int = 50, + max_neighbors_qint: Optional[int] = None, + max_neighbors_aeaint: Optional[int] = None, + max_neighbors_aint: Optional[int] = None, + rbf: dict = {"name": "gaussian"}, + rbf_spherical: Optional[dict] = None, + envelope: dict = {"name": "polynomial", "exponent": 5}, + cbf: dict = {"name": "spherical_harmonics"}, + sbf: dict = {"name": "spherical_harmonics"}, + extensive: bool = True, + forces_coupled: bool = False, + output_init: str = "HeOrthogonal", + activation: str = "silu", + quad_interaction: bool = False, + atom_edge_interaction: bool = False, + edge_atom_interaction: bool = False, + atom_interaction: bool = False, + scale_basis: bool = False, + qint_tags: list = [0, 1, 2], + num_elements: int = 83, + otf_graph: bool = False, + scale_file: Optional[str] = None, + tag_hidden_channels: int = 0, + pg_hidden_channels: int = 0, + phys_embeds: bool = False, + phys_hidden_channels: int = 0, + **kwargs, # backwards compatibility with deprecated arguments + ): + super().__init__() + if len(kwargs) > 0: + logging.warning(f"Unrecognized arguments: {list(kwargs.keys())}") + self.num_targets = num_targets + assert num_blocks > 0 + self.num_blocks = num_blocks + self.extensive = extensive + + self.atom_edge_interaction = atom_edge_interaction + self.edge_atom_interaction = edge_atom_interaction + self.atom_interaction = atom_interaction + self.quad_interaction = quad_interaction + self.qint_tags = torch.tensor(qint_tags) + self.otf_graph = otf_graph + if not rbf_spherical: + rbf_spherical = rbf + + self.set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) + self.set_max_neighbors( + max_neighbors, + max_neighbors_qint, + max_neighbors_aeaint, + max_neighbors_aint, + ) + self.use_pbc = use_pbc + + self.direct_forces = direct_forces + self.forces_coupled = forces_coupled + self.regress_forces = regress_forces + self.force_scaler = ForceScaler(enabled=scale_backprop_forces) + + self.init_basis_functions( + num_radial, + num_spherical, + rbf, + rbf_spherical, + envelope, + cbf, + sbf, + scale_basis, + ) + self.init_shared_basis_layers( + num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf + ) + + # Embedding blocks + self.atom_emb = AtomEmbedding( + emb_size_atom, + num_elements, + tag_hidden_channels=tag_hidden_channels, + pg_hidden_channels=pg_hidden_channels, + phys_hidden_channels=phys_hidden_channels, + phys_embeds=phys_embeds, + ) + self.edge_emb = EdgeEmbedding( + emb_size_atom, num_radial, emb_size_edge, activation=activation + ) + + # Interaction Blocks + int_blocks = [] + for _ in range(num_blocks): + int_blocks.append( + InteractionBlock( + emb_size_atom=emb_size_atom, + 
emb_size_edge=emb_size_edge, + emb_size_trip_in=emb_size_trip_in, + emb_size_trip_out=emb_size_trip_out, + emb_size_quad_in=emb_size_quad_in, + emb_size_quad_out=emb_size_quad_out, + emb_size_a2a_in=emb_size_aint_in, + emb_size_a2a_out=emb_size_aint_out, + emb_size_rbf=emb_size_rbf, + emb_size_cbf=emb_size_cbf, + emb_size_sbf=emb_size_sbf, + num_before_skip=num_before_skip, + num_after_skip=num_after_skip, + num_concat=num_concat, + num_atom=num_atom, + num_atom_emb_layers=num_atom_emb_layers, + quad_interaction=quad_interaction, + atom_edge_interaction=atom_edge_interaction, + edge_atom_interaction=edge_atom_interaction, + atom_interaction=atom_interaction, + activation=activation, + ) + ) + self.int_blocks = torch.nn.ModuleList(int_blocks) + + out_blocks = [] + for _ in range(num_blocks + 1): + out_blocks.append( + OutputBlock( + emb_size_atom=emb_size_atom, + emb_size_edge=emb_size_edge, + emb_size_rbf=emb_size_rbf, + nHidden=num_atom, + nHidden_afteratom=num_output_afteratom, + activation=activation, + direct_forces=direct_forces, + ) + ) + self.out_blocks = torch.nn.ModuleList(out_blocks) + + out_mlp_E = [ + Dense( + emb_size_atom * (num_blocks + 1), + emb_size_atom, + activation=activation, + ) + ] + out_mlp_E += [ + ResidualLayer( + emb_size_atom, + activation=activation, + ) + for _ in range(num_global_out_layers) + ] + self.out_mlp_E = torch.nn.Sequential(*out_mlp_E) + + self.out_energy = Dense(emb_size_atom, num_targets, bias=False, activation=None) + if direct_forces: + out_mlp_F = [ + Dense( + emb_size_edge * (num_blocks + 1), + emb_size_edge, + activation=activation, + ) + ] + out_mlp_F += [ + ResidualLayer( + emb_size_edge, + activation=activation, + ) + for _ in range(num_global_out_layers) + ] + self.out_mlp_F = torch.nn.Sequential(*out_mlp_F) + self.out_forces = Dense( + emb_size_edge, num_targets, bias=False, activation=None + ) + + out_initializer = get_initializer(output_init) + self.out_energy.reset_parameters(out_initializer) + if direct_forces: + self.out_forces.reset_parameters(out_initializer) + + load_scales_compat(self, scale_file) + + def set_cutoffs(self, cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint): + self.cutoff = cutoff + + if ( + not (self.atom_edge_interaction or self.edge_atom_interaction) + or cutoff_aeaint is None + ): + self.cutoff_aeaint = self.cutoff + else: + self.cutoff_aeaint = cutoff_aeaint + if not self.quad_interaction or cutoff_qint is None: + self.cutoff_qint = self.cutoff + else: + self.cutoff_qint = cutoff_qint + if not self.atom_interaction or cutoff_aint is None: + self.cutoff_aint = max( + self.cutoff, + self.cutoff_aeaint, + self.cutoff_qint, + ) + else: + self.cutoff_aint = cutoff_aint + + assert self.cutoff <= self.cutoff_aint + assert self.cutoff_aeaint <= self.cutoff_aint + assert self.cutoff_qint <= self.cutoff_aint + + def set_max_neighbors( + self, + max_neighbors, + max_neighbors_qint, + max_neighbors_aeaint, + max_neighbors_aint, + ): + self.max_neighbors = max_neighbors + + if ( + not (self.atom_edge_interaction or self.edge_atom_interaction) + or max_neighbors_aeaint is None + ): + self.max_neighbors_aeaint = self.max_neighbors + else: + self.max_neighbors_aeaint = max_neighbors_aeaint + if not self.quad_interaction or max_neighbors_qint is None: + self.max_neighbors_qint = self.max_neighbors + else: + self.max_neighbors_qint = max_neighbors_qint + if not self.atom_interaction or max_neighbors_aint is None: + self.max_neighbors_aint = max( + self.max_neighbors, + self.max_neighbors_aeaint, + self.max_neighbors_qint, + ) + 
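
For a concrete sense of the fallback above, here is a minimal sketch with hypothetical numbers (ignoring the interaction flags): any budget left as None inherits the base value, and the atom-to-atom budget defaults to the largest of the others, which is what the assertions just below rely on.

    def resolve_neighbors(base, aeaint=None, qint=None, aint=None):
        # simplified mirror of set_max_neighbors: unset budgets inherit the
        # base value, and the atom-to-atom budget defaults to the maximum
        aeaint = base if aeaint is None else aeaint
        qint = base if qint is None else qint
        aint = max(base, aeaint, qint) if aint is None else aint
        return aeaint, qint, aint

    print(resolve_neighbors(30, aeaint=20, qint=8))             # (20, 8, 30)
    print(resolve_neighbors(30, aeaint=20, qint=8, aint=1000))  # (20, 8, 1000)
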
else: + self.max_neighbors_aint = max_neighbors_aint + + assert self.max_neighbors <= self.max_neighbors_aint + assert self.max_neighbors_aeaint <= self.max_neighbors_aint + assert self.max_neighbors_qint <= self.max_neighbors_aint + + def init_basis_functions( + self, + num_radial, + num_spherical, + rbf, + rbf_spherical, + envelope, + cbf, + sbf, + scale_basis, + ): + self.radial_basis = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff, + rbf=rbf, + envelope=envelope, + scale_basis=scale_basis, + ) + radial_basis_spherical = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff, + rbf=rbf_spherical, + envelope=envelope, + scale_basis=scale_basis, + ) + if self.quad_interaction: + radial_basis_spherical_qint = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff_qint, + rbf=rbf_spherical, + envelope=envelope, + scale_basis=scale_basis, + ) + self.cbf_basis_qint = CircularBasisLayer( + num_spherical, + radial_basis=radial_basis_spherical_qint, + cbf=cbf, + scale_basis=scale_basis, + ) + + self.sbf_basis_qint = SphericalBasisLayer( + num_spherical, + radial_basis=radial_basis_spherical, + sbf=sbf, + scale_basis=scale_basis, + ) + if self.atom_edge_interaction: + self.radial_basis_aeaint = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff_aeaint, + rbf=rbf, + envelope=envelope, + scale_basis=scale_basis, + ) + self.cbf_basis_aeint = CircularBasisLayer( + num_spherical, + radial_basis=radial_basis_spherical, + cbf=cbf, + scale_basis=scale_basis, + ) + if self.edge_atom_interaction: + self.radial_basis_aeaint = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff_aeaint, + rbf=rbf, + envelope=envelope, + scale_basis=scale_basis, + ) + radial_basis_spherical_aeaint = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff_aeaint, + rbf=rbf_spherical, + envelope=envelope, + scale_basis=scale_basis, + ) + self.cbf_basis_eaint = CircularBasisLayer( + num_spherical, + radial_basis=radial_basis_spherical_aeaint, + cbf=cbf, + scale_basis=scale_basis, + ) + if self.atom_interaction: + self.radial_basis_aint = RadialBasis( + num_radial=num_radial, + cutoff=self.cutoff_aint, + rbf=rbf, + envelope=envelope, + scale_basis=scale_basis, + ) + + self.cbf_basis_tint = CircularBasisLayer( + num_spherical, + radial_basis=radial_basis_spherical, + cbf=cbf, + scale_basis=scale_basis, + ) + + def init_shared_basis_layers( + self, + num_radial, + num_spherical, + emb_size_rbf, + emb_size_cbf, + emb_size_sbf, + ): + # Share basis down projections across all interaction blocks + if self.quad_interaction: + self.mlp_rbf_qint = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + self.mlp_cbf_qint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) + self.mlp_sbf_qint = BasisEmbedding( + num_radial, emb_size_sbf, num_spherical**2 + ) + + if self.atom_edge_interaction: + self.mlp_rbf_aeint = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + self.mlp_cbf_aeint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) + if self.edge_atom_interaction: + self.mlp_rbf_eaint = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + self.mlp_cbf_eaint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) + if self.atom_interaction: + self.mlp_rbf_aint = BasisEmbedding(num_radial, emb_size_rbf) + + self.mlp_rbf_tint = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + self.mlp_cbf_tint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) + + # Share the dense Layer of the atom 
embedding block across + # the interaction blocks + self.mlp_rbf_h = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + self.mlp_rbf_out = Dense( + num_radial, + emb_size_rbf, + activation=None, + bias=False, + ) + + # Set shared parameters for better gradients + self.shared_parameters = [ + (self.mlp_rbf_tint.linear.weight, self.num_blocks), + (self.mlp_cbf_tint.weight, self.num_blocks), + (self.mlp_rbf_h.linear.weight, self.num_blocks), + (self.mlp_rbf_out.linear.weight, self.num_blocks + 1), + ] + if self.quad_interaction: + self.shared_parameters += [ + (self.mlp_rbf_qint.linear.weight, self.num_blocks), + (self.mlp_cbf_qint.weight, self.num_blocks), + (self.mlp_sbf_qint.weight, self.num_blocks), + ] + if self.atom_edge_interaction: + self.shared_parameters += [ + (self.mlp_rbf_aeint.linear.weight, self.num_blocks), + (self.mlp_cbf_aeint.weight, self.num_blocks), + ] + if self.edge_atom_interaction: + self.shared_parameters += [ + (self.mlp_rbf_eaint.linear.weight, self.num_blocks), + (self.mlp_cbf_eaint.weight, self.num_blocks), + ] + if self.atom_interaction: + self.shared_parameters += [ + (self.mlp_rbf_aint.weight, self.num_blocks), + ] + + def calculate_quad_angles( + self, + V_st, + V_qint_st, + quad_idx, + ): + """Calculate angles for quadruplet-based message passing. + + Arguments + --------- + V_st: Tensor, shape = (nAtoms, 3) + Normalized directions from s to t + V_qint_st: Tensor, shape = (nAtoms, 3) + Normalized directions from s to t for the quadruplet + interaction graph + quad_idx: dict of torch.Tensor + Indices relevant for quadruplet interactions. + + Returns + ------- + cosφ_cab: Tensor, shape = (num_triplets_inint,) + Cosine of angle between atoms c -> a <- b. + cosφ_abd: Tensor, shape = (num_triplets_qint,) + Cosine of angle between atoms a -> b -> d. + angle_cabd: Tensor, shape = (num_quadruplets,) + Dihedral angle between atoms c <- a-b -> d. + """ + # ---------------------------------- d -> b -> a ----------------------------- # + V_ba = V_qint_st[quad_idx["triplet_in"]["out"]] + # (num_triplets_qint, 3) + V_db = V_st[quad_idx["triplet_in"]["in"]] + # (num_triplets_qint, 3) + cosφ_abd = inner_product_clamped(V_ba, V_db) + # (num_triplets_qint,) + + # Project for calculating dihedral angle + # Cross product is the same as projection, just 90° rotated + V_db_cross = torch.cross(V_db, V_ba, dim=-1) # a - b -| d + V_db_cross = V_db_cross[quad_idx["trip_in_to_quad"]] + # (num_quadruplets,) + + # ----------------------------- c -> a <- b ---------------------------------- # + V_ca = V_st[quad_idx["triplet_out"]["out"]] # (num_triplets_in, 3) + V_ba = V_qint_st[quad_idx["triplet_out"]["in"]] # (num_triplets_in, 3) + cosφ_cab = inner_product_clamped(V_ca, V_ba) # (n4Triplets,) + + # Project for calculating dihedral angle + # Cross product is the same as projection, just 90° rotated + V_ca_cross = torch.cross(V_ca, V_ba, dim=-1) # c |- a - b + V_ca_cross = V_ca_cross[quad_idx["trip_out_to_quad"]] + # (num_quadruplets,) + + # --------------------------- c -> a - b <- d -------------------------------- # + half_angle_cabd = get_angle(V_ca_cross, V_db_cross) + # (num_quadruplets,) + angle_cabd = half_angle_cabd + # Ignore parity and just use the half angle. + + return cosφ_cab, cosφ_abd, angle_cabd + + def select_symmetric_edges(self, tensor, mask, reorder_idx, opposite_neg): + """Use a mask to remove values of removed edges and then + duplicate the values for the correct edge direction. 
+ + Arguments + --------- + tensor: torch.Tensor + Values to symmetrize for the new tensor. + mask: torch.Tensor + Mask defining which edges go in the correct direction. + reorder_idx: torch.Tensor + Indices defining how to reorder the tensor values after + concatenating the edge values of both directions. + opposite_neg: bool + Whether the edge in the opposite direction should use the + negative tensor value. + + Returns + ------- + tensor_ordered: torch.Tensor + A tensor with symmetrized values. + """ + # Mask out counter-edges + tensor_directed = tensor[mask] + # Concatenate counter-edges after normal edges + sign = 1 - 2 * opposite_neg + tensor_cat = torch.cat([tensor_directed, sign * tensor_directed]) + # Reorder everything so the edges of every image are consecutive + tensor_ordered = tensor_cat[reorder_idx] + return tensor_ordered + + def symmetrize_edges( + self, + graph, + batch_idx, + ): + """ + Symmetrize edges to ensure existence of counter-directional edges. + + Some edges are only present in one direction in the data, + since every atom has a maximum number of neighbors. + We only use i->j edges here. So we lose some j->i edges + and add others by making it symmetric. + """ + num_atoms = batch_idx.shape[0] + new_graph = {} + + # Generate mask + mask_sep_atoms = graph["edge_index"][0] < graph["edge_index"][1] + # Distinguish edges between the same (periodic) atom by ordering the cells + cell_earlier = ( + (graph["cell_offset"][:, 0] < 0) + | ((graph["cell_offset"][:, 0] == 0) & (graph["cell_offset"][:, 1] < 0)) + | ( + (graph["cell_offset"][:, 0] == 0) + & (graph["cell_offset"][:, 1] == 0) + & (graph["cell_offset"][:, 2] < 0) + ) + ) + mask_same_atoms = graph["edge_index"][0] == graph["edge_index"][1] + mask_same_atoms &= cell_earlier + mask = mask_sep_atoms | mask_same_atoms + + # Mask out counter-edges + edge_index_directed = graph["edge_index"][mask[None, :].expand(2, -1)].view( + 2, -1 + ) + + # Concatenate counter-edges after normal edges + edge_index_cat = torch.cat( + [edge_index_directed, edge_index_directed.flip(0)], + dim=1, + ) + + # Count remaining edges per image + batch_edge = torch.repeat_interleave( + torch.arange( + graph["num_neighbors"].size(0), + device=graph["edge_index"].device, + ), + graph["num_neighbors"], + ) + batch_edge = batch_edge[mask] + # segment_coo assumes sorted batch_edge + # Factor 2 since this is only one half of the edges + ones = batch_edge.new_ones(1).expand_as(batch_edge) + new_graph["num_neighbors"] = 2 * segment_coo( + ones, batch_edge, dim_size=graph["num_neighbors"].size(0) + ) + + # Create indexing array + edge_reorder_idx = repeat_blocks( + torch.div(new_graph["num_neighbors"], 2, rounding_mode="floor"), + repeats=2, + continuous_indexing=True, + repeat_inc=edge_index_directed.size(1), + ) + + # Reorder everything so the edges of every image are consecutive + new_graph["edge_index"] = edge_index_cat[:, edge_reorder_idx] + new_graph["cell_offset"] = self.select_symmetric_edges( + graph["cell_offset"], mask, edge_reorder_idx, True + ) + new_graph["distance"] = self.select_symmetric_edges( + graph["distance"], mask, edge_reorder_idx, False + ) + new_graph["vector"] = self.select_symmetric_edges( + graph["vector"], mask, edge_reorder_idx, True + ) + + # Indices for swapping c->a and a->c (for symmetric MP) + # To obtain these efficiently and without any index assumptions, + # we get order the counter-edge IDs and then + # map this order back to the edge IDs. 
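
A toy check of the double-argsort mapping that the next comment line describes (hypothetical edge ids, no cell offsets; the real code derives ids with get_edge_id, which also encodes the offsets):

    import torch

    edge_index = torch.tensor([[0, 1, 1, 2],
                               [1, 0, 2, 1]])                    # every edge has its reverse
    num_atoms = 3
    edge_ids = edge_index[0] * num_atoms + edge_index[1]          # tensor([1, 3, 5, 7])
    edge_ids_counter = edge_index[1] * num_atoms + edge_index[0]  # tensor([3, 1, 7, 5])

    inv_order = torch.argsort(torch.argsort(edge_ids))
    id_swap = torch.argsort(edge_ids_counter)[inv_order]          # tensor([1, 0, 3, 2])

    # id_swap[e] is the index of the edge pointing opposite to edge e
    assert torch.equal(edge_index[:, id_swap], edge_index.flip(0))
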
+ # Double argsort gives the desired mapping + # from the ordered tensor to the original tensor. + edge_ids = get_edge_id( + new_graph["edge_index"], new_graph["cell_offset"], num_atoms + ) + order_edge_ids = torch.argsort(edge_ids) + inv_order_edge_ids = torch.argsort(order_edge_ids) + edge_ids_counter = get_edge_id( + new_graph["edge_index"].flip(0), + -new_graph["cell_offset"], + num_atoms, + ) + order_edge_ids_counter = torch.argsort(edge_ids_counter) + id_swap = order_edge_ids_counter[inv_order_edge_ids] + + return new_graph, id_swap + + def subselect_edges( + self, + data, + graph, + cutoff=None, + max_neighbors=None, + ): + """Subselect edges using a stricter cutoff and max_neighbors.""" + subgraph = graph.copy() + + if cutoff is not None: + edge_mask = subgraph["distance"] <= cutoff + + subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask] + subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask] + subgraph["num_neighbors"] = mask_neighbors( + subgraph["num_neighbors"], edge_mask + ) + subgraph["distance"] = subgraph["distance"][edge_mask] + subgraph["vector"] = subgraph["vector"][edge_mask] + + if max_neighbors is not None: + edge_mask, subgraph["num_neighbors"] = get_max_neighbors_mask( + natoms=data.natoms, + index=subgraph["edge_index"][1], + atom_distance=subgraph["distance"], + max_num_neighbors_threshold=max_neighbors, + ) + if not torch.all(edge_mask): + subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask] + subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask] + subgraph["distance"] = subgraph["distance"][edge_mask] + subgraph["vector"] = subgraph["vector"][edge_mask] + + empty_image = subgraph["num_neighbors"] == 0 + if torch.any(empty_image): + raise ValueError( + f"An image has no neighbors: id={data.id[empty_image]}, " + f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}" + ) + return subgraph + + def generate_graph_dict(self, data, cutoff, max_neighbors): + """Generate a radius/nearest neighbor graph.""" + otf_graph = cutoff > 6 or max_neighbors > 50 or self.otf_graph + + ( + edge_index, + edge_dist, + distance_vec, + cell_offsets, + _, # cell offset distances + num_neighbors, + ) = self.generate_graph( + data, + cutoff=cutoff, + max_neighbors=max_neighbors, + otf_graph=otf_graph, + ) + # These vectors actually point in the opposite direction. + # But we want to use col as idx_t for efficient aggregation. + edge_vector = -distance_vec / edge_dist[:, None] + cell_offsets = -cell_offsets # a - c + offset + + graph = { + "edge_index": edge_index, + "distance": edge_dist, + "vector": edge_vector, + "cell_offset": cell_offsets, + "num_neighbors": num_neighbors, + } + + # Mask interaction edges if required + if otf_graph or np.isclose(cutoff, 6): + select_cutoff = None + else: + select_cutoff = cutoff + if otf_graph or max_neighbors == 50: + select_neighbors = None + else: + select_neighbors = max_neighbors + graph = self.subselect_edges( + data=data, + graph=graph, + cutoff=select_cutoff, + max_neighbors=select_neighbors, + ) + + return graph + + def subselect_graph( + self, + data, + graph, + cutoff, + max_neighbors, + cutoff_orig, + max_neighbors_orig, + ): + """If the new cutoff and max_neighbors is different from the original, + subselect the edges of a given graph. 
+ """ + # Check if embedding edges are different from interaction edges + if np.isclose(cutoff, cutoff_orig): + select_cutoff = None + else: + select_cutoff = cutoff + if max_neighbors == max_neighbors_orig: + select_neighbors = None + else: + select_neighbors = max_neighbors + + return self.subselect_edges( + data=data, + graph=graph, + cutoff=select_cutoff, + max_neighbors=select_neighbors, + ) + + def get_graphs_and_indices(self, data): + """ "Generate embedding and interaction graphs and indices.""" + num_atoms = data.atomic_numbers.size(0) + + # Atom interaction graph is always the largest + if ( + self.atom_edge_interaction + or self.edge_atom_interaction + or self.atom_interaction + ): + a2a_graph = self.generate_graph_dict( + data, self.cutoff_aint, self.max_neighbors_aint + ) + main_graph = self.subselect_graph( + data, + a2a_graph, + self.cutoff, + self.max_neighbors, + self.cutoff_aint, + self.max_neighbors_aint, + ) + a2ee2a_graph = self.subselect_graph( + data, + a2a_graph, + self.cutoff_aeaint, + self.max_neighbors_aeaint, + self.cutoff_aint, + self.max_neighbors_aint, + ) + else: + main_graph = self.generate_graph_dict(data, self.cutoff, self.max_neighbors) + a2a_graph = {} + a2ee2a_graph = {} + if self.quad_interaction: + if ( + self.atom_edge_interaction + or self.edge_atom_interaction + or self.atom_interaction + ): + qint_graph = self.subselect_graph( + data, + a2a_graph, + self.cutoff_qint, + self.max_neighbors_qint, + self.cutoff_aint, + self.max_neighbors_aint, + ) + else: + assert self.cutoff_qint <= self.cutoff + assert self.max_neighbors_qint <= self.max_neighbors + qint_graph = self.subselect_graph( + data, + main_graph, + self.cutoff_qint, + self.max_neighbors_qint, + self.cutoff, + self.max_neighbors, + ) + + # Only use quadruplets for certain tags + self.qint_tags = self.qint_tags.to(qint_graph["edge_index"].device) + tags_s = data.tags[qint_graph["edge_index"][0]] + tags_t = data.tags[qint_graph["edge_index"][1]] + qint_tag_mask_s = (tags_s[..., None] == self.qint_tags).any(dim=-1) + qint_tag_mask_t = (tags_t[..., None] == self.qint_tags).any(dim=-1) + qint_tag_mask = qint_tag_mask_s | qint_tag_mask_t + qint_graph["edge_index"] = qint_graph["edge_index"][:, qint_tag_mask] + qint_graph["cell_offset"] = qint_graph["cell_offset"][qint_tag_mask, :] + qint_graph["distance"] = qint_graph["distance"][qint_tag_mask] + qint_graph["vector"] = qint_graph["vector"][qint_tag_mask, :] + del qint_graph["num_neighbors"] + else: + qint_graph = {} + + # Symmetrize edges for swapping in symmetric message passing + main_graph, id_swap = self.symmetrize_edges(main_graph, data.batch) + + trip_idx_e2e = get_triplets(main_graph, num_atoms=num_atoms) + + # Additional indices for quadruplets + if self.quad_interaction: + quad_idx = get_quadruplets( + main_graph, + qint_graph, + num_atoms, + ) + else: + quad_idx = {} + + if self.atom_edge_interaction: + trip_idx_a2e = get_mixed_triplets( + a2ee2a_graph, + main_graph, + num_atoms=num_atoms, + return_agg_idx=True, + ) + else: + trip_idx_a2e = {} + if self.edge_atom_interaction: + trip_idx_e2a = get_mixed_triplets( + main_graph, + a2ee2a_graph, + num_atoms=num_atoms, + return_agg_idx=True, + ) + # a2ee2a_graph['edge_index'][1] has to be sorted for this + a2ee2a_graph["target_neighbor_idx"] = get_inner_idx( + a2ee2a_graph["edge_index"][1], dim_size=num_atoms + ) + else: + trip_idx_e2a = {} + if self.atom_interaction: + # a2a_graph['edge_index'][1] has to be sorted for this + a2a_graph["target_neighbor_idx"] = get_inner_idx( + 
a2a_graph["edge_index"][1], dim_size=num_atoms + ) + + return ( + main_graph, + a2a_graph, + a2ee2a_graph, + qint_graph, + id_swap, + trip_idx_e2e, + trip_idx_a2e, + trip_idx_e2a, + quad_idx, + ) + + def get_bases( + self, + main_graph, + a2a_graph, + a2ee2a_graph, + qint_graph, + trip_idx_e2e, + trip_idx_a2e, + trip_idx_e2a, + quad_idx, + num_atoms, + ): + """Calculate and transform basis functions.""" + basis_rad_main_raw = self.radial_basis(main_graph["distance"]) + + # Calculate triplet angles + cosφ_cab = inner_product_clamped( + main_graph["vector"][trip_idx_e2e["out"]], + main_graph["vector"][trip_idx_e2e["in"]], + ) + basis_rad_cir_e2e_raw, basis_cir_e2e_raw = self.cbf_basis_tint( + main_graph["distance"], cosφ_cab + ) + + if self.quad_interaction: + # Calculate quadruplet angles + cosφ_cab_q, cosφ_abd, angle_cabd = self.calculate_quad_angles( + main_graph["vector"], + qint_graph["vector"], + quad_idx, + ) + + basis_rad_cir_qint_raw, basis_cir_qint_raw = self.cbf_basis_qint( + qint_graph["distance"], cosφ_abd + ) + basis_rad_sph_qint_raw, basis_sph_qint_raw = self.sbf_basis_qint( + main_graph["distance"], + cosφ_cab_q[quad_idx["trip_out_to_quad"]], + angle_cabd, + ) + if self.atom_edge_interaction: + basis_rad_a2ee2a_raw = self.radial_basis_aeaint(a2ee2a_graph["distance"]) + cosφ_cab_a2e = inner_product_clamped( + main_graph["vector"][trip_idx_a2e["out"]], + a2ee2a_graph["vector"][trip_idx_a2e["in"]], + ) + basis_rad_cir_a2e_raw, basis_cir_a2e_raw = self.cbf_basis_aeint( + main_graph["distance"], cosφ_cab_a2e + ) + if self.edge_atom_interaction: + cosφ_cab_e2a = inner_product_clamped( + a2ee2a_graph["vector"][trip_idx_e2a["out"]], + main_graph["vector"][trip_idx_e2a["in"]], + ) + basis_rad_cir_e2a_raw, basis_cir_e2a_raw = self.cbf_basis_eaint( + a2ee2a_graph["distance"], cosφ_cab_e2a + ) + if self.atom_interaction: + basis_rad_a2a_raw = self.radial_basis_aint(a2a_graph["distance"]) + + # Shared Down Projections + bases_qint = {} + if self.quad_interaction: + bases_qint["rad"] = self.mlp_rbf_qint(basis_rad_main_raw) + bases_qint["cir"] = self.mlp_cbf_qint( + rad_basis=basis_rad_cir_qint_raw, + sph_basis=basis_cir_qint_raw, + idx_sph_outer=quad_idx["triplet_in"]["out"], + ) + bases_qint["sph"] = self.mlp_sbf_qint( + rad_basis=basis_rad_sph_qint_raw, + sph_basis=basis_sph_qint_raw, + idx_sph_outer=quad_idx["out"], + idx_sph_inner=quad_idx["out_agg"], + ) + + bases_a2e = {} + if self.atom_edge_interaction: + bases_a2e["rad"] = self.mlp_rbf_aeint(basis_rad_a2ee2a_raw) + bases_a2e["cir"] = self.mlp_cbf_aeint( + rad_basis=basis_rad_cir_a2e_raw, + sph_basis=basis_cir_a2e_raw, + idx_sph_outer=trip_idx_a2e["out"], + idx_sph_inner=trip_idx_a2e["out_agg"], + ) + bases_e2a = {} + if self.edge_atom_interaction: + bases_e2a["rad"] = self.mlp_rbf_eaint(basis_rad_main_raw) + bases_e2a["cir"] = self.mlp_cbf_eaint( + rad_basis=basis_rad_cir_e2a_raw, + sph_basis=basis_cir_e2a_raw, + idx_rad_outer=a2ee2a_graph["edge_index"][1], + idx_rad_inner=a2ee2a_graph["target_neighbor_idx"], + idx_sph_outer=trip_idx_e2a["out"], + idx_sph_inner=trip_idx_e2a["out_agg"], + num_atoms=num_atoms, + ) + if self.atom_interaction: + basis_a2a_rad = self.mlp_rbf_aint( + rad_basis=basis_rad_a2a_raw, + idx_rad_outer=a2a_graph["edge_index"][1], + idx_rad_inner=a2a_graph["target_neighbor_idx"], + num_atoms=num_atoms, + ) + else: + basis_a2a_rad = None + + bases_e2e = {} + bases_e2e["rad"] = self.mlp_rbf_tint(basis_rad_main_raw) + bases_e2e["cir"] = self.mlp_cbf_tint( + rad_basis=basis_rad_cir_e2e_raw, + 
sph_basis=basis_cir_e2e_raw, + idx_sph_outer=trip_idx_e2e["out"], + idx_sph_inner=trip_idx_e2e["out_agg"], + ) + + basis_atom_update = self.mlp_rbf_h(basis_rad_main_raw) + basis_output = self.mlp_rbf_out(basis_rad_main_raw) + + return ( + basis_rad_main_raw, + basis_atom_update, + basis_output, + bases_qint, + bases_e2e, + bases_a2e, + bases_e2a, + basis_a2a_rad, + ) + + def energy_forward(self, data): + pos = data.pos + batch = data.batch + atomic_numbers = data.atomic_numbers.long() + num_atoms = atomic_numbers.shape[0] + + if self.regress_forces and not self.direct_forces: + pos.requires_grad_(True) + + ( + main_graph, + a2a_graph, + a2ee2a_graph, + qint_graph, + id_swap, + trip_idx_e2e, + trip_idx_a2e, + trip_idx_e2a, + quad_idx, + ) = self.get_graphs_and_indices(data) + _, idx_t = main_graph["edge_index"] + + ( + basis_rad_raw, + basis_atom_update, + basis_output, + bases_qint, + bases_e2e, + bases_a2e, + bases_e2a, + basis_a2a_rad, + ) = self.get_bases( + main_graph=main_graph, + a2a_graph=a2a_graph, + a2ee2a_graph=a2ee2a_graph, + qint_graph=qint_graph, + trip_idx_e2e=trip_idx_e2e, + trip_idx_a2e=trip_idx_a2e, + trip_idx_e2a=trip_idx_e2a, + quad_idx=quad_idx, + num_atoms=num_atoms, + ) + + # Embedding block + h = self.atom_emb(atomic_numbers, data.tags if hasattr(data, "tags") else None) + # (nAtoms, emb_size_atom) + m = self.edge_emb(h, basis_rad_raw, main_graph["edge_index"]) + # (nEdges, emb_size_edge) + + x_E, x_F = self.out_blocks[0](h, m, basis_output, idx_t) + # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) + xs_E, xs_F = [x_E], [x_F] + + for i in range(self.num_blocks): + # Interaction block + h, m = self.int_blocks[i]( + h=h, + m=m, + bases_qint=bases_qint, + bases_e2e=bases_e2e, + bases_a2e=bases_a2e, + bases_e2a=bases_e2a, + basis_a2a_rad=basis_a2a_rad, + basis_atom_update=basis_atom_update, + edge_index_main=main_graph["edge_index"], + a2ee2a_graph=a2ee2a_graph, + a2a_graph=a2a_graph, + id_swap=id_swap, + trip_idx_e2e=trip_idx_e2e, + trip_idx_a2e=trip_idx_a2e, + trip_idx_e2a=trip_idx_e2a, + quad_idx=quad_idx, + ) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) + + x_E, x_F = self.out_blocks[i + 1](h, m, basis_output, idx_t) + # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) + xs_E.append(x_E) + xs_F.append(x_F) + + # Global output block for final predictions + x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) + if self.direct_forces: + x_F = self.out_mlp_F(torch.cat(xs_F, dim=-1)) + with torch.cuda.amp.autocast(False): + E_t = self.out_energy(x_E.float()) + if self.direct_forces: + F_st = self.out_forces(x_F.float()) + + nMolecules = torch.max(batch) + 1 + + if self.extensive: + E_t = self.scattering( + E_t, batch, dim=0, dim_size=nMolecules, reduce="add" + ) # (nMolecules, num_targets) + else: + E_t = self.scattering( + E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" + ) # (nMolecules, num_targets) + + return { + "energy": E_t.squeeze(1), # (num_molecules) + "E_t": E_t, + "idx_t": idx_t, + "main_graph": main_graph, + "num_atoms": num_atoms, + "pos": pos, + "F_st": F_st, + } + + @conditional_grad(torch.enable_grad()) + def scattering(self, E_t, batch, dim, dim_size, reduce="add"): + E_t = scatter_det( + src=E_t, index=batch, dim=dim, dim_size=dim_size, reduce=reduce + ) + + return E_t + + @conditional_grad(torch.enable_grad()) + def forces_forward(self, preds): + + idx_t = preds["idx_t"] + main_graph = preds["main_graph"] + num_atoms = preds["num_atoms"] + pos = preds["pos"] + F_st = preds["F_st"] + E_t = preds["E_t"] + + if self.direct_forces: + if 
self.forces_coupled: # enforce F_st = F_ts + nEdges = idx_t.shape[0] + id_undir = repeat_blocks( + main_graph["num_neighbors"] // 2, + repeats=2, + continuous_indexing=True, + ) + F_st = scatter_det( + F_st, + id_undir, + dim=0, + dim_size=int(nEdges / 2), + reduce="mean", + ) # (nEdges/2, num_targets) + F_st = F_st[id_undir] # (nEdges, num_targets) + + # map forces in edge directions + F_st_vec = F_st[:, :, None] * main_graph["vector"][:, None, :] + # (nEdges, num_targets, 3) + F_t = scatter_det( + F_st_vec, + idx_t, + dim=0, + dim_size=num_atoms, + reduce="add", + ) # (nAtoms, num_targets, 3) + else: + F_t = self.force_scaler.calc_forces_and_update(E_t, pos) + + F_t = F_t.squeeze(1) # (num_atoms, 3) + return F_t + + @property + def num_params(self): + return sum(p.numel() for p in self.parameters()) diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 6bfb0df41e..233a4c0dd5 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -165,7 +165,7 @@ def __init__(self, **kwargs): # Here's the models whose graphs are disconnected in the dataset self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc"] - self.heterogeneous_models = ["afaenet", "aschnet"] + self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc"] self.data_mode = "normal" self.separate_dataset = False From 936e5fe2e643813c42053b86d919be5a096d63c0 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 12 Sep 2023 01:42:12 -0400 Subject: [PATCH 098/131] was implemented agemnet, need to stop for today and run tests --- configs/exps/alvaro/gemnet-config.yaml | 10 +- configs/models/agemnet_oc.yaml | 102 ++ ocpmodels/common/utils.py | 1 + ocpmodels/models/gemnet_oc/agemnet_oc.py | 1411 ++------------------ ocpmodels/models/gemnet_oc/depgemnet_oc.py | 2 - ocpmodels/models/gemnet_oc/gemnet_oc.py | 179 ++- ocpmodels/models/gemnet_oc/indgemnet_oc.py | 11 +- 7 files changed, 316 insertions(+), 1400 deletions(-) create mode 100644 configs/models/agemnet_oc.yaml diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index beb18a48d4..d9524c34bb 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -1,9 +1,9 @@ job: - mem: 32GB + mem: 40GB cpus: 4 gres: gpu:rtx8000:1 partition: long - time: 15:00:00 + time: 18:00:00 default: wandb_name: alvaro-carbonero-math @@ -23,13 +23,13 @@ default: max_epochs: 30 runs: - #- config: gemnet_oc-is2re-all + - config: gemnet_oc-is2re-all - #- config: depgemnet_oc-is2re-all + - config: depgemnet_oc-is2re-all #- config: indgemnet_oc-is2re-all - config: gemnet_oc-is2re-all is_disconnected: True - - config: agemnet_oc-is2re-all + #- config: agemnet_oc-is2re-all diff --git a/configs/models/agemnet_oc.yaml b/configs/models/agemnet_oc.yaml new file mode 100644 index 0000000000..5374c5da68 --- /dev/null +++ b/configs/models/agemnet_oc.yaml @@ -0,0 +1,102 @@ +default: + model: + name: agemnet_oc + num_spherical: 7 + num_radial: 128 + num_blocks: 4 + emb_size_atom: 256 + emb_size_edge: 512 + emb_size_trip_in: 64 + emb_size_trip_out: 64 + emb_size_quad_in: 32 + emb_size_quad_out: 32 + emb_size_aint_in: 64 + emb_size_aint_out: 64 + emb_size_rbf: 16 + emb_size_cbf: 16 + emb_size_sbf: 32 + num_before_skip: 2 + num_after_skip: 2 + num_concat: 1 + num_atom: 3 + num_output_afteratom: 3 + cutoff: 12.0 + cutoff_qint: 12.0 + cutoff_aeaint: 12.0 + cutoff_aint: 12.0 + max_neighbors: 30 + max_neighbors_qint: 8 + max_neighbors_aeaint: 20 + max_neighbors_aint: 1000 + 
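
As a quick sanity check (a hypothetical snippet, not part of this config): these values respect the hierarchy asserted in set_cutoffs / set_max_neighbors, and, assuming the wrapper still goes through GemNetOC's generate_graph_dict from the previous patch, a 12 Angstrom cutoff always takes the on-the-fly graph path.

    cutoff = cutoff_qint = cutoff_aeaint = cutoff_aint = 12.0
    max_nb, max_qint, max_aeaint, max_aint = 30, 8, 20, 1000

    # hierarchy enforced at model init: nothing may exceed the atom-to-atom graph
    assert max(cutoff, cutoff_qint, cutoff_aeaint) <= cutoff_aint
    assert max(max_nb, max_qint, max_aeaint) <= max_aint

    # generate_graph_dict rebuilds the graph when cutoff > 6 or max_neighbors > 50
    otf_graph = cutoff > 6 or max_nb > 50
    assert otf_graph
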
rbf: + name: gaussian + envelope: + name: polynomial + exponent: 5 + cbf: + name: spherical_harmonics + sbf: + name: legendre_outer + extensive: True + output_init: HeOrthogonal + activation: silu + scale_file: configs/models/scaling_factors/gemnet-oc.pt + + regress_forces: True + direct_forces: True + forces_coupled: False + + quad_interaction: True + atom_edge_interaction: True + edge_atom_interaction: True + atom_interaction: True + + num_atom_emb_layers: 2 + num_global_out_layers: 2 + qint_tags: [1, 2] + + # PhAST + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + + optim: + batch_size: 16 + eval_batch_size: 16 + load_balancing: atoms + eval_every: 5000 + num_workers: 2 + lr_initial: 5.e-4 + optimizer: AdamW + optimizer_params: {"amsgrad": True} + scheduler: ReduceLROnPlateau + mode: min + factor: 0.8 + patience: 3 + max_epochs: 80 + force_coefficient: 100 + energy_coefficient: 1 + ema_decay: 0.999 + clip_grad_norm: 10 + loss_energy: mae + loss_force: l2mae + weight_decay: 0 + +is2re: + default: + model: + regress_forces: False + num_targets: 1 + 10k: {} + all: {} + +s2ef: + default: + model: + num_targets: 1 + 200k: {} + 2M: {} + 20M: {} + all: {} diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 0f29423c8f..3bee0cdc1f 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -761,6 +761,7 @@ def setup_imports(): importlib.import_module("ocpmodels.models.gemnet_oc.gemnet_oc") importlib.import_module("ocpmodels.models.gemnet_oc.depgemnet_oc") importlib.import_module("ocpmodels.models.gemnet_oc.indgemnet_oc") + importlib.import_module("ocpmodels.models.gemnet_oc.agemnet_oc") experimental_folder = os.path.join(root_folder, "../experimental/") if os.path.exists(experimental_folder): diff --git a/ocpmodels/models/gemnet_oc/agemnet_oc.py b/ocpmodels/models/gemnet_oc/agemnet_oc.py index 6c25e993fc..fa5c2c474f 100644 --- a/ocpmodels/models/gemnet_oc/agemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/agemnet_oc.py @@ -1,1369 +1,110 @@ -""" -Copyright (c) Facebook, Inc. and its affiliates. -This source code is licensed under the MIT license found in the -LICENSE file in the root directory of this source tree. 
-""" +import torch, math +from torch import nn +from torch.nn import Linear +from torch_geometric.data import Data, Batch -import logging -from typing import Optional - -import numpy as np -import torch -from torch_scatter import segment_coo - -from ocpmodels.common.registry import registry -from ocpmodels.common.utils import ( - conditional_grad, - get_max_neighbors_mask, - scatter_det, -) +from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC from ocpmodels.models.base_model import BaseModel -from ocpmodels.common.scaling.compat import load_scales_compat - -from .initializers import get_initializer -from .interaction_indices import ( - get_quadruplets, - get_triplets, -) -from .layers.atom_update_block import OutputBlock -from .layers.base_layers import Dense, ResidualLayer -from .layers.efficient import BasisEmbedding -from .layers.embedding_block import AtomEmbedding, EdgeEmbedding -from .layers.force_scaler import ForceScaler -from .layers.interaction_block import InteractionBlock -from .layers.radial_basis import RadialBasis -from .layers.spherical_basis import CircularBasisLayer, SphericalBasisLayer -from .utils import ( - get_angle, - get_edge_id, - get_inner_idx, - inner_product_clamped, - mask_neighbors, - repeat_blocks, -) -from .interaction_indices import get_mixed_triplets - - -@registry.register_model("gemnet_oc") -class GemNetOC(BaseModel): - """ - Arguments - --------- - num_atoms (int): Unused argument - bond_feat_dim (int): Unused argument - num_targets: int - Number of prediction targets. - - num_spherical: int - Controls maximum frequency. - num_radial: int - Controls maximum frequency. - num_blocks: int - Number of building blocks to be stacked. - - emb_size_atom: int - Embedding size of the atoms. - emb_size_edge: int - Embedding size of the edges. - emb_size_trip_in: int - (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - emb_size_trip_out: int - (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - emb_size_quad_in: int - (Down-projected) embedding size of the quadruplet edge embeddings - before the bilinear layer. - emb_size_quad_out: int - (Down-projected) embedding size of the quadruplet edge embeddings - after the bilinear layer. - emb_size_aint_in: int - Embedding size in the atom interaction before the bilinear layer. - emb_size_aint_out: int - Embedding size in the atom interaction after the bilinear layer. - emb_size_rbf: int - Embedding size of the radial basis transformation. - emb_size_cbf: int - Embedding size of the circular basis transformation (one angle). - emb_size_sbf: int - Embedding size of the spherical basis transformation (two angles). - - num_before_skip: int - Number of residual blocks before the first skip connection. - num_after_skip: int - Number of residual blocks after the first skip connection. - num_concat: int - Number of residual blocks after the concatenation. - num_atom: int - Number of residual blocks in the atom embedding blocks. - num_output_afteratom: int - Number of residual blocks in the output blocks - after adding the atom embedding. - num_atom_emb_layers: int - Number of residual blocks for transforming atom embeddings. - num_global_out_layers: int - Number of final residual blocks before the output. - - regress_forces: bool - Whether to predict forces. Default: True - direct_forces: bool - If True predict forces based on aggregation of interatomic directions. 
- If False predict forces based on negative gradient of energy potential. - use_pbc: bool - Whether to use periodic boundary conditions. - scale_backprop_forces: bool - Whether to scale up the energy and then scales down the forces - to prevent NaNs and infs in backpropagated forces. - - cutoff: float - Embedding cutoff for interatomic connections and embeddings in Angstrom. - cutoff_qint: float - Quadruplet interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - cutoff_aeaint: float - Edge-to-atom and atom-to-edge interaction cutoff in Angstrom. - Optional. Uses cutoff per default. - cutoff_aint: float - Atom-to-atom interaction cutoff in Angstrom. - Optional. Uses maximum of all other cutoffs per default. - max_neighbors: int - Maximum number of neighbors for interatomic connections and embeddings. - max_neighbors_qint: int - Maximum number of quadruplet interactions per embedding. - Optional. Uses max_neighbors per default. - max_neighbors_aeaint: int - Maximum number of edge-to-atom and atom-to-edge interactions per embedding. - Optional. Uses max_neighbors per default. - max_neighbors_aint: int - Maximum number of atom-to-atom interactions per atom. - Optional. Uses maximum of all other neighbors per default. - - rbf: dict - Name and hyperparameters of the radial basis function. - rbf_spherical: dict - Name and hyperparameters of the radial basis function used as part of the - circular and spherical bases. - Optional. Uses rbf per default. - envelope: dict - Name and hyperparameters of the envelope function. - cbf: dict - Name and hyperparameters of the circular basis function. - sbf: dict - Name and hyperparameters of the spherical basis function. - extensive: bool - Whether the output should be extensive (proportional to the number of atoms) - forces_coupled: bool - If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False. - output_init: str - Initialization method for the final dense layer. - activation: str - Name of the activation function. - scale_file: str - Path to the pytorch file containing the scaling factors. - - quad_interaction: bool - Whether to use quadruplet interactions (with dihedral angles) - atom_edge_interaction: bool - Whether to use atom-to-edge interactions - edge_atom_interaction: bool - Whether to use edge-to-atom interactions - atom_interaction: bool - Whether to use atom-to-atom interactions +from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish - scale_basis: bool - Whether to use a scaling layer in the raw basis function for better - numerical stability. - qint_tags: list - Which atom tags to use quadruplet interactions for. - 0=sub-surface bulk, 1=surface, 2=adsorbate atoms. 
- """ +from torch_geometric.data import Batch - def __init__( - self, - num_atoms: Optional[int], - bond_feat_dim: int, - num_targets: int, - num_spherical: int, - num_radial: int, - num_blocks: int, - emb_size_atom: int, - emb_size_edge: int, - emb_size_trip_in: int, - emb_size_trip_out: int, - emb_size_quad_in: int, - emb_size_quad_out: int, - emb_size_aint_in: int, - emb_size_aint_out: int, - emb_size_rbf: int, - emb_size_cbf: int, - emb_size_sbf: int, - num_before_skip: int, - num_after_skip: int, - num_concat: int, - num_atom: int, - num_output_afteratom: int, - num_atom_emb_layers: int = 0, - num_global_out_layers: int = 2, - regress_forces: bool = True, - direct_forces: bool = False, - use_pbc: bool = True, - scale_backprop_forces: bool = False, - cutoff: float = 6.0, - cutoff_qint: Optional[float] = None, - cutoff_aeaint: Optional[float] = None, - cutoff_aint: Optional[float] = None, - max_neighbors: int = 50, - max_neighbors_qint: Optional[int] = None, - max_neighbors_aeaint: Optional[int] = None, - max_neighbors_aint: Optional[int] = None, - rbf: dict = {"name": "gaussian"}, - rbf_spherical: Optional[dict] = None, - envelope: dict = {"name": "polynomial", "exponent": 5}, - cbf: dict = {"name": "spherical_harmonics"}, - sbf: dict = {"name": "spherical_harmonics"}, - extensive: bool = True, - forces_coupled: bool = False, - output_init: str = "HeOrthogonal", - activation: str = "silu", - quad_interaction: bool = False, - atom_edge_interaction: bool = False, - edge_atom_interaction: bool = False, - atom_interaction: bool = False, - scale_basis: bool = False, - qint_tags: list = [0, 1, 2], - num_elements: int = 83, - otf_graph: bool = False, - scale_file: Optional[str] = None, - tag_hidden_channels: int = 0, - pg_hidden_channels: int = 0, - phys_embeds: bool = False, - phys_hidden_channels: int = 0, - **kwargs, # backwards compatibility with deprecated arguments - ): +@registry.register_model("agemnet_oc") +class aGemNetOC(BaseModel): # Change to make it inherit from base model. 
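
The wrapper instantiates two independent GemNetOC branches, one for the adsorbate graph and one for the catalyst graph, and asks each for emb_size_atom // 2 targets rather than a single energy; how the two half-size outputs are recombined is not visible in this hunk. A minimal sketch of that two-branch pattern, with dummy branches and an assumed linear readout standing in for whatever the real model does:

    import torch
    from torch import nn

    class TwoBranchSketch(nn.Module):
        def __init__(self, make_branch, emb_size_atom):
            super().__init__()
            half = emb_size_atom // 2                   # mirrors kwargs["num_targets"] = emb_size_atom // 2 below
            self.ads_model = make_branch(half)          # branch for the adsorbate graph
            self.cat_model = make_branch(half)          # branch for the catalyst graph
            self.readout = nn.Linear(emb_size_atom, 1)  # assumed combiner, not from the patch

        def forward(self, ads_batch, cat_batch):
            h_ads = self.ads_model(ads_batch)           # (num_systems, emb_size_atom // 2)
            h_cat = self.cat_model(cat_batch)           # (num_systems, emb_size_atom // 2)
            return self.readout(torch.cat([h_ads, h_cat], dim=-1)).squeeze(-1)

    # toy usage with constant dummy branches standing in for GemNetOC
    class DummyBranch(nn.Module):
        def __init__(self, num_targets):
            super().__init__()
            self.out = nn.Parameter(torch.zeros(4, num_targets))
        def forward(self, batch):
            return self.out

    model = TwoBranchSketch(DummyBranch, emb_size_atom=256)
    print(model(None, None).shape)                      # torch.Size([4])

Registering "agemnet_oc" in self.heterogeneous_models in the base_trainer.py hunk of the previous patch is presumably what routes the split adsorbate/catalyst batches to the two branches.
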
+ def __init__(self, **kwargs): super().__init__() - if len(kwargs) > 0: - logging.warning(f"Unrecognized arguments: {list(kwargs.keys())}") - self.num_targets = num_targets - assert num_blocks > 0 - self.num_blocks = num_blocks - self.extensive = extensive - - self.atom_edge_interaction = atom_edge_interaction - self.edge_atom_interaction = edge_atom_interaction - self.atom_interaction = atom_interaction - self.quad_interaction = quad_interaction - self.qint_tags = torch.tensor(qint_tags) - self.otf_graph = otf_graph - if not rbf_spherical: - rbf_spherical = rbf - - self.set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint) - self.set_max_neighbors( - max_neighbors, - max_neighbors_qint, - max_neighbors_aeaint, - max_neighbors_aint, - ) - self.use_pbc = use_pbc - - self.direct_forces = direct_forces - self.forces_coupled = forces_coupled - self.regress_forces = regress_forces - self.force_scaler = ForceScaler(enabled=scale_backprop_forces) - - self.init_basis_functions( - num_radial, - num_spherical, - rbf, - rbf_spherical, - envelope, - cbf, - sbf, - scale_basis, - ) - self.init_shared_basis_layers( - num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf - ) - - # Embedding blocks - self.atom_emb = AtomEmbedding( - emb_size_atom, - num_elements, - tag_hidden_channels=tag_hidden_channels, - pg_hidden_channels=pg_hidden_channels, - phys_hidden_channels=phys_hidden_channels, - phys_embeds=phys_embeds, - ) - self.edge_emb = EdgeEmbedding( - emb_size_atom, num_radial, emb_size_edge, activation=activation - ) - - # Interaction Blocks - int_blocks = [] - for _ in range(num_blocks): - int_blocks.append( - InteractionBlock( - emb_size_atom=emb_size_atom, - emb_size_edge=emb_size_edge, - emb_size_trip_in=emb_size_trip_in, - emb_size_trip_out=emb_size_trip_out, - emb_size_quad_in=emb_size_quad_in, - emb_size_quad_out=emb_size_quad_out, - emb_size_a2a_in=emb_size_aint_in, - emb_size_a2a_out=emb_size_aint_out, - emb_size_rbf=emb_size_rbf, - emb_size_cbf=emb_size_cbf, - emb_size_sbf=emb_size_sbf, - num_before_skip=num_before_skip, - num_after_skip=num_after_skip, - num_concat=num_concat, - num_atom=num_atom, - num_atom_emb_layers=num_atom_emb_layers, - quad_interaction=quad_interaction, - atom_edge_interaction=atom_edge_interaction, - edge_atom_interaction=edge_atom_interaction, - atom_interaction=atom_interaction, - activation=activation, - ) - ) - self.int_blocks = torch.nn.ModuleList(int_blocks) - - out_blocks = [] - for _ in range(num_blocks + 1): - out_blocks.append( - OutputBlock( - emb_size_atom=emb_size_atom, - emb_size_edge=emb_size_edge, - emb_size_rbf=emb_size_rbf, - nHidden=num_atom, - nHidden_afteratom=num_output_afteratom, - activation=activation, - direct_forces=direct_forces, - ) - ) - self.out_blocks = torch.nn.ModuleList(out_blocks) - - out_mlp_E = [ - Dense( - emb_size_atom * (num_blocks + 1), - emb_size_atom, - activation=activation, - ) - ] - out_mlp_E += [ - ResidualLayer( - emb_size_atom, - activation=activation, - ) - for _ in range(num_global_out_layers) - ] - self.out_mlp_E = torch.nn.Sequential(*out_mlp_E) - - self.out_energy = Dense(emb_size_atom, num_targets, bias=False, activation=None) - if direct_forces: - out_mlp_F = [ - Dense( - emb_size_edge * (num_blocks + 1), - emb_size_edge, - activation=activation, - ) - ] - out_mlp_F += [ - ResidualLayer( - emb_size_edge, - activation=activation, - ) - for _ in range(num_global_out_layers) - ] - self.out_mlp_F = torch.nn.Sequential(*out_mlp_F) - self.out_forces = Dense( - emb_size_edge, num_targets, 
bias=False, activation=None - ) - - out_initializer = get_initializer(output_init) - self.out_energy.reset_parameters(out_initializer) - if direct_forces: - self.out_forces.reset_parameters(out_initializer) - - load_scales_compat(self, scale_file) - - def set_cutoffs(self, cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint): - self.cutoff = cutoff - - if ( - not (self.atom_edge_interaction or self.edge_atom_interaction) - or cutoff_aeaint is None - ): - self.cutoff_aeaint = self.cutoff - else: - self.cutoff_aeaint = cutoff_aeaint - if not self.quad_interaction or cutoff_qint is None: - self.cutoff_qint = self.cutoff - else: - self.cutoff_qint = cutoff_qint - if not self.atom_interaction or cutoff_aint is None: - self.cutoff_aint = max( - self.cutoff, - self.cutoff_aeaint, - self.cutoff_qint, - ) - else: - self.cutoff_aint = cutoff_aint - - assert self.cutoff <= self.cutoff_aint - assert self.cutoff_aeaint <= self.cutoff_aint - assert self.cutoff_qint <= self.cutoff_aint - - def set_max_neighbors( - self, - max_neighbors, - max_neighbors_qint, - max_neighbors_aeaint, - max_neighbors_aint, - ): - self.max_neighbors = max_neighbors - - if ( - not (self.atom_edge_interaction or self.edge_atom_interaction) - or max_neighbors_aeaint is None - ): - self.max_neighbors_aeaint = self.max_neighbors - else: - self.max_neighbors_aeaint = max_neighbors_aeaint - if not self.quad_interaction or max_neighbors_qint is None: - self.max_neighbors_qint = self.max_neighbors - else: - self.max_neighbors_qint = max_neighbors_qint - if not self.atom_interaction or max_neighbors_aint is None: - self.max_neighbors_aint = max( - self.max_neighbors, - self.max_neighbors_aeaint, - self.max_neighbors_qint, - ) - else: - self.max_neighbors_aint = max_neighbors_aint - - assert self.max_neighbors <= self.max_neighbors_aint - assert self.max_neighbors_aeaint <= self.max_neighbors_aint - assert self.max_neighbors_qint <= self.max_neighbors_aint - - def init_basis_functions( - self, - num_radial, - num_spherical, - rbf, - rbf_spherical, - envelope, - cbf, - sbf, - scale_basis, - ): - self.radial_basis = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff, - rbf=rbf, - envelope=envelope, - scale_basis=scale_basis, - ) - radial_basis_spherical = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff, - rbf=rbf_spherical, - envelope=envelope, - scale_basis=scale_basis, - ) - if self.quad_interaction: - radial_basis_spherical_qint = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff_qint, - rbf=rbf_spherical, - envelope=envelope, - scale_basis=scale_basis, - ) - self.cbf_basis_qint = CircularBasisLayer( - num_spherical, - radial_basis=radial_basis_spherical_qint, - cbf=cbf, - scale_basis=scale_basis, - ) - - self.sbf_basis_qint = SphericalBasisLayer( - num_spherical, - radial_basis=radial_basis_spherical, - sbf=sbf, - scale_basis=scale_basis, - ) - if self.atom_edge_interaction: - self.radial_basis_aeaint = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff_aeaint, - rbf=rbf, - envelope=envelope, - scale_basis=scale_basis, - ) - self.cbf_basis_aeint = CircularBasisLayer( - num_spherical, - radial_basis=radial_basis_spherical, - cbf=cbf, - scale_basis=scale_basis, - ) - if self.edge_atom_interaction: - self.radial_basis_aeaint = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff_aeaint, - rbf=rbf, - envelope=envelope, - scale_basis=scale_basis, - ) - radial_basis_spherical_aeaint = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff_aeaint, - rbf=rbf_spherical, - envelope=envelope, - 
scale_basis=scale_basis, - ) - self.cbf_basis_eaint = CircularBasisLayer( - num_spherical, - radial_basis=radial_basis_spherical_aeaint, - cbf=cbf, - scale_basis=scale_basis, - ) - if self.atom_interaction: - self.radial_basis_aint = RadialBasis( - num_radial=num_radial, - cutoff=self.cutoff_aint, - rbf=rbf, - envelope=envelope, - scale_basis=scale_basis, - ) - - self.cbf_basis_tint = CircularBasisLayer( - num_spherical, - radial_basis=radial_basis_spherical, - cbf=cbf, - scale_basis=scale_basis, - ) - - def init_shared_basis_layers( - self, - num_radial, - num_spherical, - emb_size_rbf, - emb_size_cbf, - emb_size_sbf, - ): - # Share basis down projections across all interaction blocks - if self.quad_interaction: - self.mlp_rbf_qint = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - self.mlp_cbf_qint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) - self.mlp_sbf_qint = BasisEmbedding( - num_radial, emb_size_sbf, num_spherical**2 - ) - - if self.atom_edge_interaction: - self.mlp_rbf_aeint = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - self.mlp_cbf_aeint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) - if self.edge_atom_interaction: - self.mlp_rbf_eaint = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - self.mlp_cbf_eaint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) - if self.atom_interaction: - self.mlp_rbf_aint = BasisEmbedding(num_radial, emb_size_rbf) - - self.mlp_rbf_tint = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - self.mlp_cbf_tint = BasisEmbedding(num_radial, emb_size_cbf, num_spherical) - - # Share the dense Layer of the atom embedding block across - # the interaction blocks - self.mlp_rbf_h = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - self.mlp_rbf_out = Dense( - num_radial, - emb_size_rbf, - activation=None, - bias=False, - ) - - # Set shared parameters for better gradients - self.shared_parameters = [ - (self.mlp_rbf_tint.linear.weight, self.num_blocks), - (self.mlp_cbf_tint.weight, self.num_blocks), - (self.mlp_rbf_h.linear.weight, self.num_blocks), - (self.mlp_rbf_out.linear.weight, self.num_blocks + 1), - ] - if self.quad_interaction: - self.shared_parameters += [ - (self.mlp_rbf_qint.linear.weight, self.num_blocks), - (self.mlp_cbf_qint.weight, self.num_blocks), - (self.mlp_sbf_qint.weight, self.num_blocks), - ] - if self.atom_edge_interaction: - self.shared_parameters += [ - (self.mlp_rbf_aeint.linear.weight, self.num_blocks), - (self.mlp_cbf_aeint.weight, self.num_blocks), - ] - if self.edge_atom_interaction: - self.shared_parameters += [ - (self.mlp_rbf_eaint.linear.weight, self.num_blocks), - (self.mlp_cbf_eaint.weight, self.num_blocks), - ] - if self.atom_interaction: - self.shared_parameters += [ - (self.mlp_rbf_aint.weight, self.num_blocks), - ] - - def calculate_quad_angles( - self, - V_st, - V_qint_st, - quad_idx, - ): - """Calculate angles for quadruplet-based message passing. - - Arguments - --------- - V_st: Tensor, shape = (nAtoms, 3) - Normalized directions from s to t - V_qint_st: Tensor, shape = (nAtoms, 3) - Normalized directions from s to t for the quadruplet - interaction graph - quad_idx: dict of torch.Tensor - Indices relevant for quadruplet interactions. - - Returns - ------- - cosφ_cab: Tensor, shape = (num_triplets_inint,) - Cosine of angle between atoms c -> a <- b. - cosφ_abd: Tensor, shape = (num_triplets_qint,) - Cosine of angle between atoms a -> b -> d. 
- angle_cabd: Tensor, shape = (num_quadruplets,) - Dihedral angle between atoms c <- a-b -> d. - """ - # ---------------------------------- d -> b -> a ----------------------------- # - V_ba = V_qint_st[quad_idx["triplet_in"]["out"]] - # (num_triplets_qint, 3) - V_db = V_st[quad_idx["triplet_in"]["in"]] - # (num_triplets_qint, 3) - cosφ_abd = inner_product_clamped(V_ba, V_db) - # (num_triplets_qint,) - - # Project for calculating dihedral angle - # Cross product is the same as projection, just 90° rotated - V_db_cross = torch.cross(V_db, V_ba, dim=-1) # a - b -| d - V_db_cross = V_db_cross[quad_idx["trip_in_to_quad"]] - # (num_quadruplets,) - - # ----------------------------- c -> a <- b ---------------------------------- # - V_ca = V_st[quad_idx["triplet_out"]["out"]] # (num_triplets_in, 3) - V_ba = V_qint_st[quad_idx["triplet_out"]["in"]] # (num_triplets_in, 3) - cosφ_cab = inner_product_clamped(V_ca, V_ba) # (n4Triplets,) - - # Project for calculating dihedral angle - # Cross product is the same as projection, just 90° rotated - V_ca_cross = torch.cross(V_ca, V_ba, dim=-1) # c |- a - b - V_ca_cross = V_ca_cross[quad_idx["trip_out_to_quad"]] - # (num_quadruplets,) - - # --------------------------- c -> a - b <- d -------------------------------- # - half_angle_cabd = get_angle(V_ca_cross, V_db_cross) - # (num_quadruplets,) - angle_cabd = half_angle_cabd - # Ignore parity and just use the half angle. - - return cosφ_cab, cosφ_abd, angle_cabd - def select_symmetric_edges(self, tensor, mask, reorder_idx, opposite_neg): - """Use a mask to remove values of removed edges and then - duplicate the values for the correct edge direction. + self.regress_forces = kwargs["regress_forces"] + self.direct_forces = kwargs["direct_forces"] - Arguments - --------- - tensor: torch.Tensor - Values to symmetrize for the new tensor. - mask: torch.Tensor - Mask defining which edges go in the correct direction. - reorder_idx: torch.Tensor - Indices defining how to reorder the tensor values after - concatenating the edge values of both directions. - opposite_neg: bool - Whether the edge in the opposite direction should use the - negative tensor value. + self.regress_forces = kwargs["regress_forces"] - Returns - ------- - tensor_ordered: torch.Tensor - A tensor with symmetrized values. - """ - # Mask out counter-edges - tensor_directed = tensor[mask] - # Concatenate counter-edges after normal edges - sign = 1 - 2 * opposite_neg - tensor_cat = torch.cat([tensor_directed, sign * tensor_directed]) - # Reorder everything so the edges of every image are consecutive - tensor_ordered = tensor_cat[reorder_idx] - return tensor_ordered + kwargs["num_targets"] = kwargs["emb_size_atom"] // 2 - def symmetrize_edges( - self, - graph, - batch_idx, - ): - """ - Symmetrize edges to ensure existence of counter-directional edges. + self.ads_model = GemNetOC(**kwargs) + self.cat_model = GemNetOC(**kwargs) - Some edges are only present in one direction in the data, - since every atom has a maximum number of neighbors. - We only use i->j edges here. So we lose some j->i edges - and add others by making it symmetric. 
- """ - num_atoms = batch_idx.shape[0] - new_graph = {} - - # Generate mask - mask_sep_atoms = graph["edge_index"][0] < graph["edge_index"][1] - # Distinguish edges between the same (periodic) atom by ordering the cells - cell_earlier = ( - (graph["cell_offset"][:, 0] < 0) - | ((graph["cell_offset"][:, 0] == 0) & (graph["cell_offset"][:, 1] < 0)) - | ( - (graph["cell_offset"][:, 0] == 0) - & (graph["cell_offset"][:, 1] == 0) - & (graph["cell_offset"][:, 2] < 0) - ) - ) - mask_same_atoms = graph["edge_index"][0] == graph["edge_index"][1] - mask_same_atoms &= cell_earlier - mask = mask_sep_atoms | mask_same_atoms - - # Mask out counter-edges - edge_index_directed = graph["edge_index"][mask[None, :].expand(2, -1)].view( - 2, -1 - ) - - # Concatenate counter-edges after normal edges - edge_index_cat = torch.cat( - [edge_index_directed, edge_index_directed.flip(0)], - dim=1, - ) - - # Count remaining edges per image - batch_edge = torch.repeat_interleave( - torch.arange( - graph["num_neighbors"].size(0), - device=graph["edge_index"].device, - ), - graph["num_neighbors"], - ) - batch_edge = batch_edge[mask] - # segment_coo assumes sorted batch_edge - # Factor 2 since this is only one half of the edges - ones = batch_edge.new_ones(1).expand_as(batch_edge) - new_graph["num_neighbors"] = 2 * segment_coo( - ones, batch_edge, dim_size=graph["num_neighbors"].size(0) - ) - - # Create indexing array - edge_reorder_idx = repeat_blocks( - torch.div(new_graph["num_neighbors"], 2, rounding_mode="floor"), - repeats=2, - continuous_indexing=True, - repeat_inc=edge_index_directed.size(1), - ) - - # Reorder everything so the edges of every image are consecutive - new_graph["edge_index"] = edge_index_cat[:, edge_reorder_idx] - new_graph["cell_offset"] = self.select_symmetric_edges( - graph["cell_offset"], mask, edge_reorder_idx, True - ) - new_graph["distance"] = self.select_symmetric_edges( - graph["distance"], mask, edge_reorder_idx, False - ) - new_graph["vector"] = self.select_symmetric_edges( - graph["vector"], mask, edge_reorder_idx, True - ) - - # Indices for swapping c->a and a->c (for symmetric MP) - # To obtain these efficiently and without any index assumptions, - # we get order the counter-edge IDs and then - # map this order back to the edge IDs. - # Double argsort gives the desired mapping - # from the ordered tensor to the original tensor. 
- edge_ids = get_edge_id( - new_graph["edge_index"], new_graph["cell_offset"], num_atoms - ) - order_edge_ids = torch.argsort(edge_ids) - inv_order_edge_ids = torch.argsort(order_edge_ids) - edge_ids_counter = get_edge_id( - new_graph["edge_index"].flip(0), - -new_graph["cell_offset"], - num_atoms, - ) - order_edge_ids_counter = torch.argsort(edge_ids_counter) - id_swap = order_edge_ids_counter[inv_order_edge_ids] - - return new_graph, id_swap - - def subselect_edges( - self, - data, - graph, - cutoff=None, - max_neighbors=None, - ): - """Subselect edges using a stricter cutoff and max_neighbors.""" - subgraph = graph.copy() - - if cutoff is not None: - edge_mask = subgraph["distance"] <= cutoff - - subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask] - subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask] - subgraph["num_neighbors"] = mask_neighbors( - subgraph["num_neighbors"], edge_mask - ) - subgraph["distance"] = subgraph["distance"][edge_mask] - subgraph["vector"] = subgraph["vector"][edge_mask] - - if max_neighbors is not None: - edge_mask, subgraph["num_neighbors"] = get_max_neighbors_mask( - natoms=data.natoms, - index=subgraph["edge_index"][1], - atom_distance=subgraph["distance"], - max_num_neighbors_threshold=max_neighbors, - ) - if not torch.all(edge_mask): - subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask] - subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask] - subgraph["distance"] = subgraph["distance"][edge_mask] - subgraph["vector"] = subgraph["vector"][edge_mask] - - empty_image = subgraph["num_neighbors"] == 0 - if torch.any(empty_image): - raise ValueError( - f"An image has no neighbors: id={data.id[empty_image]}, " - f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}" - ) - return subgraph - - def generate_graph_dict(self, data, cutoff, max_neighbors): - """Generate a radius/nearest neighbor graph.""" - otf_graph = cutoff > 6 or max_neighbors > 50 or self.otf_graph - - ( - edge_index, - edge_dist, - distance_vec, - cell_offsets, - _, # cell offset distances - num_neighbors, - ) = self.generate_graph( - data, - cutoff=cutoff, - max_neighbors=max_neighbors, - otf_graph=otf_graph, - ) - # These vectors actually point in the opposite direction. - # But we want to use col as idx_t for efficient aggregation. - edge_vector = -distance_vec / edge_dist[:, None] - cell_offsets = -cell_offsets # a - c + offset - - graph = { - "edge_index": edge_index, - "distance": edge_dist, - "vector": edge_vector, - "cell_offset": cell_offsets, - "num_neighbors": num_neighbors, - } - - # Mask interaction edges if required - if otf_graph or np.isclose(cutoff, 6): - select_cutoff = None - else: - select_cutoff = cutoff - if otf_graph or max_neighbors == 50: - select_neighbors = None - else: - select_neighbors = max_neighbors - graph = self.subselect_edges( - data=data, - graph=graph, - cutoff=select_cutoff, - max_neighbors=select_neighbors, - ) - - return graph - - def subselect_graph( - self, - data, - graph, - cutoff, - max_neighbors, - cutoff_orig, - max_neighbors_orig, - ): - """If the new cutoff and max_neighbors is different from the original, - subselect the edges of a given graph. 
- """ - # Check if embedding edges are different from interaction edges - if np.isclose(cutoff, cutoff_orig): - select_cutoff = None - else: - select_cutoff = cutoff - if max_neighbors == max_neighbors_orig: - select_neighbors = None - else: - select_neighbors = max_neighbors - - return self.subselect_edges( - data=data, - graph=graph, - cutoff=select_cutoff, - max_neighbors=select_neighbors, + self.act = swish + self.combination = nn.Sequential( + Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), + self.act, + Linear(kwargs["emb_size_atom"] // 2, 1) ) - def get_graphs_and_indices(self, data): - """ "Generate embedding and interaction graphs and indices.""" - num_atoms = data.atomic_numbers.size(0) - - # Atom interaction graph is always the largest - if ( - self.atom_edge_interaction - or self.edge_atom_interaction - or self.atom_interaction - ): - a2a_graph = self.generate_graph_dict( - data, self.cutoff_aint, self.max_neighbors_aint - ) - main_graph = self.subselect_graph( - data, - a2a_graph, - self.cutoff, - self.max_neighbors, - self.cutoff_aint, - self.max_neighbors_aint, - ) - a2ee2a_graph = self.subselect_graph( - data, - a2a_graph, - self.cutoff_aeaint, - self.max_neighbors_aeaint, - self.cutoff_aint, - self.max_neighbors_aint, - ) - else: - main_graph = self.generate_graph_dict(data, self.cutoff, self.max_neighbors) - a2a_graph = {} - a2ee2a_graph = {} - if self.quad_interaction: - if ( - self.atom_edge_interaction - or self.edge_atom_interaction - or self.atom_interaction - ): - qint_graph = self.subselect_graph( - data, - a2a_graph, - self.cutoff_qint, - self.max_neighbors_qint, - self.cutoff_aint, - self.max_neighbors_aint, - ) - else: - assert self.cutoff_qint <= self.cutoff - assert self.max_neighbors_qint <= self.max_neighbors - qint_graph = self.subselect_graph( - data, - main_graph, - self.cutoff_qint, - self.max_neighbors_qint, - self.cutoff, - self.max_neighbors, - ) - - # Only use quadruplets for certain tags - self.qint_tags = self.qint_tags.to(qint_graph["edge_index"].device) - tags_s = data.tags[qint_graph["edge_index"][0]] - tags_t = data.tags[qint_graph["edge_index"][1]] - qint_tag_mask_s = (tags_s[..., None] == self.qint_tags).any(dim=-1) - qint_tag_mask_t = (tags_t[..., None] == self.qint_tags).any(dim=-1) - qint_tag_mask = qint_tag_mask_s | qint_tag_mask_t - qint_graph["edge_index"] = qint_graph["edge_index"][:, qint_tag_mask] - qint_graph["cell_offset"] = qint_graph["cell_offset"][qint_tag_mask, :] - qint_graph["distance"] = qint_graph["distance"][qint_tag_mask] - qint_graph["vector"] = qint_graph["vector"][qint_tag_mask, :] - del qint_graph["num_neighbors"] - else: - qint_graph = {} - - # Symmetrize edges for swapping in symmetric message passing - main_graph, id_swap = self.symmetrize_edges(main_graph, data.batch) + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
+ import ipdb + ipdb.set_trace() - trip_idx_e2e = get_triplets(main_graph, num_atoms=num_atoms) + bip_edges = data["is_disc"].edge_index + bip_weights = data["is_disc"].edge_weight - # Additional indices for quadruplets - if self.quad_interaction: - quad_idx = get_quadruplets( - main_graph, - qint_graph, - num_atoms, - ) - else: - quad_idx = {} - - if self.atom_edge_interaction: - trip_idx_a2e = get_mixed_triplets( - a2ee2a_graph, - main_graph, - num_atoms=num_atoms, - return_agg_idx=True, - ) - else: - trip_idx_a2e = {} - if self.edge_atom_interaction: - trip_idx_e2a = get_mixed_triplets( - main_graph, - a2ee2a_graph, - num_atoms=num_atoms, - return_agg_idx=True, - ) - # a2ee2a_graph['edge_index'][1] has to be sorted for this - a2ee2a_graph["target_neighbor_idx"] = get_inner_idx( - a2ee2a_graph["edge_index"][1], dim_size=num_atoms - ) - else: - trip_idx_e2a = {} - if self.atom_interaction: - # a2a_graph['edge_index'][1] has to be sorted for this - a2a_graph["target_neighbor_idx"] = get_inner_idx( - a2a_graph["edge_index"][1], dim_size=num_atoms - ) - - return ( - main_graph, - a2a_graph, - a2ee2a_graph, - qint_graph, - id_swap, - trip_idx_e2e, - trip_idx_a2e, - trip_idx_e2a, - quad_idx, - ) - - def get_bases( - self, - main_graph, - a2a_graph, - a2ee2a_graph, - qint_graph, - trip_idx_e2e, - trip_idx_a2e, - trip_idx_e2a, - quad_idx, - num_atoms, - ): - """Calculate and transform basis functions.""" - basis_rad_main_raw = self.radial_basis(main_graph["distance"]) - - # Calculate triplet angles - cosφ_cab = inner_product_clamped( - main_graph["vector"][trip_idx_e2e["out"]], - main_graph["vector"][trip_idx_e2e["in"]], - ) - basis_rad_cir_e2e_raw, basis_cir_e2e_raw = self.cbf_basis_tint( - main_graph["distance"], cosφ_cab - ) + adsorbates, catalysts = [], [] + for i in range(len(data)): + adsorbates.append(Data( + **data[i]["adsorbate"]._mapping, + edge_index=data[i]["adsorbate", "is_close", "adsorbate"] + )) + catalyst.append(Data( + **data[i]["catalyst"]._mapping, + edge_index=data[i]["catalyst", "is_close", "catalyst"] + )) + del data + adsorbates = Batch.from_data_list(adsorbates) + catalysts = Batch.from_data_list(catalysts) - if self.quad_interaction: - # Calculate quadruplet angles - cosφ_cab_q, cosφ_abd, angle_cabd = self.calculate_quad_angles( - main_graph["vector"], - qint_graph["vector"], - quad_idx, - ) + # We make predictions for each + pos_ads = adsorbates.pos + batch_ads = adsorbates.batch + atomic_numbers_ads = adsorbates.atomic_numbers.long() + num_atoms_ads = adsorbates.shape[0] - basis_rad_cir_qint_raw, basis_cir_qint_raw = self.cbf_basis_qint( - qint_graph["distance"], cosφ_abd - ) - basis_rad_sph_qint_raw, basis_sph_qint_raw = self.sbf_basis_qint( - main_graph["distance"], - cosφ_cab_q[quad_idx["trip_out_to_quad"]], - angle_cabd, - ) - if self.atom_edge_interaction: - basis_rad_a2ee2a_raw = self.radial_basis_aeaint(a2ee2a_graph["distance"]) - cosφ_cab_a2e = inner_product_clamped( - main_graph["vector"][trip_idx_a2e["out"]], - a2ee2a_graph["vector"][trip_idx_a2e["in"]], - ) - basis_rad_cir_a2e_raw, basis_cir_a2e_raw = self.cbf_basis_aeint( - main_graph["distance"], cosφ_cab_a2e - ) - if self.edge_atom_interaction: - cosφ_cab_e2a = inner_product_clamped( - a2ee2a_graph["vector"][trip_idx_e2a["out"]], - main_graph["vector"][trip_idx_e2a["in"]], - ) - basis_rad_cir_e2a_raw, basis_cir_e2a_raw = self.cbf_basis_eaint( - a2ee2a_graph["distance"], cosφ_cab_e2a - ) - if self.atom_interaction: - basis_rad_a2a_raw = self.radial_basis_aint(a2a_graph["distance"]) - - # Shared Down 
Projections - bases_qint = {} - if self.quad_interaction: - bases_qint["rad"] = self.mlp_rbf_qint(basis_rad_main_raw) - bases_qint["cir"] = self.mlp_cbf_qint( - rad_basis=basis_rad_cir_qint_raw, - sph_basis=basis_cir_qint_raw, - idx_sph_outer=quad_idx["triplet_in"]["out"], - ) - bases_qint["sph"] = self.mlp_sbf_qint( - rad_basis=basis_rad_sph_qint_raw, - sph_basis=basis_sph_qint_raw, - idx_sph_outer=quad_idx["out"], - idx_sph_inner=quad_idx["out_agg"], - ) - - bases_a2e = {} - if self.atom_edge_interaction: - bases_a2e["rad"] = self.mlp_rbf_aeint(basis_rad_a2ee2a_raw) - bases_a2e["cir"] = self.mlp_cbf_aeint( - rad_basis=basis_rad_cir_a2e_raw, - sph_basis=basis_cir_a2e_raw, - idx_sph_outer=trip_idx_a2e["out"], - idx_sph_inner=trip_idx_a2e["out_agg"], - ) - bases_e2a = {} - if self.edge_atom_interaction: - bases_e2a["rad"] = self.mlp_rbf_eaint(basis_rad_main_raw) - bases_e2a["cir"] = self.mlp_cbf_eaint( - rad_basis=basis_rad_cir_e2a_raw, - sph_basis=basis_cir_e2a_raw, - idx_rad_outer=a2ee2a_graph["edge_index"][1], - idx_rad_inner=a2ee2a_graph["target_neighbor_idx"], - idx_sph_outer=trip_idx_e2a["out"], - idx_sph_inner=trip_idx_e2a["out_agg"], - num_atoms=num_atoms, - ) - if self.atom_interaction: - basis_a2a_rad = self.mlp_rbf_aint( - rad_basis=basis_rad_a2a_raw, - idx_rad_outer=a2a_graph["edge_index"][1], - idx_rad_inner=a2a_graph["target_neighbor_idx"], - num_atoms=num_atoms, - ) - else: - basis_a2a_rad = None - - bases_e2e = {} - bases_e2e["rad"] = self.mlp_rbf_tint(basis_rad_main_raw) - bases_e2e["cir"] = self.mlp_cbf_tint( - rad_basis=basis_rad_cir_e2e_raw, - sph_basis=basis_cir_e2e_raw, - idx_sph_outer=trip_idx_e2e["out"], - idx_sph_inner=trip_idx_e2e["out_agg"], - ) - - basis_atom_update = self.mlp_rbf_h(basis_rad_main_raw) - basis_output = self.mlp_rbf_out(basis_rad_main_raw) - - return ( - basis_rad_main_raw, - basis_atom_update, - basis_output, - bases_qint, - bases_e2e, - bases_a2e, - bases_e2a, - basis_a2a_rad, - ) - - def energy_forward(self, data): - pos = data.pos - batch = data.batch - atomic_numbers = data.atomic_numbers.long() - num_atoms = atomic_numbers.shape[0] + pos_cat = catalysts.pos + batch_cat = catalysts.batch + atomic_numbers_cat = catalysts.atomic_numbers.long() + num_atoms_cat = catalysts.shape[0] if self.regress_forces and not self.direct_forces: - pos.requires_grad_(True) - - ( - main_graph, - a2a_graph, - a2ee2a_graph, - qint_graph, - id_swap, - trip_idx_e2e, - trip_idx_a2e, - trip_idx_e2a, - quad_idx, - ) = self.get_graphs_and_indices(data) - _, idx_t = main_graph["edge_index"] + pos_ads.requires_grad_(True) + pos_cat.requires_grad_(True) - ( - basis_rad_raw, - basis_atom_update, - basis_output, - bases_qint, - bases_e2e, - bases_a2e, - bases_e2a, - basis_a2a_rad, - ) = self.get_bases( - main_graph=main_graph, - a2a_graph=a2a_graph, - a2ee2a_graph=a2ee2a_graph, - qint_graph=qint_graph, - trip_idx_e2e=trip_idx_e2e, - trip_idx_a2e=trip_idx_a2e, - trip_idx_e2a=trip_idx_e2a, - quad_idx=quad_idx, - num_atoms=num_atoms, + output_ads = self.ads_model.pre_interaction( + pos_ads, batch_ads, atomic_numbers_ads, num_atoms_ads, adsorbates + ) + output_cat = self.cat_model.pre_interaction( + pos_cat, batch_cat, atomic_numbers_cat, num_atoms_cat, catalysts ) - # Embedding block - h = self.atom_emb(atomic_numbers, data.tags if hasattr(data, "tags") else None) - # (nAtoms, emb_size_atom) - m = self.edge_emb(h, basis_rad_raw, main_graph["edge_index"]) - # (nEdges, emb_size_edge) + inter_outputs_ads, inter_outputs_cat = self.interactions(output_ads, output_cat) - x_E, x_F = 
self.out_blocks[0](h, m, basis_output, idx_t) - # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) - xs_E, xs_F = [x_E], [x_F] - for i in range(self.num_blocks): - # Interaction block - h, m = self.int_blocks[i]( - h=h, - m=m, - bases_qint=bases_qint, - bases_e2e=bases_e2e, - bases_a2e=bases_a2e, - bases_e2a=bases_e2a, - basis_a2a_rad=basis_a2a_rad, - basis_atom_update=basis_atom_update, - edge_index_main=main_graph["edge_index"], - a2ee2a_graph=a2ee2a_graph, - a2a_graph=a2a_graph, - id_swap=id_swap, - trip_idx_e2e=trip_idx_e2e, - trip_idx_a2e=trip_idx_a2e, - trip_idx_e2a=trip_idx_e2a, - quad_idx=quad_idx, - ) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) - x_E, x_F = self.out_blocks[i + 1](h, m, basis_output, idx_t) - # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) - xs_E.append(x_E) - xs_F.append(x_F) - # Global output block for final predictions - x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) - if self.direct_forces: - x_F = self.out_mlp_F(torch.cat(xs_F, dim=-1)) - with torch.cuda.amp.autocast(False): - E_t = self.out_energy(x_E.float()) - if self.direct_forces: - F_st = self.out_forces(x_F.float()) - nMolecules = torch.max(batch) + 1 + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] - if self.extensive: - E_t = self.scattering( - E_t, batch, dim=0, dim_size=nMolecules, reduce="add" - ) # (nMolecules, num_targets) - else: - E_t = self.scattering( - E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" - ) # (nMolecules, num_targets) + # We combine predictions + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) - return { - "energy": E_t.squeeze(1), # (num_molecules) - "E_t": E_t, - "idx_t": idx_t, - "main_graph": main_graph, - "num_atoms": num_atoms, - "pos": pos, - "F_st": F_st, + # We return them + pred_system = { + "energy" : system_energy, + "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] } - @conditional_grad(torch.enable_grad()) - def scattering(self, E_t, batch, dim, dim_size, reduce="add"): - E_t = scatter_det( - src=E_t, index=batch, dim=dim, dim_size=dim_size, reduce=reduce - ) - - return E_t - - @conditional_grad(torch.enable_grad()) - def forces_forward(self, preds): - - idx_t = preds["idx_t"] - main_graph = preds["main_graph"] - num_atoms = preds["num_atoms"] - pos = preds["pos"] - F_st = preds["F_st"] - E_t = preds["E_t"] + return pred_system - if self.direct_forces: - if self.forces_coupled: # enforce F_st = F_ts - nEdges = idx_t.shape[0] - id_undir = repeat_blocks( - main_graph["num_neighbors"] // 2, - repeats=2, - continuous_indexing=True, - ) - F_st = scatter_det( - F_st, - id_undir, - dim=0, - dim_size=int(nEdges / 2), - reduce="mean", - ) # (nEdges/2, num_targets) - F_st = F_st[id_undir] # (nEdges, num_targets) + def interactions(self, output_ads, output_cat): + h_ads, m_ads = output_ads["h"], output_ads["m"] + h_cat, m_cat = output_cat["h"], output_cat["m"] + del output_ads["h"]; del output_ads["m"] + del output_cat["h"]; del output_cat["m"] - # map forces in edge directions - F_st_vec = F_st[:, :, None] * main_graph["vector"][:, None, :] - # (nEdges, num_targets, 3) - F_t = scatter_det( - F_st_vec, - idx_t, - dim=0, - dim_size=num_atoms, - reduce="add", - ) # (nAtoms, num_targets, 3) - else: - F_t = self.force_scaler.calc_forces_and_update(E_t, pos) + #basis_output_ads, idx - F_t = F_t.squeeze(1) # (num_atoms, 3) - return F_t + return 1, 2 - @property - def num_params(self): - return 
sum(p.numel() for p in self.parameters()) + # GOT UP TO HERE. I NEED TO DO INTERACTIONS. HERE. diff --git a/ocpmodels/models/gemnet_oc/depgemnet_oc.py b/ocpmodels/models/gemnet_oc/depgemnet_oc.py index f7000606d3..935cb058bc 100644 --- a/ocpmodels/models/gemnet_oc/depgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/depgemnet_oc.py @@ -24,8 +24,6 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - import ipdb - ipdb.set_trace() # We need to save the tags so this step is necessary. self.tags_saver(data.tags) pred = super().energy_forward(data) diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index 6c25e993fc..d6dd87602b 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -860,8 +860,11 @@ def subselect_edges( subgraph["distance"] = subgraph["distance"][edge_mask] subgraph["vector"] = subgraph["vector"][edge_mask] + empty_image = subgraph["num_neighbors"] == 0 if torch.any(empty_image): + import ipdb + ipdb.set_trace() raise ValueError( f"An image has no neighbors: id={data.id[empty_image]}, " f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}" @@ -1213,6 +1216,102 @@ def energy_forward(self, data): if self.regress_forces and not self.direct_forces: pos.requires_grad_(True) + outputs = self.pre_interaction( + pos, batch, atomic_numbers, num_atoms, data + ) + + #h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F + interaction_outputs = self.interactions(outputs) + + E_t, idx_t, F_st = self.post_interactions( + batch=batch, **interaction_outputs, + ) + + return { + "energy": E_t.squeeze(1), # (num_molecules) + "E_t": E_t, + "idx_t": idx_t, + "main_graph": outputs["main_graph"], + "num_atoms": num_atoms, + "pos": pos, + "F_st": F_st, + } + + def post_interactions(self, h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F, batch): + # Global output block for final predictions + x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) + if self.direct_forces: + x_F = self.out_mlp_F(torch.cat(xs_F, dim=-1)) + with torch.cuda.amp.autocast(False): + E_t = self.out_energy(x_E.float()) + if self.direct_forces: + F_st = self.out_forces(x_F.float()) + + nMolecules = torch.max(batch) + 1 + + if self.extensive: + E_t = self.scattering( + E_t, batch, dim=0, dim_size=nMolecules, reduce="add" + ) # (nMolecules, num_targets) + else: + E_t = self.scattering( + E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" + ) # (nMolecules, num_targets) + + return E_t, idx_t, F_st + + def interactions(self, outputs): + h, m = outputs["h"], outputs["m"] + del outputs["h"]; del outputs["m"] + + basis_output, idx_t = outputs["basis_output"], outputs["idx_t"] + del outputs["basis_output"]; del outputs["idx_t"] + + x_E, x_F = outputs["x_E"], outputs["x_F"] + del outputs["x_E"]; outputs["x_F"] + + xs_E, xs_F = outputs["xs_E"], outputs["xs_F"] + del outputs["xs_E"]; del outputs["xs_F"] + + for i in range(self.num_blocks): + # Interaction block + h, m = self.int_blocks[i]( + h=h, + m=m, + bases_qint=outputs["bases_qint"], + bases_e2e=outputs["bases_e2e"], + bases_a2e=outputs["bases_a2e"], + bases_e2a=outputs["bases_e2a"], + basis_a2a_rad=outputs["basis_a2a_rad"], + basis_atom_update=outputs["basis_atom_update"], + edge_index_main=outputs["main_graph"]["edge_index"], + a2ee2a_graph=outputs["a2ee2a_graph"], + a2a_graph=outputs["a2a_graph"], + id_swap=outputs["id_swap"], + trip_idx_e2e=outputs["trip_idx_e2e"], + trip_idx_a2e=outputs["trip_idx_a2e"], + trip_idx_e2a=outputs["trip_idx_e2a"], + 
quad_idx=outputs["quad_idx"], + ) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) + + x_E, x_F = self.out_blocks[i + 1](h, m, basis_output, idx_t) + # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) + xs_E.append(x_E) + xs_F.append(x_F) + + interaction_outputs = { + "h" : h, + "m" : m, + "basis_output" : basis_output, + "idx_t" : idx_t, + "x_E" : x_E, + "x_F" : x_F, + "xs_E" : xs_E, + "xs_F" : xs_F + } + return interaction_outputs + + def pre_interaction(self, pos, batch, atomic_numbers, num_atoms, data): ( main_graph, a2a_graph, @@ -1257,62 +1356,34 @@ def energy_forward(self, data): # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) xs_E, xs_F = [x_E], [x_F] - for i in range(self.num_blocks): - # Interaction block - h, m = self.int_blocks[i]( - h=h, - m=m, - bases_qint=bases_qint, - bases_e2e=bases_e2e, - bases_a2e=bases_a2e, - bases_e2a=bases_e2a, - basis_a2a_rad=basis_a2a_rad, - basis_atom_update=basis_atom_update, - edge_index_main=main_graph["edge_index"], - a2ee2a_graph=a2ee2a_graph, - a2a_graph=a2a_graph, - id_swap=id_swap, - trip_idx_e2e=trip_idx_e2e, - trip_idx_a2e=trip_idx_a2e, - trip_idx_e2a=trip_idx_e2a, - quad_idx=quad_idx, - ) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) - - x_E, x_F = self.out_blocks[i + 1](h, m, basis_output, idx_t) - # (nAtoms, emb_size_atom), (nEdges, emb_size_edge) - xs_E.append(x_E) - xs_F.append(x_F) - - # Global output block for final predictions - x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1)) - if self.direct_forces: - x_F = self.out_mlp_F(torch.cat(xs_F, dim=-1)) - with torch.cuda.amp.autocast(False): - E_t = self.out_energy(x_E.float()) - if self.direct_forces: - F_st = self.out_forces(x_F.float()) - - nMolecules = torch.max(batch) + 1 - - if self.extensive: - E_t = self.scattering( - E_t, batch, dim=0, dim_size=nMolecules, reduce="add" - ) # (nMolecules, num_targets) - else: - E_t = self.scattering( - E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" - ) # (nMolecules, num_targets) - - return { - "energy": E_t.squeeze(1), # (num_molecules) - "E_t": E_t, - "idx_t": idx_t, - "main_graph": main_graph, - "num_atoms": num_atoms, - "pos": pos, - "F_st": F_st, + outputs = { + "main_graph" : main_graph, + "a2a_graph" : a2a_graph, + "a2ee2a_graph" : a2ee2a_graph, + "id_swap" : id_swap, + "trip_idx_e2e" : trip_idx_e2e, + "trip_idx_a2e" : trip_idx_a2e, + "trip_idx_e2a" : trip_idx_e2a, + "quad_idx" : quad_idx, + "idx_t" : idx_t, + "basis_rad_raw" : basis_rad_raw, + "basis_atom_update" : basis_atom_update, + "basis_output" : basis_output, + "bases_qint" : bases_qint, + "bases_e2e" : bases_e2e, + "bases_a2e" : bases_a2e, + "bases_e2a" :bases_e2a, + "basis_a2a_rad" : basis_a2a_rad, + "h" : h, + "m" : m, + "x_E" : x_E, + "x_F" : x_F, + "xs_E" : xs_E, + "xs_F" : xs_F, } + return outputs + @conditional_grad(torch.enable_grad()) def scattering(self, E_t, batch, dim, dim_size, reduce="add"): E_t = scatter_det( diff --git a/ocpmodels/models/gemnet_oc/indgemnet_oc.py b/ocpmodels/models/gemnet_oc/indgemnet_oc.py index 958154a485..83d6fce700 100644 --- a/ocpmodels/models/gemnet_oc/indgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/indgemnet_oc.py @@ -1,9 +1,8 @@ import torch, math from torch import nn -from torch.nn import Linear, Transformer +from torch.nn import Linear from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC -from ocpmodels.models.faenet import OutputBlock from ocpmodels.models.base_model import BaseModel from ocpmodels.common.registry import registry from ocpmodels.models.utils.activations import swish @@ -50,8 +49,12 @@ def 
energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION # We return them pred_system = { "energy" : system_energy, - "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None - else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + "E_t": pred_ads["E_t"], + "idx_t": pred_ads["idx_t"], + "main_graph": pred_ads["main_graph"], + "num_atoms": pred_ads["num_atoms"], + "pos": pred_ads["pos"], + "F_st": pred_ads["F_st"] } return pred_system From 06cb33d0a4255b35512b81a78ec9cfc043b63a88 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 12 Sep 2023 22:32:58 -0400 Subject: [PATCH 099/131] changing to implementing gemnet_t --- configs/exps/alvaro/gemnet-config.yaml | 15 +++---- configs/exps/alvaro/oldgemnet-config.yaml | 35 +++++++++++++++ configs/models/painn.yaml | 3 ++ debug.py | 27 ++++++------ ocpmodels/models/gemnet/depgemnet_t.py | 52 +++++++++++++++++++++++ ocpmodels/models/gemnet_oc/gemnet_oc.py | 8 ++-- ocpmodels/models/painn.py | 3 ++ ocpmodels/tasks/task.py | 2 +- 8 files changed, 119 insertions(+), 26 deletions(-) create mode 100644 configs/exps/alvaro/oldgemnet-config.yaml create mode 100644 ocpmodels/models/gemnet/depgemnet_t.py diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index d9524c34bb..f0e74da675 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -1,5 +1,5 @@ job: - mem: 40GB + mem: 32GB cpus: 4 gres: gpu:rtx8000:1 partition: long @@ -15,7 +15,6 @@ default: otf_graph: False max_num_neighbors: 40 hidden_channels: 142 - regress_forces: True graph_rewiring: remove-0-tag optim: batch_size: 32 @@ -23,13 +22,13 @@ default: max_epochs: 30 runs: - - config: gemnet_oc-is2re-all + - config: gemnet_t-is2re-all - - config: depgemnet_oc-is2re-all + #- config: depgemnet_t-is2re-all - #- config: indgemnet_oc-is2re-all + #- config: indgemnet_t-is2re-all - - config: gemnet_oc-is2re-all - is_disconnected: True + #- config: gemnet_t-is2re-all + # is_disconnected: True - #- config: agemnet_oc-is2re-all + #- config: agemnet_t-is2re-all diff --git a/configs/exps/alvaro/oldgemnet-config.yaml b/configs/exps/alvaro/oldgemnet-config.yaml new file mode 100644 index 0000000000..d9524c34bb --- /dev/null +++ b/configs/exps/alvaro/oldgemnet-config.yaml @@ -0,0 +1,35 @@ +job: + mem: 40GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 18:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + model: + tag_hidden_channels: 32 + pg_hidden_channels: 32 + phys_embeds: True + otf_graph: False + max_num_neighbors: 40 + hidden_channels: 142 + regress_forces: True + graph_rewiring: remove-0-tag + optim: + batch_size: 32 + eval_batch_size: 32 + max_epochs: 30 + +runs: + - config: gemnet_oc-is2re-all + + - config: depgemnet_oc-is2re-all + + #- config: indgemnet_oc-is2re-all + + - config: gemnet_oc-is2re-all + is_disconnected: True + + #- config: agemnet_oc-is2re-all diff --git a/configs/models/painn.yaml b/configs/models/painn.yaml index 2c0abac112..c138652a81 100644 --- a/configs/models/painn.yaml +++ b/configs/models/painn.yaml @@ -2,6 +2,9 @@ default: model: name: painn use_pbc: True + optim: + num_workers: 4 + eval_batch_size: 64 # ------------------- # ----- IS2RE ----- diff --git a/debug.py b/debug.py index ed47a7c531..887eeaf83b 100644 --- a/debug.py +++ b/debug.py @@ -92,15 +92,16 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - 
args.config = "indgemnet_oc-is2re-10k" + # args.config = "faenet-is2re-all" + args.config = "gemnet_t-is2re-all" - args.tag_hidden_channels = 32 - args.pg_hidden_channels = 32 - args.phys_embeds = True - args.otf_graph = False - args.max_num_neighbors = 40 - args.hidden_channels = 142 - args.graph_rewiring = "remove-tag-0" + # args.tag_hidden_channels = 32 + # args.pg_hidden_channels = 32 + # args.phys_embeds = True + # args.otf_graph = False + # args.max_num_neighbors = 40 + # args.hidden_channels = 142 + # args.graph_rewiring = "remove-tag-0" trainer_config = build_config(args, override_args) @@ -112,12 +113,12 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["dataset"] ) - trainer_config["optim"]["batch_size"] = 32 - trainer_config["optim"]["eval_batch_size"] = 32 - trainer_config["optim"]["max_epochs"] = 30 + # trainer_config["optim"]["batch_size"] = 32 + # trainer_config["optim"]["eval_batch_size"] = 32 + # trainer_config["optim"]["max_epochs"] = 30 #trainer_config["optim"]["es_patience"] = 5 - - trainer_config["model"]["regress_forces"] = False + trainer_config["optim"]["num_workers"] = 0 + # trainer_config["model"]["regress_forces"] = False # -- Initial setup diff --git a/ocpmodels/models/gemnet/depgemnet_t.py b/ocpmodels/models/gemnet/depgemnet_t.py new file mode 100644 index 0000000000..e4edc8f848 --- /dev/null +++ b/ocpmodels/models/gemnet/depgemnet_t.py @@ -0,0 +1,52 @@ +import torch +from torch.nn import Linear +from torch_scatter import scatter + +from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + conditional_grad, + scatter_det +) + +from torch_geometric.data import Batch + +@registry.register_model("depgemnet_t") +class depGemNetOC(GemNetOC): + def __init__(self, **kwargs): + self.hidden_channels = kwargs["emb_size_atom"] + + kwargs["num_targets"] = self.hidden_channels // 2 + super().__init__(**kwargs) + + self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) + self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + # We need to save the tags so this step is necessary. 
+ self.tags_saver(data.tags) + pred = super().energy_forward(data) + + return pred + + def tags_saver(self, tags): + self.current_tags = tags + + @conditional_grad(torch.enable_grad()) + def scattering(self, E_t, batch, dim, dim_size, reduce="add"): + ads = self.current_tags == 2 + cat = ~ads + + ads_out = scatter_det( + src=E_t, index=batch * ads, dim=dim, reduce=reduce + ) + cat_out = scatter_det( + src=E_t, index=batch * cat, dim=dim, reduce=reduce + ) + + system = torch.cat([ads_out, cat_out], dim = 1) + system = self.sys_lin1(system) + system = self.sys_lin2(system) + + return system diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index d6dd87602b..e67538f66f 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -1262,16 +1262,16 @@ def post_interactions(self, h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F, bat def interactions(self, outputs): h, m = outputs["h"], outputs["m"] - del outputs["h"]; del outputs["m"] + # del outputs["h"]; del outputs["m"] basis_output, idx_t = outputs["basis_output"], outputs["idx_t"] - del outputs["basis_output"]; del outputs["idx_t"] + # del outputs["basis_output"]; del outputs["idx_t"] x_E, x_F = outputs["x_E"], outputs["x_F"] - del outputs["x_E"]; outputs["x_F"] + # del outputs["x_E"]; outputs["x_F"] xs_E, xs_F = outputs["xs_E"], outputs["xs_F"] - del outputs["xs_E"]; del outputs["xs_F"] + # del outputs["xs_E"]; del outputs["xs_F"] for i in range(self.num_blocks): # Interaction block diff --git a/ocpmodels/models/painn.py b/ocpmodels/models/painn.py index 8b2f5d45c3..2eda37dc1c 100644 --- a/ocpmodels/models/painn.py +++ b/ocpmodels/models/painn.py @@ -612,6 +612,9 @@ def forces_forward(self, preds): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): + import ipdb + ipdb.set_trace() + pos = data.pos batch = data.batch z = data.atomic_numbers.long() diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index caac6b253e..e229152212 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - self.trainer.measure_inference_time(loops=loops) + #self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From f68c49b2b70677e5fc85d36cb0bbe26c7da2e3b4 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 14 Sep 2023 20:29:12 -0400 Subject: [PATCH 100/131] fixed a mistake in aschnet --- debug_schnet.py | 2 +- ocpmodels/models/aschnet.py | 4 ++-- ocpmodels/models/gemnet/depgemnet_t.py | 4 ++-- ocpmodels/tasks/task.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/debug_schnet.py b/debug_schnet.py index 77b7433a47..a3b618c951 100644 --- a/debug_schnet.py +++ b/debug_schnet.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "gemnet_oc-is2re-10k" + args.config = "aschnet-is2re-10k" trainer_config = build_config(args, override_args) diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py index 1b17caa712..0b3fb35f06 100644 --- a/ocpmodels/models/aschnet.py +++ b/ocpmodels/models/aschnet.py @@ -383,8 +383,8 @@ def energy_forward(self, data): data["is_disc"].edge_index, edge_weights_disc ) - h_ads = h_ads + inter_ads - h_cat = 
h_cat + inter_cat + h_ads, h_cat = h_ads + inter_ads, h_cat + inter_cat + h_ads, h_cat = nn.functional.normalize(h_ads), nn.functional.normalize(h_cat) pooling_loss = None # deal with pooling loss diff --git a/ocpmodels/models/gemnet/depgemnet_t.py b/ocpmodels/models/gemnet/depgemnet_t.py index e4edc8f848..2bc1ed236f 100644 --- a/ocpmodels/models/gemnet/depgemnet_t.py +++ b/ocpmodels/models/gemnet/depgemnet_t.py @@ -2,7 +2,7 @@ from torch.nn import Linear from torch_scatter import scatter -from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC +from ocpmodels.models.gemnet.gemnet import GemNetT from ocpmodels.common.registry import registry from ocpmodels.common.utils import ( conditional_grad, @@ -12,7 +12,7 @@ from torch_geometric.data import Batch @registry.register_model("depgemnet_t") -class depGemNetOC(GemNetOC): +class depGemNetOC(GemNetT): def __init__(self, **kwargs): self.hidden_channels = kwargs["emb_size_atom"] diff --git a/ocpmodels/tasks/task.py b/ocpmodels/tasks/task.py index e229152212..caac6b253e 100644 --- a/ocpmodels/tasks/task.py +++ b/ocpmodels/tasks/task.py @@ -52,7 +52,7 @@ def run(self): if loops > 0: print("----------------------------------------") print("⏱️ Measuring inference time.") - #self.trainer.measure_inference_time(loops=loops) + self.trainer.measure_inference_time(loops=loops) print("----------------------------------------\n") torch.set_grad_enabled(True) return self.trainer.train( From fe10633c517f9e1be98bce33c2d731b9989f61c8 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 14 Sep 2023 20:34:47 -0400 Subject: [PATCH 101/131] config file editing --- configs/exps/alvaro/schnet-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index 33ee55e429..148eaefa5e 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -22,12 +22,12 @@ default: runs: #- config: schnet-is2re-all - - config: schnet-is2re-all - is_disconnected: True + #- config: schnet-is2re-all + #is_disconnected: True - - config: depschnet-is2re-all + #- config: depschnet-is2re-all - - config: indschnet-is2re-all + #- config: indschnet-is2re-all - config: aschnet-is2re-all model: From 218df213ba4108354e6f0677efc4738d96c9ffb6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 14 Sep 2023 21:11:23 -0400 Subject: [PATCH 102/131] debugging gemnet --- ocpmodels/models/gemnet/gemnet.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ocpmodels/models/gemnet/gemnet.py b/ocpmodels/models/gemnet/gemnet.py index a1b1d4f28b..ea8fb4e045 100644 --- a/ocpmodels/models/gemnet/gemnet.py +++ b/ocpmodels/models/gemnet/gemnet.py @@ -520,6 +520,8 @@ def generate_interaction_graph(self, data): ) def energy_forward(self, data): + import ipdb + ipdb.set_trace() pos = data.pos batch = data.batch atomic_numbers = data.atomic_numbers.long() From 73ee5a1a0967f520f7c8cefdf7f5f2c5a171b441 Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 14 Sep 2023 22:13:26 -0400 Subject: [PATCH 103/131] readied gemnet for training --- configs/exps/alvaro/gemnet-config.yaml | 7 ++++--- ocpmodels/models/gemnet/gemnet.py | 2 -- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index f0e74da675..e6cdd1bcc0 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -8,6 +8,7 @@ job: default: wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro + 
graph_rewiring: remove-0-tag model: tag_hidden_channels: 32 pg_hidden_channels: 32 @@ -24,11 +25,11 @@ default: runs: - config: gemnet_t-is2re-all + - config: gemnet_t-is2re-all + is_disconnected: True + #- config: depgemnet_t-is2re-all #- config: indgemnet_t-is2re-all - #- config: gemnet_t-is2re-all - # is_disconnected: True - #- config: agemnet_t-is2re-all diff --git a/ocpmodels/models/gemnet/gemnet.py b/ocpmodels/models/gemnet/gemnet.py index ea8fb4e045..a1b1d4f28b 100644 --- a/ocpmodels/models/gemnet/gemnet.py +++ b/ocpmodels/models/gemnet/gemnet.py @@ -520,8 +520,6 @@ def generate_interaction_graph(self, data): ) def energy_forward(self, data): - import ipdb - ipdb.set_trace() pos = data.pos batch = data.batch atomic_numbers = data.atomic_numbers.long() From c56e117bbbf039e5e147822f8b85d3595770583d Mon Sep 17 00:00:00 2001 From: alvaro Date: Thu, 14 Sep 2023 22:17:59 -0400 Subject: [PATCH 104/131] config mistake --- configs/exps/alvaro/gemnet-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/exps/alvaro/gemnet-config.yaml b/configs/exps/alvaro/gemnet-config.yaml index e6cdd1bcc0..782368994a 100644 --- a/configs/exps/alvaro/gemnet-config.yaml +++ b/configs/exps/alvaro/gemnet-config.yaml @@ -8,7 +8,7 @@ job: default: wandb_name: alvaro-carbonero-math wandb_project: ocp-alvaro - graph_rewiring: remove-0-tag + graph_rewiring: remove-tag-0 model: tag_hidden_channels: 32 pg_hidden_channels: 32 From 2242eec0285ac788eecd8d75f5bd733435724014 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 15 Sep 2023 00:36:49 -0400 Subject: [PATCH 105/131] tried to implement gemnet, didn't work. Now rerunning faenet with the more standard config files --- configs/exps/alvaro/standard-faenet.yaml | 37 ++++++++ configs/models/depgemnet_t.yaml | 113 +++++++++++++++++++++++ configs/models/indgemnet_t.yaml | 113 +++++++++++++++++++++++ ocpmodels/models/__init__.py | 2 + ocpmodels/models/gemnet/depgemnet_t.py | 2 +- ocpmodels/models/gemnet/gemnet.py | 14 ++- ocpmodels/models/gemnet/indgemnet_t.py | 60 ++++++++++++ ocpmodels/trainers/base_trainer.py | 6 +- 8 files changed, 339 insertions(+), 8 deletions(-) create mode 100644 configs/exps/alvaro/standard-faenet.yaml create mode 100644 configs/models/depgemnet_t.yaml create mode 100644 configs/models/indgemnet_t.yaml create mode 100644 ocpmodels/models/gemnet/indgemnet_t.py diff --git a/configs/exps/alvaro/standard-faenet.yaml b/configs/exps/alvaro/standard-faenet.yaml new file mode 100644 index 0000000000..256b3d001a --- /dev/null +++ b/configs/exps/alvaro/standard-faenet.yaml @@ -0,0 +1,37 @@ +# MODIFY THIS ONE FOR RUNS + +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + cp_data_to_tmpdir: true + frame_averaging: 2D + fa_frames: se3-random + model: + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 32 + energy_head: weighted-av-final-embeds + skip_co: concat + edge_embed_type: all_rij + optim: + lr_initial: 0.0005 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 20 + eval_every: 0.4 + batch_size: 256 + eval_batch_size: 256 + +runs: + - config: faenet-is2re-all + model: + afaenet_gat_mode: v1 diff --git a/configs/models/depgemnet_t.yaml b/configs/models/depgemnet_t.yaml new file mode 100644 index 0000000000..2523da00e8 --- /dev/null +++ b/configs/models/depgemnet_t.yaml @@ -0,0 +1,113 @@ +# From OCP original repo -> 
https://github.com/Open-Catalyst-Project/ocp/blob/d16de9ee6f26d8661be5b9171e8c73c80237a82f/configs/oc22/is2re/gemnet-dT/gemnet-dT.yml +# Run this on 1 GPU -- so with an effective batch size of 8. + +default: + model: + name: depgemnet_t + use_pbc: true + num_spherical: 7 + num_radial: 64 + num_blocks: 5 + emb_size_atom: 256 + emb_size_edge: 512 + emb_size_trip: 64 + emb_size_rbf: 64 + emb_size_cbf: 16 + emb_size_bil_trip: 64 + num_before_skip: 1 + num_after_skip: 2 + num_concat: 1 + num_atom: 3 + cutoff: 6.0 + max_neighbors: 50 + rbf: + name: gaussian + envelope: + name: polynomial + exponent: 5 + cbf: + name: spherical_harmonics + extensive: True + otf_graph: False + output_init: HeOrthogonal + activation: silu + scale_file: configs/models/scaling_factors/gemnet-dT_c12.json + regress_forces: False + # PhAST + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_hidden_channels: 0 # 32 -> physical properties embedding hidden channels + phys_embeds: False # True + optim: + batch_size: 8 + eval_batch_size: 8 + num_workers: 2 + lr_initial: 1.e-4 + optimizer: AdamW + optimizer_params: { "amsgrad": True } + scheduler: ReduceLROnPlateau + mode: min + factor: 0.8 + patience: 3 + max_epochs: 100 + energy_coefficient: 1 + ema_decay: 0.999 + clip_grad_norm: 10 + loss_energy: mae + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + 10k: {} + + 100k: {} + + all: {} +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + cutoff: 6.0 + scale_file: configs/models/scaling_factors/gemnet-dT.json + regress_forces: "direct" + otf_graph: False + max_neighbors: 50 + num_radial: 128 + num_blocks: 3 + emb_size_atom: 512 + emb_size_trip: 64 + emb_size_rbf: 16 + optim: + clip_grad_norm: 10 + loss_force: l2mae + batch_size: 32 + eval_batch_size: 32 + lr_initial: 5.e-4 + max_epochs: 80 + force_coefficient: 100 + energy_coefficient: 1 + + 200k: {} + + 2M: {} + + 20M: {} + + all: {} + +qm9: + default: {} + + 10k: {} + all: {} + +qm7x: + default: {} + + all: {} + 1k: {} diff --git a/configs/models/indgemnet_t.yaml b/configs/models/indgemnet_t.yaml new file mode 100644 index 0000000000..a68b95f7b4 --- /dev/null +++ b/configs/models/indgemnet_t.yaml @@ -0,0 +1,113 @@ +# From OCP original repo -> https://github.com/Open-Catalyst-Project/ocp/blob/d16de9ee6f26d8661be5b9171e8c73c80237a82f/configs/oc22/is2re/gemnet-dT/gemnet-dT.yml +# Run this on 1 GPU -- so with an effective batch size of 8. 
+ +default: + model: + name: indgemnet_t + use_pbc: true + num_spherical: 7 + num_radial: 64 + num_blocks: 5 + emb_size_atom: 256 + emb_size_edge: 512 + emb_size_trip: 64 + emb_size_rbf: 64 + emb_size_cbf: 16 + emb_size_bil_trip: 64 + num_before_skip: 1 + num_after_skip: 2 + num_concat: 1 + num_atom: 3 + cutoff: 6.0 + max_neighbors: 50 + rbf: + name: gaussian + envelope: + name: polynomial + exponent: 5 + cbf: + name: spherical_harmonics + extensive: True + otf_graph: False + output_init: HeOrthogonal + activation: silu + scale_file: configs/models/scaling_factors/gemnet-dT_c12.json + regress_forces: False + # PhAST + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_hidden_channels: 0 # 32 -> physical properties embedding hidden channels + phys_embeds: False # True + optim: + batch_size: 8 + eval_batch_size: 8 + num_workers: 2 + lr_initial: 1.e-4 + optimizer: AdamW + optimizer_params: { "amsgrad": True } + scheduler: ReduceLROnPlateau + mode: min + factor: 0.8 + patience: 3 + max_epochs: 100 + energy_coefficient: 1 + ema_decay: 0.999 + clip_grad_norm: 10 + loss_energy: mae + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + 10k: {} + + 100k: {} + + all: {} +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + cutoff: 6.0 + scale_file: configs/models/scaling_factors/gemnet-dT.json + regress_forces: "direct" + otf_graph: False + max_neighbors: 50 + num_radial: 128 + num_blocks: 3 + emb_size_atom: 512 + emb_size_trip: 64 + emb_size_rbf: 16 + optim: + clip_grad_norm: 10 + loss_force: l2mae + batch_size: 32 + eval_batch_size: 32 + lr_initial: 5.e-4 + max_epochs: 80 + force_coefficient: 100 + energy_coefficient: 1 + + 200k: {} + + 2M: {} + + 20M: {} + + all: {} + +qm9: + default: {} + + 10k: {} + all: {} + +qm7x: + default: {} + + all: {} + 1k: {} diff --git a/ocpmodels/models/__init__.py b/ocpmodels/models/__init__.py index a722f78170..417433ac0e 100644 --- a/ocpmodels/models/__init__.py +++ b/ocpmodels/models/__init__.py @@ -8,6 +8,8 @@ from .dimenet import DimeNet # noqa: F401 from .faenet import FAENet # noqa: F401 from .gemnet.gemnet import GemNetT # noqa: F401 +from .gemnet.depgemnet_t import depGemNetT # noqa: F401 +from .gemnet.indgemnet_t import indGemNetT # noqa: F401 from .dimenet_plus_plus import DimeNetPlusPlus # noqa: F401 from .forcenet import ForceNet # noqa: F401 from .schnet import SchNet # noqa: F401 diff --git a/ocpmodels/models/gemnet/depgemnet_t.py b/ocpmodels/models/gemnet/depgemnet_t.py index 2bc1ed236f..5be91d6060 100644 --- a/ocpmodels/models/gemnet/depgemnet_t.py +++ b/ocpmodels/models/gemnet/depgemnet_t.py @@ -12,7 +12,7 @@ from torch_geometric.data import Batch @registry.register_model("depgemnet_t") -class depGemNetOC(GemNetT): +class depGemNetT(GemNetT): def __init__(self, **kwargs): self.hidden_channels = kwargs["emb_size_atom"] diff --git a/ocpmodels/models/gemnet/gemnet.py b/ocpmodels/models/gemnet/gemnet.py index a1b1d4f28b..ac5213bc28 100644 --- a/ocpmodels/models/gemnet/gemnet.py +++ b/ocpmodels/models/gemnet/gemnet.py @@ -414,7 +414,7 @@ def select_edges( edge_vector = edge_vector[edge_mask] empty_image = neighbors == 0 - if torch.any(empty_image): + if torch.any(empty_image) and "mode" not in data.keys: raise ValueError( f"An image has no neighbors: id={data.id[empty_image]}, " f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}" @@ -494,7 +494,7 @@ def generate_interaction_graph(self, data): ) # Indices for swapping c->a 
and a->c (for symmetric MP) - block_sizes = torch.div(neighbors, 2, rounding_mode="trunc") + block_sizes = torch.div(neighbors, 2, rounding_mode="trunc") id_swap = repeat_blocks( block_sizes, repeats=2, @@ -582,11 +582,11 @@ def energy_forward(self, data): nMolecules = torch.max(batch) + 1 if self.extensive: - E_t = scatter( + E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="add" ) # (nMolecules, num_targets) else: - E_t = scatter( + E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" ) # (nMolecules, num_targets) @@ -599,6 +599,12 @@ def energy_forward(self, data): "pos": pos, } + def scattering(self, E_t, batch, dim, dim_size, reduce="add"): + E_t = scatter( + E_t, batch, dim=0, dim_size=dim_size, reduce=reduce + ) + return E_t + @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): F_st = preds["F_st"] diff --git a/ocpmodels/models/gemnet/indgemnet_t.py b/ocpmodels/models/gemnet/indgemnet_t.py new file mode 100644 index 0000000000..bb1df9be26 --- /dev/null +++ b/ocpmodels/models/gemnet/indgemnet_t.py @@ -0,0 +1,60 @@ +import torch, math +from torch import nn +from torch.nn import Linear + +from ocpmodels.models.gemnet.gemnet import GemNetT +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish + +from torch_geometric.data import Batch + +@registry.register_model("indgemnet_t") +class indGemNetT(BaseModel): # Change to make it inherit from base model. + def __init__(self, **kwargs): + super().__init__() + + self.regress_forces = kwargs["regress_forces"] + + kwargs["num_targets"] = kwargs["emb_size_atom"] // 2 + + self.ads_model = GemNetT(**kwargs) + self.cat_model = GemNetT(**kwargs) + + self.act = swish + self.combination = nn.Sequential( + Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), + self.act, + Linear(kwargs["emb_size_atom"] // 2, 1) + ) + + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
+ import ipdb + ipdb.set_trace() + + adsorbates = data[0] + catalysts = data[1] + + # We make predictions for each + pred_ads = self.ads_model(adsorbates, mode) + pred_cat = self.cat_model(catalysts, mode) + + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] + + # We combine predictions + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) + + # We return them + pred_system = { + "energy" : system_energy, + "E_t": pred_ads["E_t"], + "idx_t": pred_ads["idx_t"], + "main_graph": pred_ads["main_graph"], + "num_atoms": pred_ads["num_atoms"], + "pos": pred_ads["pos"], + "F_st": pred_ads["F_st"] + } + + return pred_system diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 233a4c0dd5..b9fa3fc313 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -154,7 +154,7 @@ def __init__(self, **kwargs): (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) # Here's the models whose edges are removed as a transform - transform_models = ["depfaenet", "depschnet", "depgemnet_oc"] + transform_models = ["depfaenet", "depschnet", "depgemnet_oc", "depgemnet_t"] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") if self.config["model_name"] in transform_models: @@ -164,8 +164,8 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc"] - self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc"] + self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc", "indgemnet_t"] + self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc", "agemnet_t"] self.data_mode = "normal" self.separate_dataset = False From 1bf534d7f1cb98727c52fb39124f061bacad1005 Mon Sep 17 00:00:00 2001 From: alvaro Date: Fri, 15 Sep 2023 22:39:38 -0400 Subject: [PATCH 106/131] config file for dpp --- configs/exps/alvaro/dpp-config.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 configs/exps/alvaro/dpp-config.yaml diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml new file mode 100644 index 0000000000..c9b314ed3b --- /dev/null +++ b/configs/exps/alvaro/dpp-config.yaml @@ -0,0 +1,21 @@ +# MODIFY THIS ONE FOR RUNS + +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + optim: + batch_size: 16 + eval_batch_size: 16 + +runs: + - config: dpp-is2re-all From f9d4b030d96fe0b951df7ebd776b02773e73b1ed Mon Sep 17 00:00:00 2001 From: alvaro Date: Sat, 16 Sep 2023 01:26:09 -0400 Subject: [PATCH 107/131] implemented almost all varients of dimenetpp --- configs/exps/alvaro/dpp-config.yaml | 11 + configs/models/adpp.yaml | 209 ++++++ configs/models/depdpp.yaml | 209 ++++++ configs/models/inddpp.yaml | 209 ++++++ ocpmodels/models/adpp.py | 877 ++++++++++++++++++++++++++ ocpmodels/models/depdpp.py | 46 ++ ocpmodels/models/dimenet_plus_plus.py | 9 +- ocpmodels/models/inddpp.py | 52 ++ ocpmodels/models/indschnet.py | 1 - ocpmodels/trainers/base_trainer.py | 6 +- 10 files changed, 1623 insertions(+), 6 deletions(-) create mode 100644 configs/models/adpp.yaml create mode 100644 configs/models/depdpp.yaml create mode 100644 configs/models/inddpp.yaml create mode 100644 
ocpmodels/models/adpp.py create mode 100644 ocpmodels/models/depdpp.py create mode 100644 ocpmodels/models/inddpp.py diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index c9b314ed3b..229209097f 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -18,4 +18,15 @@ default: eval_batch_size: 16 runs: + #- config: dpp-is2re-all + - config: dpp-is2re-all + is_disconnected: True + + - config: depdpp-is2re-all + + - config: inddpp-is2re-all + + #- config: aschnet-is2re-all + #model: + #gat_mode: v1 diff --git a/configs/models/adpp.yaml b/configs/models/adpp.yaml new file mode 100644 index 0000000000..045ffed1b6 --- /dev/null +++ b/configs/models/adpp.yaml @@ -0,0 +1,209 @@ +default: + model: + name: adpp + hidden_channels: 256 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: False + use_pbc: True + basis_emb_size: 8 + envelope_exponent: 5 + act: swish + int_emb_size: 64 + # drlab attributes: + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 4 + eval_batch_size: 4 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20000 + - 40000 + - 60000 + warmup_steps: 10000 + max_epochs: 20 + batch_size: 16 + eval_batch_size: 16 + + 100k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 200000 + - 400000 + - 600000 + warmup_steps: 100000 + max_epochs: 15 + batch_size: 16 + eval_batch_size: 16 + + all: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + batch_size: 16 + eval_batch_size: 16 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + optim: + num_workers: 8 + eval_every: 10000 + + 200k: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ batch_size: 48 + eval_batch_size: 48 + lr_initial: 0.00001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 5208 + - 8333 + - 10416 + warmup_steps: 3125 + max_epochs: 10 + force_coefficient: 50 + + 2M: + optim: + batch_size: 96 + eval_batch_size: 96 + eval_every: 10000 + num_workers: 8 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20833 + - 31250 + - 41666 + warmup_steps: 10416 + warmup_factor: 0.2 + max_epochs: 15 + force_coefficient: 50 + model: + hidden_channels: 192 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: True + use_pbc: True + + 20M: + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 12 + eval_batch_size: 12 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 78125 + - 130208 + - 208333 + warmup_steps: 52083 + max_epochs: 15 + force_coefficient: 50 + + all: + optim: + # *** Important note *** + # The total number of gpus used for this run was 256. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 8 + eval_batch_size: 8 + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 130794 + - 196192 + - 261589 + warmup_steps: 130794 + max_epochs: 7 + force_coefficient: 50 + +qm9: + default: + model: + num_blocks: 6 + hidden_channels: 128 + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + lr_initial: 0.001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 2000000 + - 4000000 + - 6000000 + warmup_steps: 3000 + lr_gamma: 0.1 + batch_size: 128 + max_epochs: 600 + + 10k: {} + all: {} + +qm7x: + default: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + + all: {} + 1k: {} diff --git a/configs/models/depdpp.yaml b/configs/models/depdpp.yaml new file mode 100644 index 0000000000..3f04d06209 --- /dev/null +++ b/configs/models/depdpp.yaml @@ -0,0 +1,209 @@ +default: + model: + name: depdpp + hidden_channels: 256 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: False + use_pbc: True + basis_emb_size: 8 + envelope_exponent: 5 + act: swish + int_emb_size: 64 + # drlab attributes: + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 4 + eval_batch_size: 4 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + 10k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20000 + - 40000 + - 60000 + warmup_steps: 10000 + max_epochs: 20 + batch_size: 16 + eval_batch_size: 16 + + 100k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 200000 + - 400000 + - 600000 + warmup_steps: 100000 + max_epochs: 15 + batch_size: 16 + eval_batch_size: 16 + + all: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + batch_size: 16 + eval_batch_size: 16 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + optim: + num_workers: 8 + eval_every: 10000 + + 200k: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 48 + eval_batch_size: 48 + lr_initial: 0.00001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 5208 + - 8333 + - 10416 + warmup_steps: 3125 + max_epochs: 10 + force_coefficient: 50 + + 2M: + optim: + batch_size: 96 + eval_batch_size: 96 + eval_every: 10000 + num_workers: 8 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20833 + - 31250 + - 41666 + warmup_steps: 10416 + warmup_factor: 0.2 + max_epochs: 15 + force_coefficient: 50 + model: + hidden_channels: 192 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: True + use_pbc: True + + 20M: + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ batch_size: 12 + eval_batch_size: 12 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 78125 + - 130208 + - 208333 + warmup_steps: 52083 + max_epochs: 15 + force_coefficient: 50 + + all: + optim: + # *** Important note *** + # The total number of gpus used for this run was 256. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 8 + eval_batch_size: 8 + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 130794 + - 196192 + - 261589 + warmup_steps: 130794 + max_epochs: 7 + force_coefficient: 50 + +qm9: + default: + model: + num_blocks: 6 + hidden_channels: 128 + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + lr_initial: 0.001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 2000000 + - 4000000 + - 6000000 + warmup_steps: 3000 + lr_gamma: 0.1 + batch_size: 128 + max_epochs: 600 + + 10k: {} + all: {} + +qm7x: + default: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + + all: {} + 1k: {} diff --git a/configs/models/inddpp.yaml b/configs/models/inddpp.yaml new file mode 100644 index 0000000000..aae9ffb0f3 --- /dev/null +++ b/configs/models/inddpp.yaml @@ -0,0 +1,209 @@ +default: + model: + name: inddpp + hidden_channels: 256 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: False + use_pbc: True + basis_emb_size: 8 + envelope_exponent: 5 + act: swish + int_emb_size: 64 + # drlab attributes: + tag_hidden_channels: 0 # 64 + pg_hidden_channels: 0 # 32 -> period & group embedding hidden channels + phys_embeds: False # True + phys_hidden_channels: 0 + energy_head: False # can be {False, weighted-av-initial-embeds, weighted-av-final-embeds, pooling, graclus, random} + optim: + batch_size: 4 + eval_batch_size: 4 + num_workers: 4 + lr_gamma: 0.1 + warmup_factor: 0.2 + +# ------------------- +# ----- IS2RE ----- +# ------------------- + +is2re: + # *** Important note *** + # The total number of gpus used for this run was 1. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ 10k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20000 + - 40000 + - 60000 + warmup_steps: 10000 + max_epochs: 20 + batch_size: 16 + eval_batch_size: 16 + + 100k: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 200000 + - 400000 + - 600000 + warmup_steps: 100000 + max_epochs: 15 + batch_size: 16 + eval_batch_size: 16 + + all: + optim: + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + batch_size: 16 + eval_batch_size: 16 + +# ------------------ +# ----- S2EF ----- +# ------------------ + +s2ef: + default: + model: + regress_forces: "from_energy" + optim: + num_workers: 8 + eval_every: 10000 + + 200k: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 48 + eval_batch_size: 48 + lr_initial: 0.00001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 5208 + - 8333 + - 10416 + warmup_steps: 3125 + max_epochs: 10 + force_coefficient: 50 + + 2M: + optim: + batch_size: 96 + eval_batch_size: 96 + eval_every: 10000 + num_workers: 8 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 20833 + - 31250 + - 41666 + warmup_steps: 10416 + warmup_factor: 0.2 + max_epochs: 15 + force_coefficient: 50 + model: + hidden_channels: 192 + out_emb_channels: 192 + num_blocks: 3 + cutoff: 6.0 + num_radial: 6 + num_spherical: 7 + num_before_skip: 1 + num_after_skip: 2 + num_output_layers: 3 + regress_forces: True + use_pbc: True + + 20M: + optim: + # *** Important note *** + # The total number of gpus used for this run was 64. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 12 + eval_batch_size: 12 + lr_initial: 0.0001 + lr_gamma: 0.1 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 78125 + - 130208 + - 208333 + warmup_steps: 52083 + max_epochs: 15 + force_coefficient: 50 + + all: + optim: + # *** Important note *** + # The total number of gpus used for this run was 256. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + batch_size: 8 + eval_batch_size: 8 + lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 130794 + - 196192 + - 261589 + warmup_steps: 130794 + max_epochs: 7 + force_coefficient: 50 + +qm9: + default: + model: + num_blocks: 6 + hidden_channels: 128 + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. + lr_initial: 0.001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 2000000 + - 4000000 + - 6000000 + warmup_steps: 3000 + lr_gamma: 0.1 + batch_size: 128 + max_epochs: 600 + + 10k: {} + all: {} + +qm7x: + default: + optim: + # *** Important note *** + # The total number of gpus used for this run was 4. + # If the global batch size (num_gpus * batch_size) is modified + # the lr_milestones and warmup_steps need to be adjusted accordingly. 
+ lr_initial: 0.0001 + lr_milestones: # steps at which lr_initial <- lr_initial * lr_gamma + - 115082 + - 230164 + - 345246 + warmup_steps: 57541 + max_epochs: 8 + + all: {} + 1k: {} diff --git a/ocpmodels/models/adpp.py b/ocpmodels/models/adpp.py new file mode 100644 index 0000000000..75bc062b6c --- /dev/null +++ b/ocpmodels/models/adpp.py @@ -0,0 +1,877 @@ +from math import pi as PI +from math import sqrt + +import torch +from torch import nn +from torch.nn import Embedding, Linear +from torch_geometric.nn import radius_graph +from torch_geometric.nn.inits import glorot_orthogonal +from torch_geometric.nn.models.dimenet import ( + Envelope, + ResidualLayer, + SphericalBasisLayer, +) +from torch_scatter import scatter +from torch_sparse import SparseTensor + +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import ( + conditional_grad, + get_pbc_distances, + radius_graph_pbc_inputs, +) +from ocpmodels.models.base_model import BaseModel +from ocpmodels.models.utils.pos_encodings import PositionalEncoding +from ocpmodels.modules.phys_embeddings import PhysEmbedding +from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling +from ocpmodels.models.utils.activations import swish +from ocpmodels.models.afaenet import ( + GATInteraction, + GaussianSmearing +) + + +try: + import sympy as sym +except ImportError: + sym = None + +NUM_CLUSTERS = 20 +NUM_POOLING_LAYERS = 1 + + +class BesselBasisLayer(torch.nn.Module): + def __init__(self, num_radial, cutoff=5.0, envelope_exponent=5): + super().__init__() + self.cutoff = cutoff + self.envelope = Envelope(envelope_exponent) + + self.freq = torch.nn.Parameter(torch.Tensor(num_radial)) + + self.reset_parameters() + + @torch.no_grad() + def reset_parameters(self): + torch.arange(1, self.freq.numel() + 1, out=self.freq).mul_(PI) + + def forward(self, dist): + dist = dist.unsqueeze(-1) / self.cutoff + return self.envelope(dist) * (self.freq * dist).sin() + + +class EmbeddingBlock(torch.nn.Module): + def __init__(self, num_radial, hidden_channels, act=swish): + super().__init__() + self.act = act + + self.emb = Embedding(85, hidden_channels) + self.lin_rbf = Linear(num_radial, hidden_channels) + self.lin = Linear(3 * hidden_channels, hidden_channels) + + self.reset_parameters() + + def reset_parameters(self): + self.emb.weight.data.uniform_(-sqrt(3), sqrt(3)) + self.lin_rbf.reset_parameters() + self.lin.reset_parameters() + + def forward(self, x, rbf, i, j, tags=None, subnodes=None): + x = self.emb(x) + rbf = self.act(self.lin_rbf(rbf)) + return self.act(self.lin(torch.cat([x[i], x[j], rbf], dim=-1))) + + +class AdvancedEmbeddingBlock(torch.nn.Module): + def __init__( + self, + num_radial, + hidden_channels, + tag_hidden_channels, + pg_hidden_channels, + phys_hidden_channels, + phys_embeds, + graph_rewiring, + act=swish, + ): + super().__init__() + self.act = act + self.use_tag = tag_hidden_channels > 0 + self.use_pg = pg_hidden_channels > 0 + self.use_mlp_phys = phys_hidden_channels > 0 + self.use_positional_embeds = graph_rewiring in { + "one-supernode-per-graph", + "one-supernode-per-atom-type", + "one-supernode-per-atom-type-dist", + } + # self.use_positional_embeds = False + + # Phys embeddings + self.phys_emb = PhysEmbedding(props=phys_embeds, pg=self.use_pg) + # With MLP + if self.use_mlp_phys: + self.phys_lin = Linear(self.phys_emb.n_properties, phys_hidden_channels) + else: + phys_hidden_channels = self.phys_emb.n_properties + # Period + group embeddings + if self.use_pg: + self.period_embedding = Embedding( + 
self.phys_emb.period_size, pg_hidden_channels + ) + self.group_embedding = Embedding( + self.phys_emb.group_size, pg_hidden_channels + ) + # Tag embedding + if tag_hidden_channels: + self.tag = Embedding(3, tag_hidden_channels) + + # Position encoding + if self.use_positional_embeds: + self.pe = PositionalEncoding(hidden_channels, 210) + + # Main embedding + self.emb = Embedding( + 85, + hidden_channels + - tag_hidden_channels + - phys_hidden_channels + - 2 * pg_hidden_channels, + ) + + self.lin_rbf = Linear(num_radial, hidden_channels) + self.lin = Linear(3 * hidden_channels, hidden_channels) + + self.reset_parameters() + + def reset_parameters(self): + self.emb.weight.data.uniform_(-sqrt(3), sqrt(3)) + if self.use_mlp_phys: + self.phys_lin.reset_parameters() + if self.use_tag: + self.tag.weight.data.uniform_(-sqrt(3), sqrt(3)) + if self.use_pg: + self.period_embedding.weight.data.uniform_(-sqrt(3), sqrt(3)) + self.group_embedding.weight.data.uniform_(-sqrt(3), sqrt(3)) + self.lin_rbf.reset_parameters() + self.lin.reset_parameters() + + def forward(self, x, rbf, i, j, tag=None, subnodes=None): + x_ = self.emb(x) + rbf = self.act(self.lin_rbf(rbf)) + + if self.phys_emb.device != x.device: + self.phys_emb = self.phys_emb.to(x.device) + + if self.use_tag: + x_tag = self.tag(tag) + x_ = torch.cat((x_, x_tag), dim=1) + + if self.phys_emb.n_properties > 0: + x_phys = self.phys_emb.properties[x] + if self.use_mlp_phys: + x_phys = self.phys_lin(x_phys) + x_ = torch.cat((x_, x_phys), dim=1) + + if self.use_pg: + x_period = self.period_embedding(self.phys_emb.period[x]) + x_group = self.group_embedding(self.phys_emb.group[x]) + x_ = torch.cat((x_, x_period, x_group), dim=1) + + if self.use_positional_embeds: + idx_of_non_zero_val = (tag == 0).nonzero().T.squeeze(0) + x_pos = torch.zeros_like(x_, device=x_.device) + x_pos[idx_of_non_zero_val, :] = self.pe(subnodes).to(device=x_pos.device) + x_ += x_pos + + return self.act( + self.lin( + torch.cat( + [ + x_[i], + x_[j], + rbf, + ], + dim=-1, + ) + ) + ) + + +class InteractionPPBlock(torch.nn.Module): + def __init__( + self, + hidden_channels, + int_emb_size, + basis_emb_size, + num_spherical, + num_radial, + num_before_skip, + num_after_skip, + act=swish, + ): + super(InteractionPPBlock, self).__init__() + self.act = act + + # Transformations of Bessel and spherical basis representations. + self.lin_rbf1 = nn.Linear(num_radial, basis_emb_size, bias=False) + self.lin_rbf2 = nn.Linear(basis_emb_size, hidden_channels, bias=False) + self.lin_sbf1 = nn.Linear( + num_spherical * num_radial, basis_emb_size, bias=False + ) + self.lin_sbf2 = nn.Linear(basis_emb_size, int_emb_size, bias=False) + + # Dense transformations of input messages. + self.lin_kj = nn.Linear(hidden_channels, hidden_channels) + self.lin_ji = nn.Linear(hidden_channels, hidden_channels) + + # Embedding projections for interaction triplets. + self.lin_down = nn.Linear(hidden_channels, int_emb_size, bias=False) + self.lin_up = nn.Linear(int_emb_size, hidden_channels, bias=False) + + # Residual layers before and after skip connection. 
+ self.layers_before_skip = torch.nn.ModuleList( + [ResidualLayer(hidden_channels, act) for _ in range(num_before_skip)] + ) + self.lin = nn.Linear(hidden_channels, hidden_channels) + self.layers_after_skip = torch.nn.ModuleList( + [ResidualLayer(hidden_channels, act) for _ in range(num_after_skip)] + ) + + self.reset_parameters() + + def reset_parameters(self): + glorot_orthogonal(self.lin_rbf1.weight, scale=2.0) + glorot_orthogonal(self.lin_rbf2.weight, scale=2.0) + glorot_orthogonal(self.lin_sbf1.weight, scale=2.0) + glorot_orthogonal(self.lin_sbf2.weight, scale=2.0) + + glorot_orthogonal(self.lin_kj.weight, scale=2.0) + self.lin_kj.bias.data.fill_(0) + glorot_orthogonal(self.lin_ji.weight, scale=2.0) + self.lin_ji.bias.data.fill_(0) + + glorot_orthogonal(self.lin_down.weight, scale=2.0) + glorot_orthogonal(self.lin_up.weight, scale=2.0) + + for res_layer in self.layers_before_skip: + res_layer.reset_parameters() + glorot_orthogonal(self.lin.weight, scale=2.0) + self.lin.bias.data.fill_(0) + for res_layer in self.layers_after_skip: + res_layer.reset_parameters() + + def forward(self, x, rbf, sbf, idx_kj, idx_ji): + # Initial transformations. + x_ji = self.act(self.lin_ji(x)) + x_kj = self.act(self.lin_kj(x)) + + # Transformation via Bessel basis. + rbf = self.lin_rbf1(rbf) + rbf = self.lin_rbf2(rbf) + x_kj = x_kj * rbf + + # Down-project embeddings and generate interaction triplet embeddings. + x_kj = self.act(self.lin_down(x_kj)) + + # Transform via 2D spherical basis. + sbf = self.lin_sbf1(sbf) + sbf = self.lin_sbf2(sbf) + x_kj = x_kj[idx_kj] * sbf + + # Aggregate interactions and up-project embeddings. + x_kj = scatter(x_kj, idx_ji, dim=0, dim_size=x.size(0)) + x_kj = self.act(self.lin_up(x_kj)) + + h = x_ji + x_kj + for layer in self.layers_before_skip: + h = layer(h) + h = self.act(self.lin(h)) + x + for layer in self.layers_after_skip: + h = layer(h) + + return h + + +class EHOutputPPBlock(torch.nn.Module): + def __init__( + self, + num_radial, + hidden_channels, + out_emb_channels, + out_channels, + num_layers, + energy_head, + act=swish, + ): + super(EHOutputPPBlock, self).__init__() + self.act = act + self.energy_head = energy_head + + self.lin_rbf = nn.Linear(num_radial, hidden_channels, bias=False) + self.lin_up = nn.Linear(hidden_channels, out_emb_channels, bias=True) + self.lins = torch.nn.ModuleList() + for _ in range(num_layers): + self.lins.append(nn.Linear(out_emb_channels, out_emb_channels)) + self.lin = nn.Linear(out_emb_channels, out_channels, bias=False) + + # weighted average & pooling + if self.energy_head in {"pooling", "random"}: + self.hierarchical_pooling = Hierarchical_Pooling( + hidden_channels, + self.act, + NUM_POOLING_LAYERS, + NUM_CLUSTERS, + self.energy_head, + ) + elif self.energy_head == "graclus": + self.graclus = Graclus(hidden_channels, self.act) + elif self.energy_head == "weighted-av-final-embeds": + self.w_lin = Linear(hidden_channels, 1) + + self.reset_parameters() + + def reset_parameters(self): + glorot_orthogonal(self.lin_rbf.weight, scale=2.0) + glorot_orthogonal(self.lin_up.weight, scale=2.0) + for lin in self.lins: + glorot_orthogonal(lin.weight, scale=2.0) + lin.bias.data.fill_(0) + self.lin.weight.data.fill_(0) + if self.energy_head == "weighted-av-final-embeds": + self.w_lin.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.w_lin.weight) + + def forward(self, x, rbf, i, edge_index, edge_weight, batch, num_nodes=None): + x = self.lin_rbf(rbf) * x + x = scatter(x, i, dim=0, dim_size=num_nodes) + + pooling_loss = None + if 
self.energy_head == "weighted-av-final-embeds": + alpha = self.w_lin(x) + elif self.energy_head == "graclus": + x, batch = self.graclus(x, edge_index, edge_weight, batch) + elif self.energy_head in {"pooling", "random"}: + x, batch, pooling_loss = self.hierarchical_pooling( + x, edge_index, edge_weight, batch + ) + + x = self.lin_up(x) + for lin in self.lins: + x = self.act(lin(x)) + x = self.lin(x) + + if self.energy_head == "weighted-av-final-embeds": + x = x * alpha + + return x, pooling_loss, batch + + +class OutputPPBlock(torch.nn.Module): + def __init__( + self, + num_radial, + hidden_channels, + out_emb_channels, + out_channels, + num_layers, + act=swish, + ): + super(OutputPPBlock, self).__init__() + self.act = act + + self.lin_rbf = nn.Linear(num_radial, hidden_channels, bias=False) + self.lin_up = nn.Linear(hidden_channels, out_emb_channels, bias=True) + self.lins = torch.nn.ModuleList() + for _ in range(num_layers): + self.lins.append(nn.Linear(out_emb_channels, out_emb_channels)) + self.lin = nn.Linear(out_emb_channels, out_channels, bias=False) + + self.reset_parameters() + + def reset_parameters(self): + glorot_orthogonal(self.lin_rbf.weight, scale=2.0) + glorot_orthogonal(self.lin_up.weight, scale=2.0) + for lin in self.lins: + glorot_orthogonal(lin.weight, scale=2.0) + lin.bias.data.fill_(0) + self.lin.weight.data.fill_(0) + + def forward(self, x, rbf, i, num_nodes=None): + x = self.lin_rbf(rbf) * x + x = scatter(x, i, dim=0, dim_size=num_nodes) + x = self.lin_up(x) + for lin in self.lins: + x = self.act(lin(x)) + return self.lin(x) + + +@registry.register_model("adpp") +class ADPP(BaseModel): + r"""DimeNet++ implementation based on https://github.com/klicperajo/dimenet. + + Args: + hidden_channels (int): Hidden embedding size. + tag_hidden_channels (int): tag embedding size + pg_hidden_channels (int): period & group embedding size + phys_hidden_channels (int): MLP hidden size for physics embedding + phys_embeds (bool): whether we use physics embeddings or not + graph_rewiring (str): name of rewiring method. Default=False. + out_channels (int): Size of each output sample. + num_blocks (int): Number of building blocks. + int_emb_size (int): Embedding size used for interaction triplets + basis_emb_size (int): Embedding size used in the basis transformation + out_emb_channels(int): Embedding size used for atoms in the output block + num_spherical (int): Number of spherical harmonics. + num_radial (int): Number of radial basis functions. + cutoff: (float, optional): Cutoff distance for interatomic + interactions. (default: :obj:`5.0`) + use_pbc (bool, optional): Use of periodic boundary conditions. + (default: true) + otf_graph (bool, optional): Recompute radius graph. + (default: false) + envelope_exponent (int, optional): Shape of the smooth cutoff. + (default: :obj:`5`) + num_before_skip: (int, optional): Number of residual layers in the + interaction blocks before the skip connection. (default: :obj:`1`) + num_after_skip: (int, optional): Number of residual layers in the + interaction blocks after the skip connection. (default: :obj:`2`) + num_output_layers: (int, optional): Number of linear layers for the + output blocks. (default: :obj:`3`) + act: (function, optional): The activation function. + (default: :obj:`swish`) + regress_forces: (bool, optional): Compute atom forces from energy. + (default: false). 
+ """ + + url = "https://github.com/klicperajo/dimenet/raw/master/pretrained" + + def __init__(self, **kwargs): + super().__init__() + + kwargs["num_targets"] = kwargs["hidden_channels"] // 2 + + self.cutoff = kwargs["cutoff"] + self.use_pbc = kwargs["use_pbc"] + self.otf_graph = kwargs["otf_graph"] + self.regress_forces = kwargs["regress_forces"] + self.energy_head = kwargs["energy_head"] + use_tag = kwargs["tag_hidden_channels"] > 0 + use_pg = kwargs["pg_hidden_channels"] > 0 + act = ( + getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish + ) + + assert ( + kwargs["tag_hidden_channels"] + 2 * kwargs["pg_hidden_channels"] + 16 + < kwargs["hidden_channels"] + ) + if sym is None: + raise ImportError("Package `sympy` could not be found.") + + self.rbf_ads = BesselBasisLayer( + kwargs["num_radial"], self.cutoff, kwargs["envelope_exponent"] + ) + self.rbf_cat = BesselBasisLayer( + kwargs["num_radial"], self.cutoff, kwargs["envelope_exponent"] + ) + self.sbf_ads = SphericalBasisLayer( + kwargs["num_spherical"], + kwargs["num_radial"], + self.cutoff, + kwargs["envelope_exponent"], + ) + self.sbf_cat = SphericalBasisLayer( + kwargs["num_spherical"], + kwargs["num_radial"], + self.cutoff, + kwargs["envelope_exponent"], + ) + # Disconnected interaction embedding + self.distance_expansion_disc = GaussianSmearing( + 0.0, 20.0, 100 + ) + self.disc_edge_embed = Linear(100, kwargs["hidden_channels"]) + + if use_tag or use_pg or kwargs["phys_embeds"] or kwargs["graph_rewiring"]: + self.emb_ads = AdvancedEmbeddingBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["tag_hidden_channels"], + kwargs["pg_hidden_channels"], + kwargs["phys_hidden_channels"], + kwargs["phys_embeds"], + kwargs["graph_rewiring"], + act, + ) + self.emb_cat = AdvancedEmbeddingBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["tag_hidden_channels"], + kwargs["pg_hidden_channels"], + kwargs["phys_hidden_channels"], + kwargs["phys_embeds"], + kwargs["graph_rewiring"], + act, + ) + else: + self.emb_ads = EmbeddingBlock( + kwargs["num_radial"], kwargs["hidden_channels"], act + ) + self.emb_cat = EmbeddingBlock( + kwargs["num_radial"], kwargs["hidden_channels"], act + ) + + if self.energy_head: + self.output_blocks_ads = torch.nn.ModuleList( + [ + EHOutputPPBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["out_emb_channels"], + kwargs["num_targets"], + kwargs["num_output_layers"], + self.energy_head, + act, + ) + for _ in range(kwargs["num_blocks"] + 1) + ] + ) + self.output_blocks_cat = torch.nn.ModuleList( + [ + EHOutputPPBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["out_emb_channels"], + kwargs["num_targets"], + kwargs["num_output_layers"], + self.energy_head, + act, + ) + for _ in range(kwargs["num_blocks"] + 1) + ] + ) + else: + self.output_blocks_ads = torch.nn.ModuleList( + [ + OutputPPBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["out_emb_channels"], + kwargs["num_targets"], + kwargs["num_output_layers"], + act, + ) + for _ in range(kwargs["num_blocks"] + 1) + ] + ) + self.output_blocks_cat = torch.nn.ModuleList( + [ + OutputPPBlock( + kwargs["num_radial"], + kwargs["hidden_channels"], + kwargs["out_emb_channels"], + kwargs["num_targets"], + kwargs["num_output_layers"], + act, + ) + for _ in range(kwargs["num_blocks"] + 1) + ] + ) + + self.interaction_blocks_ads = torch.nn.ModuleList( + [ + InteractionPPBlock( + kwargs["hidden_channels"], + kwargs["int_emb_size"], + kwargs["basis_emb_size"], + 
kwargs["num_spherical"], + kwargs["num_radial"], + kwargs["num_before_skip"], + kwargs["num_after_skip"], + act, + ) + for _ in range(kwargs["num_blocks"]) + ] + ) + self.interaction_blocks_cat = torch.nn.ModuleList( + [ + InteractionPPBlock( + kwargs["hidden_channels"], + kwargs["int_emb_size"], + kwargs["basis_emb_size"], + kwargs["num_spherical"], + kwargs["num_radial"], + kwargs["num_before_skip"], + kwargs["num_after_skip"], + act, + ) + for _ in range(kwargs["num_blocks"]) + ] + ) + self.inter_interactions = torch.nn.ModuleList( + [ + GATInteraction( + kwargs["hidden_channels"], + kwargs["gat_mode"], + kwargs["hidden_channels"] + ) + for _ in range(kwargs["num_blocks"]) + ] + ) + + if self.energy_head == "weighted-av-initial-embeds": + self.w_lin_ads = Linear(kwargs["hidden_channels"], 1) + self.w_lin_cat = Linear(kwargs["hidden_channels"], 1) + + self.task = kwargs["task_name"] + + self.reset_parameters() + + def reset_parameters(self): + self.rbf_ads.reset_parameters() + self.rbf_cat.reset_parameters() + self.emb_ads.reset_parameters() + self.emb_cat.reset_parameters() + for out in self.output_blocks_ads: + out.reset_parameters() + for out in self.output_blocks_cat: + out.reset_parameters() + for interaction in self.interaction_blocks_ads: + interaction.reset_parameters() + for interaction in self.interaction_blocks_cat: + interaction.reset_parameters() + if self.energy_head == "weighted-av-initial-embeds": + self.w_lin_ads.bias.data.fill_(0) + self.w_lin_cat.bias.data.fill_(0) + torch.nn.init.xavier_uniform_(self.w_lin.weight) + + def triplets(self, edge_index, cell_offsets, num_nodes): + row, col = edge_index # j->i + + value = torch.arange(row.size(0), device=row.device) + adj_t = SparseTensor( + row=col, col=row, value=value, sparse_sizes=(num_nodes, num_nodes) + ) + adj_t_row = adj_t[row] + num_triplets = adj_t_row.set_value(None).sum(dim=1).to(torch.long) + + # Node indices (k->j->i) for triplets. + idx_i = col.repeat_interleave(num_triplets) + idx_j = row.repeat_interleave(num_triplets) + idx_k = adj_t_row.storage.col() + + # Edge indices (k->j, j->i) for triplets. + idx_kj = adj_t_row.storage.value() + idx_ji = adj_t_row.storage.row() + + # Remove self-loop triplets d->b->d + # Check atom as well as cell offset + cell_offset_kji = cell_offsets[idx_kj] + cell_offsets[idx_ji] + mask = (idx_i != idx_k) | torch.any(cell_offset_kji != 0, dim=-1) + + idx_i, idx_j, idx_k = idx_i[mask], idx_j[mask], idx_k[mask] + idx_kj, idx_ji = idx_kj[mask], idx_ji[mask] + + return col, row, idx_i, idx_j, idx_k, idx_kj, idx_ji + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + import ipdb + ipdb.set_trace() + ( + pos_ads, + edge_index_ads, + cell_ads, + cell_offsets_ads, + neighbors_ads, + batch_ads, + atomic_numbers_ads, + ) = ( + data["adsorbate"].pos, + data["adsorbate", "is_close", "adsorbate"].edge_index, + data["adsorbate"].cell, + data["adsorbate"].cell_offsets, + data["adsorbate"].neighbors, + data["adsorbate"].batch, + data["adsorbate"].atomic_numbers, + ) + ( + pos_cat, + edge_index_cat, + cell_cat, + cell_offsets_cat, + neighbors_cat, + batch_cat, + atomic_numbers_cat, + ) = ( + data["catalyst"].pos, + data["catalyst", "is_close", "catalyst"].edge_index, + data["catalyst"].cell, + data["catalyst"].cell_offsets, + data["catalyst"].neighbors, + data["catalyst"].batch + data["catalyst"].atomic_numbers, + ) + + if self.otf_graph: # NOT IMPLEMENTED!! 
+ edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( + pos, + natoms, + cell, + self.cutoff, + 50 + ) + data.edge_index = edge_index + data.cell_offsets = cell_offsets + data.neighbors = neighbors + + # Rewire the graph + subnodes = False + + if self.use_pbc: + out = get_pbc_distances( + pos_ads, + edge_index_ads, + cell_ads, + cell_offsets_ads, + neighbors_ads, + return_offsets=True, + ) + + edge_index_ads = out["edge_index"] + dist_ads = out["distances"] + offsets_ads = out["offsets"] + + j_ads, i_ads = edge_index_ads + + out = get_pbc_distances( + pos_cat, + edge_index_cat, + cell_cat, + cell_offsets_cat, + neighbors_cat, + return_offsets=True, + ) + + edge_index_cat = out["edge_index"] + dist_cat = out["distances"] + offsets_cat = out["offsets"] + + j_cat, i_cat = edge_index_cat + else: # NOT IMPLEMENTED + edge_index = radius_graph(pos, r=self.cutoff, batch=batch) + j, i = edge_index + dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt() + + _, _, idx_i_ads, idx_j_ads, idx_k_ads, idx_kj_ads, idx_ji_ads = self.triplets( + edge_index_ads, + cell_offsets_ads, + num_nodes=atomic_numbers_ads.size(0), + ) + _, _, idx_i_cat, idx_j_cat, idx_k_cat, idx_kj_cat, idx_ji_cat = self.triplets( + edge_index_cat, + cell_offsets_cat, + num_nodes=atomic_numbers_cat.size(0), + ) + + # Calculate angles. + pos_i_ads = pos_ads[idx_i_ads].detach() + pos_j_ads = pos_ads[idx_j_ads].detach() + + pos_i_cat = pos_cat[idx_i_cat].detach() + pos_j_cat = pos_cat[idx_j_cat].detach() + if self.use_pbc: + pos_ji_ads, pos_kj_ads = ( + pos_ads[idx_j_ads].detach() - pos_i_ads + offsets_ads[idx_ji_ads], + pos_ads[idx_k_ads].detach() - pos_j_ads + offsets_ads[idx_kj_ads], + ) + pos_ji_cat, pos_kj_cat = ( + pos_cat[idx_j_cat].detach() - pos_i_cat + offsets_cat[idx_ji_cat], + pos_cat[idx_k_cat].detach() - pos_j_cat + offsets_cat[idx_kj_cat], + ) + else: # NOT IMPLEMENTED + pos_ji, pos_kj = ( + pos[idx_j].detach() - pos_i, + pos[idx_k].detach() - pos_j, + ) + + a_ads = (pos_ji_ads * pos_kj_ads).sum(dim=-1) + b_ads = torch.cross(pos_ji_ads, pos_kj_ads).norm(dim=-1) + angle_ads = torch.atan2(b_ads, a_ads) + + a_cat = (pos_ji_cat * pos_kj_cat).sum(dim=-1) + b_cat = torch.cross(pos_ji_cat, pos_kj_cat).norm(dim=-1) + angle_cat = torch.atan2(b_cat, a_cat) + + rbf_ads = self.rbf_ads(dist_ads) + sbf_ads = self.sbf_ads(dist_ads, angle_ads, idx_kj_ads) + + rbf_cat = self.rbf_cat(dist_cat) + sbf_cat = self.sbf_cat(dist_cat, angle_cat, idx_kj_cat) + + pooling_loss = None # deal with pooling loss + + + # IMPLEMENTED UP TO HERE. DAMN I WANT TO BE DONE ALREADY + + + # Embedding block. + x = self.emb(data.atomic_numbers.long(), rbf, i, j, data.tags, data.subnodes) + if self.energy_head: + P, pooling_loss, batch = self.output_blocks[0]( + x, rbf, i, edge_index, dist, data.batch, num_nodes=pos.size(0) + ) + else: + P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0)) + + if self.energy_head == "weighted-av-initial-embeds": + alpha = self.w_lin(scatter(x, i, dim=0, dim_size=pos.size(0))) + + # Interaction blocks. 
+ + energy_Ps = [] + + for interaction_block, output_block in zip( + self.interaction_blocks, self.output_blocks[1:] + ): + x = interaction_block(x, rbf, sbf, idx_kj, idx_ji) + if self.energy_head: + P_bis, pooling_loss_bis, _ = output_block( + x, rbf, i, edge_index, dist, data.batch, num_nodes=pos.size(0) + ) + energy_Ps.append( + P_bis.sum(0) / len(P) + if batch is None + else scatter(P_bis, batch, dim=0) + ) + if pooling_loss_bis is not None: + pooling_loss += pooling_loss_bis + else: + P += output_block(x, rbf, i, num_nodes=pos.size(0)) + + P_bis = sum(energy_Ps or [0]) + + if self.energy_head == "weighted-av-initial-embeds": + P = P * alpha + + # Output + # scatter + energy = self.scattering(batch, P, P_bis) + + return { + "energy": energy, + "pooling_loss": pooling_loss, + } + + def scattering(self, batch, P, P_bis): + energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0) + energy = energy + P_bis + + return energy + + @conditional_grad(torch.enable_grad()) + def forces_forward(self, preds): + return + + @property + def num_params(self): + return sum(p.numel() for p in self.parameters()) diff --git a/ocpmodels/models/depdpp.py b/ocpmodels/models/depdpp.py new file mode 100644 index 0000000000..d3c6784e4a --- /dev/null +++ b/ocpmodels/models/depdpp.py @@ -0,0 +1,46 @@ +import torch +from torch.nn import Linear +from torch_scatter import scatter + +from ocpmodels.models.dimenet_plus_plus import DimeNetPlusPlus +from ocpmodels.common.registry import registry +from ocpmodels.common.utils import conditional_grad + +from torch_geometric.data import Batch + +@registry.register_model("depdpp") +class depSchNet(DimeNetPlusPlus): + def __init__(self, **kwargs): + self.hidden_channels = kwargs["hidden_channels"] + + kwargs["num_targets"] = kwargs["hidden_channels"] // 2 + super().__init__(**kwargs) + + self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) + self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + + @conditional_grad(torch.enable_grad()) + def energy_forward(self, data): + # We need to save the tags so this step is necessary. 
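+        # (In OC20, tag 2 marks adsorbate atoms; `scattering` below relies on
+        # the saved tags to pool adsorbate and catalyst atoms separately.)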
+ self.tags_saver(data.tags) + pred = super().energy_forward(data) + + return pred + + def tags_saver(self, tags): + self.current_tags = tags + + @conditional_grad(torch.enable_grad()) + def scattering(self, batch, h, P_bis): + ads = self.current_tags == 2 + cat = ~ads + + ads_out = scatter(h, batch * ads, dim=0) + cat_out = scatter(h, batch * cat, dim=0) + + system = torch.cat([ads_out, cat_out], dim = 1) + system = self.sys_lin1(system) + system = self.sys_lin2(system) + system = system + P_bis + + return system diff --git a/ocpmodels/models/dimenet_plus_plus.py b/ocpmodels/models/dimenet_plus_plus.py index c3dec2989b..03570b3faa 100644 --- a/ocpmodels/models/dimenet_plus_plus.py +++ b/ocpmodels/models/dimenet_plus_plus.py @@ -726,14 +726,19 @@ def energy_forward(self, data): # Output # scatter - energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0) - energy = energy + P_bis + energy = self.scattering(batch, P, P_bis) return { "energy": energy, "pooling_loss": pooling_loss, } + def scattering(self, batch, P, P_bis): + energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0) + energy = energy + P_bis + + return energy + @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): return diff --git a/ocpmodels/models/inddpp.py b/ocpmodels/models/inddpp.py new file mode 100644 index 0000000000..95e9f1180c --- /dev/null +++ b/ocpmodels/models/inddpp.py @@ -0,0 +1,52 @@ +import torch, math +from torch import nn +from torch.nn import Linear, Transformer + +from ocpmodels.models.dimenet_plus_plus import DimeNetPlusPlus +from ocpmodels.models.base_model import BaseModel +from ocpmodels.common.registry import registry +from ocpmodels.models.utils.activations import swish + +from torch_geometric.data import Batch + +@registry.register_model("inddpp") +class indDimeNetPlusPlus(BaseModel): # Change to make it inherit from base model. + def __init__(self, **kwargs): + super().__init__() + + self.regress_forces = kwargs["regress_forces"] + kwargs["num_targets"] = kwargs["hidden_channels"] // 2 + + self.ads_model = DimeNetPlusPlus(**kwargs) + self.cat_model = DimeNetPlusPlus(**kwargs) + + self.act = swish + self.combination = nn.Sequential( + Linear(kwargs["hidden_channels"] // 2 * 2, kwargs["hidden_channels"] // 2), + self.act, + Linear(kwargs["hidden_channels"] // 2, 1) + ) + + def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
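+        # Each DimeNet++ branch returns a hidden_channels // 2 energy embedding
+        # per graph; self.combination maps the concatenated pair to one energy,
+        # and the pooling losses are summed when present.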
+ adsorbates = data[0] + catalysts = data[1] + + # We make predictions for each + pred_ads = self.ads_model(adsorbates, mode) + pred_cat = self.cat_model(catalysts, mode) + + ads_energy = pred_ads["energy"] + cat_energy = pred_cat["energy"] + + # We combine predictions + system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = self.combination(system_energy) + + # We return them + pred_system = { + "energy" : system_energy, + "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + } + + return pred_system diff --git a/ocpmodels/models/indschnet.py b/ocpmodels/models/indschnet.py index 0c638243f5..e160df99ba 100644 --- a/ocpmodels/models/indschnet.py +++ b/ocpmodels/models/indschnet.py @@ -3,7 +3,6 @@ from torch.nn import Linear, Transformer from ocpmodels.models.schnet import SchNet -from ocpmodels.models.faenet import OutputBlock from ocpmodels.models.base_model import BaseModel from ocpmodels.common.registry import registry from ocpmodels.models.utils.activations import swish diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index b9fa3fc313..8dd404ea32 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -154,7 +154,7 @@ def __init__(self, **kwargs): (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) # Here's the models whose edges are removed as a transform - transform_models = ["depfaenet", "depschnet", "depgemnet_oc", "depgemnet_t"] + transform_models = ["depfaenet", "depschnet", "depgemnet_oc", "depgemnet_t", "depdpp"] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") if self.config["model_name"] in transform_models: @@ -164,8 +164,8 @@ def __init__(self, **kwargs): self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc", "indgemnet_t"] - self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc", "agemnet_t"] + self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc", "indgemnet_t", "inddpp"] + self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc", "agemnet_t", "adpp"] self.data_mode = "normal" self.separate_dataset = False From 6ea3ed20488782ecba611424ffe5596962c7fe55 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sat, 16 Sep 2023 13:03:08 -0400 Subject: [PATCH 108/131] IMPLEMENTED ALL DPP VARIANTS --- configs/exps/alvaro/dpp-config.yaml | 6 +- ocpmodels/models/adpp.py | 118 ++++++++++++++++++++-------- ocpmodels/models/aschnet.py | 2 + 3 files changed, 92 insertions(+), 34 deletions(-) diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index 229209097f..e017044e97 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -27,6 +27,6 @@ runs: - config: inddpp-is2re-all - #- config: aschnet-is2re-all - #model: - #gat_mode: v1 + - config: aschnet-is2re-all + model: + gat_mode: v1 diff --git a/ocpmodels/models/adpp.py b/ocpmodels/models/adpp.py index 75bc062b6c..b86401629e 100644 --- a/ocpmodels/models/adpp.py +++ b/ocpmodels/models/adpp.py @@ -448,6 +448,7 @@ def __init__(self, **kwargs): super().__init__() kwargs["num_targets"] = kwargs["hidden_channels"] // 2 + self.act = swish self.cutoff = kwargs["cutoff"] self.use_pbc = kwargs["use_pbc"] @@ -624,6 +625,12 @@ def __init__(self, **kwargs): self.task = kwargs["task_name"] + self.combination = 
nn.Sequential( + Linear(kwargs["hidden_channels"] // 2 * 2, kwargs["hidden_channels"] // 2), + self.act, + Linear(kwargs["hidden_channels"] // 2, 1) + ) + self.reset_parameters() def reset_parameters(self): @@ -675,8 +682,6 @@ def triplets(self, edge_index, cell_offsets, num_nodes): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - import ipdb - ipdb.set_trace() ( pos_ads, edge_index_ads, @@ -685,6 +690,7 @@ def energy_forward(self, data): neighbors_ads, batch_ads, atomic_numbers_ads, + tags_ads, ) = ( data["adsorbate"].pos, data["adsorbate", "is_close", "adsorbate"].edge_index, @@ -693,6 +699,7 @@ def energy_forward(self, data): data["adsorbate"].neighbors, data["adsorbate"].batch, data["adsorbate"].atomic_numbers, + data["adsorbate"].tags, ) ( pos_cat, @@ -702,14 +709,16 @@ def energy_forward(self, data): neighbors_cat, batch_cat, atomic_numbers_cat, + tags_cat, ) = ( data["catalyst"].pos, data["catalyst", "is_close", "catalyst"].edge_index, data["catalyst"].cell, data["catalyst"].cell_offsets, data["catalyst"].neighbors, - data["catalyst"].batch + data["catalyst"].batch, data["catalyst"].atomic_numbers, + data["catalyst"].tags, ) if self.otf_graph: # NOT IMPLEMENTED!! @@ -810,61 +819,108 @@ def energy_forward(self, data): pooling_loss = None # deal with pooling loss + # Embedding block. + x_ads = self.emb_ads(atomic_numbers_ads.long(), rbf_ads, i_ads, j_ads, tags_ads, subnodes) + if self.energy_head: + P_ads, pooling_loss, batch_ads = self.output_blocks_ads[0]( + x_ads, rbf_ads, i_ads, edge_index_ads, dist_ads, batch_ads, num_nodes=pos_ads.size(0) + ) + else: + P_ads = self.output_blocks_ads[0](x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0)) - # IMPLEMENTED UP TO HERE. DAMN I WANT TO BE DONE ALREADY - + if self.energy_head == "weighted-av-initial-embeds": + alpha_ads = self.w_lin_ads(scatter(x_ads, i_ads, dim=0, dim_size=pos_ads.size(0))) - # Embedding block. - x = self.emb(data.atomic_numbers.long(), rbf, i, j, data.tags, data.subnodes) + x_cat = self.emb_cat(atomic_numbers_cat.long(), rbf_cat, i_cat, j_cat, tags_cat, subnodes) if self.energy_head: - P, pooling_loss, batch = self.output_blocks[0]( - x, rbf, i, edge_index, dist, data.batch, num_nodes=pos.size(0) + P_cat, pooling_loss, batch_cat = self.output_blocks_cat[0]( + x_cat, rbf_cat, i_cat, edge_index_cat, dist_cat, batch_cat, num_nodes=pos_cat.size(0) ) else: - P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0)) + P_cat = self.output_blocks_cat[0](x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0)) if self.energy_head == "weighted-av-initial-embeds": - alpha = self.w_lin(scatter(x, i, dim=0, dim_size=pos.size(0))) + alpha_cat = self.w_lin_cat(scatter(x_cat, i_cat, dim=0, dim_size=pos_cat.size(0))) + + edge_weights = self.distance_expansion_disc(data["is_disc"].edge_weight) + edge_weights = self.disc_edge_embed(edge_weights) # Interaction blocks. 
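+        # Each block: DimeNet++ message passing within the adsorbate and
+        # catalyst graphs, a GATInteraction exchange across the
+        # adsorbate-catalyst edges in data["is_disc"], then a residual add
+        # and L2 normalization of the embeddings.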
+ energy_Ps_ads = [] + energy_Ps_cat = [] + + for ( + interaction_block_ads, + interaction_block_cat, + output_block_ads, + output_block_cat, + disc_interaction, + ) in zip( + self.interaction_blocks_ads, + self.interaction_blocks_cat, + self.output_blocks_ads[1:], + self.output_blocks_cat[1:], + self.inter_interactions, + ): + intra_ads = interaction_block_ads(x_ads, rbf_ads, sbf_ads, idx_kj_ads, idx_ji_ads) + intra_cat = interaction_block_cat(x_cat, rbf_cat, sbf_cat, idx_kj_cat, idx_ji_cat) + + inter_ads, inter_cat = disc_interaction( + intra_ads, + intra_cat, + data["is_disc"].edge_index, + edge_weights + ) - energy_Ps = [] + x_ads, x_cat = x_ads + inter_ads, x_cat + inter_cat + x_ads, x_cat = nn.functional.normalize(x_ads), nn.functional.normalize(x_cat) - for interaction_block, output_block in zip( - self.interaction_blocks, self.output_blocks[1:] - ): - x = interaction_block(x, rbf, sbf, idx_kj, idx_ji) if self.energy_head: - P_bis, pooling_loss_bis, _ = output_block( - x, rbf, i, edge_index, dist, data.batch, num_nodes=pos.size(0) + P_bis_ads, pooling_loss_bis_ads, _ = output_block_ads( + x_ads, rbf_ads, i_ads, edge_index_ads, dist_ads, batch_ads, num_nodes=pos_ads.size(0) ) - energy_Ps.append( - P_bis.sum(0) / len(P) - if batch is None - else scatter(P_bis, batch, dim=0) + energy_Ps_ads.append( + P_bis_ads.sum(0) / len(P) + if batch_ads is None + else scatter(P_bis_ads, batch_ads, dim=0) ) - if pooling_loss_bis is not None: - pooling_loss += pooling_loss_bis - else: - P += output_block(x, rbf, i, num_nodes=pos.size(0)) + if pooling_loss_bis_ads is not None: + pooling_loss += pooling_loss_bis_ads - P_bis = sum(energy_Ps or [0]) + P_bis_cat, pooling_loss_bis_cat, _ = output_block_cat( + x_cat, rbf_cat, i_cat, edge_index_cat, dist_cat, batch_cat, num_nodes=pos_cat.size(0) + ) + energy_Ps_cat.append( + P_bis_cat.sum(0) / len(P) + if batch_cat is None + else scatter(P_bis_cat, batch_cat, dim=0) + ) + if pooling_loss_bis_cat is not None: + pooling_loss += pooling_loss_bis_cat + else: + P_ads += output_block_ads(x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0)) + P_cat += output_block_cat(x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0)) if self.energy_head == "weighted-av-initial-embeds": P = P * alpha + import ipdb + ipdb.set_trace() + # Output # scatter - energy = self.scattering(batch, P, P_bis) + energy_ads = self.scattering(batch_ads, P_ads) + energy_cat = self.scattering(batch_cat, P_cat) + energy = torch.cat([energy_ads, energy_cat], dim=1) + energy = self.combination(energy) return { "energy": energy, "pooling_loss": pooling_loss, } - def scattering(self, batch, P, P_bis): - energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0) - energy = energy + P_bis + def scattering(self, batch, P, P_bis=0): + energy = scatter(P, batch, dim=0, reduce="add") return energy diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py index 0b3fb35f06..3baf364793 100644 --- a/ocpmodels/models/aschnet.py +++ b/ocpmodels/models/aschnet.py @@ -23,6 +23,7 @@ from ocpmodels.models.utils.pos_encodings import PositionalEncoding from ocpmodels.modules.phys_embeddings import PhysEmbedding from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling +from ocpmodels.models.utils.activations import swish from ocpmodels.models.schnet import ( InteractionBlock, CFConv, @@ -236,6 +237,7 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(self.hidden_channels, self.hidden_channels // 2), + swish, Linear(kwargs["hidden_channels"] // 2, 1) ) From 
1520a0aefc5525e63af07668ac4ea08a8eceae9b Mon Sep 17 00:00:00 2001 From: alvaro Date: Sat, 16 Sep 2023 19:48:55 -0400 Subject: [PATCH 109/131] modifications to config files --- configs/exps/alvaro/dpp-config.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index e017044e97..62a2922df7 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -20,13 +20,13 @@ default: runs: #- config: dpp-is2re-all - - config: dpp-is2re-all - is_disconnected: True + # - config: dpp-is2re-all + # is_disconnected: True + + # - config: depdpp-is2re-all + + # - config: inddpp-is2re-all - - config: depdpp-is2re-all - - - config: inddpp-is2re-all - - - config: aschnet-is2re-all + - config: adpp-is2re-all model: gat_mode: v1 From 1f605df3e13af0f332bc7a77d1d986ee5beaa562 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 17 Sep 2023 14:39:36 -0400 Subject: [PATCH 110/131] removed an ipdb --- ocpmodels/models/adpp.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ocpmodels/models/adpp.py b/ocpmodels/models/adpp.py index b86401629e..bf77349c9e 100644 --- a/ocpmodels/models/adpp.py +++ b/ocpmodels/models/adpp.py @@ -904,9 +904,6 @@ def energy_forward(self, data): if self.energy_head == "weighted-av-initial-embeds": P = P * alpha - import ipdb - ipdb.set_trace() - # Output # scatter energy_ads = self.scattering(batch_ads, P_ads) From 76e55cb895b6c5f871bfd91af76f0be750158970 Mon Sep 17 00:00:00 2001 From: alvaro Date: Tue, 19 Sep 2023 21:00:09 -0400 Subject: [PATCH 111/131] good config file included --- configs/exps/alvaro/faenet-training.yaml | 1 - configs/exps/alvaro/top-configs.yaml | 120 +++++++++++++++++++++++ 2 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 configs/exps/alvaro/top-configs.yaml diff --git a/configs/exps/alvaro/faenet-training.yaml b/configs/exps/alvaro/faenet-training.yaml index f124a63c66..62f56573f2 100644 --- a/configs/exps/alvaro/faenet-training.yaml +++ b/configs/exps/alvaro/faenet-training.yaml @@ -13,7 +13,6 @@ default: test_ri: True mode: train graph_rewiring: remove-tag-0 - optim: cp_data_to_tmpdir: true wandb-tags: 'best-config-??' # Insert what model you're running if running one by one. 
frame_averaging: 2D diff --git a/configs/exps/alvaro/top-configs.yaml b/configs/exps/alvaro/top-configs.yaml new file mode 100644 index 0000000000..cf4e79fe4c --- /dev/null +++ b/configs/exps/alvaro/top-configs.yaml @@ -0,0 +1,120 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + model: + edge_embed_type: all_rij + wandb_tags: "best-config" + optim: + batch_size: 256 + eval_batch_size: 256 + cp_data_to_tmpdir: True + +runs: + - config: faenet-is2re-all # 2700544 + note: "top-runs" + frame_averaging: 2D + fa_method: se3-random + model: + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 384 + num_filters: 480 + num_gaussians: 104 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 6.0 + optim: + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 12 + eval_every: 0.25 + + - config: faenet-is2re-all # 2700544 + note: "top-runs" + frame_averaging: 2D + fa_method: random + model: + mp_type: base + phys_embeds: True + tag_hidden_channels: 64 + pg_hidden_channels: 64 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 384 + num_filters: 480 + num_gaussians: 104 + num_interactions: 5 + second_layer_MLP: False + skip_co: concatai + cutoff: 6.0 + max_num_neighbors: 40 + optim: + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 12 + eval_every: 0.25 + + - config: faenet-is2re-all # 2700544 + note: "top-run eval every epoch" + frame_averaging: 2D + fa_method: se3-random + model: + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 352 + num_filters: 288 + num_gaussians: 68 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 4.0 + optim: + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 12 + eval_every: 1 + + - config: faenet-is2re-all # 2700544 + note: "top-run" + frame_averaging: 2D + fa_method: se3-random + model: + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 352 + num_filters: 288 + num_gaussians: 68 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 4.0 + optim: + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 9 + eval_every: 0.4 \ No newline at end of file From e6fe4d4baa8a3464794568c0adc0146bc8356533 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 24 Sep 2023 14:00:12 -0400 Subject: [PATCH 112/131] Fixed dependent models, and attention model --- configs/exps/alvaro/dpp-config.yaml | 14 +-- configs/exps/alvaro/faenet-top-config.yaml | 55 ++++++++++ configs/exps/alvaro/schnet-config.yaml | 16 +-- configs/exps/alvaro/top-configs.yaml | 120 --------------------- debug_faenet.py | 2 +- ocpmodels/models/afaenet.py | 2 +- ocpmodels/models/depdpp.py | 10 +- ocpmodels/models/depfaenet.py | 12 ++- ocpmodels/models/depschnet.py | 9 +- 9 files changed, 95 insertions(+), 145 deletions(-) create mode 100644 configs/exps/alvaro/faenet-top-config.yaml delete mode 100644 configs/exps/alvaro/top-configs.yaml diff --git 
a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index 62a2922df7..e3def57787 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -18,15 +18,15 @@ default: eval_batch_size: 16 runs: - #- config: dpp-is2re-all + # - config: dpp-is2re-10k - # - config: dpp-is2re-all + # - config: dpp-is2re-10k # is_disconnected: True - # - config: depdpp-is2re-all + - config: depdpp-is2re-10k - # - config: inddpp-is2re-all + # - config: inddpp-is2re-10k - - config: adpp-is2re-all - model: - gat_mode: v1 + # - config: adpp-is2re-10k + # model: + # gat_mode: v1 diff --git a/configs/exps/alvaro/faenet-top-config.yaml b/configs/exps/alvaro/faenet-top-config.yaml new file mode 100644 index 0000000000..709ddee33a --- /dev/null +++ b/configs/exps/alvaro/faenet-top-config.yaml @@ -0,0 +1,55 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + wandb_tags: "best-config" + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + note: "top-run" + frame_averaging: 2D + fa_method: se3-random + cp_data_to_tmpdir: True + model: + edge_embed_type: all_rij + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 352 + num_filters: 288 + num_gaussians: 68 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 4.0 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 9 + eval_every: 0.4 + +runs: + # - config: faenet-is2re-10k + + # - config: faenet-is2re-10k + # is_disconnected: True + + - config: depfaenet-is2re-10k + + # - config: indfaenet-is2re-10k + + - config: afaenet-is2re-10k + model: + afaenet_gat_mode: v1 diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index 148eaefa5e..a66d61d761 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -20,15 +20,15 @@ default: warmup_factor: 0.2 runs: - #- config: schnet-is2re-all + # - config: schnet-is2re-10k - #- config: schnet-is2re-all - #is_disconnected: True + # - config: schnet-is2re-10k + # is_disconnected: True - #- config: depschnet-is2re-all + - config: depschnet-is2re-10k - #- config: indschnet-is2re-all + # - config: indschnet-is2re-10k - - config: aschnet-is2re-all - model: - gat_mode: v1 + # - config: aschnet-is2re-10k + # model: + # gat_mode: v1 diff --git a/configs/exps/alvaro/top-configs.yaml b/configs/exps/alvaro/top-configs.yaml deleted file mode 100644 index cf4e79fe4c..0000000000 --- a/configs/exps/alvaro/top-configs.yaml +++ /dev/null @@ -1,120 +0,0 @@ -job: - mem: 32GB - cpus: 4 - gres: gpu:rtx8000:1 - partition: long - time: 15:00:00 - -default: - test_ri: True - mode: train - graph_rewiring: remove-tag-0 - model: - edge_embed_type: all_rij - wandb_tags: "best-config" - optim: - batch_size: 256 - eval_batch_size: 256 - cp_data_to_tmpdir: True - -runs: - - config: faenet-is2re-all # 2700544 - note: "top-runs" - frame_averaging: 2D - fa_method: se3-random - model: - mp_type: updownscale_base - phys_embeds: True - tag_hidden_channels: 32 - pg_hidden_channels: 96 - energy_head: weighted-av-final-embeds - complex_mp: True - graph_norm: True - hidden_channels: 384 - num_filters: 480 - num_gaussians: 104 - num_interactions: 5 - second_layer_MLP: False - skip_co: 
concat - cutoff: 6.0 - optim: - lr_initial: 0.002 - scheduler: LinearWarmupCosineAnnealingLR - max_epochs: 12 - eval_every: 0.25 - - - config: faenet-is2re-all # 2700544 - note: "top-runs" - frame_averaging: 2D - fa_method: random - model: - mp_type: base - phys_embeds: True - tag_hidden_channels: 64 - pg_hidden_channels: 64 - energy_head: weighted-av-final-embeds - complex_mp: True - graph_norm: True - hidden_channels: 384 - num_filters: 480 - num_gaussians: 104 - num_interactions: 5 - second_layer_MLP: False - skip_co: concatai - cutoff: 6.0 - max_num_neighbors: 40 - optim: - lr_initial: 0.002 - scheduler: LinearWarmupCosineAnnealingLR - max_epochs: 12 - eval_every: 0.25 - - - config: faenet-is2re-all # 2700544 - note: "top-run eval every epoch" - frame_averaging: 2D - fa_method: se3-random - model: - mp_type: updownscale_base - phys_embeds: True - tag_hidden_channels: 32 - pg_hidden_channels: 96 - energy_head: weighted-av-final-embeds - complex_mp: True - graph_norm: True - hidden_channels: 352 - num_filters: 288 - num_gaussians: 68 - num_interactions: 5 - second_layer_MLP: False - skip_co: concat - cutoff: 4.0 - optim: - lr_initial: 0.002 - scheduler: LinearWarmupCosineAnnealingLR - max_epochs: 12 - eval_every: 1 - - - config: faenet-is2re-all # 2700544 - note: "top-run" - frame_averaging: 2D - fa_method: se3-random - model: - mp_type: updownscale_base - phys_embeds: True - tag_hidden_channels: 32 - pg_hidden_channels: 96 - energy_head: weighted-av-final-embeds - complex_mp: True - graph_norm: True - hidden_channels: 352 - num_filters: 288 - num_gaussians: 68 - num_interactions: 5 - second_layer_MLP: False - skip_co: concat - cutoff: 4.0 - optim: - lr_initial: 0.002 - scheduler: LinearWarmupCosineAnnealingLR - max_epochs: 9 - eval_every: 0.4 \ No newline at end of file diff --git a/debug_faenet.py b/debug_faenet.py index c22cc2b1e4..f39050f1fb 100644 --- a/debug_faenet.py +++ b/debug_faenet.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "afaenet-is2re-10k" + args.config = "depfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index 47e09800aa..a204b6dd03 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -222,7 +222,7 @@ def __init__(self, **kwargs): else: self.combination = nn.Sequential( Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), - nn.ReLU(), + swish, Linear(kwargs["hidden_channels"] // 2, 1) ) diff --git a/ocpmodels/models/depdpp.py b/ocpmodels/models/depdpp.py index d3c6784e4a..075075b269 100644 --- a/ocpmodels/models/depdpp.py +++ b/ocpmodels/models/depdpp.py @@ -1,10 +1,12 @@ import torch +from torch import nn from torch.nn import Linear from torch_scatter import scatter from ocpmodels.models.dimenet_plus_plus import DimeNetPlusPlus from ocpmodels.common.registry import registry from ocpmodels.common.utils import conditional_grad +from ocpmodels.models.utils.activations import swish from torch_geometric.data import Batch @@ -16,8 +18,12 @@ def __init__(self, **kwargs): kwargs["num_targets"] = kwargs["hidden_channels"] // 2 super().__init__(**kwargs) - self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) - self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + self.act = swish + self.combination = nn.Sequential( + Linear(self.hidden_channels // 2 * 2, 
self.hidden_channels // 2), + self.act, + Linear(self.hidden_channels // 2, 1) + ) @conditional_grad(torch.enable_grad()) def energy_forward(self, data): diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index d4138c26b4..3711d4154b 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -1,11 +1,13 @@ import torch from torch.nn import Linear +from torch import nn from torch_scatter import scatter from ocpmodels.models.faenet import FAENet from ocpmodels.models.faenet import OutputBlock as conOutputBlock from ocpmodels.common.registry import registry from ocpmodels.common.utils import conditional_grad +from ocpmodels.models.utils.activations import swish from torch_geometric.data import Batch @@ -24,8 +26,11 @@ def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): self.cat_lin = Linear(hidden_channels // 2, hidden_channels // 2) # Combines the hidden representation of each to a scalar. - self.sys_lin1 = Linear(hidden_channels // 2 * 2, hidden_channels // 2) - self.sys_lin2 = Linear(hidden_channels // 2, 1) + self.combination = nn.Sequential( + Linear(hidden_channels // 2 * 2, hidden_channels // 2), + swish, + Linear(hidden_channels // 2, 1) + ) def tags_saver(self, tags): self.current_tags = tags @@ -66,8 +71,7 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): system = torch.cat([ads_out, cat_out], dim = 1) # Finally, we predict a number. - system = self.sys_lin1(system) - energy = self.sys_lin2(system) + energy = self.combination(system) return energy diff --git a/ocpmodels/models/depschnet.py b/ocpmodels/models/depschnet.py index 8ddd77aa1c..2e4b748c84 100644 --- a/ocpmodels/models/depschnet.py +++ b/ocpmodels/models/depschnet.py @@ -1,4 +1,5 @@ import torch +from torch import nn from torch.nn import Linear from torch_scatter import scatter @@ -6,6 +7,7 @@ from ocpmodels.models.faenet import OutputBlock as conOutputBlock from ocpmodels.common.registry import registry from ocpmodels.common.utils import conditional_grad +from ocpmodels.models.utils.activations import swish from torch_geometric.data import Batch @@ -19,8 +21,11 @@ def __init__(self, **kwargs): torch.nn.init.xavier_uniform_(self.lin2.weight) self.lin2.bias.data.fill_(0) - self.sys_lin1 = Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2) - self.sys_lin2 = Linear(self.hidden_channels // 2, 1) + self.combination = nn.Sequential( + Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2), + swish, + Linear(self.hidden_channels // 2, 1) + ) @conditional_grad(torch.enable_grad()) def energy_forward(self, data): From a3588f0b81334be9390056a6e0250fd7eda76e5e Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 24 Sep 2023 14:05:54 -0400 Subject: [PATCH 113/131] fixed a mistake on two dep files --- ocpmodels/models/depdpp.py | 3 +-- ocpmodels/models/depschnet.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/ocpmodels/models/depdpp.py b/ocpmodels/models/depdpp.py index 075075b269..5636a77681 100644 --- a/ocpmodels/models/depdpp.py +++ b/ocpmodels/models/depdpp.py @@ -45,8 +45,7 @@ def scattering(self, batch, h, P_bis): cat_out = scatter(h, batch * cat, dim=0) system = torch.cat([ads_out, cat_out], dim = 1) - system = self.sys_lin1(system) - system = self.sys_lin2(system) + system = self.combination(system) system = system + P_bis return system diff --git a/ocpmodels/models/depschnet.py b/ocpmodels/models/depschnet.py index 2e4b748c84..dd20ad50f2 100644 --- a/ocpmodels/models/depschnet.py +++ 
b/ocpmodels/models/depschnet.py @@ -47,7 +47,6 @@ def scattering(self, h, batch): cat_out = scatter(h, batch * cat, dim = 0, reduce = self.readout) system = torch.cat([ads_out, cat_out], dim = 1) - system = self.sys_lin1(system) - system = self.sys_lin2(system) + system = self.combination(system) return system From cb697129997b9642255ec2641be503dc701780d6 Mon Sep 17 00:00:00 2001 From: alvaro Date: Sun, 24 Sep 2023 23:39:10 -0400 Subject: [PATCH 114/131] Changed independent models to have different dimensionalities --- configs/exps/alvaro/dpp-config.yaml | 34 ++++++++++++++++++++-- configs/exps/alvaro/faenet-top-config.yaml | 34 +++++++++++++++++++--- configs/exps/alvaro/schnet-config.yaml | 31 ++++++++++++++++++-- debug.py | 13 ++------- debug_faenet.py | 2 +- debug_schnet.py | 2 +- ocpmodels/models/inddpp.py | 22 ++++++++++++-- ocpmodels/models/indfaenet.py | 15 ++++++++-- ocpmodels/models/indschnet.py | 13 +++++++-- 9 files changed, 138 insertions(+), 28 deletions(-) diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index e3def57787..9a2ede949a 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -23,9 +23,39 @@ runs: # - config: dpp-is2re-10k # is_disconnected: True - - config: depdpp-is2re-10k + # - config: depdpp-is2re-all - # - config: inddpp-is2re-10k + - config: inddpp-is2re-10k + note:so that cat get old dimensions + model: + hidden_channels: 256 + num_spherical: 7 + num_radial: 6 + out_emb_channels: 192 + + - config: inddpp-is2re-10k + note: dimensions both smaller + model: + hidden_channels: 128 + num_spherical: 4 + num_radial: 3 + out_emb_channels: 96 + + - config: inddpp-is2re-10k + note: so that ads get old dimensions + model: + hidden_channels: 512 + num_spherical: 14 + num_radial: 12 + out_emb_channels: 384 + + - config: inddpp-is2re-10k + note: so that their average is old dimensions + model: + hidden_channels: 340 + num_spherical: 9 + num_radial: 8 + out_emb_channels: 256 # - config: adpp-is2re-10k # model: # gat_mode: v1 diff --git a/configs/exps/alvaro/faenet-top-config.yaml b/configs/exps/alvaro/faenet-top-config.yaml index 709ddee33a..2ac480c7fc 100644 --- a/configs/exps/alvaro/faenet-top-config.yaml +++ b/configs/exps/alvaro/faenet-top-config.yaml @@ -46,10 +46,36 @@ runs: # - config: faenet-is2re-10k # is_disconnected: True - - config: depfaenet-is2re-10k + - config: indfaenet-is2re-all + note: so that cat get old dimensions + model: + hidden_channels: 352 + num_gaussians: 99 + num_filters: 448 + + - config: indfaenet-is2re-all + note: dimensions of both smaller + model: + hidden_channels: 176 + num_gaussians: 50 + num_filters: 224 + + - config: indfaenet-is2re-all + note: so that ads get old dimensions + model: + hidden_channels: 704 + num_gaussians: 200 + num_filters: 896 + + - config: indfaenet-is2re-all + note: so that their average is old dimension + model: + hidden_channels: 468 + num_gaussians: 132 + num_filters: 596 # - config: indfaenet-is2re-10k - - config: afaenet-is2re-10k - model: - afaenet_gat_mode: v1 + # - config: afaenet-is2re-all + # model: + # afaenet_gat_mode: v1 diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index a66d61d761..e1ddf5930f 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -18,6 +18,7 @@ default: num_workers: 4 max_epochs: 17 warmup_factor: 0.2 + lr_initial: 0.0005 runs: # - config: schnet-is2re-10k - # - config: schnet-is2re-10k # is_disconnected: True
- - config: depschnet-is2re-10k + # - config: depschnet-is2re-all - # - config: indschnet-is2re-10k + - config: indschnet-is2re-10k + note: so that cat get old dimensions + model: + hidden_channels: 256 + num_filters: 128 + num_gaussians: 100 + + - config: indschnet-is2re-10k + note: dimensions both smaller + model: + hidden_channels: 126 + num_filters: 64 + num_gaussians: 50 + + - config: indschnet-is2re-10k + note: so that ads get old dimensions + model: + hidden_channels: 512 + num_filters: 256 + num_gaussians: 200 + + - config: indschnet-is2re-10k + note: so that their average is old dimensions + model: + hidden_channels: 340 + num_filters: 170 + num_gaussians: 132 # - config: aschnet-is2re-10k # model: diff --git a/debug.py b/debug.py index 887eeaf83b..b374cb59da 100644 --- a/debug.py +++ b/debug.py @@ -92,16 +92,9 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.wandb_name = "alvaro-carbonero-math" args.wandb_project = "ocp-alvaro" - # args.config = "faenet-is2re-all" - args.config = "gemnet_t-is2re-all" - - # args.tag_hidden_channels = 32 - # args.pg_hidden_channels = 32 - # args.phys_embeds = True - # args.otf_graph = False - # args.max_num_neighbors = 40 - # args.hidden_channels = 142 - # args.graph_rewiring = "remove-tag-0" + args.config = "inddpp-is2re-all" + + args.graph_rewiring = "remove-tag-0" trainer_config = build_config(args, override_args) diff --git a/debug_faenet.py b/debug_faenet.py index f39050f1fb..1967d26e37 100644 --- a/debug_faenet.py +++ b/debug_faenet.py @@ -96,7 +96,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.mode = "train" args.graph_rewiring = "remove-tag-0" args.cp_data_to_tmpdir = True - args.config = "depfaenet-is2re-10k" + args.config = "indfaenet-is2re-10k" args.frame_averaging = "2D" args.fa_frames = "se3-random" diff --git a/debug_schnet.py b/debug_schnet.py index a3b618c951..b1fec82ddb 100644 --- a/debug_schnet.py +++ b/debug_schnet.py @@ -102,7 +102,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.max_num_neighbors = 40 args.hidden_channels = 142 args.graph_rewiring = "remove-tag-0" - args.config = "aschnet-is2re-10k" + args.config = "indschnet-is2re-10k" trainer_config = build_config(args, override_args) diff --git a/ocpmodels/models/inddpp.py b/ocpmodels/models/inddpp.py index 95e9f1180c..60365774f7 100644 --- a/ocpmodels/models/inddpp.py +++ b/ocpmodels/models/inddpp.py @@ -17,14 +17,30 @@ def __init__(self, **kwargs): self.regress_forces = kwargs["regress_forces"] kwargs["num_targets"] = kwargs["hidden_channels"] // 2 - self.ads_model = DimeNetPlusPlus(**kwargs) + import ipdb + ipdb.set_trace() + self.cat_model = DimeNetPlusPlus(**kwargs) + old_hc = kwargs["hidden_channels"] + old_sphr = kwargs["num_spherical"] + old_radi = kwargs["num_radial"] + old_out_emb = kwargs["out_emb_channels"] + old_targets = kwargs["num_targets"] + + kwargs["hidden_channels"] = kwargs["hidden_channels"] // 2 + kwargs["num_spherical"] = kwargs["num_spherical"] // 2 + kwargs["num_radial"] = kwargs["num_radial"] // 2 + kwargs["out_emb_channesl"] = kwargs["out_emb_channels"] // 2 + kwargs["num_targets"] = kwargs["num_targets"] // 2 + + self.ads_model = DimeNetPlusPlus(**kwargs) + self.act = swish self.combination = nn.Sequential( - Linear(kwargs["hidden_channels"] // 2 * 2, kwargs["hidden_channels"] // 2), + Linear(kwargs["num_targets"] + old_targets, kwargs["num_targets"] // 2), self.act, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["num_targets"] // 2, 1) ) def 
energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index b54d64a0c5..7f28070fe6 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -38,9 +38,18 @@ def __init__(self, **kwargs): self.regress_forces = kwargs["regress_forces"] - self.ads_model = FAENet(**kwargs) + old_hc = kwargs["hidden_channels"] + old_gaus = kwargs["num_gaussians"] + old_filt = kwargs["num_filters"] + self.cat_model = FAENet(**kwargs) + kwargs["hidden_channels"] = kwargs["hidden_channels"] // 2 + kwargs["num_gaussians"] = kwargs["num_gaussians"] // 2 + kwargs["num_filters"] = kwargs["num_filters"] // 2 + + self.ads_model = FAENet(**kwargs) + self.act = ( getattr(nn.functional, kwargs["act"]) if kwargs["act"] != "swish" else swish ) @@ -69,7 +78,7 @@ def __init__(self, **kwargs): self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) else: self.combination = nn.Sequential( - Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + Linear(kwargs["hidden_channels"] // 2 + old_hc // 2, kwargs["hidden_channels"] // 2), self.act, Linear(kwargs["hidden_channels"] // 2, 1) ) @@ -114,7 +123,7 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION "energy" : system_energy, "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], - "hidden_state" : torch.cat([pred_ads["hidden_state"], pred_cat["hidden_state"]], dim = 0) + "hidden_state" : pred_ads["hidden_state"] } return pred_system diff --git a/ocpmodels/models/indschnet.py b/ocpmodels/models/indschnet.py index e160df99ba..5d793a5158 100644 --- a/ocpmodels/models/indschnet.py +++ b/ocpmodels/models/indschnet.py @@ -37,9 +37,18 @@ def __init__(self, **kwargs): self.regress_forces = kwargs["regress_forces"] - self.ads_model = SchNet(**kwargs) self.cat_model = SchNet(**kwargs) + old_filt = kwargs["num_filters"] + old_gaus = kwargs["num_gaussians"] + old_hc = kwargs["hidden_channels"] + + kwargs["num_filters"] = kwargs["num_filters"] // 2 + kwargs["num_gaussians"] = kwargs["num_gaussians"] // 2 + kwargs["hidden_channels"] = kwargs["hidden_channels"] // 2 + + self.ads_model = SchNet(**kwargs) + self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) @@ -65,7 +74,7 @@ def __init__(self, **kwargs): self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) else: self.combination = nn.Sequential( - Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), + Linear(kwargs["hidden_channels"] // 2 + old_hc // 2, kwargs["hidden_channels"] // 2), self.act, Linear(kwargs["hidden_channels"] // 2, 1) ) From a6e867dcbe29595a3918ffeb17acbc514ff19410 Mon Sep 17 00:00:00 2001 From: alvaro Date: Mon, 25 Sep 2023 20:55:55 -0400 Subject: [PATCH 115/131] Fixed config files --- configs/exps/alvaro/dpp-config.yaml | 10 +++++----- configs/exps/alvaro/schnet-config.yaml | 8 ++++---- ocpmodels/models/inddpp.py | 3 --- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/configs/exps/alvaro/dpp-config.yaml b/configs/exps/alvaro/dpp-config.yaml index 9a2ede949a..1edbb37d80 100644 --- a/configs/exps/alvaro/dpp-config.yaml +++ b/configs/exps/alvaro/dpp-config.yaml @@ -25,15 +25,15 @@ runs: # - config: depdpp-is2re-all - - config: inddpp-is2re-10k - note:so that cat get old dimensions + - config: 
inddpp-is2re-all + note: so that cat get old dimensions model: hidden_channels: 256 num_spherical: 7 num_radial: 6 out_emb_channels: 192 - - config: inddpp-is2re-10k + - config: inddpp-is2re-all note: dimensions both smaller model: hidden_channels: 128 @@ -41,7 +41,7 @@ runs: num_radial: 3 out_emb_channels: 96 - - config: inddpp-is2re-10k + - config: inddpp-is2re-all note: so that ads get old dimensions model: hidden_channels: 512 @@ -49,7 +49,7 @@ runs: num_radial: 12 out_emb_channels: 384 - - config: inddpp-is2re-10k + - config: inddpp-is2re-all note: so that their average is old dimensions model: hidden_channels: 340 diff --git a/configs/exps/alvaro/schnet-config.yaml b/configs/exps/alvaro/schnet-config.yaml index e1ddf5930f..c9ac15dbc6 100644 --- a/configs/exps/alvaro/schnet-config.yaml +++ b/configs/exps/alvaro/schnet-config.yaml @@ -28,28 +28,28 @@ runs: # - config: depschnet-is2re-all - - config: indschnet-is2re-10k + - config: indschnet-is2re-all note: so that cat get old dimensions model: hidden_channels: 256 num_filters: 128 num_gaussians: 100 - - config: indschnet-is2re-10k + - config: indschnet-is2re-all note: dimensions both smaller model: hidden_channels: 126 num_filters: 64 num_gaussians: 50 - - config: indschnet-is2re-10k + - config: indschnet-is2re-all note: so that ads get old dimensions model: hidden_channels: 512 num_filters: 256 num_gaussians: 200 - - config: indschnet-is2re-10k + - config: indschnet-is2re-all note: so that their average is old dimensions model: hidden_channels: 340 diff --git a/ocpmodels/models/inddpp.py b/ocpmodels/models/inddpp.py index 60365774f7..bd495bb2d0 100644 --- a/ocpmodels/models/inddpp.py +++ b/ocpmodels/models/inddpp.py @@ -17,9 +17,6 @@ def __init__(self, **kwargs): self.regress_forces = kwargs["regress_forces"] kwargs["num_targets"] = kwargs["hidden_channels"] // 2 - import ipdb - ipdb.set_trace() - self.cat_model = DimeNetPlusPlus(**kwargs) old_hc = kwargs["hidden_channels"] From d553ab0346d9d345cab79588a9e533004086cb9c Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Mon, 30 Oct 2023 13:20:05 -0400 Subject: [PATCH 116/131] reproduce results: new config --- configs/exps/alvaro/reproduce-configs.yaml | 75 ++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 configs/exps/alvaro/reproduce-configs.yaml diff --git a/configs/exps/alvaro/reproduce-configs.yaml b/configs/exps/alvaro/reproduce-configs.yaml new file mode 100644 index 0000000000..c4c834585c --- /dev/null +++ b/configs/exps/alvaro/reproduce-configs.yaml @@ -0,0 +1,75 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + # wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + wandb_tags: "reproduce-best-config" + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + note: "repoduce-top-run" + frame_averaging: 2D + fa_method: se3-random + cp_data_to_tmpdir: True + is_disconnected: true + model: + edge_embed_type: all_rij + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 352 + num_filters: 288 + num_gaussians: 68 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 4.0 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 9 + eval_every: 0.4 + +runs: + + - config: faenet-is2re-all + note: baseline faenet + + - config: indfaenet-is2re-all + note: baseline with 
top configs + + - config: indfaenet-is2re-all + note: baseline with runs' configs + model: + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 528 + num_filters: 672 + num_gaussians: 148 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + + - config: depfaenet-is2re-all + note: baseline with top configs + + - config: indfaenet-is2re-all + note: so that ads get old dimensions + model: + hidden_channels: 704 + num_gaussians: 200 + num_filters: 896 \ No newline at end of file From 7908f0bf08e0f691c04f14c7aee5fda5cd1b2eeb Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Mon, 30 Oct 2023 13:54:27 -0400 Subject: [PATCH 117/131] add debug file --- scripts/gnn_dev.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 scripts/gnn_dev.py diff --git a/scripts/gnn_dev.py b/scripts/gnn_dev.py new file mode 100644 index 0000000000..ddca60ee3d --- /dev/null +++ b/scripts/gnn_dev.py @@ -0,0 +1,52 @@ +""" +Copyright (c) Facebook, Inc. and its affiliates. + +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. +""" +import sys +import warnings +from pathlib import Path + +sys.path.append(str(Path(__file__).resolve().parent.parent)) + +from ocpmodels.common.utils import make_script_trainer +from ocpmodels.trainers import SingleTrainer + +if __name__ == "__main__": + config = {} + # Customize args + config["graph_rewiring"] = "remove-tag-0" + config["frame_averaging"] = "2D" + config["fa_method"] = "random" # "random" + config["test_ri"] = False + config["optim"] = {"max_epochs": 1} + config["model"] = {"use_pbc": True} + + checkpoint_path = None + # "checkpoints/2022-04-28-11-42-56-dimenetplusplus/" + "best_checkpoint.pt" + + str_args = sys.argv[1:] + if all("config" not in arg for arg in str_args): + str_args.append("--is_debug") + # str_args.append("--config=faenet-is2re-all") + str_args.append("--config=depfaenet-is2re-10k") + str_args.append("--is_disconnected=True") + # str_args.append("--silent=0") + warnings.warn( + "No model / mode is given; chosen as default" + f"Using: {str_args[-1]}" + ) + + trainer: SingleTrainer = make_script_trainer(str_args=str_args, overrides=config) + + trainer.train() + + if checkpoint_path: + trainer.load_checkpoint( + checkpoint_path="checkpoints/2022-04-28-11-42-56-dimenetplusplus/" + + "best_checkpoint.pt" + ) + + predictions = trainer.predict( + trainer.val_loader, results_file="is2re_results", disable_tqdm=False + ) \ No newline at end of file From 122e0a8ce434f9f49909d616b3b51e0a290944e1 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:47:37 -0500 Subject: [PATCH 118/131] =?UTF-8?q?=F0=9F=A7=B1=20(base=20model):=20Make?= =?UTF-8?q?=20future-proof=20with=20`**kwargs`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ocpmodels/models/base_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/models/base_model.py b/ocpmodels/models/base_model.py index 695f74c6d9..afc57adff4 100644 --- a/ocpmodels/models/base_model.py +++ b/ocpmodels/models/base_model.py @@ -40,7 +40,7 @@ def energy_forward(self, data): def forces_forward(self, preds): raise NotImplementedError - def forward(self, data, mode="train"): + def forward(self, data, mode="train", **kwargs): grad_forces = forces = None # energy gradient w.r.t. 
positions will be computed From 3ce8099612375e9c47085440cc81f6cad27957ff Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:49:36 -0500 Subject: [PATCH 119/131] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20(utils):=20Import?= =?UTF-8?q?=20`build=5Fconfig`=20from=20main?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ocpmodels/common/utils.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 3bee0cdc1f..87b712e237 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -33,12 +33,12 @@ from matplotlib.figure import Figure from torch_geometric.data import Data from torch_geometric.utils import remove_self_loops -from torch_scatter import segment_coo, segment_csr, scatter +from torch_scatter import scatter, segment_coo, segment_csr import ocpmodels -from ocpmodels.common.flags import flags -from ocpmodels.common.registry import registry import ocpmodels.common.dist_utils as dist_utils +from ocpmodels.common.flags import Flags, flags +from ocpmodels.common.registry import registry class Cluster: @@ -976,7 +976,7 @@ def load_config(config_str): return config -def build_config(args, args_override, silent=False): +def build_config(args, args_override=[], silent=None): config, overrides, loaded_config = {}, {}, {} if hasattr(args, "config_yml") and args.config_yml: @@ -1000,10 +1000,11 @@ def build_config(args, args_override, silent=False): if args.continue_from_dir else resolve(args.restart_from_dir) ) + already_ckpt = load_dir.exists() and load_dir.is_file() # find configs: from checkpoints first, from the dropped config file # otherwise ckpts = list(load_dir.glob("checkpoints/checkpoint-*.pt")) - if not ckpts: + if not ckpts and not already_ckpt: print(f"💥 Could not find checkpoints in {str(load_dir)}.") configs = list(load_dir.glob("config-*.y*ml")) if not configs: @@ -1014,11 +1015,14 @@ def build_config(args, args_override, silent=False): loaded_config = yaml.safe_load(configs[0].read_text()) load_path = str(configs[0]) else: - latest_ckpt = str( - sorted(ckpts, key=lambda c: float(c.stem.split("-")[-1]))[-1] - ) + if already_ckpt: + latest_ckpt = load_dir + else: + latest_ckpt = str( + sorted(ckpts, key=lambda c: float(c.stem.split("-")[-1]))[-1] + ) load_path = latest_ckpt - loaded_config = torch.load((latest_ckpt), map_location="cpu")["config"] + loaded_config = torch.load(latest_ckpt, map_location="cpu")["config"] # config has been found. We need to prune/modify it depending on whether # we're restarting or continuing. 
@@ -1039,7 +1043,7 @@ def build_config(args, args_override, silent=False): loaded_config["checkpoint"] = str(latest_ckpt) loaded_config["job_ids"] = loaded_config["job_ids"] + f", {JOB_ID}" loaded_config["job_id"] = JOB_ID - loaded_config["local_rank"] = config["local_rank"] + loaded_config["local_rank"] = config.get("local_rank", 0) else: # restarting from scratch keep_keys = [ @@ -1047,7 +1051,7 @@ def build_config(args, args_override, silent=False): "config", "dataset", "energy_head", - "fa_frames", + "fa_method", "frame_averaging", "graph_rewiring", "model", @@ -1057,6 +1061,7 @@ def build_config(args, args_override, silent=False): "test_ri", "use_pbc", "wandb_project", + "grad_fine_tune", ] loaded_config = { k: loaded_config[k] for k in keep_keys if k in loaded_config From f7169dcd41e5b786ed1a15ccd1a36de5e3ae4983 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:50:34 -0500 Subject: [PATCH 120/131] =?UTF-8?q?=E2=9C=A8=20(utils):=20`make=5Ftrainer?= =?UTF-8?q?=5Ffrom=5Fdir`=20for=20gfn?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ocpmodels/common/utils.py | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 87b712e237..82d40a9dbb 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -1287,6 +1287,7 @@ def get_pbc_distances( return out + def radius_graph_pbc(data, radius, max_num_neighbors_threshold): atom_pos = data.pos natoms = data.natoms @@ -1296,6 +1297,7 @@ def radius_graph_pbc(data, radius, max_num_neighbors_threshold): atom_pos, natoms, cell, radius, max_num_neighbors_threshold ) + def radius_graph_pbc_inputs( atom_pos, natoms, cell, radius, max_num_neighbors_threshold ): @@ -1760,3 +1762,64 @@ def scatter_det(*args, **kwargs): torch.use_deterministic_algorithms(mode=False) return out + + +def make_config_from_dir(path, mode, overrides={}, silent=None): + """ + Make a config from a directory. This is useful when restarting or continuing from a + previous run. + + Args: + path (str): Where to load the config from. mode (str): Either 'continue' or + 'restart'. overrides (dict, optional): Dictionary to update the config with . + Defaults to {}. silent (bool, optional): Whether or not to print loading + status. Defaults to None. + + Returns: + dict: The loaded and overridden config. + """ + path = resolve(path) + assert path.exists() + assert mode in { + "continue", + "restart", + }, f"Invalid mode: {mode}. Expected 'continue' or 'restart'." + assert isinstance( + overrides, dict + ), f"Overrides must be a dict. Received {overrides}" + + argv = deepcopy(sys.argv) + sys.argv[1:] = [] + default_args = Flags().get_parser().parse_args() + sys.argv = argv + + if mode == "continue": + default_args.continue_from_dir = str(path) + else: + default_args.restart_from_dir = str(path) + + config = build_config(default_args, silent=silent) + config = merge_dicts(config, overrides) + + setup_imports() + return config + + +def make_trainer_from_dir(path, mode, overrides={}, silent=None): + """ + Make a trainer from a directory. + + Load a config with `make_config_from_dir` and then make a trainer from it. + + Args: + path (str): Where to load the config from. + mode (str): Either 'continue' or 'restart'. + overrides (dict, optional): Dictionary to update the config with. + Defaults to {}. + silent (bool, optional): _description_. Defaults to None. + + Returns: + Trainer: The loaded trainer. 
+ """ + config = make_config_from_dir(path, mode, overrides, silent) + return registry.get_trainer_class(config["trainer"])(**config) From bca6df9ad11311f350c869f3dfa5dcb08b236e61 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:51:16 -0500 Subject: [PATCH 121/131] =?UTF-8?q?=E2=9C=A8=20(gfn):=20Import=20from=20#5?= =?UTF-8?q?1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ocpmodels/common/gfn.py | 296 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 296 insertions(+) create mode 100644 ocpmodels/common/gfn.py diff --git a/ocpmodels/common/gfn.py b/ocpmodels/common/gfn.py new file mode 100644 index 0000000000..00ecd0cd38 --- /dev/null +++ b/ocpmodels/common/gfn.py @@ -0,0 +1,296 @@ +import os +from copy import deepcopy +from pathlib import Path +from typing import Callable, List, Union + +import torch.nn as nn +from torch_geometric.data.batch import Batch +from torch_geometric.data.data import Data + +from ocpmodels.common.utils import make_trainer_from_dir, resolve +from ocpmodels.datasets.data_transforms import get_transforms +from ocpmodels.models.faenet import FAENet + + +class FAENetWrapper(nn.Module): + def __init__( + self, + faenet: FAENet, + transform: Callable = None, + frame_averaging: str = None, + trainer_config: dict = None, + ): + """ + `FAENetWrapper` is a wrapper class for the FAENet model. It is used to perform + a forward pass of the model when frame averaging is applied. + + Args: + faenet (FAENet, optional): The FAENet model to use. Defaults to None. + transform (Transform, optional): The data transform to use. Defaults to None. + frame_averaging (str, optional): The frame averaging method to use. + trainer_config (dict, optional): The trainer config used to create the model. + Defaults to None. + """ + super().__init__() + + self.faenet = faenet + self.transform = transform + self.frame_averaging = frame_averaging + self.trainer_config = trainer_config + self._is_frozen = None + + @property + def frozen(self): + """ + Returns whether or not the model is frozen. A model is frozen if all of its + parameters are set to not require gradients. + + This is a lazy property, meaning that it is only computed once and then cached. + + Returns: + bool: Whether or not the model is frozen. + """ + if self._is_frozen is None: + frozen = True + for param in self.parameters(): + if param.requires_grad: + frozen = False + break + self._is_frozen = frozen + return self._is_frozen + + def preprocess(self, batch: Union[Batch, Data, List[Data], List[Batch]]): + """ + Preprocess a batch of graphs using the data transform. + + * if batch is a list with one element: + * it could be a batch from the FAENet data loader which produces + lists of Batch with 1 element (because of multi-GPU features) + * if the single element is a Batch, extract it (`batch=batch[0]`) + * if batch is a Data instance, it is a single graph and we turn + it back into a list of 1 element (`batch=[batch]`) + * if it is a Batch instance, it is a collection of graphs and we turn it + into a list of Data graphs (`batch=batch.to_data_list()`) + + Finally we transform the list of Data graphs with the pre-processing transforms + and collate them into a Batch. + + .. code-block:: python + + In [7]: %timeit wrapper.preprocess(batch) + The slowest run took 4.94 times longer than the fastest. + This could mean that an intermediate result is being cached. + 67.1 ms ± 58.3 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each) + + In [8]: %timeit wrapper.preprocess(batch) + 43.8 ms ± 1.66 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + + Args: + batch (List?[Data, Batch]): The batch of graphs to transform + + Returns: + torch_geometric.Batch: The transformed batch. If frame averaging is + disabled, this is the same as the input batch. + """ + if isinstance(batch, list): + if len(batch) == 1 and isinstance(batch[0], Batch): + batch = batch[0] + if isinstance(batch, Data): + batch = [batch] + if isinstance(batch, Batch): + batch = batch.to_data_list() + + return Batch.from_data_list([self.transform(b) for b in batch]) + + def forward( + self, + batch: Union[Batch, Data, List[Data], List[Batch]], + preprocess: bool = True, + ): + """Perform a forward pass of the model when frame averaging is applied. + + Adapted from + ocmpodels.trainers.single_point_trainer.SingleTrainer.model_forward + + This implementation assumes only the energy is being predicted, and only + frame-averages this prediction. + + Args: + batch (List?[Data, Batch]): The batch of graphs to predict on. + preprocess (bool, optional): Whether or not to apply the data transforms. + Defaults to True. + + Returns: + (dict): model predictions tensor for "energy" and "forces". + """ + if preprocess: + batch = self.preprocess(batch) + if not self.frozen: + raise RuntimeError( + "FAENetWrapper must be frozen before calling forward." + + " Use .freeze() to freeze it." + ) + # Distinguish frame averaging from base case. + if self.frame_averaging and self.frame_averaging != "DA": + original_pos = batch[0].pos + original_cell = batch[0].cell + e_all = [] + + # Compute model prediction for each frame + for i in range(len(batch[0].fa_pos)): + batch[0].pos = batch[0].fa_pos[i] + batch[0].cell = batch[0].fa_cell[i] + + # forward pass + preds = self.faenet( + deepcopy(batch), + mode="inference", + regress_forces=False, + q=None, + ) + e_all.append(preds["energy"]) + + batch[0].pos = original_pos + batch[0].cell = original_cell + + # Average predictions over frames + preds["energy"] = sum(e_all) / len(e_all) + else: + preds = self.faenet(batch) + + if preds["energy"].shape[-1] == 1: + preds["energy"] = preds["energy"].view(-1) + + return preds["energy"] # denormalize? + + def freeze(self): + """Freeze the model parameters.""" + for param in self.parameters(): + param.requires_grad = False + + +def parse_loc() -> str: + """ + Parse the current location from the environment variables. If the location is a + number, assume it is a SLURM job ID and return "mila". Otherwise, return the + location name. + + Returns: + str: Where the current job is running, typically Mila or DRAC or laptop. + """ + loc = os.environ.get( + "SLURM_CLUSTER_NAME", os.environ.get("SLURM_JOB_ID", os.environ["USER"]) + ) + if all(s.isdigit() for s in loc): + loc = "mila" + return loc + + +def find_ckpt(ckpt_paths: dict, release: str) -> Path: + """ + Finds a checkpoint in a dictionary of paths, based on the current cluster name and + release. If the path is a file, use it directly. Otherwise, look for a single + checkpoint file in a ${release}/sub-fodler. E.g.: + ckpt_paths = {"mila": "/path/to/ckpt_dir"} release = v2.3_graph_phys + find_ckpt(ckpt_paths, release) -> /path/to/ckpt_dir/v2.3_graph_phys/name.ckpt + + ckpt_paths = {"mila": "/path/to/ckpt_dir/file.ckpt"} release = v2.3_graph_phys + find_ckpt(ckpt_paths, release) -> /path/to/ckpt_dir/file.ckpt + + Args: + ckpt_paths (dict): Where to look for the checkpoints. + Maps cluster names to paths. 
+ + Raises: + ValueError: The current location is not in the checkpoint path dict. + ValueError: The checkpoint path does not exist. ValueError: The checkpoint path + is a directory and contains no .ckpt file. ValueError: The checkpoint path is a + directory and contains >1 .ckpt files. + + Returns: + Path: Path to the checkpoint for that release on this host. + """ + loc = parse_loc() + if loc not in ckpt_paths: + raise ValueError(f"FAENet proxy checkpoint path not found for location {loc}.") + path = resolve(ckpt_paths[loc]) + if not path.exists(): + raise ValueError(f"FAENet proxy checkpoint not found at {str(path)}.") + if path.is_file(): + return path + path = path / release + ckpts = list(path.glob("**/*.ckpt")) + if len(ckpts) == 0: + raise ValueError(f"No FAENet proxy checkpoint found at {str(path)}.") + if len(ckpts) > 1: + raise ValueError( + f"Multiple FAENet proxy checkpoints found at {str(path)}. " + "Please specify the checkpoint explicitly." + ) + return ckpts[0] + + +def prepare_for_gfn(ckpt_paths: dict, release: str) -> tuple: + """ + Prepare a FAENet model for use in GFN. Loads the checkpoint for the given release + on the current host, and wraps it in a FAENetWrapper. + + Example ckpt_paths: + + ckpt_paths = { + "mila": "/path/to/releases_dir", + "drac": "/path/to/releases_dir", + "laptop": "/path/to/releases_dir", + } + + The loaded model is frozen (all parameters are set to not require gradients). + + Args: + ckpt_paths (dict): Where to look for the checkpoints as {loc: path}. + release (str): Which release to load. + + Returns: + tuple: (model, loaders) where loaders is a dict of loaders for the model. + """ + ckpt_path = find_ckpt(ckpt_paths, release) + assert ckpt_path.exists(), f"Path {ckpt_path} does not exist." + trainer = make_trainer_from_dir( + ckpt_path, + mode="continue", + overrides={ + "is_debug": True, + "silent": True, + "cp_data_to_tmpdir": False, + }, + silent=True, + ) + + wrapper = FAENetWrapper( + faenet=trainer.model, + transform=get_transforms(trainer.config), + frame_averaging=trainer.config.get("frame_averaging", ""), + trainer_config=trainer.config, + ) + wrapper.freeze() + loaders = trainer.loaders + + return wrapper, loaders + + +if __name__ == "__main__": + # for instance in ipython: + # In [1]: run ocpmodels/common/gfn.py + # + from ocpmodels.common.gfn import prepare_for_gfn + + ckpt_paths = {"mila": "/path/to/releases_dir"} + release = "v2.3_graph_phys" + # or + ckpt_paths = { + "mila": "/network/scratch/a/alexandre.duval/ocp/runs/3785941/checkpoints/best_checkpoint.pt" + } + release = None + wrapper, loaders = prepare_for_gfn(ckpt_paths, release) + data_gen = iter(loaders["train"]) + batch = next(data_gen) + preds = wrapper(batch) From 0795e1a0591d857716ceac8af37a7efa5af74b5e Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:51:58 -0500 Subject: [PATCH 122/131] =?UTF-8?q?=F0=9F=90=9B=20(transforms):=20Fix=20`G?= =?UTF-8?q?raphRewiring`=20call=20to=20work=20with=20gfn?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ocpmodels/datasets/data_transforms.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ocpmodels/datasets/data_transforms.py b/ocpmodels/datasets/data_transforms.py index 89d1d997ae..959baa7d6e 100644 --- a/ocpmodels/datasets/data_transforms.py +++ b/ocpmodels/datasets/data_transforms.py @@ -92,10 +92,12 @@ def __init__(self, rewiring_type=None) -> None: def __call__(self, data): if self.inactive: return data - - data.batch = 
torch.zeros(data.num_nodes, dtype=torch.long) - data.natoms = torch.tensor([data.natoms]) - data.ptr = torch.tensor([0, data.natoms]) + if not hasattr(data, "batch") or data.batch is None: + data.batch = torch.zeros(data.num_nodes, dtype=torch.long) + if isinstance(data.natoms, int) or data.natoms.ndim == 0: + data.natoms = torch.tensor([data.natoms]) + if not hasattr(data, "ptr") or data.ptr is None: + data.ptr = torch.tensor([0, data.natoms]) return self.rewiring_func(data) From 62a6b146d48fd027a415a188ac33e10ce29c45a6 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Mon, 13 Nov 2023 17:52:19 -0500 Subject: [PATCH 123/131] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20(all):=20Blackify?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- debug.py | 2 +- debug_faenet.py | 10 +- main.py | 2 +- ocpmodels/common/data_parallel.py | 11 +- ocpmodels/common/flags.py | 4 +- ocpmodels/datasets/data_transforms.py | 13 ++- ocpmodels/datasets/lmdb_dataset.py | 29 ++--- ocpmodels/datasets/other_datasets.py | 89 +++++++++------- ocpmodels/models/adpp.py | 108 ++++++++++++------- ocpmodels/models/afaenet.py | 118 +++++++++++---------- ocpmodels/models/aschnet.py | 92 ++++++++-------- ocpmodels/models/base_model.py | 8 +- ocpmodels/models/depdpp.py | 7 +- ocpmodels/models/depfaenet.py | 22 ++-- ocpmodels/models/depschnet.py | 11 +- ocpmodels/models/faenet.py | 15 +-- ocpmodels/models/gemnet/depgemnet_t.py | 18 ++-- ocpmodels/models/gemnet/gemnet.py | 14 ++- ocpmodels/models/gemnet/indgemnet_t.py | 16 +-- ocpmodels/models/gemnet_oc/agemnet_oc.py | 51 +++++---- ocpmodels/models/gemnet_oc/depgemnet_oc.py | 18 ++-- ocpmodels/models/gemnet_oc/gemnet_oc.py | 76 +++++++------ ocpmodels/models/gemnet_oc/indgemnet_oc.py | 16 +-- ocpmodels/models/inddpp.py | 18 ++-- ocpmodels/models/indfaenet.py | 72 +++++++------ ocpmodels/models/indschnet.py | 70 +++++++----- ocpmodels/models/painn.py | 1 + ocpmodels/trainers/base_trainer.py | 61 +++++++---- ocpmodels/trainers/single_trainer.py | 26 +++-- scripts/gnn_dev.py | 2 +- 30 files changed, 560 insertions(+), 440 deletions(-) diff --git a/debug.py b/debug.py index b374cb59da..0787829521 100644 --- a/debug.py +++ b/debug.py @@ -109,7 +109,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): # trainer_config["optim"]["batch_size"] = 32 # trainer_config["optim"]["eval_batch_size"] = 32 # trainer_config["optim"]["max_epochs"] = 30 - #trainer_config["optim"]["es_patience"] = 5 + # trainer_config["optim"]["es_patience"] = 5 trainer_config["optim"]["num_workers"] = 0 # trainer_config["model"]["regress_forces"] = False diff --git a/debug_faenet.py b/debug_faenet.py index 1967d26e37..56d79c3d68 100644 --- a/debug_faenet.py +++ b/debug_faenet.py @@ -118,18 +118,18 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): trainer_config["model"]["energy_head"] = "weighted-av-final-embeds" trainer_config["model"]["complex_mp"] = False trainer_config["model"]["graph_norm"] = True - trainer_config["model"]['hidden_channels'] = 352 + trainer_config["model"]["hidden_channels"] = 352 trainer_config["model"]["num_filters"] = 448 trainer_config["model"]["num_gaussians"] = 99 trainer_config["model"]["num_interactions"] = 6 trainer_config["model"]["second_layer_MLP"] = True trainer_config["model"]["skip_co"] = "concat" - #trainer_config["model"]["transformer_out"] = False + # trainer_config["model"]["transformer_out"] = False trainer_config["model"]["afaenet_gat_mode"] = "v1" - #trainer_config["model"]["disconnected_mlp"] = 
True + # trainer_config["model"]["disconnected_mlp"] = True - #trainer_config["optim"]["batch_sizes"] = 256 - #trainer_config["optim"]["eval_batch_sizes"] = 256 + # trainer_config["optim"]["batch_sizes"] = 256 + # trainer_config["optim"]["eval_batch_sizes"] = 256 trainer_config["optim"]["lr_initial"] = 0.0019 trainer_config["optim"]["scheduler"] = "LinearWarmupCosineAnnealingLR" trainer_config["optim"]["max_epochs"] = 20 diff --git a/main.py b/main.py index f1519dd1c8..e9dacfb137 100644 --- a/main.py +++ b/main.py @@ -89,7 +89,7 @@ def wrap_up(args, start_time, error=None, signal=None, trainer=None): args.logdir = resolve(args.logdir) # -- Build config - + trainer_config = build_config(args, override_args) if dist_utils.is_master(): diff --git a/ocpmodels/common/data_parallel.py b/ocpmodels/common/data_parallel.py index f5edd3bbe6..98f19984b8 100644 --- a/ocpmodels/common/data_parallel.py +++ b/ocpmodels/common/data_parallel.py @@ -55,10 +55,13 @@ def forward(self, batch_list, **kwargs): if len(self.device_ids) == 1: if type(batch_list[0]) is list: - return self.module([ - batch_list[0][0].to(f"cuda:{self.device_ids[0]}"), - batch_list[0][1].to(f"cuda:{self.device_ids[0]}") - ], **kwargs) + return self.module( + [ + batch_list[0][0].to(f"cuda:{self.device_ids[0]}"), + batch_list[0][1].to(f"cuda:{self.device_ids[0]}"), + ], + **kwargs, + ) return self.module(batch_list[0].to(f"cuda:{self.device_ids[0]}"), **kwargs) for t in chain(self.module.parameters(), self.module.buffers()): diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index da5d105743..7a01ebac44 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -291,13 +291,13 @@ def add_core_args(self): "--is_disconnected", type=bool, default=False, - help="Eliminates edges between catalyst and adsorbate." + help="Eliminates edges between catalyst and adsorbate.", ) self.parser.add_argument( "--lowest_energy_only", type=bool, default=False, - help="Makes trainer use the lowest energy data point for every (catalyst, adsorbate, cell) tuple. ONLY USE WITH ALL DATASET" + help="Makes trainer use the lowest energy data point for every (catalyst, adsorbate, cell) tuple. ONLY USE WITH ALL DATASET", ) diff --git a/ocpmodels/datasets/data_transforms.py b/ocpmodels/datasets/data_transforms.py index 959baa7d6e..1b3e0a37c9 100644 --- a/ocpmodels/datasets/data_transforms.py +++ b/ocpmodels/datasets/data_transforms.py @@ -101,13 +101,16 @@ def __call__(self, data): return self.rewiring_func(data) + class Disconnected(Transform): def __init__(self, is_disconnected=False) -> None: self.inactive = not is_disconnected def edge_classifier(self, edge_index, tags): - edges_with_tags = tags[edge_index.type(torch.long)] # Tensor with shape=edge_index.shape where every entry is a tag - filt1 = (edges_with_tags[0] == edges_with_tags[1]) + edges_with_tags = tags[ + edge_index.type(torch.long) + ] # Tensor with shape=edge_index.shape where every entry is a tag + filt1 = edges_with_tags[0] == edges_with_tags[1] filt2 = (edges_with_tags[0] != 2) * (edges_with_tags[1] != 2) # Edge is removed if tags are different (R1), and at least one end has tag 2 (R2). We want ~(R1*R2) = ~R1+~R2. 
@@ -120,13 +123,13 @@ def __call__(self, data): return data values = self.edge_classifier(data.edge_index, data.tags) - + data.edge_index = data.edge_index[:, values] data.cell_offsets = data.cell_offsets[values, :] data.distances = data.distances[values] return data - + class Compose: # https://pytorch.org/vision/stable/_modules/torchvision/transforms/transforms.html#Compose @@ -168,6 +171,6 @@ def get_transforms(trainer_config): AddAttributes(), GraphRewiring(trainer_config.get("graph_rewiring")), FrameAveraging(trainer_config["frame_averaging"], trainer_config["fa_frames"]), - Disconnected(trainer_config["is_disconnected"]) + Disconnected(trainer_config["is_disconnected"]), ] return Compose(transforms) diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index efc13ba7f5..ec953b4a28 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -160,12 +160,11 @@ def __init__(self, config, transform=None): # In this function, we combine a list of samples into a batch. Notice that we first create the batch, then we fix # the neighbor problem: that some elements in the batch don't have edges, which pytorch geometric doesn't handle well # and which leads to errors in the forward step. -def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ever used +def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ever used # FIRST, MAKE BATCH - if ( # This is for indfaenet - type(data_list[0]) is tuple - and type(data_list[0][0]) is Data + if ( # This is for indfaenet + type(data_list[0]) is tuple and type(data_list[0][0]) is Data ): adsorbates = [system[0] for system in data_list] catalysts = [system[1] for system in data_list] @@ -175,7 +174,6 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve else: batch = Batch.from_data_list(data_list) - # THEN, FIX NEIGHBOR PROBLEM if ( @@ -194,11 +192,9 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve "LMDB does not contain edge index information, set otf_graph=True" ) - elif ( # This is for indfaenet - not otf_graph - and type(data_list[0]) is tuple - and type(data_list[0][0]) is Data - ): + elif ( # This is for indfaenet + not otf_graph and type(data_list[0]) is tuple and type(data_list[0][0]) is Data + ): batches = [ads_batch, cat_batch] lists = [adsorbates, catalysts] for batch, list_type in zip(batches, lists): @@ -209,12 +205,8 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve batch.neighbors = torch.tensor(n_neighbors) return batches - - - elif ( # This is for afaenet - not otf_graph - and type(data_list[0]) is HeteroData - ): + + elif not otf_graph and type(data_list[0]) is HeteroData: # This is for afaenet # First, fix the neighborhood dimension. 
n_neighbors_ads = [] n_neighbors_cat = [] @@ -230,11 +222,10 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is eve sender, receiver = batch["is_disc"].edge_index ads_to_cat = torch.stack([sender, receiver + batch["adsorbate"].num_nodes]) cat_to_ads = torch.stack([ads_to_cat[1], ads_to_cat[0]]) - batch["is_disc"].edge_index = torch.concat([ads_to_cat, cat_to_ads], dim = 1) + batch["is_disc"].edge_index = torch.concat([ads_to_cat, cat_to_ads], dim=1) batch["is_disc"].edge_weight = torch.concat( - [batch["is_disc"].edge_weight, -batch["is_disc"].edge_weight], - dim = 0 + [batch["is_disc"].edge_weight, -batch["is_disc"].edge_weight], dim=0 ) return batch diff --git a/ocpmodels/datasets/other_datasets.py b/ocpmodels/datasets/other_datasets.py index ed068e77c5..5f615a75a0 100644 --- a/ocpmodels/datasets/other_datasets.py +++ b/ocpmodels/datasets/other_datasets.py @@ -11,8 +11,9 @@ from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform -# This is a function that receives an adsorbate/catalyst system and returns -# each of these parts separately. + +# This is a function that receives an adsorbate/catalyst system and returns +# each of these parts separately. def graph_splitter(graph): edge_index = graph.edge_index pos = graph.pos @@ -30,7 +31,7 @@ def graph_splitter(graph): id = graph.id # Make masks to filter most data we need - adsorbate_v_mask = (tags == 2) + adsorbate_v_mask = tags == 2 catalyst_v_mask = ~adsorbate_v_mask adsorbate_e_mask = (tags[edge_index][0] == 2) * (tags[edge_index][1] == 2) @@ -39,56 +40,59 @@ def graph_splitter(graph): # Reindex the edge indices. device = graph.edge_index.device - ads_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) - cat_assoc = torch.full((natoms,), -1, dtype = torch.long, device = device) + ads_assoc = torch.full((natoms,), -1, dtype=torch.long, device=device) + cat_assoc = torch.full((natoms,), -1, dtype=torch.long, device=device) ads_natoms = adsorbate_v_mask.sum() cat_natoms = catalyst_v_mask.sum() - ads_assoc[adsorbate_v_mask] = torch.arange(ads_natoms, device = device) - cat_assoc[catalyst_v_mask] = torch.arange(cat_natoms, device = device) + ads_assoc[adsorbate_v_mask] = torch.arange(ads_natoms, device=device) + cat_assoc[catalyst_v_mask] = torch.arange(cat_natoms, device=device) ads_edge_index = ads_assoc[edge_index[:, adsorbate_e_mask]] cat_edge_index = cat_assoc[edge_index[:, catalyst_e_mask]] # Create the graphs adsorbate = Data( - edge_index = ads_edge_index, - pos = pos[adsorbate_v_mask, :], - cell = cell, - atomic_numbers = atomic_numbers[adsorbate_v_mask], - natoms = ads_natoms, - cell_offsets = cell_offsets[adsorbate_e_mask, :], - force = force[adsorbate_v_mask, :], - tags = tags[adsorbate_v_mask], - y_init = y_init, - y_relaxed = y_relaxed, - pos_relaxed = pos_relaxed[adsorbate_v_mask, :], - id = id, - mode="adsorbate" + edge_index=ads_edge_index, + pos=pos[adsorbate_v_mask, :], + cell=cell, + atomic_numbers=atomic_numbers[adsorbate_v_mask], + natoms=ads_natoms, + cell_offsets=cell_offsets[adsorbate_e_mask, :], + force=force[adsorbate_v_mask, :], + tags=tags[adsorbate_v_mask], + y_init=y_init, + y_relaxed=y_relaxed, + pos_relaxed=pos_relaxed[adsorbate_v_mask, :], + id=id, + mode="adsorbate", ) catalyst = Data( - edge_index = cat_edge_index, - pos = pos[catalyst_v_mask, :], - cell = cell, - atomic_numbers = atomic_numbers[catalyst_v_mask], - natoms = cat_natoms, - cell_offsets = cell_offsets[catalyst_e_mask, :], - force = 
force[catalyst_v_mask, :], - tags = tags[catalyst_v_mask], - y_init = y_init, - y_relaxed = y_relaxed, - pos_relaxed = pos_relaxed[catalyst_v_mask, :], - id = id, - mode="catalyst" + edge_index=cat_edge_index, + pos=pos[catalyst_v_mask, :], + cell=cell, + atomic_numbers=atomic_numbers[catalyst_v_mask], + natoms=cat_natoms, + cell_offsets=cell_offsets[catalyst_e_mask, :], + force=force[catalyst_v_mask, :], + tags=tags[catalyst_v_mask], + y_init=y_init, + y_relaxed=y_relaxed, + pos_relaxed=pos_relaxed[catalyst_v_mask, :], + id=id, + mode="catalyst", ) return adsorbate, catalyst + # This dataset class sends back a tuple with the adsorbate and catalyst. @registry.register_dataset("separate") -class SeparateLmdbDataset(LmdbDataset): # Check that the dataset works as intended, with an specific example. +class SeparateLmdbDataset( + LmdbDataset +): # Check that the dataset works as intended, with an specific example. def __getitem__(self, idx): t0 = time.time_ns() if not self.path.is_file(): @@ -135,6 +139,7 @@ def __getitem__(self, idx): return (adsorbate, catalyst) + @registry.register_dataset("heterogeneous") class HeterogeneousDataset(SeparateLmdbDataset): def __getitem__(self, idx): @@ -153,10 +158,18 @@ def __getitem__(self, idx): reaction[mode, "is_close", mode].edge_index = graph.edge_index # We create the edges between both parts of the graph. - sender = torch.repeat_interleave(torch.arange(catalyst.natoms.item()), adsorbate.natoms.item()) - receiver = torch.arange(0, adsorbate.natoms.item()).repeat(catalyst.natoms.item()) - reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack([sender, receiver]) - reaction["catalyst", "is_disc", "adsorbate"].edge_weight = torch.repeat_interleave( + sender = torch.repeat_interleave( + torch.arange(catalyst.natoms.item()), adsorbate.natoms.item() + ) + receiver = torch.arange(0, adsorbate.natoms.item()).repeat( + catalyst.natoms.item() + ) + reaction["catalyst", "is_disc", "adsorbate"].edge_index = torch.stack( + [sender, receiver] + ) + reaction[ + "catalyst", "is_disc", "adsorbate" + ].edge_weight = torch.repeat_interleave( reaction["catalyst"].pos[:, 2], adsorbate.natoms.item(), ) diff --git a/ocpmodels/models/adpp.py b/ocpmodels/models/adpp.py index bf77349c9e..725d51bd54 100644 --- a/ocpmodels/models/adpp.py +++ b/ocpmodels/models/adpp.py @@ -25,10 +25,7 @@ from ocpmodels.modules.phys_embeddings import PhysEmbedding from ocpmodels.modules.pooling import Graclus, Hierarchical_Pooling from ocpmodels.models.utils.activations import swish -from ocpmodels.models.afaenet import ( - GATInteraction, - GaussianSmearing -) +from ocpmodels.models.afaenet import GATInteraction, GaussianSmearing try: @@ -487,9 +484,7 @@ def __init__(self, **kwargs): kwargs["envelope_exponent"], ) # Disconnected interaction embedding - self.distance_expansion_disc = GaussianSmearing( - 0.0, 20.0, 100 - ) + self.distance_expansion_disc = GaussianSmearing(0.0, 20.0, 100) self.disc_edge_embed = Linear(100, kwargs["hidden_channels"]) if use_tag or use_pg or kwargs["phys_embeds"] or kwargs["graph_rewiring"]: @@ -613,7 +608,7 @@ def __init__(self, **kwargs): GATInteraction( kwargs["hidden_channels"], kwargs["gat_mode"], - kwargs["hidden_channels"] + kwargs["hidden_channels"], ) for _ in range(kwargs["num_blocks"]) ] @@ -628,7 +623,7 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["hidden_channels"] // 2 * 2, kwargs["hidden_channels"] // 2), self.act, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["hidden_channels"] // 2, 1), 
) self.reset_parameters() @@ -721,13 +716,9 @@ def energy_forward(self, data): data["catalyst"].tags, ) - if self.otf_graph: # NOT IMPLEMENTED!! + if self.otf_graph: # NOT IMPLEMENTED!! edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( - pos, - natoms, - cell, - self.cutoff, - 50 + pos, natoms, cell, self.cutoff, 50 ) data.edge_index = edge_index data.cell_offsets = cell_offsets @@ -766,7 +757,7 @@ def energy_forward(self, data): offsets_cat = out["offsets"] j_cat, i_cat = edge_index_cat - else: # NOT IMPLEMENTED + else: # NOT IMPLEMENTED edge_index = radius_graph(pos, r=self.cutoff, batch=batch) j, i = edge_index dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt() @@ -797,7 +788,7 @@ def energy_forward(self, data): pos_cat[idx_j_cat].detach() - pos_i_cat + offsets_cat[idx_ji_cat], pos_cat[idx_k_cat].detach() - pos_j_cat + offsets_cat[idx_kj_cat], ) - else: # NOT IMPLEMENTED + else: # NOT IMPLEMENTED pos_ji, pos_kj = ( pos[idx_j].detach() - pos_i, pos[idx_k].detach() - pos_j, @@ -820,27 +811,51 @@ def energy_forward(self, data): pooling_loss = None # deal with pooling loss # Embedding block. - x_ads = self.emb_ads(atomic_numbers_ads.long(), rbf_ads, i_ads, j_ads, tags_ads, subnodes) + x_ads = self.emb_ads( + atomic_numbers_ads.long(), rbf_ads, i_ads, j_ads, tags_ads, subnodes + ) if self.energy_head: P_ads, pooling_loss, batch_ads = self.output_blocks_ads[0]( - x_ads, rbf_ads, i_ads, edge_index_ads, dist_ads, batch_ads, num_nodes=pos_ads.size(0) + x_ads, + rbf_ads, + i_ads, + edge_index_ads, + dist_ads, + batch_ads, + num_nodes=pos_ads.size(0), ) else: - P_ads = self.output_blocks_ads[0](x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0)) + P_ads = self.output_blocks_ads[0]( + x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0) + ) if self.energy_head == "weighted-av-initial-embeds": - alpha_ads = self.w_lin_ads(scatter(x_ads, i_ads, dim=0, dim_size=pos_ads.size(0))) + alpha_ads = self.w_lin_ads( + scatter(x_ads, i_ads, dim=0, dim_size=pos_ads.size(0)) + ) - x_cat = self.emb_cat(atomic_numbers_cat.long(), rbf_cat, i_cat, j_cat, tags_cat, subnodes) + x_cat = self.emb_cat( + atomic_numbers_cat.long(), rbf_cat, i_cat, j_cat, tags_cat, subnodes + ) if self.energy_head: P_cat, pooling_loss, batch_cat = self.output_blocks_cat[0]( - x_cat, rbf_cat, i_cat, edge_index_cat, dist_cat, batch_cat, num_nodes=pos_cat.size(0) + x_cat, + rbf_cat, + i_cat, + edge_index_cat, + dist_cat, + batch_cat, + num_nodes=pos_cat.size(0), ) else: - P_cat = self.output_blocks_cat[0](x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0)) + P_cat = self.output_blocks_cat[0]( + x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0) + ) if self.energy_head == "weighted-av-initial-embeds": - alpha_cat = self.w_lin_cat(scatter(x_cat, i_cat, dim=0, dim_size=pos_cat.size(0))) + alpha_cat = self.w_lin_cat( + scatter(x_cat, i_cat, dim=0, dim_size=pos_cat.size(0)) + ) edge_weights = self.distance_expansion_disc(data["is_disc"].edge_weight) edge_weights = self.disc_edge_embed(edge_weights) @@ -856,28 +871,37 @@ def energy_forward(self, data): output_block_cat, disc_interaction, ) in zip( - self.interaction_blocks_ads, + self.interaction_blocks_ads, self.interaction_blocks_cat, self.output_blocks_ads[1:], self.output_blocks_cat[1:], self.inter_interactions, ): - intra_ads = interaction_block_ads(x_ads, rbf_ads, sbf_ads, idx_kj_ads, idx_ji_ads) - intra_cat = interaction_block_cat(x_cat, rbf_cat, sbf_cat, idx_kj_cat, idx_ji_cat) + intra_ads = interaction_block_ads( + x_ads, rbf_ads, sbf_ads, idx_kj_ads, idx_ji_ads + ) + intra_cat = 
interaction_block_cat( + x_cat, rbf_cat, sbf_cat, idx_kj_cat, idx_ji_cat + ) inter_ads, inter_cat = disc_interaction( - intra_ads, - intra_cat, - data["is_disc"].edge_index, - edge_weights + intra_ads, intra_cat, data["is_disc"].edge_index, edge_weights ) x_ads, x_cat = x_ads + inter_ads, x_cat + inter_cat - x_ads, x_cat = nn.functional.normalize(x_ads), nn.functional.normalize(x_cat) + x_ads, x_cat = nn.functional.normalize(x_ads), nn.functional.normalize( + x_cat + ) if self.energy_head: P_bis_ads, pooling_loss_bis_ads, _ = output_block_ads( - x_ads, rbf_ads, i_ads, edge_index_ads, dist_ads, batch_ads, num_nodes=pos_ads.size(0) + x_ads, + rbf_ads, + i_ads, + edge_index_ads, + dist_ads, + batch_ads, + num_nodes=pos_ads.size(0), ) energy_Ps_ads.append( P_bis_ads.sum(0) / len(P) @@ -888,7 +912,13 @@ def energy_forward(self, data): pooling_loss += pooling_loss_bis_ads P_bis_cat, pooling_loss_bis_cat, _ = output_block_cat( - x_cat, rbf_cat, i_cat, edge_index_cat, dist_cat, batch_cat, num_nodes=pos_cat.size(0) + x_cat, + rbf_cat, + i_cat, + edge_index_cat, + dist_cat, + batch_cat, + num_nodes=pos_cat.size(0), ) energy_Ps_cat.append( P_bis_cat.sum(0) / len(P) @@ -898,8 +928,12 @@ def energy_forward(self, data): if pooling_loss_bis_cat is not None: pooling_loss += pooling_loss_bis_cat else: - P_ads += output_block_ads(x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0)) - P_cat += output_block_cat(x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0)) + P_ads += output_block_ads( + x_ads, rbf_ads, i_ads, num_nodes=pos_ads.size(0) + ) + P_cat += output_block_cat( + x_cat, rbf_cat, i_cat, num_nodes=pos_cat.size(0) + ) if self.energy_head == "weighted-av-initial-embeds": P = P * alpha diff --git a/ocpmodels/models/afaenet.py b/ocpmodels/models/afaenet.py index a204b6dd03..1aa4f6f7e4 100644 --- a/ocpmodels/models/afaenet.py +++ b/ocpmodels/models/afaenet.py @@ -14,7 +14,7 @@ GaussianSmearing, EmbeddingBlock, InteractionBlock, - OutputBlock + OutputBlock, ) from ocpmodels.models.indfaenet import PositionalEncoding from ocpmodels.common.registry import registry @@ -22,37 +22,40 @@ from ocpmodels.common.utils import conditional_grad, get_pbc_distances from ocpmodels.models.utils.activations import swish + class GATInteraction(nn.Module): def __init__(self, d_model, version, edge_dim, dropout=0.1): super(GATInteraction, self).__init__() if version not in {"v1", "v2"}: - raise ValueError(f"Invalid GAT version. Received {version}, available: v1, v2.") + raise ValueError( + f"Invalid GAT version. Received {version}, available: v1, v2." 
+ ) # Not quite sure what is the impact of increasing or decreasing the number of heads if version == "v1": self.interaction = GATConv( - in_channels = d_model, - out_channels = d_model, - heads = 3, - concat = False, - edge_dim = edge_dim, - dropout = dropout + in_channels=d_model, + out_channels=d_model, + heads=3, + concat=False, + edge_dim=edge_dim, + dropout=dropout, ) else: self.interaction = GATv2Conv( - in_channels = d_model, - out_channels = d_model, - head = 3, - concat = False, - edge_dim = edge_dim, - dropout = dropout + in_channels=d_model, + out_channels=d_model, + head=3, + concat=False, + edge_dim=edge_dim, + dropout=dropout, ) def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): # We first do the message passing separation_pt = h_ads.shape[0] - combined = torch.concat([h_ads, h_cat], dim = 0) + combined = torch.concat([h_ads, h_cat], dim=0) combined = self.interaction(combined, bipartite_edges, bipartite_weights) # We separate again and we return @@ -63,6 +66,7 @@ def forward(self, h_ads, h_cat, bipartite_edges, bipartite_weights): return ads, cat + @registry.register_model("afaenet") class AFaenet(BaseModel): def __init__(self, **kwargs): @@ -96,7 +100,7 @@ def __init__(self, **kwargs): 0.0, self.cutoff, kwargs["num_gaussians"] ) self.distance_expansion_disc = GaussianSmearing( - 0.0, 20.0, kwargs["num_gaussians"] + 0.0, 20.0, kwargs["num_gaussians"] ) # Set the second parameter as the highest possible z-axis value @@ -159,7 +163,9 @@ def __init__(self, **kwargs): ] ) - assert "afaenet_gat_mode" in kwargs, "GAT version needs to be specified. Options: v1, v2" + assert ( + "afaenet_gat_mode" in kwargs + ), "GAT version needs to be specified. Options: v1, v2" # Inter Interaction self.inter_interactions = nn.ModuleList( [ @@ -186,14 +192,16 @@ def __init__(self, **kwargs): self.w_lin_cat = Linear(kwargs["hidden_channels"], 1) # Skip co - if self.skip_co == "concat": # for the implementation of independent faenet, make sure the input is large enough + if ( + self.skip_co == "concat" + ): # for the implementation of independent faenet, make sure the input is large enough self.mlp_skip_co_ads = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, - kwargs["hidden_channels"] // 2 + kwargs["hidden_channels"] // 2, ) self.mlp_skip_co_cat = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, - kwargs["hidden_channels"] // 2 + kwargs["hidden_channels"] // 2, ) elif self.skip_co == "concat_atom": @@ -205,17 +213,17 @@ def __init__(self, **kwargs): self.transformer_out = kwargs.get("transformer_out", False) if self.transformer_out: self.combination = Transformer( - d_model = kwargs["hidden_channels"] // 2, - nhead = 2, - num_encoder_layers = 2, - num_decoder_layers = 2, - dim_feedforward = kwargs["hidden_channels"], - batch_first = True + d_model=kwargs["hidden_channels"] // 2, + nhead=2, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=kwargs["hidden_channels"], + batch_first=True, ) self.positional_encoding = PositionalEncoding( kwargs["hidden_channels"] // 2, - dropout = 0.1, - max_len = 5, + dropout=0.1, + max_len=5, ) self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) @@ -223,7 +231,7 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["hidden_channels"], kwargs["hidden_channels"] // 2), swish, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["hidden_channels"] // 2, 1), ) 
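# --- Illustrative sketch (not part of the original patch) ---------------------
# Minimal usage of the GATInteraction block defined above; every shape and value
# here is made up purely for illustration, and the class is assumed to be
# importable from this module. Note that the v2 branch above passes `head=3` to
# GATv2Conv, where `heads=3` is presumably what was intended.
import torch

n_ads, n_cat, d = 3, 5, 8
h_ads = torch.randn(n_ads, d)   # adsorbate node embeddings
h_cat = torch.randn(n_cat, d)   # catalyst node embeddings

# Bipartite catalyst-to-adsorbate edges, indexed in the concatenated
# [adsorbate | catalyst] node ordering that forward() builds internally
# (catalyst indices are therefore offset by n_ads).
sender = torch.arange(n_cat).repeat_interleave(n_ads) + n_ads
receiver = torch.arange(n_ads).repeat(n_cat)
bip_edges = torch.stack([sender, receiver])
bip_weights = torch.randn(bip_edges.shape[1], d)  # per-edge features (edge_dim = d)

layer = GATInteraction(d_model=d, version="v1", edge_dim=d)
new_ads, new_cat = layer(h_ads, h_cat, bip_edges, bip_weights)
# new_ads and new_cat keep the input shapes: (n_ads, d) and (n_cat, d).
# -------------------------------------------------------------------------------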
@conditional_grad(torch.enable_grad()) @@ -240,11 +248,11 @@ def energy_forward(self, data): # Embedding h_ads, e_ads = self.embedding( data["adsorbate"].atomic_numbers.long(), - edge_weight_ads, + edge_weight_ads, rel_pos_ads, edge_attr_ads, data["adsorbate"].tags, - self.embed_block_ads + self.embed_block_ads, ) h_cat, e_cat = self.embedding( data["catalyst"].atomic_numbers.long(), @@ -252,7 +260,7 @@ def energy_forward(self, data): rel_pos_cat, edge_attr_cat, data["catalyst"].tags, - self.embed_block_cat + self.embed_block_cat, ) # Compute atom weights for late energy head @@ -270,11 +278,7 @@ def energy_forward(self, data): # Now we do interactions. energy_skip_co_ads = [] energy_skip_co_cat = [] - for ( - interaction_ads, - interaction_cat, - inter_interaction - ) in zip( + for interaction_ads, interaction_cat, inter_interaction in zip( self.interaction_blocks_ads, self.interaction_blocks_cat, self.inter_interactions, @@ -307,15 +311,17 @@ def energy_forward(self, data): # QUESTION: Can we do both simultaneously? h_ads, h_cat = h_ads + inter_ads, h_cat + inter_cat - h_ads, h_cat = nn.functional.normalize(h_ads), nn.functional.normalize(h_cat) + h_ads, h_cat = nn.functional.normalize(h_ads), nn.functional.normalize( + h_cat + ) # Atom skip-co if self.skip_co == "concat_atom": energy_skip_co_ads.append(h_ads) energy_skip_co_cat.append(h_cat) - h_ads = self.act(self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim = 1))) - h_cat = self.act(self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim = 1))) + h_ads = self.act(self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim=1))) + h_cat = self.act(self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim=1))) energy_ads = self.output_block_ads( h_ads, edge_index_ads, edge_weight_ads, batch_ads, alpha_ads @@ -328,8 +334,8 @@ def energy_forward(self, data): energy_skip_co_ads.append(energy_ads) energy_skip_co_cat.append(energy_cat) if self.skip_co == "concat": - energy_ads = self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim = 1)) - energy_cat = self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim = 1)) + energy_ads = self.mlp_skip_co_ads(torch.cat(energy_skip_co_ads, dim=1)) + energy_cat = self.mlp_skip_co_cat(torch.cat(energy_skip_co_cat, dim=1)) elif self.skip_co == "add": energy_ads = sum(energy_skip_co_ads) energy_cat = sum(energy_skip_co_cat) @@ -337,29 +343,29 @@ def energy_forward(self, data): # Combining hidden representations if self.transformer_out: batch_size = energy_ads.shape[0] - - fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + + fake_target_sequence = ( + self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + ) system_energy = torch.cat( - [ - energy_ads.unsqueeze(1), - energy_cat.unsqueeze(1) - ], - dim = 1 + [energy_ads.unsqueeze(1), energy_cat.unsqueeze(1)], dim=1 ) system_energy = self.positional_encoding(system_energy) - - system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + + system_energy = self.combination( + system_energy, fake_target_sequence + ).squeeze(1) system_energy = self.transformer_lin(system_energy) else: - system_energy = torch.cat([energy_ads, energy_cat], dim = 1) + system_energy = torch.cat([energy_ads, energy_cat], dim=1) system_energy = self.combination(system_energy) # We combine predictions and return them pred_system = { - "energy" : system_energy, - "pooling_loss" : None, # This might break something. 
- "hidden_state" : torch.cat([energy_ads, energy_cat], dim = 1) + "energy": system_energy, + "pooling_loss": None, # This might break something. + "hidden_state": torch.cat([energy_ads, energy_cat], dim=1), } return pred_system @@ -403,7 +409,7 @@ def graph_rewiring(self, data, batch_ads, batch_cat): if mode == "adsorbate": distance_expansion = self.distance_expansion_ads else: - distance_expansion = self.distance_expansion_cat + distance_expansion = self.distance_expansion_cat edge_attr = distance_expansion(edge_weight) results.append([edge_index, edge_weight, rel_pos, edge_attr]) else: diff --git a/ocpmodels/models/aschnet.py b/ocpmodels/models/aschnet.py index 3baf364793..d0dcd5d6d9 100644 --- a/ocpmodels/models/aschnet.py +++ b/ocpmodels/models/aschnet.py @@ -35,6 +35,7 @@ NUM_CLUSTERS = 20 NUM_POOLING_LAYERS = 1 + @registry.register_model("aschnet") class ASchNet(BaseModel): r"""The continuous-filter convolutional neural network SchNet from the @@ -177,9 +178,7 @@ def __init__(self, **kwargs): ) # Gaussian basis and linear transformation of disc edges - self.distance_expansion_disc = GaussianSmearing( - 0.0, 20.0, self.num_gaussians - ) + self.distance_expansion_disc = GaussianSmearing(0.0, 20.0, self.num_gaussians) self.disc_edge_embed = Linear(self.num_gaussians, self.num_filters) # Position encoding @@ -188,7 +187,7 @@ def __init__(self, **kwargs): # Interaction block self.distance_expansion = GaussianSmearing(0.0, self.cutoff, self.num_gaussians) - + self.interactions_ads = ModuleList() for _ in range(self.num_interactions): block = InteractionBlock( @@ -204,7 +203,9 @@ def __init__(self, **kwargs): self.interactions_cat.append(block) self.interactions_disc = ModuleList() - assert "gat_mode" in kwargs, "GAT version needs to be specified. Options: v1, v2" + assert ( + "gat_mode" in kwargs + ), "GAT version needs to be specified. Options: v1, v2" for _ in range(self.num_interactions): block = GATInteraction( self.hidden_channels, kwargs["gat_mode"], self.num_filters @@ -238,7 +239,7 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(self.hidden_channels, self.hidden_channels // 2), swish, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["hidden_channels"] // 2, 1), ) self.reset_parameters() @@ -256,18 +257,12 @@ def reset_parameters(self): if self.energy_head in {"weighted-av-init-embeds", "weighted-av-final-embeds"}: self.w_lin.bias.data.fill_(0) torch.nn.init.xavier_uniform_(self.w_lin.weight) - for ( - interaction_ads, - interaction_cat, - interaction_disc - ) in zip ( - self.interactions_ads, - self.interactions_cat, - self.interactions_disc + for interaction_ads, interaction_cat, interaction_disc in zip( + self.interactions_ads, self.interactions_cat, self.interactions_disc ): interaction_ads.reset_parameters() interaction_cat.reset_parameters() - #interaction_disc.reset_parameters() # need to implement this! + # interaction_disc.reset_parameters() # need to implement this! 
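# --- Illustrative sketch (not part of the original patch) ---------------------
# interaction_disc.reset_parameters() is left commented out above because
# GATInteraction does not define reset_parameters() yet. A minimal sketch,
# assuming PyG's GATConv / GATv2Conv expose their own reset_parameters()
# (recent releases do), would be to add to GATInteraction:
#
#     def reset_parameters(self):
#         self.interaction.reset_parameters()
#
# after which the call inside the loop above could be re-enabled.
# -------------------------------------------------------------------------------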
torch.nn.init.xavier_uniform_(self.lin1_ads.weight) self.lin1_ads.bias.data.fill_(0) torch.nn.init.xavier_uniform_(self.lin2_ads.weight) @@ -305,7 +300,7 @@ def energy_forward(self, data): if self.otf_graph: edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( data["adsorbate"].pos, - data["adsorbate"].natoms, + data["adsorbate"].natoms, data["adsorbate"].cell, self.cutoff, 50, @@ -313,7 +308,7 @@ def energy_forward(self, data): data["adsorbate", "is_close", "adsorbate"].edge_index = edge_index data["adsorbate"].cell_offsets = cell_offsets data["adsorbate"].neighbors = neighbors - + edge_index, cell_offsets, neighbors = radius_graph_pbc_inputs( data["catalyst"].pos, data["catalyst"].natoms, @@ -327,7 +322,9 @@ def energy_forward(self, data): # Rewire the graph # Use periodic boundary conditions - ads_rewiring, cat_rewiring = self.graph_rewiring(data, ) + ads_rewiring, cat_rewiring = self.graph_rewiring( + data, + ) edge_index_ads, edge_weight_ads, edge_attr_ads = ads_rewiring edge_index_cat, edge_weight_cat, edge_attr_cat = cat_rewiring @@ -337,27 +334,27 @@ def energy_forward(self, data): edge_weights_disc = self.distance_expansion_disc(data["is_disc"].edge_weight) edge_weights_disc = self.disc_edge_embed(edge_weights_disc) - if self.use_tag: # NOT IMPLEMENTED + if self.use_tag: # NOT IMPLEMENTED assert data["adsorbate"].tags is not None h_tag = self.tag_embedding(data.tags) h = torch.cat((h, h_tag), dim=1) - if self.phys_emb.device != data["adsorbate"].batch.device: # NOT IMPLEMENTED + if self.phys_emb.device != data["adsorbate"].batch.device: # NOT IMPLEMENTED self.phys_emb = self.phys_emb.to(data["adsorbate"].batch.device) - if self.use_phys_embeddings: # NOT IMPLEMENTED + if self.use_phys_embeddings: # NOT IMPLEMENTED h_phys = self.phys_emb.properties[z] if self.use_mlp_phys: h_phys = self.phys_lin(h_phys) h = torch.cat((h, h_phys), dim=1) - if self.use_pg: # NOT IMPLEMENTED + if self.use_pg: # NOT IMPLEMENTED # assert self.phys_emb.period is not None h_period = self.period_embedding(self.phys_emb.period[z]) h_group = self.group_embedding(self.phys_emb.group[z]) h = torch.cat((h, h_period, h_group), dim=1) - if self.use_positional_embeds: # NOT IMPLEMENTED + if self.use_positional_embeds: # NOT IMPLEMENTED idx_of_non_zero_val = (data.tags == 0).nonzero().T.squeeze(0) h_pos = torch.zeros_like(h, device=h.device) h_pos[idx_of_non_zero_val, :] = self.pe(data.subnodes).to( @@ -368,35 +365,34 @@ def energy_forward(self, data): if self.energy_head == "weighted-av-initial-embeds": alpha = self.w_lin(h) - for ( - interaction_ads, - interaction_cat, - interaction_disc - ) in zip ( - self.interactions_ads, - self.interactions_cat, - self.interactions_disc + for interaction_ads, interaction_cat, interaction_disc in zip( + self.interactions_ads, self.interactions_cat, self.interactions_disc ): - intra_ads = interaction_ads(h_ads, edge_index_ads, edge_weight_ads, edge_attr_ads) - intra_cat = interaction_cat(h_cat, edge_index_cat, edge_weight_cat, edge_attr_cat) + intra_ads = interaction_ads( + h_ads, edge_index_ads, edge_weight_ads, edge_attr_ads + ) + intra_cat = interaction_cat( + h_cat, edge_index_cat, edge_weight_cat, edge_attr_cat + ) inter_ads, inter_cat = interaction_disc( - intra_ads, - intra_cat, - data["is_disc"].edge_index, - edge_weights_disc + intra_ads, intra_cat, data["is_disc"].edge_index, edge_weights_disc ) h_ads, h_cat = h_ads + inter_ads, h_cat + inter_cat - h_ads, h_cat = nn.functional.normalize(h_ads), nn.functional.normalize(h_cat) + h_ads, h_cat = 
nn.functional.normalize(h_ads), nn.functional.normalize( + h_cat + ) pooling_loss = None # deal with pooling loss - if self.energy_head == "weighted-av-final-embeds": # NOT IMPLEMENTED + if self.energy_head == "weighted-av-final-embeds": # NOT IMPLEMENTED alpha = self.w_lin(h) elif self.energy_head == "graclus": - h, batch = self.graclus(h, edge_index, edge_weight, batch) # NOT IMPLEMENTED + h, batch = self.graclus( + h, edge_index, edge_weight, batch + ) # NOT IMPLEMENTED - if self.energy_head in {"pooling", "random"}: # NOT IMPLEMENTED + if self.energy_head in {"pooling", "random"}: # NOT IMPLEMENTED h, batch, pooling_loss = self.hierarchical_pooling( h, edge_index, edge_weight, batch ) @@ -410,13 +406,13 @@ def energy_forward(self, data): h_cat = self.act(h_cat) h_cat = self.lin2_cat(h_cat) - if self.energy_head in { # NOT IMPLEMENTED + if self.energy_head in { # NOT IMPLEMENTED "weighted-av-initial-embeds", "weighted-av-final-embeds", }: h = h * alpha - if self.atomref is not None: # NOT IMPLEMENTED + if self.atomref is not None: # NOT IMPLEMENTED h = h + self.atomref(z) # Global pooling @@ -426,7 +422,7 @@ def energy_forward(self, data): if self.scale is not None: out = self.scale * out - system = torch.concat([out_ads, out_cat], dim = 1) + system = torch.concat([out_ads, out_cat], dim=1) out = self.combination(system) return { @@ -446,7 +442,7 @@ def graph_rewiring(self, data): data[mode].cell, data[mode].cell_offsets, data[mode].neighbors, - return_distance_vec = True + return_distance_vec=True, ) edge_index = out["edge_index"] @@ -457,9 +453,9 @@ def graph_rewiring(self, data): for mode in ["adsorbate", "catalyst"]: edge_index = radius_graph( data[mode].pos, - r = self.cutoff, - batch =data[mode].batch, - max_num_neighbors = self.max_num_neighbors, + r=self.cutoff, + batch=data[mode].batch, + max_num_neighbors=self.max_num_neighbors, ) row, col = edge_index edge_weight = (pos[row] - pos[col]).norm(dim=-1) diff --git a/ocpmodels/models/base_model.py b/ocpmodels/models/base_model.py index afc57adff4..8f92000072 100644 --- a/ocpmodels/models/base_model.py +++ b/ocpmodels/models/base_model.py @@ -8,8 +8,8 @@ import torch import torch.nn as nn -from torch_geometric.nn import radius_graph from torch_geometric.data import HeteroData +from torch_geometric.nn import radius_graph from ocpmodels.common.utils import ( compute_neighbors, @@ -51,7 +51,7 @@ def forward(self, data, mode="train", **kwargs): elif type(data[0]) is HeteroData: data["adsorbate"].pos.requires_grad_(True) data["catalyst"].pos.requires_grad_(True) - else: + else: data.pos.requires_grad_(True) # predict energy @@ -72,7 +72,9 @@ def forward(self, data, mode="train", **kwargs): else: # compute forces from energy gradient try: - grad_forces = self.forces_as_energy_grad(data.pos, preds["energy"]) + grad_forces = self.forces_as_energy_grad( + data.pos, preds["energy"] + ) except: grad_forces = self.forces_as_energy_grad(data["adsorbate"].pos) diff --git a/ocpmodels/models/depdpp.py b/ocpmodels/models/depdpp.py index 5636a77681..2b47114880 100644 --- a/ocpmodels/models/depdpp.py +++ b/ocpmodels/models/depdpp.py @@ -10,6 +10,7 @@ from torch_geometric.data import Batch + @registry.register_model("depdpp") class depSchNet(DimeNetPlusPlus): def __init__(self, **kwargs): @@ -22,12 +23,12 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2), self.act, - Linear(self.hidden_channels // 2, 1) + Linear(self.hidden_channels // 2, 1), ) 
@conditional_grad(torch.enable_grad()) def energy_forward(self, data): - # We need to save the tags so this step is necessary. + # We need to save the tags so this step is necessary. self.tags_saver(data.tags) pred = super().energy_forward(data) @@ -44,7 +45,7 @@ def scattering(self, batch, h, P_bis): ads_out = scatter(h, batch * ads, dim=0) cat_out = scatter(h, batch * cat, dim=0) - system = torch.cat([ads_out, cat_out], dim = 1) + system = torch.cat([ads_out, cat_out], dim=1) system = self.combination(system) system = system + P_bis diff --git a/ocpmodels/models/depfaenet.py b/ocpmodels/models/depfaenet.py index 3711d4154b..25f6a09683 100644 --- a/ocpmodels/models/depfaenet.py +++ b/ocpmodels/models/depfaenet.py @@ -11,11 +11,10 @@ from torch_geometric.data import Batch + class discOutputBlock(conOutputBlock): - def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): - super(discOutputBlock, self).__init__( - energy_head, hidden_channels, act - ) + def __init__(self, energy_head, hidden_channels, act, disconnected_mlp=False): + super(discOutputBlock, self).__init__(energy_head, hidden_channels, act) # We modify the last output linear function to make the output a vector self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) @@ -29,14 +28,16 @@ def __init__(self, energy_head, hidden_channels, act, disconnected_mlp = False): self.combination = nn.Sequential( Linear(hidden_channels // 2 * 2, hidden_channels // 2), swish, - Linear(hidden_channels // 2, 1) + Linear(hidden_channels // 2, 1), ) def tags_saver(self, tags): self.current_tags = tags def forward(self, h, edge_index, edge_weight, batch, alpha): - if self.energy_head == "weighted-av-final-embeds": # Right now, this is the only available option. + if ( + self.energy_head == "weighted-av-final-embeds" + ): # Right now, this is the only available option. alpha = self.w_lin(h) elif self.energy_head == "graclus": @@ -61,20 +62,21 @@ def forward(self, h, edge_index, edge_weight, batch, alpha): ads = self.current_tags == 2 cat = ~ads - ads_out = scatter(h, batch * ads, dim = 0, reduce = "add") - cat_out = scatter(h, batch * cat, dim = 0, reduce = "add") + ads_out = scatter(h, batch * ads, dim=0, reduce="add") + cat_out = scatter(h, batch * cat, dim=0, reduce="add") if self.disconnected_mlp: ads_out = self.ads_lin(ads_out) cat_out = self.cat_lin(cat_out) - system = torch.cat([ads_out, cat_out], dim = 1) + system = torch.cat([ads_out, cat_out], dim=1) # Finally, we predict a number. energy = self.combination(system) return energy + @registry.register_model("depfaenet") class depFAENet(FAENet): def __init__(self, **kwargs): @@ -88,7 +90,7 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - # We need to save the tags so this step is necessary. + # We need to save the tags so this step is necessary. 
self.output_block.tags_saver(data.tags) pred = super().energy_forward(data) diff --git a/ocpmodels/models/depschnet.py b/ocpmodels/models/depschnet.py index dd20ad50f2..69f9adf83c 100644 --- a/ocpmodels/models/depschnet.py +++ b/ocpmodels/models/depschnet.py @@ -11,6 +11,7 @@ from torch_geometric.data import Batch + @registry.register_model("depschnet") class depSchNet(SchNet): def __init__(self, **kwargs): @@ -24,12 +25,12 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(self.hidden_channels // 2 * 2, self.hidden_channels // 2), swish, - Linear(self.hidden_channels // 2, 1) + Linear(self.hidden_channels // 2, 1), ) @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - # We need to save the tags so this step is necessary. + # We need to save the tags so this step is necessary. self.tags_saver(data.tags) pred = super().energy_forward(data) @@ -43,10 +44,10 @@ def scattering(self, h, batch): ads = self.current_tags == 2 cat = ~ads - ads_out = scatter(h, batch * ads, dim = 0, reduce = self.readout) - cat_out = scatter(h, batch * cat, dim = 0, reduce = self.readout) + ads_out = scatter(h, batch * ads, dim=0, reduce=self.readout) + cat_out = scatter(h, batch * cat, dim=0, reduce=self.readout) - system = torch.cat([ads_out, cat_out], dim = 1) + system = torch.cat([ads_out, cat_out], dim=1) system = self.combination(system) return system diff --git a/ocpmodels/models/faenet.py b/ocpmodels/models/faenet.py index 433eac94db..cccaf24304 100644 --- a/ocpmodels/models/faenet.py +++ b/ocpmodels/models/faenet.py @@ -396,9 +396,7 @@ def message(self, x_j, W, local_env=None): class OutputBlock(nn.Module): - def __init__( - self, energy_head, hidden_channels, act, model_name = "faenet" - ): + def __init__(self, energy_head, hidden_channels, act, model_name="faenet"): super().__init__() self.energy_head = energy_head self.act = act @@ -406,7 +404,10 @@ def __init__( self.lin1 = Linear(hidden_channels, hidden_channels // 2) if model_name == "faenet": self.lin2 = Linear(hidden_channels // 2, 1) - elif model_name in {"indfaenet", "afaenet"}: # These are models that output more than one scalar. + elif model_name in { + "indfaenet", + "afaenet", + }: # These are models that output more than one scalar. 
self.lin2 = Linear(hidden_channels // 2, hidden_channels // 2) # weighted average & pooling @@ -531,7 +532,7 @@ def __init__(self, **kwargs): "one-supernode-per-atom-type", "one-supernode-per-atom-type-dist", } - + # Gaussian Basis self.distance_expansion = GaussianSmearing( 0.0, self.cutoff, kwargs["num_gaussians"] @@ -596,7 +597,7 @@ def __init__(self, **kwargs): elif kwargs["model_name"] == "indfaenet": self.mlp_skip_co = Linear( (kwargs["num_interactions"] + 1) * kwargs["hidden_channels"] // 2, - kwargs["hidden_channels"] // 2 + kwargs["hidden_channels"] // 2, ) elif self.skip_co == "concat_atom": self.mlp_skip_co = Linear( @@ -611,7 +612,7 @@ def forces_forward(self, preds): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): # Rewire the graph - + z = data.atomic_numbers.long() pos = data.pos batch = data.batch diff --git a/ocpmodels/models/gemnet/depgemnet_t.py b/ocpmodels/models/gemnet/depgemnet_t.py index 5be91d6060..8782d1aac4 100644 --- a/ocpmodels/models/gemnet/depgemnet_t.py +++ b/ocpmodels/models/gemnet/depgemnet_t.py @@ -4,13 +4,11 @@ from ocpmodels.models.gemnet.gemnet import GemNetT from ocpmodels.common.registry import registry -from ocpmodels.common.utils import ( - conditional_grad, - scatter_det -) +from ocpmodels.common.utils import conditional_grad, scatter_det from torch_geometric.data import Batch + @registry.register_model("depgemnet_t") class depGemNetT(GemNetT): def __init__(self, **kwargs): @@ -24,7 +22,7 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - # We need to save the tags so this step is necessary. + # We need to save the tags so this step is necessary. self.tags_saver(data.tags) pred = super().energy_forward(data) @@ -38,14 +36,10 @@ def scattering(self, E_t, batch, dim, dim_size, reduce="add"): ads = self.current_tags == 2 cat = ~ads - ads_out = scatter_det( - src=E_t, index=batch * ads, dim=dim, reduce=reduce - ) - cat_out = scatter_det( - src=E_t, index=batch * cat, dim=dim, reduce=reduce - ) + ads_out = scatter_det(src=E_t, index=batch * ads, dim=dim, reduce=reduce) + cat_out = scatter_det(src=E_t, index=batch * cat, dim=dim, reduce=reduce) - system = torch.cat([ads_out, cat_out], dim = 1) + system = torch.cat([ads_out, cat_out], dim=1) system = self.sys_lin1(system) system = self.sys_lin2(system) diff --git a/ocpmodels/models/gemnet/gemnet.py b/ocpmodels/models/gemnet/gemnet.py index ac5213bc28..10bb2837c8 100644 --- a/ocpmodels/models/gemnet/gemnet.py +++ b/ocpmodels/models/gemnet/gemnet.py @@ -473,7 +473,13 @@ def generate_interaction_graph(self, data): select_cutoff = None else: select_cutoff = self.cutoff - (edge_index, cell_offsets, neighbors, D_st, V_st,) = self.select_edges( + ( + edge_index, + cell_offsets, + neighbors, + D_st, + V_st, + ) = self.select_edges( data=data, edge_index=edge_index, cell_offsets=cell_offsets, @@ -494,7 +500,7 @@ def generate_interaction_graph(self, data): ) # Indices for swapping c->a and a->c (for symmetric MP) - block_sizes = torch.div(neighbors, 2, rounding_mode="trunc") + block_sizes = torch.div(neighbors, 2, rounding_mode="trunc") id_swap = repeat_blocks( block_sizes, repeats=2, @@ -600,9 +606,7 @@ def energy_forward(self, data): } def scattering(self, E_t, batch, dim, dim_size, reduce="add"): - E_t = scatter( - E_t, batch, dim=0, dim_size=dim_size, reduce=reduce - ) + E_t = scatter(E_t, batch, dim=0, dim_size=dim_size, reduce=reduce) return E_t @conditional_grad(torch.enable_grad()) diff --git 
a/ocpmodels/models/gemnet/indgemnet_t.py b/ocpmodels/models/gemnet/indgemnet_t.py index bb1df9be26..2453541d91 100644 --- a/ocpmodels/models/gemnet/indgemnet_t.py +++ b/ocpmodels/models/gemnet/indgemnet_t.py @@ -9,8 +9,9 @@ from torch_geometric.data import Batch + @registry.register_model("indgemnet_t") -class indGemNetT(BaseModel): # Change to make it inherit from base model. +class indGemNetT(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): super().__init__() @@ -25,11 +26,14 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), self.act, - Linear(kwargs["emb_size_atom"] // 2, 1) + Linear(kwargs["emb_size_atom"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! import ipdb + ipdb.set_trace() adsorbates = data[0] @@ -43,18 +47,18 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION cat_energy = pred_cat["energy"] # We combine predictions - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, + "energy": system_energy, "E_t": pred_ads["E_t"], "idx_t": pred_ads["idx_t"], "main_graph": pred_ads["main_graph"], "num_atoms": pred_ads["num_atoms"], "pos": pred_ads["pos"], - "F_st": pred_ads["F_st"] + "F_st": pred_ads["F_st"], } return pred_system diff --git a/ocpmodels/models/gemnet_oc/agemnet_oc.py b/ocpmodels/models/gemnet_oc/agemnet_oc.py index fa5c2c474f..faf556e183 100644 --- a/ocpmodels/models/gemnet_oc/agemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/agemnet_oc.py @@ -10,8 +10,9 @@ from torch_geometric.data import Batch + @registry.register_model("agemnet_oc") -class aGemNetOC(BaseModel): # Change to make it inherit from base model. +class aGemNetOC(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): super().__init__() @@ -29,11 +30,14 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), self.act, - Linear(kwargs["emb_size_atom"] // 2, 1) + Linear(kwargs["emb_size_atom"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
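# --- Illustrative sketch (not part of the original patch) ---------------------
# The body below still carries debugging residue (import ipdb / ipdb.set_trace()).
# Note also that the rebuild loop appends to `catalyst` although the list defined
# just above it is named `catalysts`; something along these lines is presumably
# intended (hypothetical fix, also taking the .edge_index tensor rather than the
# whole edge store):
#
#     catalysts.append(
#         Data(
#             **data[i]["catalyst"]._mapping,
#             edge_index=data[i]["catalyst", "is_close", "catalyst"].edge_index,
#         )
#     )
# -------------------------------------------------------------------------------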
import ipdb + ipdb.set_trace() bip_edges = data["is_disc"].edge_index @@ -41,14 +45,18 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION adsorbates, catalysts = [], [] for i in range(len(data)): - adsorbates.append(Data( - **data[i]["adsorbate"]._mapping, - edge_index=data[i]["adsorbate", "is_close", "adsorbate"] - )) - catalyst.append(Data( - **data[i]["catalyst"]._mapping, - edge_index=data[i]["catalyst", "is_close", "catalyst"] - )) + adsorbates.append( + Data( + **data[i]["adsorbate"]._mapping, + edge_index=data[i]["adsorbate", "is_close", "adsorbate"] + ) + ) + catalyst.append( + Data( + **data[i]["catalyst"]._mapping, + edge_index=data[i]["catalyst", "is_close", "catalyst"] + ) + ) del data adsorbates = Batch.from_data_list(adsorbates) catalysts = Batch.from_data_list(catalysts) @@ -77,22 +85,19 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION inter_outputs_ads, inter_outputs_cat = self.interactions(output_ads, output_cat) - - - - ads_energy = pred_ads["energy"] cat_energy = pred_cat["energy"] # We combine predictions - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, - "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None - else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + "energy": system_energy, + "pooling_loss": pred_ads["pooling_loss"] + if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], } return pred_system @@ -100,10 +105,12 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION def interactions(self, output_ads, output_cat): h_ads, m_ads = output_ads["h"], output_ads["m"] h_cat, m_cat = output_cat["h"], output_cat["m"] - del output_ads["h"]; del output_ads["m"] - del output_cat["h"]; del output_cat["m"] + del output_ads["h"] + del output_ads["m"] + del output_cat["h"] + del output_cat["m"] - #basis_output_ads, idx + # basis_output_ads, idx return 1, 2 diff --git a/ocpmodels/models/gemnet_oc/depgemnet_oc.py b/ocpmodels/models/gemnet_oc/depgemnet_oc.py index 935cb058bc..741e1b7cb7 100644 --- a/ocpmodels/models/gemnet_oc/depgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/depgemnet_oc.py @@ -4,13 +4,11 @@ from ocpmodels.models.gemnet_oc.gemnet_oc import GemNetOC from ocpmodels.common.registry import registry -from ocpmodels.common.utils import ( - conditional_grad, - scatter_det -) +from ocpmodels.common.utils import conditional_grad, scatter_det from torch_geometric.data import Batch + @registry.register_model("depgemnet_oc") class depGemNetOC(GemNetOC): def __init__(self, **kwargs): @@ -24,7 +22,7 @@ def __init__(self, **kwargs): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): - # We need to save the tags so this step is necessary. + # We need to save the tags so this step is necessary. 
self.tags_saver(data.tags) pred = super().energy_forward(data) @@ -38,14 +36,10 @@ def scattering(self, E_t, batch, dim, dim_size, reduce="add"): ads = self.current_tags == 2 cat = ~ads - ads_out = scatter_det( - src=E_t, index=batch * ads, dim=dim, reduce=reduce - ) - cat_out = scatter_det( - src=E_t, index=batch * cat, dim=dim, reduce=reduce - ) + ads_out = scatter_det(src=E_t, index=batch * ads, dim=dim, reduce=reduce) + cat_out = scatter_det(src=E_t, index=batch * cat, dim=dim, reduce=reduce) - system = torch.cat([ads_out, cat_out], dim = 1) + system = torch.cat([ads_out, cat_out], dim=1) system = self.sys_lin1(system) system = self.sys_lin2(system) diff --git a/ocpmodels/models/gemnet_oc/gemnet_oc.py b/ocpmodels/models/gemnet_oc/gemnet_oc.py index e67538f66f..fdcd882b6f 100644 --- a/ocpmodels/models/gemnet_oc/gemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/gemnet_oc.py @@ -860,10 +860,10 @@ def subselect_edges( subgraph["distance"] = subgraph["distance"][edge_mask] subgraph["vector"] = subgraph["vector"][edge_mask] - empty_image = subgraph["num_neighbors"] == 0 if torch.any(empty_image): import ipdb + ipdb.set_trace() raise ValueError( f"An image has no neighbors: id={data.id[empty_image]}, " @@ -1216,15 +1216,14 @@ def energy_forward(self, data): if self.regress_forces and not self.direct_forces: pos.requires_grad_(True) - outputs = self.pre_interaction( - pos, batch, atomic_numbers, num_atoms, data - ) + outputs = self.pre_interaction(pos, batch, atomic_numbers, num_atoms, data) - #h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F + # h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F interaction_outputs = self.interactions(outputs) E_t, idx_t, F_st = self.post_interactions( - batch=batch, **interaction_outputs, + batch=batch, + **interaction_outputs, ) return { @@ -1252,7 +1251,7 @@ def post_interactions(self, h, m, basis_output, idx_t, x_E, x_F, xs_E, xs_F, bat if self.extensive: E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="add" - ) # (nMolecules, num_targets) + ) # (nMolecules, num_targets) else: E_t = self.scattering( E_t, batch, dim=0, dim_size=nMolecules, reduce="mean" @@ -1300,14 +1299,14 @@ def interactions(self, outputs): xs_F.append(x_F) interaction_outputs = { - "h" : h, - "m" : m, - "basis_output" : basis_output, - "idx_t" : idx_t, - "x_E" : x_E, - "x_F" : x_F, - "xs_E" : xs_E, - "xs_F" : xs_F + "h": h, + "m": m, + "basis_output": basis_output, + "idx_t": idx_t, + "x_E": x_E, + "x_F": x_F, + "xs_E": xs_E, + "xs_F": xs_F, } return interaction_outputs @@ -1357,29 +1356,29 @@ def pre_interaction(self, pos, batch, atomic_numbers, num_atoms, data): xs_E, xs_F = [x_E], [x_F] outputs = { - "main_graph" : main_graph, - "a2a_graph" : a2a_graph, - "a2ee2a_graph" : a2ee2a_graph, - "id_swap" : id_swap, - "trip_idx_e2e" : trip_idx_e2e, - "trip_idx_a2e" : trip_idx_a2e, - "trip_idx_e2a" : trip_idx_e2a, - "quad_idx" : quad_idx, - "idx_t" : idx_t, - "basis_rad_raw" : basis_rad_raw, - "basis_atom_update" : basis_atom_update, - "basis_output" : basis_output, - "bases_qint" : bases_qint, - "bases_e2e" : bases_e2e, - "bases_a2e" : bases_a2e, - "bases_e2a" :bases_e2a, - "basis_a2a_rad" : basis_a2a_rad, - "h" : h, - "m" : m, - "x_E" : x_E, - "x_F" : x_F, - "xs_E" : xs_E, - "xs_F" : xs_F, + "main_graph": main_graph, + "a2a_graph": a2a_graph, + "a2ee2a_graph": a2ee2a_graph, + "id_swap": id_swap, + "trip_idx_e2e": trip_idx_e2e, + "trip_idx_a2e": trip_idx_a2e, + "trip_idx_e2a": trip_idx_e2a, + "quad_idx": quad_idx, + "idx_t": idx_t, + "basis_rad_raw": basis_rad_raw, + 
"basis_atom_update": basis_atom_update, + "basis_output": basis_output, + "bases_qint": bases_qint, + "bases_e2e": bases_e2e, + "bases_a2e": bases_a2e, + "bases_e2a": bases_e2a, + "basis_a2a_rad": basis_a2a_rad, + "h": h, + "m": m, + "x_E": x_E, + "x_F": x_F, + "xs_E": xs_E, + "xs_F": xs_F, } return outputs @@ -1394,7 +1393,6 @@ def scattering(self, E_t, batch, dim, dim_size, reduce="add"): @conditional_grad(torch.enable_grad()) def forces_forward(self, preds): - idx_t = preds["idx_t"] main_graph = preds["main_graph"] num_atoms = preds["num_atoms"] diff --git a/ocpmodels/models/gemnet_oc/indgemnet_oc.py b/ocpmodels/models/gemnet_oc/indgemnet_oc.py index 83d6fce700..b2f5ff7f4b 100644 --- a/ocpmodels/models/gemnet_oc/indgemnet_oc.py +++ b/ocpmodels/models/gemnet_oc/indgemnet_oc.py @@ -9,8 +9,9 @@ from torch_geometric.data import Batch + @registry.register_model("indgemnet_oc") -class indGemNetOC(BaseModel): # Change to make it inherit from base model. +class indGemNetOC(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): super().__init__() @@ -25,11 +26,14 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["emb_size_atom"] // 2 * 2, kwargs["emb_size_atom"] // 2), self.act, - Linear(kwargs["emb_size_atom"] // 2, 1) + Linear(kwargs["emb_size_atom"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! import ipdb + ipdb.set_trace() adsorbates = data[0] @@ -43,18 +47,18 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION cat_energy = pred_cat["energy"] # We combine predictions - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, + "energy": system_energy, "E_t": pred_ads["E_t"], "idx_t": pred_ads["idx_t"], "main_graph": pred_ads["main_graph"], "num_atoms": pred_ads["num_atoms"], "pos": pred_ads["pos"], - "F_st": pred_ads["F_st"] + "F_st": pred_ads["F_st"], } return pred_system diff --git a/ocpmodels/models/inddpp.py b/ocpmodels/models/inddpp.py index bd495bb2d0..a5130424f6 100644 --- a/ocpmodels/models/inddpp.py +++ b/ocpmodels/models/inddpp.py @@ -9,8 +9,9 @@ from torch_geometric.data import Batch + @registry.register_model("inddpp") -class indDimeNetPlusPlus(BaseModel): # Change to make it inherit from base model. +class indDimeNetPlusPlus(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): super().__init__() @@ -37,10 +38,12 @@ def __init__(self, **kwargs): self.combination = nn.Sequential( Linear(kwargs["num_targets"] + old_targets, kwargs["num_targets"] // 2), self.act, - Linear(kwargs["num_targets"] // 2, 1) + Linear(kwargs["num_targets"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
adsorbates = data[0] catalysts = data[1] @@ -52,14 +55,15 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION cat_energy = pred_cat["energy"] # We combine predictions - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, - "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None - else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + "energy": system_energy, + "pooling_loss": pred_ads["pooling_loss"] + if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], } return pred_system diff --git a/ocpmodels/models/indfaenet.py b/ocpmodels/models/indfaenet.py index 7f28070fe6..56d27ee680 100644 --- a/ocpmodels/models/indfaenet.py +++ b/ocpmodels/models/indfaenet.py @@ -10,11 +10,12 @@ from torch_geometric.data import Batch + # Implementation of positional encoding obtained from Harvard's annotated transformer's guide class PositionalEncoding(nn.Module): - def __init__(self, d_model, dropout = 0.1, max_len = 5): + def __init__(self, d_model, dropout=0.1, max_len=5): super(PositionalEncoding, self).__init__() - self.dropout = nn.Dropout(p = dropout) + self.dropout = nn.Dropout(p=dropout) # Compute the positional encodings once in log space. pe = torch.zeros(max_len, d_model) @@ -31,8 +32,9 @@ def forward(self, x): x = x + self.pe[:, : x.size(1)].requires_grad_(False) return self.dropout(x) + @registry.register_model("indfaenet") -class indFAENet(BaseModel): # Change to make it inherit from base model. +class indFAENet(BaseModel): # Change to make it inherit from base model. def __init__(self, **kwargs): super(indFAENet, self).__init__() @@ -56,34 +58,43 @@ def __init__(self, **kwargs): self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: - self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) - self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) + self.ads_lin = Linear( + kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 + ) + self.cat_lin = Linear( + kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 + ) self.transformer_out = kwargs.get("transformer_out", False) if self.transformer_out: self.combination = Transformer( - d_model = kwargs["hidden_channels"] // 2, - nhead = 2, - num_encoder_layers = 2, - num_decoder_layers = 2, - dim_feedforward = kwargs["hidden_channels"], - batch_first = True + d_model=kwargs["hidden_channels"] // 2, + nhead=2, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=kwargs["hidden_channels"], + batch_first=True, ) self.positional_encoding = PositionalEncoding( kwargs["hidden_channels"] // 2, - dropout = 0.1, - max_len = 5, + dropout=0.1, + max_len=5, ) self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) else: self.combination = nn.Sequential( - Linear(kwargs["hidden_channels"] // 2 + old_hc // 2, kwargs["hidden_channels"] // 2), + Linear( + kwargs["hidden_channels"] // 2 + old_hc // 2, + kwargs["hidden_channels"] // 2, + ), self.act, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["hidden_channels"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
+ def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! adsorbates = data[0] catalysts = data[1] @@ -100,30 +111,31 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION # We combine predictions if self.transformer_out: batch_size = ads_energy.shape[0] - - fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + + fake_target_sequence = ( + self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + ) system_energy = torch.cat( - [ - ads_energy.unsqueeze(1), - cat_energy.unsqueeze(1) - ], - dim = 1 + [ads_energy.unsqueeze(1), cat_energy.unsqueeze(1)], dim=1 ) system_energy = self.positional_encoding(system_energy) - - system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + + system_energy = self.combination( + system_energy, fake_target_sequence + ).squeeze(1) system_energy = self.transformer_lin(system_energy) else: - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, - "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None - else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], - "hidden_state" : pred_ads["hidden_state"] + "energy": system_energy, + "pooling_loss": pred_ads["pooling_loss"] + if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], + "hidden_state": pred_ads["hidden_state"], } return pred_system diff --git a/ocpmodels/models/indschnet.py b/ocpmodels/models/indschnet.py index 5d793a5158..16df76945e 100644 --- a/ocpmodels/models/indschnet.py +++ b/ocpmodels/models/indschnet.py @@ -9,11 +9,12 @@ from torch_geometric.data import Batch + # Implementation of positional encoding obtained from Harvard's annotated transformer's guide class PositionalEncoding(nn.Module): - def __init__(self, d_model, dropout = 0.1, max_len = 5): + def __init__(self, d_model, dropout=0.1, max_len=5): super(PositionalEncoding, self).__init__() - self.dropout = nn.Dropout(p = dropout) + self.dropout = nn.Dropout(p=dropout) # Compute the positional encodings once in log space. pe = torch.zeros(max_len, d_model) @@ -30,8 +31,9 @@ def forward(self, x): x = x + self.pe[:, : x.size(1)].requires_grad_(False) return self.dropout(x) + @registry.register_model("indschnet") -class indSchNet(BaseModel): # Change to make it inherit from base model. +class indSchNet(BaseModel): # Change to make it inherit from base model. 
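# --- Illustrative note (not part of the original patch) -----------------------
# PositionalEncoding above is the standard sinusoidal table from the annotated
# transformer: pe[pos, 2i] = sin(pos / 10000^(2i / d_model)) and
# pe[pos, 2i + 1] = cos(pos / 10000^(2i / d_model)), precomputed for max_len
# positions and added to the first x.size(1) steps of the input before dropout.
# As used in energy_forward below, the sequence only ever has length 2 (one
# adsorbate vector and one catalyst vector), so max_len = 5 is ample.
# -------------------------------------------------------------------------------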
def __init__(self, **kwargs): super(indSchNet, self).__init__() @@ -51,35 +53,44 @@ def __init__(self, **kwargs): self.disconnected_mlp = kwargs.get("disconnected_mlp", False) if self.disconnected_mlp: - self.ads_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) - self.cat_lin = Linear(kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2) + self.ads_lin = Linear( + kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 + ) + self.cat_lin = Linear( + kwargs["hidden_channels"] // 2, kwargs["hidden_channels"] // 2 + ) self.transformer_out = kwargs.get("transformer_out", False) self.act = swish if self.transformer_out: self.combination = Transformer( - d_model = kwargs["hidden_channels"] // 2, - nhead = 2, - num_encoder_layers = 2, - num_decoder_layers = 2, - dim_feedforward = kwargs["hidden_channels"], - batch_first = True + d_model=kwargs["hidden_channels"] // 2, + nhead=2, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=kwargs["hidden_channels"], + batch_first=True, ) self.positional_encoding = PositionalEncoding( kwargs["hidden_channels"] // 2, - dropout = 0.1, - max_len = 5, + dropout=0.1, + max_len=5, ) self.query_pos = nn.Parameter(torch.rand(kwargs["hidden_channels"] // 2)) self.transformer_lin = Linear(kwargs["hidden_channels"] // 2, 1) else: self.combination = nn.Sequential( - Linear(kwargs["hidden_channels"] // 2 + old_hc // 2, kwargs["hidden_channels"] // 2), + Linear( + kwargs["hidden_channels"] // 2 + old_hc // 2, + kwargs["hidden_channels"] // 2, + ), self.act, - Linear(kwargs["hidden_channels"] // 2, 1) + Linear(kwargs["hidden_channels"] // 2, 1), ) - def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! + def energy_forward( + self, data, mode="train" + ): # PROBLEM TO FIX: THE PREDICTION IS BY AN AVERAGE! 
adsorbates = data[0] catalysts = data[1] @@ -96,29 +107,30 @@ def energy_forward(self, data, mode = "train"): # PROBLEM TO FIX: THE PREDICTION # We combine predictions if self.transformer_out: batch_size = ads_energy.shape[0] - - fake_target_sequence = self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + + fake_target_sequence = ( + self.query_pos.unsqueeze(0).expand(batch_size, -1).unsqueeze(1) + ) system_energy = torch.cat( - [ - ads_energy.unsqueeze(1), - cat_energy.unsqueeze(1) - ], - dim = 1 + [ads_energy.unsqueeze(1), cat_energy.unsqueeze(1)], dim=1 ) system_energy = self.positional_encoding(system_energy) - - system_energy = self.combination(system_energy, fake_target_sequence).squeeze(1) + + system_energy = self.combination( + system_energy, fake_target_sequence + ).squeeze(1) system_energy = self.transformer_lin(system_energy) else: - system_energy = torch.cat([ads_energy, cat_energy], dim = 1) + system_energy = torch.cat([ads_energy, cat_energy], dim=1) system_energy = self.combination(system_energy) # We return them pred_system = { - "energy" : system_energy, - "pooling_loss" : pred_ads["pooling_loss"] if pred_ads["pooling_loss"] is None - else pred_ads["pooling_loss"] + pred_cat["pooling_loss"] + "energy": system_energy, + "pooling_loss": pred_ads["pooling_loss"] + if pred_ads["pooling_loss"] is None + else pred_ads["pooling_loss"] + pred_cat["pooling_loss"], } return pred_system diff --git a/ocpmodels/models/painn.py b/ocpmodels/models/painn.py index 2eda37dc1c..0d60ef6e0c 100644 --- a/ocpmodels/models/painn.py +++ b/ocpmodels/models/painn.py @@ -613,6 +613,7 @@ def forces_forward(self, preds): @conditional_grad(torch.enable_grad()) def energy_forward(self, data): import ipdb + ipdb.set_trace() pos = data.pos diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 8dd404ea32..17c2243f0b 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -154,33 +154,57 @@ def __init__(self, **kwargs): (run_dir / f"config-{JOB_ID}.yaml").write_text(yaml.dump(self.config)) # Here's the models whose edges are removed as a transform - transform_models = ["depfaenet", "depschnet", "depgemnet_oc", "depgemnet_t", "depdpp"] + transform_models = [ + "depfaenet", + "depschnet", + "depgemnet_oc", + "depgemnet_t", + "depdpp", + ] if self.config["is_disconnected"]: print("\n\nHeads up: cat-ads edges being removed!") if self.config["model_name"] in transform_models: if not self.config["is_disconnected"]: - print(f"\n\nWhen using {self.config['model_name']},", - "the flag 'is_disconnected' should be used! The flag has been turned on.\n") + print( + f"\n\nWhen using {self.config['model_name']},", + "the flag 'is_disconnected' should be used! 
The flag has been turned on.\n", + ) self.config["is_disconnected"] = True # Here's the models whose graphs are disconnected in the dataset - self.separate_models = ["indfaenet", "indschnet", "indgemnet_oc", "indgemnet_t", "inddpp"] - self.heterogeneous_models = ["afaenet", "aschnet", "agemnet_oc", "agemnet_t", "adpp"] + self.separate_models = [ + "indfaenet", + "indschnet", + "indgemnet_oc", + "indgemnet_t", + "inddpp", + ] + self.heterogeneous_models = [ + "afaenet", + "aschnet", + "agemnet_oc", + "agemnet_t", + "adpp", + ] self.data_mode = "normal" self.separate_dataset = False if self.config["model_name"] in self.separate_models: self.data_mode = "separate" - print("\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n") + print( + "\n\nHeads up: using separate dataset, so ads/cats are separated before transforms.\n" + ) elif self.config["model_name"] in self.heterogeneous_models: self.data_mode = "heterogeneous" - print("\n\nHeads up: using heterogeneous dataset, so ads/cats are stored separately in a het graph.\n") + print( + "\n\nHeads up: using heterogeneous dataset, so ads/cats are stored separately in a het graph.\n" + ) self.load() self.evaluator = Evaluator( - task = self.task_name, - model_regresses_forces = self.config["model"].get("regress_forces", ""), + task=self.task_name, + model_regresses_forces=self.config["model"].get("regress_forces", ""), ) def load(self): @@ -267,14 +291,14 @@ def load_datasets(self): continue if self.data_mode == "separate": - self.datasets[split] = registry.get_dataset_class( - "separate" - )(ds_conf, transform=transform) + self.datasets[split] = registry.get_dataset_class("separate")( + ds_conf, transform=transform + ) elif self.data_mode == "heterogeneous": - self.datasets[split] = registry.get_dataset_class( - "heterogeneous" - )(ds_conf, transform=transform) + self.datasets[split] = registry.get_dataset_class("heterogeneous")( + ds_conf, transform=transform + ) else: self.datasets[split] = registry.get_dataset_class( @@ -282,7 +306,9 @@ def load_datasets(self): )(ds_conf, transform=transform) if self.config["lowest_energy_only"]: - with open('/network/scratch/a/alvaro.carbonero/lowest_energy.pkl', 'rb') as fp: + with open( + "/network/scratch/a/alvaro.carbonero/lowest_energy.pkl", "rb" + ) as fp: good_indices = pickle.load(fp) good_indices = list(good_indices) @@ -410,7 +436,7 @@ def load_model(self): "task_name": self.task_name, }, **self.config["model"], - "model_name": self.config["model_name"], + "model_name": self.config["model_name"], } self.model = registry.get_model_class(self.config["model_name"])( @@ -1103,7 +1129,6 @@ def measure_inference_time(self, loops=1): with timer.next("forward"): _ = self.model_forward(b, mode="inference") - # divide times by batch size mean, std = timer.prepare_for_logging( map_funcs={ diff --git a/ocpmodels/trainers/single_trainer.py b/ocpmodels/trainers/single_trainer.py index b6f55fe9fa..c83c733728 100644 --- a/ocpmodels/trainers/single_trainer.py +++ b/ocpmodels/trainers/single_trainer.py @@ -507,11 +507,17 @@ def model_forward(self, batch_list, mode="train"): # Compute model prediction for each frame for i in range(fa_pos_length): if self.data_mode == "heterogeneous": - batch_list[0]["adsorbate"].pos = batch_list[0]["adsorbate"].fa_pos[i] + batch_list[0]["adsorbate"].pos = batch_list[0]["adsorbate"].fa_pos[ + i + ] batch_list[0]["catalyst"].pos = batch_list[0]["catalyst"].fa_pos[i] if self.task_name in OCP_TASKS: - batch_list[0]["adsorbate"].cell = 
batch_list[0]["adsorbate"].fa_cell[i] - batch_list[0]["catalyst"].cell = batch_list[0]["catalyst"].fa_cell[i] + batch_list[0]["adsorbate"].cell = batch_list[0][ + "adsorbate" + ].fa_cell[i] + batch_list[0]["catalyst"].cell = batch_list[0][ + "catalyst" + ].fa_cell[i] elif self.data_mode == "separate": batch_list[0][0].pos = batch_list[0][0].fa_pos[i] batch_list[0][1].pos = batch_list[0][1].fa_pos[i] @@ -606,7 +612,7 @@ def compute_loss(self, preds, batch_list): else batch["adsorbate"].y.to(self.device) for batch in batch_list ], - dim=0 + dim=0, ) elif self.data_mode == "separate": @@ -717,15 +723,18 @@ def compute_metrics( self, preds: Dict, batch_list: List[Data], evaluator: Evaluator, metrics={} ): if self.data_mode == "heterogeneous": - natoms = (batch_list[0]["adsorbate"].natoms.to(self.device) - + batch_list[0]["catalyst"].natoms.to(self.device)) + natoms = batch_list[0]["adsorbate"].natoms.to(self.device) + batch_list[0][ + "catalyst" + ].natoms.to(self.device) target = { "energy": batch_list[0]["adsorbate"].y_relaxed.to(self.device), "natoms": natoms, } elif self.data_mode == "separate": - natoms = batch_list[0][0].natoms.to(self.device) + batch_list[0][1].natoms.to(self.device) + natoms = batch_list[0][0].natoms.to(self.device) + batch_list[0][ + 1 + ].natoms.to(self.device) target = { "energy": batch_list[0][0].y_relaxed.to(self.device), "natoms": natoms, @@ -743,12 +752,11 @@ def compute_metrics( else batch.y.to(self.device) for batch in batch_list ], - dim = 0, + dim=0, ), "natoms": natoms, } - if self.config["model"].get("regress_forces", False): target["forces"] = torch.cat( [batch.force.to(self.device) for batch in batch_list], dim=0 diff --git a/scripts/gnn_dev.py b/scripts/gnn_dev.py index ddca60ee3d..ca8b47aa43 100644 --- a/scripts/gnn_dev.py +++ b/scripts/gnn_dev.py @@ -49,4 +49,4 @@ predictions = trainer.predict( trainer.val_loader, results_file="is2re_results", disable_tqdm=False - ) \ No newline at end of file + ) From b4ba938f2252445beea67867817e66a1739e3432 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Wed, 29 Nov 2023 16:26:55 -0500 Subject: [PATCH 124/131] import changes from #48 --- configs/models/tasks/is2re.yaml | 2 + ocpmodels/datasets/lmdb_dataset.py | 199 +++++++++++++++++++---------- ocpmodels/trainers/base_trainer.py | 29 ++++- 3 files changed, 154 insertions(+), 76 deletions(-) diff --git a/configs/models/tasks/is2re.yaml b/configs/models/tasks/is2re.yaml index cf47f159de..787e20295f 100644 --- a/configs/models/tasks/is2re.yaml +++ b/configs/models/tasks/is2re.yaml @@ -16,6 +16,8 @@ default: otf_graph: False max_num_neighbors: 40 mode: train + adsorbates: all # {"*O", "*OH", "*OH2", "*H"} + adsorbates_ref_dir: /network/scratch/s/schmidtv/ocp/datasets/ocp/per_ads dataset: default_val: val_id train: diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index ec953b4a28..0a7abaea85 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -6,6 +6,7 @@ """ import bisect +import json import logging import pickle import time @@ -16,7 +17,7 @@ import numpy as np import torch from torch.utils.data import Dataset -from torch_geometric.data import Batch, HeteroData, Data +from torch_geometric.data import Batch from ocpmodels.common.registry import registry from ocpmodels.common.utils import pyg2_data_transform @@ -36,15 +37,35 @@ class LmdbDataset(Dataset): config (dict): Dataset configuration transform (callable, optional): Data transform function. 
(default: :obj:`None`) + fa_frames (str, optional): type of frame averaging method applied, if any. + adsorbates (str, optional): comma-separated list of adsorbates to filter. + If None or "all", no filtering is applied. + (default: None) + adsorbates_ref_dir: where metadata files for adsorbates are stored. + (default: "/network/scratch/s/schmidtv/ocp/datasets/ocp/per_ads") """ - def __init__(self, config, transform=None, fa_frames=None): - super(LmdbDataset, self).__init__() + def __init__( + self, + config, + transform=None, + fa_frames=None, + lmdb_glob=None, + adsorbates=None, + adsorbates_ref_dir=None, + ): + super().__init__() self.config = config + self.adsorbates = adsorbates + self.adsorbates_ref_dir = adsorbates_ref_dir self.path = Path(self.config["src"]) if not self.path.is_file(): db_paths = sorted(self.path.glob("*.lmdb")) + if lmdb_glob: + db_paths = [ + p for p in db_paths if any(lg in p.stem for lg in lmdb_glob) + ] assert len(db_paths) > 0, f"No LMDBs found in '{self.path}'" self.metadata_path = self.path / "metadata.npz" @@ -58,7 +79,7 @@ def __init__(self, config, transform=None, fa_frames=None): else: length = self.envs[-1].stat()["entries"] assert length is not None, f"Could not find length of LMDB {db_path}" - self._keys.append(list(range(length))) + self._keys.append([str(i).encode("ascii") for i in range(length)]) keylens = [len(k) for k in self._keys] self._keylen_cumulative = np.cumsum(keylens).tolist() @@ -71,14 +92,78 @@ def __init__(self, config, transform=None, fa_frames=None): ] self.num_samples = len(self._keys) + self.filter_per_adsorbates() self.transform = transform - self.fa_frames = fa_frames + self.fa_method = fa_frames + + def filter_per_adsorbates(self): + """Filter the dataset to only include structures with a specific + adsorbate. + """ + # no adsorbates specified, or asked for all: return + if not self.adsorbates or self.adsorbates == "all": + return + + # val_ood_ads and val_ood_both don't have targeted adsorbates + if self.config["src"].split("/")[-2] in {"val_ood_ads", "val_ood_both"}: + return + + # make set of adsorbates from a list or a string. If a string, split on comma. + ads = [] + if isinstance(self.adsorbates, str): + if "," in self.adsorbates: + ads = [a.strip() for a in self.adsorbates.split(",")] + else: + ads = [self.adsorbates] + else: + ads = self.adsorbates + ads = set(ads) + + # find reference file for this dataset + ref_path = self.adsorbates_ref_dir + if not ref_path: + print("No adsorbate reference directory provided as `adsorbate_ref_dir`.") + return + ref_path = Path(ref_path) + if not ref_path.is_dir(): + print(f"Adsorbate reference directory {ref_path} does not exist.") + return + pattern = "-".join(self.path.parts[-3:]) + candidates = list(ref_path.glob(f"*{pattern}*.json")) + if not candidates: + print(f"No adsorbate reference files found for {self.path.name}.") + return + if len(candidates) > 1: + print( + f"Multiple adsorbate reference files found for {self.path.name}." + "Using the first one." 
+ ) + ref = json.loads(candidates[0].read_text()) + + # find dataset indices with the appropriate adsorbates + allowed_idxs = set( + str(i).encode("ascii") + for i, a in zip(ref["ds_idx"], ref["ads_symbols"]) + if a in ads + ) + + # filter the dataset indices + if isinstance(self._keys[0], bytes): + self._keys = [i for i in self._keys if i in allowed_idxs] + self.num_samples = len(self._keys) + else: + assert isinstance(self._keys[0], list) + self._keys = [[i for i in k if i in allowed_idxs] for k in self._keys] + keylens = [len(k) for k in self._keys] + self._keylen_cumulative = np.cumsum(keylens).tolist() + self.num_samples = sum(keylens) + + assert self.num_samples > 0, f"No samples found for adsorbates {ads}." def __len__(self): return self.num_samples - def __getitem__(self, idx): - t0 = time.time_ns() + def get_pickled_from_db(self, idx): if not self.path.is_file(): # Figure out which db this should be indexed from. db_idx = bisect.bisect(self._keylen_cumulative, idx) @@ -89,16 +174,20 @@ def __getitem__(self, idx): assert el_idx >= 0 # Return features. - datapoint_pickled = ( - self.envs[db_idx] - .begin() - .get(f"{self._keys[db_idx][el_idx]}".encode("ascii")) + return ( + f"{db_idx}_{el_idx}", + self.envs[db_idx].begin().get(self._keys[db_idx][el_idx]), ) - data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) - data_object.id = f"{db_idx}_{el_idx}" - else: - datapoint_pickled = self.env.begin().get(self._keys[idx]) - data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) + + return None, self.env.begin().get(self._keys[idx]) + + def __getitem__(self, idx): + t0 = time.time_ns() + + el_id, datapoint_pickled = self.get_pickled_from_db(idx) + data_object = pyg2_data_transform(pickle.loads(datapoint_pickled)) + if el_id: + data_object.id = el_id t1 = time.time_ns() if self.transform is not None: @@ -112,6 +201,7 @@ def __getitem__(self, idx): data_object.load_time = load_time data_object.transform_time = transform_time data_object.total_get_time = total_get_time + data_object.idx_in_dataset = idx return data_object @@ -137,6 +227,27 @@ def close_db(self): self.env.close() +@registry.register_dataset("deup_lmdb") +class DeupDataset(LmdbDataset): + def __init__(self, all_datasets_configs, deup_split, transform=None): + super().__init__( + all_datasets_configs[deup_split], + lmdb_glob=deup_split.replace("deup-", "").split("-"), + ) + ocp_splits = deup_split.split("-")[1:] + self.ocp_datasets = { + d: LmdbDataset(all_datasets_configs[d], transform) for d in ocp_splits + } + + def __getitem__(self, idx): + _, datapoint_pickled = self.get_pickled_from_db(idx) + deup_sample = pickle.loads(datapoint_pickled) + ocp_sample = self.ocp_datasets[deup_sample["ds"]][deup_sample["idx_in_dataset"]] + for k, v in deup_sample.items(): + setattr(ocp_sample, f"deup_{k}", v) + return ocp_sample + + class SinglePointLmdbDataset(LmdbDataset): def __init__(self, config, transform=None): super(SinglePointLmdbDataset, self).__init__(config, transform) @@ -157,24 +268,8 @@ def __init__(self, config, transform=None): ) -# In this function, we combine a list of samples into a batch. Notice that we first create the batch, then we fix -# the neighbor problem: that some elements in the batch don't have edges, which pytorch geometric doesn't handle well -# and which leads to errors in the forward step. 
-def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ever used - # FIRST, MAKE BATCH - - if ( # This is for indfaenet - type(data_list[0]) is tuple and type(data_list[0][0]) is Data - ): - adsorbates = [system[0] for system in data_list] - catalysts = [system[1] for system in data_list] - - ads_batch = Batch.from_data_list(adsorbates) - cat_batch = Batch.from_data_list(catalysts) - else: - batch = Batch.from_data_list(data_list) - - # THEN, FIX NEIGHBOR PROBLEM +def data_list_collater(data_list, otf_graph=False): + batch = Batch.from_data_list(data_list) if ( not otf_graph @@ -192,40 +287,4 @@ def data_list_collater(data_list, otf_graph=False): # Check if len(batch) is ev "LMDB does not contain edge index information, set otf_graph=True" ) - elif ( # This is for indfaenet - not otf_graph and type(data_list[0]) is tuple and type(data_list[0][0]) is Data - ): - batches = [ads_batch, cat_batch] - lists = [adsorbates, catalysts] - for batch, list_type in zip(batches, lists): - n_neighbors = [] - for i, data in enumerate(list_type): - n_index = data.edge_index[1, :] - n_neighbors.append(n_index.shape[0]) - batch.neighbors = torch.tensor(n_neighbors) - - return batches - - elif not otf_graph and type(data_list[0]) is HeteroData: # This is for afaenet - # First, fix the neighborhood dimension. - n_neighbors_ads = [] - n_neighbors_cat = [] - for i, data in enumerate(data_list): - n_index_ads = data["adsorbate", "is_close", "adsorbate"].edge_index - n_index_cat = data["catalyst", "is_close", "catalyst"].edge_index - n_neighbors_ads.append(n_index_ads[1, :].shape[0]) - n_neighbors_cat.append(n_index_cat[1, :].shape[0]) - batch["adsorbate"].neighbors = torch.tensor(n_neighbors_ads) - batch["catalyst"].neighbors = torch.tensor(n_neighbors_cat) - - # Then, fix the edge index between ads and cats. 
- sender, receiver = batch["is_disc"].edge_index - ads_to_cat = torch.stack([sender, receiver + batch["adsorbate"].num_nodes]) - cat_to_ads = torch.stack([ads_to_cat[1], ads_to_cat[0]]) - batch["is_disc"].edge_index = torch.concat([ads_to_cat, cat_to_ads], dim=1) - - batch["is_disc"].edge_weight = torch.concat( - [batch["is_disc"].edge_weight, -batch["is_disc"].edge_weight], dim=0 - ) - return batch diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 17c2243f0b..0b36b08405 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -8,12 +8,13 @@ import errno import logging import os +import pickle import random import time -import pickle from abc import ABC, abstractmethod from collections import defaultdict from copy import deepcopy +from uuid import uuid4 import numpy as np import torch @@ -26,7 +27,7 @@ from torch.utils.data import DataLoader, Subset from torch_geometric.data import Batch from tqdm import tqdm -from uuid import uuid4 + from ocpmodels.common import dist_utils from ocpmodels.common.data_parallel import ( BalancedBatchSampler, @@ -36,7 +37,12 @@ from ocpmodels.common.graph_transforms import RandomReflect, RandomRotate from ocpmodels.common.registry import registry from ocpmodels.common.timer import Times -from ocpmodels.common.utils import JOB_ID, get_commit_hash, save_checkpoint, resolve +from ocpmodels.common.utils import ( + JOB_ID, + get_commit_hash, + resolve, + save_checkpoint, +) from ocpmodels.datasets.data_transforms import FrameAveraging, get_transforms from ocpmodels.modules.evaluator import Evaluator from ocpmodels.modules.exponential_moving_average import ( @@ -292,18 +298,29 @@ def load_datasets(self): if self.data_mode == "separate": self.datasets[split] = registry.get_dataset_class("separate")( - ds_conf, transform=transform + ds_conf, + transform=transform, + adsorbates=self.config.get("adsorbates"), + adsorbates_ref_dir=self.config.get("adsorbates_ref_dir"), ) elif self.data_mode == "heterogeneous": self.datasets[split] = registry.get_dataset_class("heterogeneous")( - ds_conf, transform=transform + ds_conf, + transform=transform, + adsorbates=self.config.get("adsorbates"), + adsorbates_ref_dir=self.config.get("adsorbates_ref_dir"), ) else: self.datasets[split] = registry.get_dataset_class( self.config["task"]["dataset"] - )(ds_conf, transform=transform) + )( + ds_conf, + transform=transform, + adsorbates=self.config.get("adsorbates"), + adsorbates_ref_dir=self.config.get("adsorbates_ref_dir"), + ) if self.config["lowest_energy_only"]: with open( From 6445a5c6de24aef09d3313c1fe6c34fb1ce58945 Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Sat, 6 Jan 2024 07:39:09 -0500 Subject: [PATCH 125/131] fine-tune / retrain depfaenet on per-ads-dataset --- configs/exps/alvaro/gflownet.yaml | 64 +++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 configs/exps/alvaro/gflownet.yaml diff --git a/configs/exps/alvaro/gflownet.yaml b/configs/exps/alvaro/gflownet.yaml new file mode 100644 index 0000000000..bfb88b3eca --- /dev/null +++ b/configs/exps/alvaro/gflownet.yaml @@ -0,0 +1,64 @@ +job: + mem: 32GB + cpus: 4 + gres: gpu:rtx8000:1 + partition: long + time: 15:00:00 + +default: + # wandb_name: alvaro-carbonero-math + wandb_project: ocp-alvaro + wandb_tags: "gflownet-model" + test_ri: True + mode: train + graph_rewiring: remove-tag-0 + # graph_rewiring: "" + frame_averaging: 2D + fa_method: se3-random + cp_data_to_tmpdir: True + is_disconnected: true + model: + 
edge_embed_type: all_rij + mp_type: updownscale_base + phys_embeds: True + tag_hidden_channels: 32 + pg_hidden_channels: 96 + energy_head: weighted-av-final-embeds + complex_mp: True + graph_norm: True + hidden_channels: 352 + num_filters: 288 + num_gaussians: 68 + num_interactions: 5 + second_layer_MLP: False + skip_co: concat + cutoff: 4.0 + optim: + batch_size: 256 + eval_batch_size: 256 + lr_initial: 0.002 + scheduler: LinearWarmupCosineAnnealingLR + max_epochs: 9 + eval_every: 0.4 + +runs: + + - config: faenet-is2re-all + note: baseline faenet + + - config: depfaenet-is2re-all + note: depfaenet baseline + + - config: depfaenet-is2re-all + note: depfaenet per-adsorbate + adsorbates: {'*O', '*OH', '*OH2', '*H'} + + - config: depfaenet-is2re-all + note: To be used for continue from dir + + # - config: depfaenet-is2re-all + # note: Fine-tune on per-ads-dataset + # continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/2935198 + # adsorbates: {'*O', '*OH', '*OH2', '*H'} + # optim: + # max_epochs: 3 From ca497e82ce6180dc8b2cde9e207e904a46daa80b Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Sat, 6 Jan 2024 07:47:31 -0500 Subject: [PATCH 126/131] improve docu of continue_from_dir --- ocpmodels/common/flags.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ocpmodels/common/flags.py b/ocpmodels/common/flags.py index 7a01ebac44..a32e44c20a 100644 --- a/ocpmodels/common/flags.py +++ b/ocpmodels/common/flags.py @@ -87,12 +87,14 @@ def add_core_args(self): "--checkpoint", type=str, help="Model checkpoint to load" ) self.parser.add_argument( - "--continue_from_dir", type=str, help="Run to continue, loading its config" + "--continue_from_dir", + type=str, + help="Continue an existing run, loading its config and overwriting desired arguments", ) self.parser.add_argument( "--restart_from_dir", type=str, - help="Run to restart, loading its config and overwriting " + help="Restart training from an existing run, loading its config and overwriting args" + "from the command-line", ) self.parser.add_argument( From 3f025ac5f3cc08e2fccc82c39028de88ebe6c62c Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Wed, 10 Jan 2024 17:56:39 -0500 Subject: [PATCH 127/131] fix setup --- setup.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index bf2daae37e..429f2d433d 100644 --- a/setup.py +++ b/setup.py @@ -5,13 +5,31 @@ LICENSE file in the root directory of this source tree. 
""" -from setuptools import find_packages, setup +from distutils.util import convert_path +from pathlib import Path + +from setuptools import setup + + +def make_ocpmodels_package_dict(): + dirs = [ + convert_path(str(p)) + for p in Path("./ocpmodels/").glob("**") + if (p / "__init__.py").exists() + ] + pkgs = [d.replace("/", ".") for d in dirs] + return {p: d for p, d in zip(pkgs, dirs)} + + +pkg_dict = make_ocpmodels_package_dict() +pkg_dict["ocdata"] = convert_path("ocdata") setup( - name="ocp-models", - version="0.0.3", + name="ocpmodels", + version="0.0.1", description="Machine learning models for use in catalysis as part of the Open Catalyst Project", url="https://github.com/Open-Catalyst-Project/ocp", - packages=find_packages(), + packages=list(pkg_dict.keys()), + package_dir=pkg_dict, include_package_data=True, ) From 4bb9aac53f9f46b4bd1d5cf7c260fe5038b03a02 Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Tue, 16 Jan 2024 12:43:17 -0500 Subject: [PATCH 128/131] fix adsorbates filtering --- configs/exps/alvaro/gflownet.yaml | 4 ++-- mila/launch_exp.py | 32 ++++++++++++++++++++-------- mila/sbatch.py | 28 ++++++++++++++++-------- ocpmodels/common/utils.py | 32 ++++++++++++++++++++++++++++ ocpmodels/datasets/lmdb_dataset.py | 34 ++++++++++++++++++++++++++---- ocpmodels/trainers/base_trainer.py | 3 ++- 6 files changed, 108 insertions(+), 25 deletions(-) diff --git a/configs/exps/alvaro/gflownet.yaml b/configs/exps/alvaro/gflownet.yaml index bfb88b3eca..7a2289482d 100644 --- a/configs/exps/alvaro/gflownet.yaml +++ b/configs/exps/alvaro/gflownet.yaml @@ -42,7 +42,7 @@ default: eval_every: 0.4 runs: - + - config: faenet-is2re-all note: baseline faenet @@ -51,7 +51,7 @@ runs: - config: depfaenet-is2re-all note: depfaenet per-adsorbate - adsorbates: {'*O', '*OH', '*OH2', '*H'} + adsorbates: '*O, *OH, *OH2, *H' - config: depfaenet-is2re-all note: To be used for continue from dir diff --git a/mila/launch_exp.py b/mila/launch_exp.py index 8bd00e7c9c..dec6bde850 100644 --- a/mila/launch_exp.py +++ b/mila/launch_exp.py @@ -1,3 +1,4 @@ +import copy import os import re import subprocess @@ -5,10 +6,8 @@ from pathlib import Path from minydra import resolved_args -from yaml import safe_load, dump - from sbatch import now -import copy +from yaml import dump, safe_load ROOT = Path(__file__).resolve().parent.parent @@ -143,14 +142,16 @@ def cli_arg(args, key=""): s += cli_arg(v, key=f"{parent}{k}") else: if " " in str(v) or "," in str(v) or isinstance(v, str): - if "'" in str(v) and '"' in str(v): - v = str(v).replace("'", "\\'") + if '"' in str(v): + v = str(v).replace('"', '\\"') v = f"'{v}'" elif "'" in str(v): - v = f'"{v}"' + v = f'\\"{v}\\"' else: v = f"'{v}'" s += f" --{parent}{k}={v}" + if "ads" in k: + print(s.split(" --")[-1]) return s @@ -175,10 +176,15 @@ def get_args_or_exp(key, args, exp): n_jobs = None args = resolved_args() assert "exp" in args - regex = args.get("match", ".*") + + regex = args.pop("match", ".*") + exp_name = args.pop("exp").replace(".yml", "").replace(".yaml", "") + no_confirm = args.pop("no_confirm", False) + + sbatch_overrides = args.to_dict() + ts = now() - exp_name = args.exp.replace(".yml", "").replace(".yaml", "") exp_file = find_exp(exp_name) exp = safe_load(exp_file.open("r")) @@ -231,6 +237,8 @@ def get_args_or_exp(key, args, exp): else: params["wandb_tags"] = exp_name + job = merge_dicts(job, sbatch_overrides) + py_args = f'py_args="{cli_arg(params).strip()}"' sbatch_args = " ".join( @@ -253,7 +261,7 @@ def get_args_or_exp(key, args, exp): text += 
"\n<><><> Experiment config:\n\n-----" + exp_file.read_text() + "-----" text += "\n<><><> Experiment runs:\n\n • " + "\n\n • ".join(commands) + separator - confirm = args.no_confirm or "y" in input("\n🚦 Confirm? [y/n] : ") + confirm = no_confirm or "y" in input("\n🚦 Confirm? [y/n] : ") if confirm: try: @@ -267,6 +275,10 @@ def get_args_or_exp(key, args, exp): for c, command in enumerate(commands): print(f"Launching job {c+1:3}", end="\r") outputs.append(os.popen(command).read().strip()) + if "Aborting" in outputs[-1]: + print("\nError submitting job", c + 1, ":", command) + print(outputs[-1].replace("Error while launching job:\n", "")) + print("\n") if " verbose=true" in command.lower(): print(outputs[-1]) except KeyboardInterrupt: @@ -283,6 +295,8 @@ def get_args_or_exp(key, args, exp): if is_interrupted: print("\n💀 Interrupted. Kill jobs with:\n$ scancel" + " ".join(jobs)) + elif not jobs: + print("\n❌ No jobs launched") else: text += f"{separator}All jobs launched: {' '.join(jobs)}" with outfile.open("w") as f: diff --git a/mila/sbatch.py b/mila/sbatch.py index de82809f8b..a4b24095c2 100644 --- a/mila/sbatch.py +++ b/mila/sbatch.py @@ -1,12 +1,13 @@ -from minydra import resolved_args, MinyDict -from pathlib import Path -from datetime import datetime import os +import re import subprocess -from shutil import copyfile import sys -import re +from datetime import datetime +from pathlib import Path +from shutil import copyfile + import yaml +from minydra import MinyDict, resolved_args IS_DRAC = ( "narval.calcul.quebec" in os.environ.get("HOSTNAME", "") @@ -24,13 +25,13 @@ # git commit: {git_commit} # cwd: {cwd} -{git_checkout} {sbatch_py_vars} export MASTER_PORT=$(expr 10000 + $(echo -n $SLURM_JOBID | tail -c 4)) echo "Master port $MASTER_PORT" cd {code_loc} +{git_checkout} {modules} @@ -41,7 +42,7 @@ conda activate {env} fi {wandb_offline} -srun --output={output} {python_command} +srun --gpus-per-task=1 --output={output} {python_command} """ @@ -247,7 +248,6 @@ def load_sbatch_args_from_dir(dir): "cpus": int(sbatch_args["cpus-per-task"]), "mem": sbatch_args["mem"], "gres": sbatch_args["gres"], - "output": sbatch_args["output"], } return args @@ -417,7 +417,17 @@ def load_sbatch_args_from_dir(dir): print("\nDev mode: not actually executing the command 🤓\n") else: # not dev mode: run the command, make directories - out = subprocess.check_output(command.split(" ")).decode("utf-8").strip() + try: + out = ( + subprocess.check_output(command.split(" "), stderr=subprocess.STDOUT) + .decode("utf-8") + .strip() + ) + except subprocess.CalledProcessError as error: + print("Error while launching job:\n```") + print(error.output.decode("utf-8").strip()) + print("```\nAborting...") + sys.exit(1) jobid = out.split(" job ")[-1].strip() success = out.startswith("Submitted batch job") diff --git a/ocpmodels/common/utils.py b/ocpmodels/common/utils.py index 82d40a9dbb..05974fa44e 100644 --- a/ocpmodels/common/utils.py +++ b/ocpmodels/common/utils.py @@ -901,6 +901,37 @@ def set_cpus_to_workers(config, silent=False): return config +def set_dataset_split(config): + """ + Set the split for all datasets in the config to the one specified in the + config's name. + + Resulting dict: + { + "dataset": { + "train": { + "split": "all" + ... + }, + ... 
+ } + } + + Args: + config (dict): The full trainer config dict + + Returns: + dict: The updated config dict + """ + split = config["config"].split("-")[-1] + for d, dataset in config["dataset"].items(): + if d == "default_val": + continue + assert isinstance(dataset, dict) + config["dataset"][d]["split"] = split + return config + + def check_regress_forces(config): if "regress_forces" in config["model"]: if config["model"]["regress_forces"] == "": @@ -1182,6 +1213,7 @@ def build_config(args, args_override=[], silent=None): config = override_drac_paths(config) config = continue_from_slurm_job_id(config) config = read_slurm_env(config) + config = set_dataset_split(config) config["optim"]["eval_batch_size"] = config["optim"]["batch_size"] dist_utils.setup(config) diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 0a7abaea85..8dd195328d 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -53,11 +53,13 @@ def __init__( lmdb_glob=None, adsorbates=None, adsorbates_ref_dir=None, + silent=False, ): super().__init__() self.config = config self.adsorbates = adsorbates self.adsorbates_ref_dir = adsorbates_ref_dir + self.silent = silent self.path = Path(self.config["src"]) if not self.path.is_file(): @@ -128,10 +130,23 @@ def filter_per_adsorbates(self): if not ref_path.is_dir(): print(f"Adsorbate reference directory {ref_path} does not exist.") return - pattern = "-".join(self.path.parts[-3:]) + pattern = f"{self.config['split']}-{self.path.parts[-1]}" candidates = list(ref_path.glob(f"*{pattern}*.json")) if not candidates: - print(f"No adsorbate reference files found for {self.path.name}.") + print( + f"No adsorbate reference files found for {self.path.name}.:" + + "\n".join( + [ + str(p) + for p in [ + ref_path, + pattern, + list(ref_path.glob(f"*{pattern}*.json")), + list(ref_path.glob("*")), + ] + ] + ) + ) return if len(candidates) > 1: print( @@ -147,6 +162,8 @@ def filter_per_adsorbates(self): if a in ads ) + previous_samples = self.num_samples + # filter the dataset indices if isinstance(self._keys[0], bytes): self._keys = [i for i in self._keys if i in allowed_idxs] @@ -158,6 +175,12 @@ def filter_per_adsorbates(self): self._keylen_cumulative = np.cumsum(keylens).tolist() self.num_samples = sum(keylens) + if not self.silent: + print( + f"Filtered dataset {pattern} from {previous_samples} to", + f"{self.num_samples} samples. (adsorbates: {ads})", + ) + assert self.num_samples > 0, f"No samples found for adsorbates {ads}." def __len__(self): @@ -229,14 +252,17 @@ def close_db(self): @registry.register_dataset("deup_lmdb") class DeupDataset(LmdbDataset): - def __init__(self, all_datasets_configs, deup_split, transform=None): + def __init__(self, all_datasets_configs, deup_split, transform=None, silent=False): + # ! WARNING: this does not (yet?) 
handle adsorbate filtering super().__init__( all_datasets_configs[deup_split], lmdb_glob=deup_split.replace("deup-", "").split("-"), + silent=silent, ) ocp_splits = deup_split.split("-")[1:] self.ocp_datasets = { - d: LmdbDataset(all_datasets_configs[d], transform) for d in ocp_splits + d: LmdbDataset(all_datasets_configs[d], transform, silent=silent) + for d in ocp_splits } def __getitem__(self, idx): diff --git a/ocpmodels/trainers/base_trainer.py b/ocpmodels/trainers/base_trainer.py index 0b36b08405..900f1f2008 100644 --- a/ocpmodels/trainers/base_trainer.py +++ b/ocpmodels/trainers/base_trainer.py @@ -302,6 +302,7 @@ def load_datasets(self): transform=transform, adsorbates=self.config.get("adsorbates"), adsorbates_ref_dir=self.config.get("adsorbates_ref_dir"), + silent=self.silent, ) elif self.data_mode == "heterogeneous": @@ -1134,7 +1135,7 @@ def measure_inference_time(self, loops=1): self.config["model"].get("regress_forces") == "from_energy" ) self.model.eval() - timer = Times(gpu=True) + timer = Times(gpu=torch.cuda.is_available()) # average inference over multiple loops for _ in range(loops): From 1494d91264f64ee3c1093f371434ff1632b16e1f Mon Sep 17 00:00:00 2001 From: Victor Schmidt Date: Tue, 16 Jan 2024 12:49:02 -0500 Subject: [PATCH 129/131] fix ads filtering for val_ood_cat&both --- ocpmodels/datasets/lmdb_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocpmodels/datasets/lmdb_dataset.py b/ocpmodels/datasets/lmdb_dataset.py index 8dd195328d..c07ae1773a 100644 --- a/ocpmodels/datasets/lmdb_dataset.py +++ b/ocpmodels/datasets/lmdb_dataset.py @@ -107,7 +107,7 @@ def filter_per_adsorbates(self): return # val_ood_ads and val_ood_both don't have targeted adsorbates - if self.config["src"].split("/")[-2] in {"val_ood_ads", "val_ood_both"}: + if Path(self.config["src"]).parts[-1] in {"val_ood_ads", "val_ood_both"}: return # make set of adsorbates from a list or a string. If a string, split on comma. 
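For reference, here is a minimal, self-contained sketch (not part of any patch above; the `ref` dict and index values are invented for illustration) of what the `adsorbates` option amounts to once `LmdbDataset.filter_per_adsorbates` from PATCH 128/129 has run: the comma-separated string from the experiment config is turned into a set of symbols, the per-split reference JSON maps dataset indices to adsorbate symbols, and only the LMDB keys whose adsorbate is in that set are kept.

    # Sketch of the filtering performed by LmdbDataset.filter_per_adsorbates.
    # `ref` stands in for the JSON reference files found under adsorbates_ref_dir.
    adsorbates = "*O, *OH, *OH2, *H"                   # value passed from the exp config
    ads = {a.strip() for a in adsorbates.split(",")}

    ref = {"ds_idx": [0, 1, 2, 3], "ads_symbols": ["*O", "*CH3", "*OH", "*N"]}

    keys = [str(i).encode("ascii") for i in range(4)]  # LMDB keys, as built in __init__
    allowed = {
        str(i).encode("ascii")
        for i, a in zip(ref["ds_idx"], ref["ads_symbols"])
        if a in ads
    }
    keys = [k for k in keys if k in allowed]
    print(keys)  # [b'0', b'2'] -> only the *O and *OH systems survive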
From 7c96385abe7733b6f82c8108c76502f1fdce8fef Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Sat, 27 Jan 2024 05:35:02 -0500 Subject: [PATCH 130/131] config update --- configs/exps/alvaro/gflownet.yaml | 45 ++++++++++++++++++++----------- scripts/gnn_dev.py | 5 ++-- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/configs/exps/alvaro/gflownet.yaml b/configs/exps/alvaro/gflownet.yaml index bfb88b3eca..35bf57929b 100644 --- a/configs/exps/alvaro/gflownet.yaml +++ b/configs/exps/alvaro/gflownet.yaml @@ -11,8 +11,8 @@ default: wandb_tags: "gflownet-model" test_ri: True mode: train - graph_rewiring: remove-tag-0 - # graph_rewiring: "" + # graph_rewiring: remove-tag-0 + graph_rewiring: "" frame_averaging: 2D fa_method: se3-random cp_data_to_tmpdir: True @@ -21,7 +21,7 @@ default: edge_embed_type: all_rij mp_type: updownscale_base phys_embeds: True - tag_hidden_channels: 32 + tag_hidden_channels: 0 pg_hidden_channels: 96 energy_head: weighted-av-final-embeds complex_mp: True @@ -43,22 +43,37 @@ default: runs: - - config: faenet-is2re-all - note: baseline faenet + # - config: faenet-is2re-all + # note: baseline faenet - - config: depfaenet-is2re-all - note: depfaenet baseline + # - config: depfaenet-is2re-all + # note: depfaenet baseline + + # - config: depfaenet-is2re-all + # note: depfaenet per-adsorbate + # adsorbates: {'*O', '*OH', '*OH2', '*H'} + + # - config: depfaenet-is2re-all + # note: depfaenet per-adsorbate long string + # adsorbates: '*O, *OH, *OH2, *H' + + # - config: depfaenet-is2re-all + # note: depfaenet per-adsorbate string of a list + # adsorbates: "*O, *OH, *OH2, *H" - config: depfaenet-is2re-all - note: depfaenet per-adsorbate - adsorbates: {'*O', '*OH', '*OH2', '*H'} + note: final depfaenet trained on selected adsorbate, without tags + adsorbates: "*O, *OH, *OH2, *H" - config: depfaenet-is2re-all - note: To be used for continue from dir + note: depfaenet full data # - config: depfaenet-is2re-all - # note: Fine-tune on per-ads-dataset - # continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/2935198 - # adsorbates: {'*O', '*OH', '*OH2', '*H'} - # optim: - # max_epochs: 3 + # note: To be used for continue from dir + + - config: depfaenet-is2re-all + note: Fine-tune on per-ads-dataset + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023246 + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 3 diff --git a/scripts/gnn_dev.py b/scripts/gnn_dev.py index ca8b47aa43..832bfa354b 100644 --- a/scripts/gnn_dev.py +++ b/scripts/gnn_dev.py @@ -16,7 +16,7 @@ if __name__ == "__main__": config = {} # Customize args - config["graph_rewiring"] = "remove-tag-0" + # config["graph_rewiring"] = "remove-tag-0" config["frame_averaging"] = "2D" config["fa_method"] = "random" # "random" config["test_ri"] = False @@ -30,8 +30,9 @@ if all("config" not in arg for arg in str_args): str_args.append("--is_debug") # str_args.append("--config=faenet-is2re-all") + str_args.append("--adsorbates='*O, *OH, *OH2, *H'") str_args.append("--config=depfaenet-is2re-10k") - str_args.append("--is_disconnected=True") + # str_args.append("--is_disconnected=True") # str_args.append("--silent=0") warnings.warn( "No model / mode is given; chosen as default" + f"Using: {str_args[-1]}" From f2591b770161a4c29004555e97ee26a3eeede35a Mon Sep 17 00:00:00 2001 From: AlexDuvalinho Date: Tue, 16 Apr 2024 06:13:24 -0400 Subject: [PATCH 131/131] add ocdata --- configs/exps/alvaro/gflownet.yaml | 78 ++++- ocdata/LiFePO4.cif | 54 +++ ocdata/__init__.py | 0 ocdata/adsorbates.py | 64 ++++ 
ocdata/base_atoms/__init__.py | 0 ocdata/base_atoms/ase_dbs/__init__.py | 5 + ocdata/base_atoms/ase_dbs/adsorbates.db | Bin 0 -> 102400 bytes ocdata/base_atoms/ase_dbs/bulks.db | Bin 0 -> 23715840 bytes ocdata/base_atoms/pkls/__init__.py | 7 + ocdata/base_atoms/pkls/convert_db_to_pkl.py | 130 +++++++ ocdata/bulk_obj.py | 343 ++++++++++++++++++ ocdata/bulks.py | 40 +++ ocdata/combined.py | 366 ++++++++++++++++++++ ocdata/constants.py | 113 ++++++ ocdata/loader.py | 81 +++++ ocdata/precompute_sample_structures.py | 174 ++++++++++ ocdata/structure_sampler.py | 187 ++++++++++ ocdata/surfaces.py | 348 +++++++++++++++++++ ocdata/vasp.py | 230 ++++++++++++ ocpmodels/trainers/single_trainer.py | 2 + scripts/gnn_dev.py | 17 +- 21 files changed, 2225 insertions(+), 14 deletions(-) create mode 100644 ocdata/LiFePO4.cif create mode 100644 ocdata/__init__.py create mode 100644 ocdata/adsorbates.py create mode 100644 ocdata/base_atoms/__init__.py create mode 100644 ocdata/base_atoms/ase_dbs/__init__.py create mode 100644 ocdata/base_atoms/ase_dbs/adsorbates.db create mode 100644 ocdata/base_atoms/ase_dbs/bulks.db create mode 100644 ocdata/base_atoms/pkls/__init__.py create mode 100644 ocdata/base_atoms/pkls/convert_db_to_pkl.py create mode 100644 ocdata/bulk_obj.py create mode 100644 ocdata/bulks.py create mode 100644 ocdata/combined.py create mode 100644 ocdata/constants.py create mode 100644 ocdata/loader.py create mode 100644 ocdata/precompute_sample_structures.py create mode 100644 ocdata/structure_sampler.py create mode 100644 ocdata/surfaces.py create mode 100644 ocdata/vasp.py diff --git a/configs/exps/alvaro/gflownet.yaml b/configs/exps/alvaro/gflownet.yaml index 35bf57929b..2432f47339 100644 --- a/configs/exps/alvaro/gflownet.yaml +++ b/configs/exps/alvaro/gflownet.yaml @@ -61,19 +61,83 @@ runs: # note: depfaenet per-adsorbate string of a list # adsorbates: "*O, *OH, *OH2, *H" + # - config: depfaenet-is2re-all + # note: Trained on selected adsorbate more epochs + # adsorbates: "*O, *OH, *OH2, *H" + # optim: + # max_epochs: 10 + + # - config: depfaenet-is2re-all + # note: depfaenet full data + + # - config: depfaenet-is2re-all + # note: To be used for continue from dir + + # - config: depfaenet-is2re-all + # note: Fine-tune on per-ads-dataset 4 epoch + # continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 + # adsorbates: "*O, *OH, *OH2, *H" + # optim: + # max_epochs: 4 + # lr_initial: 0.00015 + + # - config: depfaenet-is2re-all + # note: Fine-tune on per-ads-dataset 10 epoch + # continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 + # adsorbates: "*O, *OH, *OH2, *H" + # optim: + # max_epochs: 10 + # lr_initial: 0.00015 + + - config: depfaenet-is2re-all + note: Fine-tune on per-ads-dataset 10 epoch + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 20 + lr_initial: 0.0001 + - config: depfaenet-is2re-all - note: final depfaenet trained on selected adsorbate, without tags + note: Fine-tune on per-ads-dataset 20 epoch + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 20 + lr_initial: 0.00015 - config: depfaenet-is2re-all - note: depfaenet full data + note: Fine-tune on per-ads-dataset 15 epoch + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 15 + lr_initial: 0.0002 - # - config: depfaenet-is2re-all - # note: To be used for 
continue from dir + - config: depfaenet-is2re-all + note: Fine-tune on per-ads-dataset 10 epoch + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023244 + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 10 + lr_initial: 0.0001 + + - config: depfaenet-is2re-all + note: Fine-tune on per-ads-dataset starting from fine-tuned model + continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4071859 + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 10 + lr_initial: 0.0001 + + - config: depfaenet-is2re-all + note: Trained on selected adsorbate + adsorbates: "*O, *OH, *OH2, *H" + optim: + max_epochs: 25 + lr_initial: 0.0001 - config: depfaenet-is2re-all - note: Fine-tune on per-ads-dataset - continue_from_dir: /network/scratch/a/alexandre.duval/ocp/runs/4023246 + note: Trained on selected adsorbate adsorbates: "*O, *OH, *OH2, *H" optim: - max_epochs: 3 + max_epochs: 25 diff --git a/ocdata/LiFePO4.cif b/ocdata/LiFePO4.cif new file mode 100644 index 0000000000..2b01776980 --- /dev/null +++ b/ocdata/LiFePO4.cif @@ -0,0 +1,54 @@ +# generated using pymatgen +data_LiFePO4 +_symmetry_space_group_name_H-M 'P 1' +_cell_length_a 4.74644100 +_cell_length_b 10.44373000 +_cell_length_c 6.09022600 +_cell_angle_alpha 89.99726981 +_cell_angle_beta 90.00071024 +_cell_angle_gamma 90.00075935 +_symmetry_Int_Tables_number 1 +_chemical_formula_structural LiFePO4 +_chemical_formula_sum 'Li4 Fe4 P4 O16' +_cell_volume 301.89584168 +_cell_formula_units_Z 4 +loop_ + _symmetry_equiv_pos_site_id + _symmetry_equiv_pos_as_xyz + 1 'x, y, z' +loop_ + _atom_site_type_symbol + _atom_site_label + _atom_site_symmetry_multiplicity + _atom_site_fract_x + _atom_site_fract_y + _atom_site_fract_z + _atom_site_occupancy + Li Li0 1 0.00000100 0.00001200 0.00003300 1 + Li Li1 1 0.50000600 0.50000900 0.00003100 1 + Li Li2 1 0.50000300 0.50001200 0.49996900 1 + Li Li3 1 0.00000300 0.00001400 0.49996700 1 + Fe Fe4 1 0.47524900 0.21803400 0.75000400 1 + Fe Fe5 1 0.02475600 0.71803500 0.75000300 1 + Fe Fe6 1 0.97510000 0.28190400 0.25000000 1 + Fe Fe7 1 0.52491600 0.78190200 0.25000000 1 + P P8 1 0.41781800 0.09476500 0.24999700 1 + P P9 1 0.91789000 0.40522300 0.75000500 1 + P P10 1 0.08218100 0.59476800 0.24999700 1 + P P11 1 0.58210900 0.90522200 0.75000500 1 + O O12 1 0.74186800 0.09673300 0.25000400 1 + O O13 1 0.24192700 0.40323600 0.74999600 1 + O O14 1 0.75813200 0.59672800 0.25000200 1 + O O15 1 0.25807400 0.90323500 0.74999400 1 + O O16 1 0.20694800 0.45707900 0.25000000 1 + O O17 1 0.70682400 0.04293300 0.74999900 1 + O O18 1 0.29304600 0.95707800 0.25000400 1 + O O19 1 0.79318000 0.54293300 0.75000400 1 + O O20 1 0.28451400 0.16549800 0.04703500 1 + O O21 1 0.78450500 0.33453800 0.95297200 1 + O O22 1 0.78447400 0.33453000 0.54706100 1 + O O23 1 0.28449100 0.16550500 0.45292700 1 + O O24 1 0.21550200 0.66551200 0.45292200 1 + O O25 1 0.71552100 0.83452400 0.54706500 1 + O O26 1 0.71548600 0.83453300 0.95297000 1 + O O27 1 0.21547700 0.66550400 0.04703700 1 diff --git a/ocdata/__init__.py b/ocdata/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ocdata/adsorbates.py b/ocdata/adsorbates.py new file mode 100644 index 0000000000..316ab2e739 --- /dev/null +++ b/ocdata/adsorbates.py @@ -0,0 +1,64 @@ +import numpy as np +import pickle +import os + + +class Adsorbate: + """ + This class handles all things with the adsorbate. 
+ Selects one (either specified or random), and stores info as an object + + Attributes + ---------- + atoms : Atoms + actual atoms of the adsorbate + smiles : str + SMILES representation of the adsorbate + bond_indices : list + indices of the atoms meant to be bonded to the surface + adsorbate_sampling_str : str + string capturing the adsorbate index and total possible adsorbates + """ + + def __init__( + self, adsorbate_database=None, specified_index=None, adsorbate_atoms=None + ): + if adsorbate_atoms is None: + assert adsorbate_database is not None + self.choose_adsorbate_pkl(adsorbate_database, specified_index) + else: + ( + self.adsorbate_sampling_str, + self.atoms, + self.smiles, + self.bond_indices, + ) = adsorbate_atoms + + def choose_adsorbate_pkl(self, adsorbate_database, specified_index=None): + """ + Chooses an adsorbate from our pkl based inverted index at random. + + Args: + adsorbate_database: A string pointing to the a pkl file that contains + an inverted index over different adsorbates. + specified_index: adsorbate index to choose instead of choosing a random one + Sets: + atoms `ase.Atoms` object of the adsorbate + smiles SMILES-formatted representation of the adsorbate + bond_indices list of integers indicating the indices of the atoms in + the adsorbate that are meant to be bonded to the surface + adsorbate_sampling_str Enum string specifying the sample, [index] + adsorbate_db_fname filename denoting which version was used to sample + """ + with open(adsorbate_database, "rb") as f: + inv_index = pickle.load(f) + + if specified_index is not None: + element = specified_index + else: + element = np.random.choice(len(inv_index)) + print(f"args.actions.adsorbate_id is None, choosing {element}") + + self.adsorbate_sampling_str = str(element) + self.atoms, self.smiles, self.bond_indices = inv_index[element] + self.adsorbate_db_fname = os.path.basename(adsorbate_database) diff --git a/ocdata/base_atoms/__init__.py b/ocdata/base_atoms/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ocdata/base_atoms/ase_dbs/__init__.py b/ocdata/base_atoms/ase_dbs/__init__.py new file mode 100644 index 0000000000..525320ee30 --- /dev/null +++ b/ocdata/base_atoms/ase_dbs/__init__.py @@ -0,0 +1,5 @@ +import os + + +BULK_DB = os.path.join(__path__[0], 'bulks.db') +ADSORBATE_DB = os.path.join(__path__[0], 'adsorbates.db') \ No newline at end of file diff --git a/ocdata/base_atoms/ase_dbs/adsorbates.db b/ocdata/base_atoms/ase_dbs/adsorbates.db new file mode 100644 index 0000000000000000000000000000000000000000..6ebf41a7362598147933e53095e6b0c8224add31 GIT binary patch literal 102400 zcmeIb2Y^)7^*=tdWoBomq0)=22-4;CqUaq!ib@kv(TMP>U}#c|vF?C1QG-SkG!|lF zHwL@0y|pU_T$L&ytMtC~Cip-1+$rp`ko`sX`_DJuH_ChOoO}1po6o)X+|%xT7hW)M z>{O@Boj7^CF||w#GztWRfmB&pAP~3={+-U|>1S5QlluaV zLURH!lKd>XC3;2ahSJ`V2O?U@_a(DR`i3`$9}nB%1XS=pJ_0@hJ_0@hJ_5%;;POVH zW*vG2r;MFoJJ(K_Hr{k5k2>=Dm(H(8U1N-!=1e)E&#=C|hW9NycknrVN0c2|wQR`X zES<032`)PiPllSE(lz)?yyH~o+Ntc`c$Kf(sK%Z1)#!a|RmHpZOoW=XZ4$f??@Diz zy_k}$ai?4ny%7*o@UI8NL(Mw23Xa4-nlj0;#yV5j9Wq3s#!YI}pf}1eI$hTJl31u& z_g2AacqhvkXH6StOr4m)Z_2NyOm)UjiPyMU?Miq*R?!^W=lE`C=E3`wN=ny&pLH%t@}l5+|SCFA9*umcOD)IHS5|c zcr|;t31hFG29IXfc)HRWcbitT&XIS_6?Oha2|VAa!JDTV<~V2U1g1ZYojBo$%Sh&~ z!+Z4}*thJ6g0fDfW$>XOl@0GZVt6+svDYHqIo%~3Zq}nmaC#x$KGS#e9q=edNsX^M zsyOq7aD$`155~wq_WfgR7%Id2_U}84(fx)D>w9kh!DZ+7y|k>;Si5uCu)h8J4(mI( zPu~m6GM$C8JEz-+!p*vN4W=0uwFW>fGF;<9YZV^nG4n9}yN@}gINYpBlVJK>gp`gh z`Y%-Dc_^pOhxm8DG#x7nH*3`@n4XB=ni-0jQ*n*&&M-4ye_748m{61acOPeHAq<7C 
z!Asf0Ts;mn|4~yM7!VUIXG%@7sOH1fEbK(PZa4VL&bNny_yg~D*ge^3&vcK1n)hI3 znID+{31vfvoja)4uuHL{^}1;IkaGva-wo&ON8&lx}5ogio1jjGr{6 z%$Q)8Q8|GF2!~E?SYr1syJ+ya7hKer^xDd_29;5p-7I95FE1@R?H9d*G1X z>=C9+o#wh_Q^$aXWIOKI2@ZXTvR+dsj-OIC(Y(sBrs88vnmA?bRFDCgKSiG9j2p-9 z#HyH9t)`p+e`1UqHQu;#JiQ%$DOMeuDY3>Fldptt%$JNerc9~9m}*>EqkQ6cXTntD zh`+Glp{7m-VLEjR`=N;Jkz5LDUZS%{&&?1=fRnE?YLYP-y1`Uua^{}+N6rLi@|C}4 z0JAU2+{LlWCQY6=$(cMA<}a-3PIjD8N7iTDiIbr-9`Wq(;LemI8TQyo6URA6)HoVD z#yO&yj|LIOI#U$jcZ|*c1bkvB>CS{PE1}TX2HtzKf z-iP%Xm>J5(8=z5KSq1}bGW_^5*MO;ZGW)gTjcbp1%qca=YbK7HR-?DlmOr98`&G#3 z+`S;wtXHdG0H?XkU^b>rorv_P%!nMtXD$jsy(*z9Rx$hwCs)wR7RiIjJ<09K&B=Ai z?-`4-{g01;kARPWkARPWkARPWkARPWkARPWkARPWkHCLA0?h})6E;i1p{WXoCa1%p zaeFwN)DjLSCQ9Krtf*rkepgXd$tRH~5=&!Whd(S4;|Ighk`dtzp_1rrF%rHmw6^5& zXtQX~;s;|dB>EPwinT1A5$2-LMaoO(C0+>)i<}U8I61!b>G+F914|pn7bSjI^j1U( zKNhWw*-<09JAP&C&g7{@R~P*u@{8nspzDNaAp0ZoEyrcd1^ING?ylUR)g6 z7V2L7Zfsxaj>J`^R}^&&y_XzNvN=%}zNmOAJgWck5%3ZCpGTm5;h>-^JE|;cye_Je z%?p}jnxe#ssv&TeA@RyET1k}+nHMZqP;AL`c~^5yozq;)v{geiB>e(fN!L_O6>ZJX zRM~MI#nf$Eb!|)36hLHQ@2HZ=bBfUyzm8KKNtaDm zu@y;CY~2w=L4;qSTB@$t;yJXE;>fzH*gVGxvZaZp0`*nHbzDnv;9HbFw31<}JTxrF zyOt%2a0}=|yse57U~Iq-=}jxCg6`Ue0Z(R1GOuZXhAWAZ3O5xk!Ipc`O0KFyBZ;;y zh^A&3mSP*Op}DRrIU;mdIYlc0in1s;vL*3?tcsju83O!DUe$G%Gfj=qN`fqMvS}*1 z!*h-xD4Zg34rfTFBIuSWC}-13GCT{%Ndhm4&}O z#%b_7cn3xluLuUG&`N@-^DgH~DvTk=Rb^K|}x+w36ydoGQo?FDtH~ITQj((oIKlBuRB0p$DyGC?JwdQP3q>7ogWbmsAyt z(?No|x~{9IT%A1icHb1mD2P&@Vxv0OlNsRtW|PNM6lyWJi-a(@K^FO#oe2kwG?FqNPLIN(S^# zL9iuFmOIf(F8p#3QIY~(8(Ish%T%z~@Vz<+NxLJh1mZ%6p~~B~3pHR&z=XolacXfc z1@JU{CP@<2f*@UD*Y>Ct4PWFpzks(n=D@LKkGV;=<6?WWjX+K}Q8#HB}N# zwH;P+L4rfK6yO_l0cIi*Fwh(rsJhNOig609qzfP$4LTdbKyq~G*i?i=BU=&-XxA>I zl|Yu5f(s}?FA@ZU2YCmLC&SR-bS%aV5&M<8bheQX70GQBz@lGHgrJVQP>p4aA)&KwlAH+}K=eT8VzU z21N>Q(FfLbfrn`UL^%k4)iSkKw2}z&PjW1felP_H3iK(Jw?PeXU=&!;6`W1{RhjTeg(JUFHhv3RhX(b2r5Z;2$ z3i46Wbb7_}hUrSW0xf1ZO=u<4q$+`^X{O`Cqrpf9O~!$F8%77r{Cs0t$%2+Q0COHL zba+%~9+}rcvH)z*Z|sw3C0*h)n45H)mo3Ghb2fav1oBvsERYKFiL?^vF{;3Ua$?!g zH#pw39Rcpei=1hhqRE{=D}nT|K^O`kmL!u36bodz!m9!ZY|ap!Mpy~j2_{d_R#Xw@ zPf3MY1*9U3F`3SNT#{CTfeM`n#uWh1n}z{uglq{gUSQ0@q@W~dCD*V8+l9#!CUKCP zrXlN&3F5?p3CENrGfpeXiX$kX0f?aF@EY_*=(==l!ej&uZAme_6VHRn15YG@=4FB) zkWG-_&{H@I1g>O9X(iQ$iPJDmn8|=?37V;a2!bD>!EHs=ETxq|@!%yx6Gadix@qaq z2LwTY2a;i~gLyGREAb#~bbtkBNeM;@$MGPhVd4Ub4M=cW39V$p00D)N10{rqNgcEZ zTZB;#QpE)oNfyGik`7v~0?M8w0WL7mVU)`vj6&!YmaOVrh*pBR1zHUTqN>1TX2_rm z^BkQUC>jng6w^wo=z?ZURR$P(ppv;T&uW||tBwtQMUsnXB?EplOwJCpCMejZ2*1vi z6sn@i&>0Q6kXEvx6Kc@0HQm%fs{>tBadb}LEt_`)PU8wrEgTfF1(3r4q^`hx33Dch zE*o@a6(lbxUYs5WTOyd;8G!%&kB@+lfRBKWfRBKWfRBKWfRBKWfRBKWfRBKWz<&<{ z(O_Y#U}hb_uy}k$pt59BAh|!eBe^lTD!DBAb@KD%2g$dRFDIW*{x$hX@`2>t$=i}Q zC9g?NNV>@@k{2fjC;KJOPAbXMlcyz5PBu*@lA*-m#Gb^~#M;ESiA9Ndi4PNRCtglG zpZH7S;l%xk-zIKJ+>n@(7?-dTmnDWJ&P()4=m{>-DbY63ERjrv;)mmV;tS&cjDHY+ zGyYQinfUDZtoZNZx5aOYPm7O>+wot-FN~iT?;SrQ&c{2)Pl-2=H;R|UevIvnZHujq zeH&X8n-}{i_IB*$*mJSRV-LpejolHuIrgjAgqRzEC^9edVdSmIOOaHf%qi+*@9IF#;-e>NzTJ_k92!s$Nj-UCDF-pCqQobE-x_WV#fg$%eL z_*dr@r_ZJ(=Y`W}(LV|ooEJ*>#IHX$ls*$H!^IhtQ!pTu){z4j8vc3zP+G;J{^7L3 z{vNJmcGWMGmataea9U(6xDwdaIpH)hIKH+o^R@$dHeL8)t-r;n2#_1hO zccb4_)GM6s%2>TZ=`Q%?sc^b8W2K7Io#@|@P`V>NDO{X}&j1%4XywAQL+SRE6FNJb zK9&6yu-YN(tWf$C{1Uh*!#_JKoNmi10qbOB^$exk&`P16;dEO z4_${3$Ax~41zh-AR=|aRg#|rA*I+@9@HAG?BQzBYP7h7Ng44s3S;6U{tFfSaXc885 z4^LzT-9r zIXMiXhyKmUASFZae;U8PpiMZPd4M*dUij>-Ln-`=)?pAk^eL8he6!X=WA9hQX$YRjIt(F z&@9B`Uo##;Vz7B90KtI^8BQ5C&oA_ z4AO-@%ZVW-T}}w0a49+=#AM3}p;PIz6*USm!O|#n3f>_ZDx>_uWC)}R$e?71$&^G0 zMM_~J1QG@KiBK!77Z0^WIvxUH0#6VNHOJo!`|BV-7HW$8Xs8MDqahF?@b}RWijacR 
z5J(W9OG77My+|>Lk4P{Q0^tGdNQjA!l46h?aQhI*4PciPM=2Q&l_D7qfykhxp%SD+ zA&?hPtvCeI0{BHClobU<#YObb3PT_#fL&M&QXCPuVnvZ~wfRBKWfRBKWfRBKWfRBKW zfRBKWfR8{10pI@jyMm8^kARPWkARPWkARPWkARPWkARPWkARQBe?J2L{(t}dZS4O- z9|0c$9|0c$9|0c$9|0c$9|0c$9|0eM|C0#N{r{5JIFqN0oj5`B-~ac2vJdz_+DG93 z1p$1$iU0mT!+-xDyxPcr|DXT#@_y5650{!>@`S1Vp-~Z>o|L=e4`~T4X zk1h@*cO};)ze&zZR=~dhPbMEs-j%#LIXUShMw z_*LtoAe^JDMDUWq*wdnk5ytUNXa_UykjHZVqFLaalq zc`O<|9NivW4G-ymd<1+1d<1+1d<1+1d<1+1d<1+1>Ovq^7!IbSK@T42-1vuKIil0B zei!73j(564bHr)y4jYmqI(&EU;2hC@+kruOqUDPNbHu3wA2>fpw7a?Ec{$>gXWl$F zN0iMweL#+AyY>41c_Q&%zZ`LLS!LfG(dLwg&&d(3`$hZYh*s0D?wuoAeln|9j%cy( zf>fU1dXgN`-1y+^9MSCVm1pIMrZeyBnIoF~K73}5X#CL^XXJ>Jc9-gTqQ^)rN1S-Y zS89$pVb%sEM>JaUf}AH>_Lp)*a>y5Aj!0a8zmOy14}H$(h}ishxjfP8;~qI8+V`T< zb401NrF)Ku%xc*!N0iKdq-%}{e;n+RBSJg=+&NFg7In%I#mcshIihITgQw+)!gRL| zIildMdF^vVFunZL91$p=-!4n+|MB6Tr{svk@0?kdBMvQkr)`e-VeO%lbHu?Nfi^kf zz&fpUo@n-`Rykt->EV_+VqcdfEpo(Oy}Egh*z=41&2q%<8~ZiQ5xedyXp$p#{)KOx zBX+zRKPg9Subz5hp6K5Hgd9;l#%Poywmn&v%n@5xUy#TXT^h%8#FlAOV>x2;z2l-e zV$;&KrFlZUF_I%TUbU_yM{M}R{BVv~KTi+kh;{o%7Uv0WT~Ur$d+EuAIbzLCHy1<; z!x1VR_OG5b-M9bi5}!WLN5DtGN5DtGN5DtGN5DtGN8o=I08*!2Zt@VE^X{u>bP}*#CI~?EgFg_J5uL`#(>B{huem z{?8L&|K|y?|MLXc|9JxJ|2zTqf1UvQKTm-DpC`cn&l6z(=LxX?^90!cc>?VJJOTE9 zo&ftlPk{ZOC&2#C6JY=639$e31la$10_^`h0rr2M0Q)~rfc>8*!2Zt@VE^X{u>bP} z*#CI~?EgFg_J5uL`#(>B{huem{?8L&|K|y?|MLXc|9JxJ|2zTqf1UvQKTm-DpC`cn z&l6z(=LxX?^90!cc>?VJJOTE9o&ftlPk{ZOC&2#C6JY=639$e31la$10_^`h0rr2M z0Q)~rfc>8*!2Zt@VE^X{u>bP}*#CI~?EgFg_J5uL`#(>B{huem{?8L&|K|y?|Jy{e z!U667#Fc^M56QjB9my@p^~qJqZ<9-t3zAonK#LmRl#D>J`#LC1siG_*E#OH~R6YoJj!Rv{a5`RlPo%l=Q(Zqv^`xC!Q z{3da0q8#!Iu1Q>-7zYpOe|!Xd1bhU11bhU11bhU11bhU11bhU11pddN_|B&ZUO|^w6Ik`q4vQdN_w3`p`pfdgw(DDS9CEa5g=h zMGrmc;Y@lsgC2Bx(C9&>2ZbJFdXVTrqz8c>czWRIp$9#jP7mGbp&LDPrH3x`(3u`O z(L+aiIE@}U&_jEAIF%mS(Zeb9P(}}J>EUF0XhRRJ>7f-pw4{d?^w69hn$bg3dT2rq zjp^YedN`3DPN0WI^pK>71UMf6Ze4+VvVfkqInuAig2KB>qYKwfGb9`{Q@UuZ>?Bzc}7EF2&2@vDm)Yrr6@x2eH4$ z{sj5`<6;-Ylvul1lUPagVDz2nGtq~mzl&ZUJuBKSaVxArSdjcQ`40FS{*;^vK86WN z3;YVbk_z||+9pp*mL&Ej)`H*Q!^A%lFC-oT@4!`wOA`YUYNA`>ltiQWkMXVX74d(@ z--lPW60u-(S9E1`Npx=XgXrI*k4GPfrlV7%ZgfQSf@q&;k7(oM!DMx^Dmg#- zQu5EqTa!~^l|=t!*JKOuZ){F1OUwly#(jxNiBXBciL(+-6UFge@l~)I;=TBs_+#-I z@yT%m)Dc`-Cw57!M|4YcZS>3NYtcKRGU@|Hy^nwW9wRDPk%zo@8Sfn=-e$yGNW962f3VtbAn$d? 
zdySo5Wks(r>ScC%i7{Vfr#X!Icl_%Y81XkGo@c~!NIc7kXOMWB5l_)mN#IH3Jwb_3 z;IFLcFN}H|i~h{&&1OZ9G3rq)dW6+`m=*mAc@Ht(gN!$eo=O6L#G*g2q6Zl7e&qe0 z@$O^1dyzMj@qWj6_aN_X#=DF0ev7={Fy5U=+`)+3S%uq>cPr!F!gw=~H=Xg)j8|S% z7zy57To??7ZlcGM8!6)kWL%Go>nP*b$ha06zoLw5kTDGzQz>H#GA1MAYRZ^|jEVGe z0$z^CvT>C0OJrPyjIoq41{qf(!=(%d88$L3$}o{(AY(LTj6%j0$QVf(mm}jB$heF$ zE=9%&WL!cS7bD{$dN~{~FQk{l@bUtz2F83OI0PAkkpbp>BsdTm=OY6Q`bh9xWDGzC znDmigKV@@ z{XWyLNVE?O3WwWB=^N6mOKSho2^R)|6)q73TefXU6Es8TO~a66Nwx%C=NwZsC0@Zd z6TmAJvghnhg(3KBdWY;k`UNxFT3avcy?|72s?bk8ZyD*-{DB3LG1X+jLu=bSHD)&% zdjFo#s9BX{#Q2X6zjXZ`5?EvWd6D#YPZH^851D=EobNU~u!ju&JOuT{!66fqmPZ zQP!@@kUj&%cHPR_nG+}2qsH3T0`nKr$Qz4Oq~sNSL?d&0uQ&s^TY6w!0AIiWb`4vw zU0D=NSLJxcG-TZ|bxpTag*Ro%WWaW5oIM{4;L!l>+3Q!5)OPEiBhOz?Mj!5TV9TZj zr0nxXpS=0TO0s0sHA7Br`juB1__iSOn>A##{>rDF0p z6Zep+4G+8&yE@PRSrCMq)C92(47rgv7Nt%?i(I?-#2vl9Snhp#zItb`(OXE>3msO^c=T&;-x-qtzU8EO(Z$dOw~)7g z8QDL3-7d2H)mv_;6!((U${~0Eal%Ut2BOeq$WbVED-&sN%mUe=lmXd3-TF8qTD48v zGE7rq%f-*FAq+ zF@u!D*llaQMpjeFIB_Q#I;VuPF>%3%WqR88}Zd$r-DuN(@*pn?@cO5phY!Yu|IEgpb z#)*#DN_XM9tGccwlg8Y;?~qv~^o_RFOALmlHWzKzQJdOxbg7n5uic)iD`e zO=dnb{ep8J`38hGoerM%%24fCOtk0DzT_7#eoq4Dz`R;f-C$7EF|rC$T_SH@A(I=X zheP#1LRSqz;Z$CdKu1$`L6qS?#o;v5v1C=}5CM1{RrY)&5_v@sIthNNGK1z-FH2|7 z``)*o9X)(|Lm^RDpD0N6Eco5!op!&U9$US*>2hXn>Owwm0Lgip!eTalB(UEF8|&8E6C6{PQG>11&c_a$BOo$^gE={ z#B|X|zx%F%y2OwHLT#7?Dkp@(#ieIW`s?&?VLe8N0rRmX8jivl9L&ZdOgpZm*qSQK ziX<5fCU^@Q!Ug9nOmt*WsA_*6dXRqpL9RZlbn#y%ZOQ#~DscOM2Xf?R`qb<%bHJQ^?$#z6ll`Nj)U5?2@c*7q4 zRyb#WYhCiNVSj5~VlUO>#~ZfXJ7x;C|6_sM0*OcCXGbeaUyA&u#0TVdLNz zQ1CzUnclA@lSjv}Y%>Oh2%<=GEYUPn3$z!Rw^iN-wZ&0g$8v1ez2MD|2sxaykW<&e zk?sSIs=|9g*PSbT_~PlyNq~x)2VN$Dp&pFP^wjo${Bh>{-e@Yy?%qPQ`#y&$eU*0yRe^@>$XLAj0sB+2tJ!7y4CvD5=x*Ds zjHd?08JX$4cZ=f!nK5rQ7mPAh)g8++B+ZmL4oJav6w%;#QFIVMc&ldid@LZVqQ3{- z<~?uU%$p9LG3hIB_FI3LnY?bkmjb=U+q{S@iHvDC?pn}r9x7?|=oRb8R5WLIkpW*& zEqgJUgqGV*LcKTF&-mI){gqlByGR+#4OhLrvwgAdYIzKs<&c>msHe z>N0q+Oxb|5qrepDDgdS`tF~zHiX}3!4{swy5aE0*5UD_TV^!lepRL(Mz|KAI(%Zfz zl`|fl_v{yI$WX4!GbdFpCri>(`rW^0fj1g#jSkbk@u-(z@rj2r^}ixRzkVC;u#-H! 
[base85-encoded binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/ocdata/base_atoms/ase_dbs/bulks.db b/ocdata/base_atoms/ase_dbs/bulks.db
new file mode 100644
index 0000000000000000000000000000000000000000..bc13ef5011615283d1e197f3ddb10310a07afe8f
GIT binary patch
literal 23715840
[base85-encoded binary patch data omitted: contents of bulks.db]
zv35jVKu6qSt)k4vYKfby)hV+|grdwEN#X`;b;_)WZNwdu6W19l=q&Vo;yPkvA901YqDw@QxWZaRm4z?5%vzhG&jhjvL1v-J<-{e%D(Y+yqDJV`A;d+-D$1-M zae=iuWtJo^FjkORbnhCBqRPV02IpC;sIoc{MU@R=iF2$~R9R^5bBq;K78QDywZ@|F z43ZOP8LKF=fy5csS_>KBw~RBiHCo)|(~K1~7S#e!O*Bv`;uLEYCDxloC`zo4hB(Pu zL1M+`2NX3nN=}?$t)j-lh&aJmL1R%h9%rmhi6xG+R*+cnD~~Z&QDXxLAkbKJxBDn< zHH*7(l(mAwf^L$;5!MP4EA|XPr^d>O!;BR)7FEb$)+$PDEtWXMTAdQBAr3LtaI`&y zILKH*VbN{?fxsdaIlx*)eFYT(6!jH){C?Id>MKJb6!jIVb{}I6M)lx9>|?Dic_sF; zR;RpD2t|2?A2Rl^*3xKK;ox_(R#9KQRm5(_3i^t6br)j=c|~0W2-Fq*Dv-%qMRA2j z02IZw7DYH%t0=A@Qh=hk0@_)tD6Sw)fTFlUci+icMR5%fF~3;7B(a0Fg1U+-1t`jE zEK6)>t)jewN&!mpx{bAhyuv6@5rCq;LYLjjT19<@J_RW1D~$RrtX0%k7|DR5zQUN@ z%vwc#1?y(U3i^tEnc2ixL0(ZCY+|jTuCOx{v5~ck@(MKzD9S4|@&?xGlvkSAz*u#t zzDZ&|Yn9X$P$TZ4oLI+NMRkQr1{Bp5nsF^-1$9O800iQSzI6>_1#Lyuy@s`l(h739 znzf43ssm)KAg!ojRd-GKv7v?$ggCrlClCim6aq`u!5+p1{R^HtbSTzIV*_D z8lprfDl3f8Wvn17D|{KCsH`9WOIblwRs)4lR95J6OBf*)9k+&9!V03WLK`e*1yNXi zNra-X!uKv>1yNZ2G{hoC0AWQ(yO0q;SJ5Z}0$D{ry)9q`QB`5s1B$8&vOAv@L{$x> z5Q3_TJ$4=|h_cGC1fVFZAiHx}L6lYSdw`;>!fwxD1yNR^1VE5g7^xI78?OHeInjtv z?jsLD^51&dI$H|=_0udcA76||muy5cSQ5fuYBWsd3yX+M4v#k)!;&DvR7z5cFp5(N7x{hl|F-SuXRfX;C1>juD!yYCYs1C|vbpyU;)a0TA=#T>z1#@w-#s&Xh#OT=XyCzj z+x!9Ye|^SQ%D=c&URlywCEuZ3vvt=8KZ7UmrtZ$2=Nb35?XdgQlx&B;C9k3IOizAj z?6d`S;o0GLzSo-V?ERVR@b3I+-KLk^%^A_99k;#sYUP_T+AVrs%Ktc(3O&DFm;hC_wH7$R)d+|-ObCN|VI@8d6)4-NTbMYN~rtVJ7NAl5Y6K+rK{lQUd+|;Ey zcy@SrsT}0R*D#0Y`awtH87<#+e|Z1ZyYFy5&$kx77q8+=h7Ugt`{}r@PCXyjey2k! zXJENPTZRo&@RjQ?dmg_vH$JZO=RXkey?nWvO*iJK^~sUSow&1jSZ=dXT5!WcGy{OicDwy>oT47@54)W+$mk)tp9(X9d@smf8YPZ zYT)ZLiuEj0qOZIj?%Mx!x8soBOR2Q^XYVDul;ZqfF8Upx$`9ujW;Ofc`r)ozS>09)(i4ZCgQdea|IZf|3C2=k9bOnu+Y(Wkep#z{>&SKRAfXM>_Q*CkND^z6F9 zj*6eFy{l8S2G=dMM-`&2)$v?;<3S?WUHKiZ{0>)sseFg4e9l!qSEpe$a%|^zj@mPG zpUXHp+>vH(IiG*uO|o+?JLj@X-|KLFFX#GRu4bb5*5uI^$K~hi+8W@vOvhJEI0*aW zvU4uG!)51O_26*TgTqx1j&GhFhDV1(znjE~e4Yn4^3H^@fjKVmX`wxbs*h~tXI6Rx zi<$g3BHU8YIbp7U28uOe>1gRl0E)9_AVDuF+6YASgD}xa3Bh4WkxAjEdYI{xV^p=Y&dwtrG=`S4eP4k+J^(XkkhT?%WKYZZ= zpKcoL|MCsDQDK_3eka8%UFBpt?$@k({V6ByPc@bAscxo^j)u(bf{D)<{;=l8)*cH! 
za7)6P1l$?>ntR;$^M*Q8-#hZZ=v4YifCq1yRbbFn?`w`7;{KoG>V@9%Q7ak#CfTdY zgX1Lc5bfEVvm7-a?8@)gD&$hoL%pjWUP$FT{6)L8KR3D0_k_tEAA9i5LJs)|2cTcF6EO_=lh3)|$IlLTcL6tu6YdamuPx zVCTn!IdOjCmhRj!`F7p-KEoW&h?sNiHD;JkR@vO0lU4;316POXWX~ zSaY)%>`(Zb&H*WZ+MoW*Ht(GZ`{Rm!W0%VBZPmWYE?~1e_h+1Ib)q!4)r!)jrBhr_ z^*6Ae%p_9_4;g@0Gjhg;9fP0lcRkH-cZT`<&0gpA#ZhDOjdOjEV1L@}fcM_+7PGWn zYPN4YM@UFm@Q-7j=(rKNh8?k=7r$meu{%S(ecJ;i`9cx4Ww3rTn@9^DkeHc)=Z7 zUw`|QUL>~rwC(XB$|nbZ=VAHene#k&shr!2&H+#N>Ul+tsC-psKjtL+O3#Tyi|6^k z9s20cSxV$~BGv5R8wf&~cJdj|^}X^JSKU{}D34J=2oWw9&v-WA1)4HQvS^u1Gd2|Rkv`G4PY zzVAKPpS{G%{`Q`G%1-&s?94r-ol=Wu*S-#4tf3#6-0;7+ryqxRZ8Z9U@_jh8gDUTx)Ue<($N-6pEaKH*_NkL9pbPqhF z&v8*ni6_>+z#RwLKZ_3~>zby*8b$l+x6%7*eOV;@HVg>B(Ol(lM9aP*^UR>!DY8ce0 zukUti9N&CzvL?#~AE6e^Pg?Jcv*&&8oBM@_zs3w(pMBQ`udzG$`0H~nUKgIyWUkT` z-@?2FBQjj@qJYfnLw>s8j5^8cBmI7$)hB`mUn#c1m5x5>Kig~O1Gz9t6X9{}y0>1F1#Vo%O`N%2vj|MRvE%Eb^M{S3Lo`!K%n?{13^n2Rj zt=BC_&+q@(F$}_W$bVdK9fbSn<;jz^UvTm6TJj~A+I&Mx+u?Iv1XZ!$&6%UegPrO# z^<A?tRahwV7LgMTe+-1W-3DwBG;V#hxC)LX#6O?d8q?29}8_#`!F+(b`o_eYf$ zX`m;5&6-X_cRfcr^Pdhm{^kSP9~QEFGsHjfQ!ne&-aOor+v-%nr>a=;X3@Q;pl1M) zQ~1c<1K;G@=~^GM!^xQoCKUmm7;cx>(Z?2l^}9AORrd)}r9z*Uan>$ep9@go2ix8k6OLxv&>D7&Dli8d{4L!HKjw4z4ly?T7@j|}SK(j7w|x7&u6 zY+T))+p!xp^0sJGhy0!B+Hv3AnA}c{T>g6YkTDC<yP#xO7#xVWX$o7 zN0u&)tcqkSk?*wpsiN57)W8w%AN}YZOT9ht%IoySl_=x9`+&}xHYoGbX`8$S3n_I; zV=t@4%c!OGKHq-j(+X|V#unC1k40B{y&(_GUqv}tdWG7o7>TNl6O0^m1EC#Md#9KC z)2Yor_l|6qzM1-3*n9Axdkd%~^?v$@1HkV4{fis>ga@FOt(QDY_Ul9)*xjP2dy`%0 zOb4Gzdp~YMHJ#451U~(y>-vd(-R)8|I=?YE@E93O1>`-t|D6az?BS{@^S^GS3hHVW z>@D7k1gkzI{%AZ9ExVbozCCFp+LfI%SNw7t_1d9z(}YItsneTpEngF|88wZ%alZ1q zZHRJxdhEVWHxzi^G4+qs&D5udlMa2@yMrQb)a-wy$eY@dFH=u^ABsZLiZ^9R=cC#! zud+|B6Ho;&xSU&s5!CeuzWa9vFGgPe`#%p^<3qh}7jUC~%P8vb=*X{|gComMMatzW zh4|NJ8M>f97RMhN6fee;GIByUCe6hwZnLiJuQXt;H(BRT&LM+&NW*tunMNvBN%SXK z$-#oAB1j=u!Y4RkFc|WZ2m__YL#?*wg&AlExCYvx|K_W=bX?bUa^>nyA-p(Z#`28#f|=CvdsP!l2!6a^@Z?|<(-WO)DJ8s0xW$6TIruFz!oHi`L( z(xV(t&yapV#M&w12bBx;gxs@kgFdlnVAmceQH@_NkV%VdO6 zo{P(XW;cXKNQWjd&j0b;i2tOXdNoMj*Y)>9I-`DG`-a^-uPjSvV_o?w0uTEv8(; z-JgpOV5&N~_yK2hgnqzA`jWmZ_-Pogfuq0Z@tQ&@mufU2Flv(o ziR6laV2Mhp3JL}*oDxm2TxB$SwrAWVL&X8tP;ttSxf+AQKl}T1boAqr*S2`dWVC?g zNMI|!9`|xj<{=uly!X+?tJ^oEDyY?Y@l{*gmF$@GqeB5Y>)~dtODsSuJr|zp(y%In z``X6yyR~%2gSv&RKDtuC;9(nk2)ZcVqg%Is+zlL0U|Rpd(`);WPv&B|U*M7N+DZ%_ z|G0-_{5)5Dl$WrPXyeY{`%kSL6%|Y1_76W?=vvQ#!Fja<5>!4V=;9{V%Wo$>Msz$+ zY3}rFgPm}VC7+_V_o>Fj=W?^4zTc`}Ku_*3pYYJ4gpqe?I_mhiwl;X)orbm^9F9qz zkM%F+SJ$v_T&M99e?KQvQtNvS%hna+nP27I|7xtXq3dyJ^^1V@^XS|d*Y*heNn{EN zYEmaPyJ!PFcf?ws=4JU5@M|CP4%{x-c%~_Skc5Tx)gi4PurbZ^VOsmSHctleRKwlR2A+mAsQaLYG0;+B`{XKhcnJ<%S*{c8LAeX~;e z@P6O(TaH;f89m_E?9uITX}HfD&Zrd+_#w~7p)v@!Ov5Xe$ievkFPV=6%2-4e<+%9b z;i;+IVL2+iv?F`~Su!Oql&t^P3&-fzSF8#Sf*CWpFeEq_4sVf4g5a1uFvu-dio_~W zfbsn8UD?Hk))!nu>&u20@;~`2|EXQY_F471$uZP;*t74_!0=ane$OcZwx?1AOpQ_9btj}k{DIb1$Iof~u_|g7oeY70ypN?<&bd*mI{k*mx z_3)1M6I9-dYEw9u0a|FoI_a=YnYk&I^y}y1Jddl_wXnijqt9$5iS}*;)W`L-R zK0Mk#9ZvhcQS+~x`KNI@Jo=v2)A{^_sSqDTKc~Z;$6ry~AL32R(c$!Ou&cok!0GtV z{>C1(y9n{2DgB%wC zPRrA{s*@+Fm!surJ$(OXh1egxWxY{Y_}h7l+@`iuVBt_%ng>CXWU zlEF!{$^em6B2&p=1PYu;CWPr51NS+co=v;I9D$Y}k?N=R^nYHn*0&>k*|^rnlUtq; zZrH8(Tg}IJK2iNNC7zs(e0**Dp!MpN-%v~=uH^Bmm*_^khhoZFK3-sZpiLGSl=k0p zGJA4YD_pnsFn*%}U@RbDY-82_%c!5dX78Aj0vugy)fSJdu2{Y@s*QEvM>KiOp6b_A z_&BoUlD{su098xyx7Sp)#aDGlRKZyw5o*N>$G_ll4qMm3vFq5Mzd|JACh1p#q zD2KC+-VcAhnOZXa`p2`+wo+rCeV-E;*^l~Gm|p2}qexT|=05ONhjcXMeE-ZFt@@+Y zSI>@$S-S;UKKJ`^TD$>uUcP?Q(buYGF2l<|Jem?Bosz23k&N*Hr z$g4UZWoLuDThZdCG>HJoe9mzhNkgbix~atXYFY1Xch^-4Q&@V`imaF!NO-4tI1_> z&Zk@sCnyDq6@h_LSOguU3QlZlS!s9{R$11;2xeIQ03Cj^+|KdGUNM$9wF}9s&P_IHX+KTe) 
zJ&2E2-8?h%lU+VFbK$HNmr}l>QS0T`S2iKAq~1Y}&rCbK@W5t^d#-SL^@}wZZod46 zB*#`?DBRkh6Twnhzy1WcK~d;1X^Tzl1%Wh34eAiDwC?#~myQ0zsyO~tM*IBUx- z*V>9abUxY=)faK`(u@m9tCoI2p-t?g?&cLBarJXmGqc?=d;6#o>)IdCAGbpmwis1J z&FS1={!3*Zb-0(j&)y|&c)wp(f<44%$hStWl?S~=cavRmTMVy^hn@JeCfCk3YeP|! zd&{gG@sw$MJ6TM#!e@I-b-U5^J@q7ueYt-#NBm^6b*(~gD}3DceR}73*x*D&zKOV) zgEnRyT3Mry9kyzy$$Av)il@JI2kV9(&?`wqt@dq-(OCNn%Q7vh;a(n>gFF}6;Kq$F zR_iDraNEn{t`jZu(ac&oarZhrMg=RnEfz-D;byV}>dbQ=QL=37dycRG%{cd9(QM~p z^tw19V3x=pD>rQ4UEk6L%Tj7NO>(%0c2pm$jjmdNhOg;$(bvZYA9;4JPpTz>Rqx*( zXqRe}<)Z1jsB_d?)cC-zy$Ad%k8FLB|fQ%~?-9x-R6W;hGhFfuYJ*{8y0!|MDCt}k@j_T|0k z+_3(N}%h&h3PG)F+!cmrq2lp*9Ps%HiKLsQQ-m z-n^Nz1zk;<-t7C`6zY%KOA`AfZa`nVtm^J?Aet&_)H>d}^L%76L;8T*I)j=R>B(|i zn@SDmukX}qN(!1Xy6d?3fm-T~8%liBJOu@g->~-Fr-{_Ar;(RLVSiALI;GvDrdyEi zW`j8kKJG-}Ypom-)U!}pH{qzTYCBQe7H3kV^VcD1m;K{?)@M+1Skquf^r0$08}de# zl|VHgzRo}H+;Zw>YPU~Qyi=*^LRF1R-Qp;n<>u+l+b2;y=M5P*@<0Mw^W*rL{tI`a zYVBUMPko$$xZd&0T6(QO?iX^G91C5Cg4A=r_O6;v?U^{^NV^HiR3YbhZLg}Ss6kfk zUXCuyP-E@q7M>Z2Xy_7IUd<{SQL$~j{EUT+LPY@s>y4j;$g|=RUk+`dzS$pW(7nel ze=LFJhjX2MEDTSI&|6L=%*g{ zR-?Gv>pSRf`<9#imWp6R_RI0V8#;2x*-EmEg{O3nS~S_WA70HTnL-gn75$I(SGMOp}PBsn{!ohZmpb)V_6c7+1(ZDMA07+n= zN@3usCqkaku7R)Wmr0oO&%a7f=9?YYKehdie&^Eu>F4F|jkp7*@$&B9eg3bTA1`g6 zWx92^_akL+BM>(hklfkW0~%mkB0kn+i`p6 z*FUy7j>01%eR^oqmcY_1;on%AC5F#iQel8Z6cQ|#g$Utr#h?&{Obn~EG~&QulW~M& z_l--lz%`UPeRZ_!3 zF$_1|mv)R(>>o)8}e{)#-N~>;H7XVaMM3;BV(DXy@T>lgXs-VHy`*CWzDYB<)SSQmzRI_dBdSD(Ntd;Q2-!TM-QSU8=)!}&+nN)lEwQ-?IZBmC5q zn(-?UeQ$EZBO>3uEm*UM#>467?M|;>IA+bBvZMfPsK9B6za#}*v}aAh+jo%>xWmnW z_RIEu!NI$(bZve?9x;Y&^5-RwUd0KAP=~;>JyD1pmTU%SzBy%AJp3xf`{dx}Z_(mG zZQho2X5-kO9eXFZR>GkxL+>R|e2e&>&#qqA#SLSxWevi^Ebzzu{c_}|TySXq!OANt zm!N1-(ffVy+vu)XFTloh=qq0ejIyU>4;xm zulH={01F&>^I88nS6H}zOS!jr#YL3UpT9eeTM1{j319N*n=@A059^ch?K*n2}J7K%7tEjo3{=Hq=sTlXF9Wrc6v|2lg^`tQG5I5>@v zTpdyw_wLN9t~_Ox1=(x3f;)b%XP**_7RPt@ZPm@AVy24DmNkC1Z>TTjrly5}6#e9+T9)!~EH-6^Qc>7IA5yi1^lPy9U4x6LY4 z&|45yd}j)J`aSyEg?9;PdGDy}0c}=MCtht?!oE9&ic6a}Jsa>rpXy}w0la$Ftmy@J zrchzxBj?Y8o?R!qEFKMd8fYGUuQenVo$H&os?q8RNKCw58+9NG`SzUNyAI$_Pw(ol zSUrJ?dP0m}b0CT`;nbD3lC8r44=S8dc{Sh(O}1Lp9}-Jdsu?_BBq; zE0+G6TsolSE9b&TekSpRK^A!#_qXCz&9k%S665vbe+!Frmi*uBeUv!gZ^8tq1tkNz$E& z-I=^zddXy2XQIw%(Pyb{Y>R@Q{qAkol`QRbx!#p1-Sp4~!7nzSR*TxXyvrBwWGwxp zwb-3}q2#p5|1m~?4QcmYThHv$b;wL^!0vfkr%PAgf8d#-HdzoVdrq%dD3t$lOm3sT z@^-V=-5&?B-}FJP*g0M}GCu9wi_x*s(@Du+)qHwBNUTx`1A-OGpg=iH9?Mm*FDO7B zsIU2&;E-UY@r3Bp>sA|TKDcG(gY3-SkcX_^7Di=Y8@E(AOc8?Gt-e&Pwj3tl|v#?ZI82Rs#7BDNY<;Tk9vdtX(jxhRGsukc`X1RzVcd?klU;*8S=hZ4dk2rvG24K z=&AR*(SwDNURhn198UNM^6NiwYb!uLc*ckmts3W2{YI^MS|8-ozujZmf_%eft2$hd zsgu>RcI&p!LH>o$kOm!I+GRnK4A)_|A)%fipYeFi#lA1?@a!Ft%7H%~>plfOwjzPH(YsBTt8&8KtjA971;e(UQ&ekVR3Rn48%=4Wad1>W)H}CJJOgJ*(lnF-{cTKFuz4lYLOb}YFe{g}iu2Qjf zUoI@_HR5U$t~23fDY{A}6R9M>uKVaz?!Jv7ji=ziMSY$J=;Cm2_vRG)x*y2g6qzl6>@r3ODZRaG};(*jo13P4WpuTQ=)pNp9 zJAD3!FpvUMBOxm+f&oV~zNE>zn4eAsdd86V@NQrARZaVng*yuottX-H(o4B}?eXaY zZ><;Ye2Qp2LnbtRq2}snd0R;tF*pJ!+F4s+=BV(nHaDriE_{W<6Oo;rCqz2BJj#v0LY&GxxIyUVf)5paCbFPj!yv~;6S&tdxt@F2HN zjd#b};mNK8J-6G4_2Yjo$Die3ZP&x9FZ`&uDstezlmjqWRsI!ar3b4ju}~=rQmDZ2 zm|Ub%h?FAunkfzf1H}@VMjEhPrL`@DA15XQ_?Nc&h0>Lp^0dftI^E%QT2gRZq}NXtnduDO2;`M}6UH z_R>~in{>_R`t}O{dB(I3q>stW#Tt`&*$mputZs9=q^9?$ZBU!K1*RJ@nyF1?2BpUC zCRo?wj`UIAy|>lIZtA2-{}gq4&ygpr@@|)1XCi`?^1tj=Y}EI9L~iNRKOg%$_v|l~ zeZk?9SJSVT6h==Y6@OLpX~HU!is9>BusA3{9H<|#!XYpU4ODr#Bm~R?1sZGqMbCbQ znh)-u)_i(b5xnQ2XSTVkr{0=9aLLTi=wN5h{zNN#Z1ZO1g(@rg*oePC^&SxM0S!N( zt?M%vKS;O2vGe+TZWd>cX?*8_Yy1qGVruWYug{zA<6&A)NDEIFz)^YJ>EIz}`+kL} zy#Cv_J0$A$Hwy1HqQCen6Ca4@%Qp`P>V&e_(wWaA`c~$yy)U 
zspqkczi%0g@h2+2ul++6o>5y`^L=yrdDS_wFX5_f;ko{Y)KuKhGl@lKDcV4c_rvsc zJ!YOlb#X}cx1s|St$}VS7ROC2dS7%FndH@eK5hM4&pbsXdp@19qUZoU(Hef)@oaKC z&=cF_WBV|`X?b>jzbbwpzwOJEBEeZfOgpSgB>n1RE;#C3brTr*cp%KvGMfv47DCyL#@}ZFVlay|J{`l*Kfn_qE9Gj_Z`RG z&hJscy?ISTEG=-`t*yRU&fsC%AC2!SBkzo9oR*_qBmNIAJt{3l?hWPJGPiN)^?vm5 zfNr-8kfJP<>=@1JuT;v>)kKd)d`n(-wr!Ar1A?VtYO{R z<8#6aZ+vvxdZpD@RQ{fpqo33Fv|O#4c|(fHTvXomnEuf3TrMBeMW(#_cb`{KwemUl zU%oG`YMoQCXi)y=u{~-J3w`59{kU}E<+Kt0XvK;l@0>^ZQxTQdb}5XF*zB2C_i-7iB>v4^x-J$ zyLh!9Iw1|8Qad98?bF(J=ch+d0%`J**}Ec8?84(u<#1oK-7O7&PXyX#sr;i#dIZWH z+w<&;-4WKZY|9aDZb`kJj#Z{356*=(#Lk@rqs%C%R z29-PrKI)5scA z$T#QCY60=dqT};B{dD+Yyy%AzPejL)mM^cLmcO=k|0c`!Y+QfB=uO_Q9WgDxzd<+m z%86Ex4&z1RVcP1OE!&q806PA?zug4+BI;a1my(2q7L1-M{ZCg09ML!}Un?XU^suw& zaA~}6rPF&B$9pp2(&1moow#>#yhBzo`@xlmhb#Zbb6U=*=O5&-bU+nuMGpLvIZ#UA z2It)L7`b$@2&s7W!1Ue^+5(>Za`&9(_qQYCNH<>nC!i%s#CWaRn)r$*k}j_9MM;?735+eO+^thuqf?H-h<>7%iB zedYXcpCqC-@X}-63e}WxVaq<oxe$0@3oUy)`DiiV-38w1D^TlGMmJ7r9S; z>rVWf5MEOx_b7;L=h>1nE|A$@cI+vGjYrB~f8OSyjI@O<_v{M9BQJR`_M94qBb$ck zFR*^AG7dif+WKvXMj;YQgES!`nG~iCB_UFUL>Oe4Lr{i<1d9!Q-sK;Tv}@@5^xp#h z?63T8085YO)%LE;LMgqYx*j>5h{p7h%zvBo4At4;a4o8#9hM*5)vS@$0XwwO+1K!_ zj_a>{_sqk@OSCh(_<($%I)19x4tIq(e^!x z5E0p_bH<2DxZ1GbPCdC*@xJXIJ+?cr@N)m~o5yEz@c1gTvUhlYMA0@@+x+uCBe^RhHh*xJQBId5)XCJM%%A< z!8NO~5BU2D@Q-MB|6R?VqmZJ9YYHD0gS`)Ho7OKJ@RxPDcvVQL`RYY}>$~44o-uP; z3wF3Q7AEdp&OA?AuweamrB}u55a%ut+W4PLFf179}|^?Si&Oll(1*ry!rhgSNShi9n9E zS6^S}wie0pvvE5PPoZf1TXNO9m)+J<6Gm{QlZQ@2sZM8hDH^Oooh*hoSW=vV_P3Mg zP8~XpieL84QjfnmbXq;8IE897Fs5F$6A@H=Czp$j_!B6LBbL6@nGKZf==YaAPedRZ zf7q|%nB+4X(E7HKXKxqJL_1#|xp=2-0yU~%uj(V-FgQiy6MJ+OPI|MU?BHJpXE*(3 zS)l{kT=h`bh!Ft1>U|9g1-XRhOg89ebU`+rRh%^wed!8kP9gf5(AZ}|6a6HC@v6Nr|GC2~E*7QwZ5Fh!@?(-$~kVtpEe8;_4`ssMJSo@VAzlJ@lpwdH2oNUlvIw&`(Wf^gH7U!HF zwe_%*-v2Kz{ML)u)TrvekEoP(q~4bwwv3z8)cD~jQkT~A{D!X!CQ**lyIqgzHXfPI zvsIgLMB}ExlFkjksNG34mFm@-oEzJ1JTtg5;k10&jz}bg(jfb_B^oXYo1d|2$C^Ss z#FhPpl_+zALZ+MM~KVq_U|eYwWrJxbu`w&}lhfUeE%Q_nVJ z=TVlOL<2h=`Gl-i2Av&#$^q9~ujyS~;*mr=`ll^eXjX@ire zYI@8%e4DDh#64csm%vSy=ZlBlzfJvYYWo26XB{2e{Bf?mW!Bb-*%KZM`1n-YKf2C3 z{E~Y8BCpE0^%p6VJgVs+jk+nXICSQhEx~^iEUlcKU%0>T{ls#W)sr*rc330sl4TC! zMf6BNUhP@CZL!wvgKqy2^#GB7iTY~Z{Z%uqtfx%bsc2aVGvi^>uioSySWi2kME6v9 z;6g)NE7Kv`Q%tLfB28Od;!UfK!qvtZ3a!o|^HEG}(*SO#`GLo~s{5a@t{1Hkk!Oa{nR!5Zj$ zf+WV}LbrYphN=&)q3Y{duK6Eb<$ve?ILzgIxoCs;bRW98 zIxipHui5GR*r85%iud&%yn!%KHpz!Qy)!oK3?Co8zI4RRMR|ypC%@-iS&^NOydrrm zggQ?~K9d{bKEuit&#M_b!mYnMqrdn4Yc1UqFQOA)9?kBV^A*whZv;>4cA3w`P74Rr z_Rex+prU;3Shc=)Zq09{ zlmF$PKUkC!;c8QH{7w0M`Coru+B9}AdjBP<%8;AR0IgO?Y#K(_?NJqVW?C zVm~z*IE~smrHfORHjJ5Kn#~^Az7fd#e`sm2jk8JXpho)Aa9`sP&hYmM9Ct;V%7Lb22{vvl}GX`gEkn~`! 
z2KgfgyxnpXrFzmfpBE>D=xgnK?Q@c34c z|1|fzX3|6kXW4u@c55HVFT9c%bjRtW(=Mmg zPBBhXodTVDIJI>0c5-vFaD4B0&+)Y59>;Z#v5wOmHIBU;kz*srDvs6;g$_>~E;<}= z*y51rFxO$c!yt#w4oZi*4ut&=`#k%b_Q&lr?AO@G+K1YYu*>}(*4?a|Ti3UC zwPstrw|Z!G&MMPtz11SC8CJno-KD)lBFIT~>#dWm!YVq9Sn#Ey@%@#{6W?2lk=xL$0sBb}VesW%M zZg7rrwsVp=BxfvV0H+P7DaVsz&HliC$Uei~&0Yf;Z@LJw1=qDIXUzB!nla-W8~RYz zVS;RK4(i`7>yUtK#&xYuOB(0fSqBBA${aP!1hEeANhR0SgO=5=<7H(Mq{2+8VTB)S zKS9dPLHzkmFDHB+eH#md?%ASGr&8~3-d_7J4lOrv3E3Troy z6q#!jD_Oe;QfMY;FptK{5RgsFsMMRVVx<#g6EjhRc_G$LKH1nTM(}Mw!P>zm8<}Bx zlj5xH09q1Xb21f~;c}cf$dltc?P)wwWkc>VlnmtPOm!mYGs9tgmCOCrB?d?;?>L zVw&ng*5ta@q~8w=hZHi2h?T-4YnY`;B2uuD`DAr7l_Jo|TE`=+nTe^yV%Ay#S=Br~ z3OGwk%v!@IJlq#RsMx;r*-KH zWMwl{DOV^JO4f20vQpU;DTQJcYZ;GpGmEMWqM9TiUCa6xNkyzgf^;#97Q{-ZVlCy9 z0`vHbAW};NBvID87^+4BLGsPK!`fGwl(m>o^31%$z*fqN7m&_o-c<^zR3c<85|B=2 zs8S9`n<`lg`J|(nf2mL*XT=Ff2Qx`I5Lu;=wLn1Ho1u^=`ga#AAnnSbLJ4cWfV4G3 z}4$u}sL)@=3N?7C>o{ut)*PGLwW@!LC?VI57u6 zwy6~~LsY25Gx*cZG)Z74Lpq&5%?yI@l;UZ`)G|KhVwHRxJ1Bx0f&rj$z*Le)fKLK#dhl8Yt~FLBAuDs(SK!nCXXV zCKC4GcQ=EeHIYiX^ShZrVv$54>gL?lJVdFgE5D1Gj7WbXV;7#USuQ~J5&9CH&7uH> zN2Zi_=5;cY5=kVoPJAD8RU&94e29)_9_0$LSR(D{+`%jga=EAj(cVl5w98e}_Plmx z+GSFSydA%-nRba#E>pJUw=sjDddP%roLieIS1FaPom-g!N|mY=51VO*)=h|c$Q+TV zB*yHbKs|;OprhT~c6!q+HpY59E)5M6eb@(u@zxkAZ;C zN#rU5XrEa#D$`RS0%)HZrU0^_Ab|BT7&KQ(g-lKW>0>bHuAz66@${^ZjtJ-yN%_F} zm_S8B$OQ=x2%njdOeE%u%!KrHL?+_v*0{rNujhHRkVGgIDVsP0 z%VSz{r9{3WJ*~*J`g)bNI%w6)+K<|F#+n!nNV8C8Au%yRoLEAn-6@h)E{Jn zvK9e!j`63EN#$V5nE*D&U@GX(6~dYXP`Oe}A(bfPH3;Bx3?>r^0jy2{mt!!wR0#Vq zsu6lJM<)Q-7*v$11W>tBOs;?fQ$6{5Do4xdS+(4Q&=WZtg1%WL7gpf|jbp?hi{x^5 zLeJsM#3V{tWoKY;OzL4DZzTe_o0$-#RO-e9^2X#XR5Gb60mO}wg8jdcCoTfuZDn7v zTr42;#ElLUqAyh`2?A(aDF#(QsNxHNwV8W{q>6cjp0?4RRZ2LxOzg}D(#E7qBvvSt zP6RNvQZWTs&=orJfUz-A7Qq@P2Pa@_Ogk=B+4F&?F^XUuA{N{6fuxl}BB0Z@d_6;> zqaXr?sj}e%KP!cR9x1JPK+Z}-lZ%B`&U#)}>Q7+sfNsgrBXvcnztsR@fxZld*y~7=clMWdU zD;=U7COG&zbaW6qcsf|yf3$ySf7X5vjQL~iC)p3O?`SWvuV!y=S7i6t?ws8|yJWis zc2n&F?Yi1Ex2t2vv;AiK3`YBhZMWDa+RnBeVcXZXm2D$iH(Qp?Ynz)kSvK2kmf3{c zjJ6qI)7qx7O%)ri^*ig^)+eoZS+BN^ww`D`*t&zY)VhYXomHV#4vh2vuu8UCU^NZK z`Q5ErSk<#4EK4l&EN@vJwcKvG(lXj|qUB&qA4{2KHA_2gAvcG6j=PVW%#GzvAQ z_Ut0|Q}#u6CVL}$F=X<8Ud~z|X_e?9oKnOd%hv)$D;=^!GtC~u(*j3h2I5fT*`ozo zJw>BELK7xaO4*|XS|Di*YUms#?2*pe5oUo%gzOQ7R!`Gt-(slrGWKvn3q*|p_3fL8 zJxrhlrp9PA^rRwoh(HT$jX|LzDHRfSFkh=@Y_wLWBMP?0Sqq$vNuWr=4&rNpu`yE6 z{3_XjgccZEnV==?0G<{oTWPA5O7>8JR!`XI5Fpk{v5Y;0&;n&+d_&VDm9q!)wZPe! zV1RQ9*@O66AZ-jLhW?)IPiTR)F`l9BK|cBsT3~IZAmpx?J&>mb;>N^BDORxu@U_6* z7=`fZQOxe|tOfGMq!{AYPoM?r#>5)lm0Tuc_a(Hz-k31JN}oc(?n7vSzA+#)WKto! 
zH=zXz$AEf41-q9(tLJcZR>%dky9ZwjEY2*B@TR)EXo1F=%^c|2 zu#DY}uLUYsDhfQ8-IdVl*&H1X^!<>=E`%2NTqy`eMa=dUX!VqimV}}L+XC602`!L1 zMihn(A`!b2q4hBXAw?=B+lSBsvtuMhdcMQ%D9{46W0cAiBAHCV?m%dP-!YN zwvwR*xwr&O#IOWByv0_9^uhp7XlijDYMV10}rln)VGO=yAmF@pL6Cug_h zX@UGP?+y4qyM?nB*dJ4lWKwo>LaQf#bg=r_4H>(cKnwJbNsLMXV@U;D#nS=-7kw7>?LGy@Y6K~vS3&;lha71fWB*p2vFpoEN0{Z?akLxC13A>$oruL45y7HAuo zq58H(C}%g|Yk?Uu{-sdM*!6i@poUCNili!bJr^x-L-T?H??%k7%hv)wWPFQYtung~ zp#^?e8ch)xFlW~$w0e$6CsW^b%h|OEEwDregy|?~*Sz>zpoyg_B~YMi^0Yt`OCtoO ztcF0VXNj~zxfI53Vs>?bR?ib@R0Z!F-gq^ER?ie^6s8vSb+jr^3tW*=3apLoNoe&{ zkrvb!KoQ%+MGIuHY|^29s>0I(Uu3*Xp+vj$wZIpds6#U*XIFO70$()q4b=|HM!PVZ8IdE%@=5aJu;{Y7@$(f<`G(;k7f#lDjD0E(CYaktpM5=sFY4TEigzX z@%s1XNN9mTmde5STE=#8(E^7wPd&_z%GmZiEl^3uyApbAJHFP|EbTDxld^3It)5FV zLCfSaIopP>1vXilNiea>wialCPBO{Sw-8V@tOzYo%2Mwzg{ELz5?Vd2q;=~19~qk~ z&;qe!{OdDGC1zXjw7@NySVEN89KIIFB@?&^PK;r*`C67a1~Xl3mVlgNrcgh72@}mf z`Q&UfNtj}ivVI82S!R;Z&B>J#RtZ7QG>;{WuO+PS0y4}@)G#S4V|^pYP%}~JW<@I2 zSAv{j215CODdl1wIo&KhQmKmdg&?Py$w@@uo%NX@r%MHlZm`x%3Z~}<4gkEWV%r(Ah%se;F@KU zqfg~69tnh#iKPSz-Az6TY?DbL%#N#AHwe<4Y(nXlC|K78B#=$Bpr!inPuE;XV4G$E z38C?ovaSk9pqq@ae!>M}e?>q7<780iU%|K+>oT7N&dJ0;33ek`mt067oMvia7DudL zT_ngdoD<53f^~r)%{iwSKAEUk=Lr%xXK6kQq2pwo6OceTnebs01O$PVO^{_cClp2% z>nuT|&d=gkElT#uUeAzlGAc1l+k_K|EWMv6RAe{`V zpTLt!SrkD6>oki1oFT{3xsX6R%VsseNA%-=4#$#ZYXSdNT>m?B0Jfi_zrX!l5g=12 z0yI+iybEW}x+0_Bla-TDVvy0oi;e!j)($0zwqB}Xo?QAih z@kisb0MFa(fp(a7Io1z8%4d~sN2d!$>yyvquhzKyhyJ*Oi#Bc9W{2tU=y2$G()VN?b6|v*F&d^jvwvP@oDex``+UP8$YmxgFc?+BU%rAPe`WbObX#+TAsew4c?o( zu!b9^<>`A`A1!YT@An7({MB=#{)z*C==WUV{!4S9bcJHCscolw+q5`c7*VHM_hi(# z_0?KcyC+i_lZSMY?unq(2i9JKd-TVw%o%z-F6z_=G1&Qe5u7}^3f$AKMZ0K^&ZRj0 z9JOeCe2WTjpY1H?ti-`#h z(h!ZW;2iUu2KTgkIg2x`cJwSYEl2yK<@u-Yotg@IXxAiP)>NgD0{wFJIS=j1zHlOK zMG~I4q27wP0erl?D6q@bf~|2gNcCU$e)RhDAQ7B5CkccvrNKckz6}V`1cc~`n^X}b zgyR*AhU@OHN;33*;2L^A4@iXh-{0v0&1JlY4Oo0`EE~VNbUtp7&wEs8)2>c)`>%+; zkAG3U(UEyPy#LfkztJ%QT$e3q^SPfZrv1@4Ek{46^_cRQaejx>dTOsaU+C4!7Sk>* zPv6rnEnht#S{xkz4&6WIy~TD78?RrmIpO{*cTC^2(!;vCjdR7C`ns+@>B(rHYJZKj z6M2~SN8_{{{hZcQ9#3S4cly%Bj`2^2L*w*4?XMTljy3y?EvEI-;nKJC7v0=SRd%&nck<>|rggWST6ZKbXj_5OGiTobPTp*%lmH2D9TzPjgf&}Yi( z@XV;zE5g;!)H$MM>2b8h`@-ib|(Y<20 z6?M*K!H%cu$m1>jm*hR6aaJ}t{PU-{6x2B^OlPquh03g2#clo0ILd9M%U+AP6e=jw z{lk~o6ly)IXlj0D9J+kGdDP1VDX8X+kejE;9LN!X2{eWmo$HaTFcyR$iAa`f#_^^5HX4fST z?fzEd^uu?4c)W7eJ!$0@IR0pAQlpYZ`td*4qBhGh%Ki@b9{i}dDsrGA2g>BYTnX9c zPfQxqr*0`UVpTw(N*MqjsbO+ASRA6#NL88;aY%?#5~2t&Fv6!tqG;E^4E6S2^?zM^ z`SnkJ_z-2TyFUBO>U@;?Wqt1@b1PxT?9rB$$9_avl?OU!ov~~w^>OgUOJo=umr01(P4e5?AYc8TV(yI0YnAK|i*Swy7d*VQ_o+uFtrGY|eupCywNyKt6 zswjd3)8Jn~u;H!cx{sw@!`mxwJn?t;PLT7z`g@wYde4C<9xmM3GTkYiz~fpLSK8f$ zi)-8T!zs7fxG?A4_6Ixd@PRA)y1(qt$D;AG1@}8VLwQvOt(4p4qxd|YC-1Ba_8oNJ ze|+`IShcdBRrl>~IPk}}_=A=C7;UbBJnr)uJ*rB5{h!3RV4Dt3x8uR3<@?W%m2ew) z;QI>)_tVVf;-9Ba6gB_(6E*9^ODQ<$ftNfd;&XIWS>ZdeYdopbqQ;$$4#q-qenct zKje}frVBbd-|}mOLr5>#dqf2@8S8+)gn3}6>dci{F6CQ+FB2(lrcqOo#%D- zWSCYvlDgdT>Z}FdmQag)8jHM^M4(PNHAeLlU(t1cGyO;Crd(Z{^&0C-K`+&Bo;7yd z?J*OLLb?V$Rz;$Y7n*cR+M7VV_#>Dz)pI7L>DzNv(dtMlk>k0*>yHGJ-H{XOjiQ@Q zyP{vH+r-y+xpl$=-R7*R@7i4mMRqM(ZSR;ANhQM)puyV`s0I?=;zJQ5(WX9XSMu8u z6!~hk$JV70R8qo-LCQ{JsPn_Ct#i4#1jZX4uJ1ZWAp3)=(Qy|;DVZ?3Qmv#&B>MKM zr0ezs6cPL8j}YH6==%We!qXYEpk8J>c!o| zN$BdgQ@7R*;Gnt7Yum7)&LFe{4BBdpQbAP21Em=8Aej zz*U=bL*qiv~T+pblLjwDgSX?>HsH(^s&DWQ|DeDt8Q4@R$9u+|-2FX7#B z&K`{73r6lbmc5*EueQADg!@@#O)3~a4E{&&Cx+gShn7Eh`r`BXlXzs1#;?A|Q`)HF z+y-Q;KOrq}?vjoE9EKp7EGSR{TP+kCxiTbB6c`{3k;y{jAtA8cL#{GTXFX0a9vKR* zq1*F>qNtyq>aN>T(`nEZHSHR0(S9h~Oc+-M{IbMxQRRWV?Y75n#@&3HzFjt~HIA_X zqf5p2jNkul!2Tb{=D(E-6?*=CIS@aCY-buQHH!9yGiJ5ET3;3kzYT}Xf}_9ak04-l 
z9u{f_!LmDXkTMVso0f%0!0=m8kOlxzu+a$5i-JmqRtQ`}D?}e8{r}W0KY;o=a@y;< zy|1G^4iozg9B>j1ejLC@xM>3dcg{HH|US+Pi(M7-n93d z^1q;$KSp2fc>FuUvFFzg>+}qwywPj42e$R^*mR5NDVh*{Gv@VLHuhDts&nWx2e+tx>nZWg0`Hi1zkQ`I z1itog>-&&3FRdfdg*0YCKbN8~Mt@d&-mZQB22LKTg}a;iA;}^LO{N#=HeLC-r(}iSzEg zoYheG5#>V>wXDf_gcQrV7q*tv!1k{UAC_4Q@rcoN1CxY3>irdvyOGqVbu# zt$&_wl8x5s#!!G$CLEdMkx9Nh&gkhlc~f)O#j~iEuIFMxicg{{)@RRzK0k)a;Z&#h z9z(*OAEQh-s=lG^T9Btq^5t=4l1HO8y9EcDU!4kcIDhzfuj13nEO?-2$jRBw6>gk-z1A@dCDaJ zI~>F3m}N?RjXW=UZVAl zX|Jh6^F>1w$N-wPe`GsZ;!e^BDrSkQWzS*{Db{|{goQ(DUslm~-9*)~D|2RsGo@gY zo`bY|sCv`$6&?`9%M9nm&?@=P_YJ1!jP4#2y)P)!{#j7J<$W{+{V-*3@Seo?6nowo zYsP-A^hiLf`c7TH`eTmFa4rm8l)gIr8+EYZ_fpU1NixHU^4nhSsa!$3)<|+Z;E|d3 z4JWL;G;>q{Et2QIxcA_V%=o){9zRm(+heL?W5KR_@EjMPS4ID-3Mpq7)v+u7C|}Cj zA8sPC{C?&ixUMdH0onNPNr4|tN8J&(_4FL(CPlv*?WfHv^kSH2w&jTIQ$%uRXYaFJXe)9mgLTr$tQK&6#V(`J)=YhJ!t z^n;%nGsa`N+0~Ozw(N;;ZDu%rOY)T)9g|j^y07O;D)MWw;rqq!_*yPtQ*lpy)zl$X zKi13)pSaWXtmSTTD(4BKs2{Hu%lln1Y`&0ky||}RAN&0+A8T@|UgleapH{tcYE0mp zwrz)3rk$M&)$JQtpB`0WrC1yHKmiHH7uARHzgP~C{mV{)>=ejOf$S8>PJ#bH3V;ZB z#XouZ#6CuJ+w>+oGmF|{cENgFI=k6uGr(F9E?Cdc;x-3rm z`aDg)+FP#E*C%w#oQw>cJGFaqg+G4_#b%t=<}kS2MweM{cUfIlH!NCbci7+&u-5D_ zxiof^VNb7)Vi9E=^VX34>tH)|*YcvVV?p`z+(N_p(1Nc!GpYE9x3QI{VX=uTdjbIOTMgb_5djXtpS^;q=V5K1*oV zRO8$Jl|eA@_X~52m-3?v)5=MjnLZqu61L) zmj=G9y_@eTP$jk3qxE;v&R#7m_w1WQ(L3ubE_vh-wRp((E^YFrQeD0_4BFcMf@j?9 zrn&&&e-rLhX5>!w%(T-xPqe>45jPB5VvZd0Tq)W4ev8OdssaD~+H=5rg756PdiaoM zxy2?Jlt0z8C!>DEc?ic=SkTx6;Us(-_h4{rDit+p<=L-bf9ZXXLAii$P~h^?u@L^d zxt}wlI$ZEnwQgUef$%M(^4SW;rBYK))|(dzeBb5$!_>gHt~T}Djo4JrMdNWN(cuDT z{LidP@YAYSPF)^2@4?peiFA|0qh#iskLcgp%~*81&L?L4zu$j>pX!C`y6TvB{r^F# zt}2_V2CM-Xuga+sE8i*aC{HWXm8(GkV7RiE(hchXmR4$%kxIGZ6RZSyUa?QHUNKiO zTG3C@M$u4FUSUwg!itICar5Dg0fJVNcvWKTY5_Bk*<=?kPegfl)9vKq@|=qK?gu4`6#(BIVagC zSudF@87=82X(MSUDK9ZdVkK%&0(c_6EIuUOEM6!c59>jcIMlt>ZaG(U~MoIjbL z!tcOu%rDQ^@gwodhKV4u^CMINVkebPPD$77}C( zINW9uR17#Ak}=?L8%dBc;BXsA5Ha9z>q*cs;Bf0m5Ha9zYe~>B;Bae5P%z+dNWg%@ zAprvpw~_<_0}i)>1o;9Ex10q10uHx~1oZ+AhqMbg+!7MB3pm_j5|j%#+#(W$3pgB- zE#PnqNRTbyaPvt}E#PqTNRTYxa7eR&!_6TCJEwE_+|l?1HxoBH#pwR%kaCYyZSP`uIervri;2Whv}^D%kUj)JBP8WZ5+m?ZpmRS5;TxFpP$PuT5ojC0~YLNmbD|!78dN z_?qx4sLT5@d`3RNCt!X#HPZ*na%=O;s-Xww!<11o9k47mywd7Y9EOn(@U>y&1AG}u z&2+r7hLfOXx?NcrMnJ%qDWPV%Tv=_3tC276ZtZEB~v8b7@Ru&t+S#9k_>5?P55v=`neJ6q8?Bn@TxDT+jAVc>Q&i1#tg_k^ zQ5WVgh1K!C3_o7Ybg8l)#i#}NG6mI4cPgt*0X5T^%E~ZW0lrK=HPeyGY7?hsx=~q~ zylSQsm6ge(&dp(RtCcn*@YQOjYm}8yshN&Z7KW!(D|{I~BLU!R zBUeF}C~M;m3U(Y5SjnElz$lf&h}BF_DC^0Y_5XbtX8nJkQwh~f7bxpN0yWbC%E~aS z|NAmLr9T8C`O&Rm#{b*>NBXIrs4lAZsn)7ys)nk%samS4sw}EJkSt%64?qDRUAa;@ zRXIr6S=mfkNoiEZC>4s2io1$aid~9jibE%lRT1~m+Xy;B9pr7~&E<{c_2RjBwRk0Y1$e4mdYxGZ_5K6M6sFthW7c1t0v%5uvwrFnrstW( z%)c+0!t_0}P(1%WWD3*!^x=AwDNO&yp@IKID|1Wg4W zj)xT$IGj@riVD7`MUn~}Gdl^A3LMTxf}#S4Ye|Bn0*7lsf}jG2LwX7v4(TayxTYlN zDfn=F|0X2JDfnr@-MFszFY{_i`JMpr^nwt51TO0*9+df|vq_t4o5G z0*9+Zf|LS>t4)HC0*6CN3LLH`2}%ka4#_BRxauUxC~&xHB&aBGxT++GDEM%E|0*O% zC~&ySBq%8Oa6JD?BnT*QxQZm`Cvdn5B&a8Fxbh^3CvZ5VoxtJBlAxWy;gE6yhbv8j zasr1#vI!iHCP6lV!y(lK4wpcJY66EVNrGqshby55)db(-Qk(?Y1ddrT5>yj7oRtL4 z1P*5*K{SEGnMqJh;BY1qL=${CzQ2(K%>*Bghb0p@oSp>91P-TDgJOd3WoXqPnczzy z!355cMM)4$;BZAq&`aQOg-K9L;BZJRfx{J2gII#^X$z8|l)y1VIte};AL}Ie9-5B? 
zodh2<9u`U9aCu1(N#JmKNYF^&aJfm4NZ@d>j6wp3LkbDLQHdr&A%SBSMS?y8hl?aZ z9f8B;B0(F0!$puFj=b zNFZ=HB!R%;{78^M@a3=o0*CuXf&c=C`$~fR0f+lSg8Bi6`%HrN0f+lUg7g80`$&TF z0f+lQg7g80dryM$0f&1>g7AS4$M<_ng6@G2$Mbtbg6e?}$M<_pg6M${$Mbtdg608- zdr5-g0f&1*g5Cj#drpGf0f$3s2ORDx32Fx%?g}nxjs#r;4tJIWSpyDth6GUq4tJUaMFS3ZiUd6a4u{kXINS*m)C@S>aT25q zI9vt^N(LP67zsiK9PTIyItCo>2ni|%9PTg)A_g4p5D6Lv9PS_q5(XUZ05kp<_}%g& z1_~v-v9R&K?l-Vta@9Ydi;5OPu^X%|qt;?~m~5bX=FnR;PMzIuvg?cnn_X{qqxocS zbBb6*^U9b{*^B&1if!m?$cWW`rih+$L)l`lsmr0UWe1HU=mSmfhh26GX!cOdzo)SM zW3N4*W_i|*MK+v&+tkD#^Khbz#uokUh?_mL7n6R0nP$FoQ5mt0pUz9+(K<;`!#PbF z(2cJ+8%L}Z!yMEqe{T+|OA8W(7N@K#JM^Ca* zmSzXuWljKOuM_hkn)P#W$TW_#ywiiE=**kfrRG ziubCW9<+Gc%0@}U((p3jeG*!HNuRPk;Ch0WPw?_-UY=c`HOZTzEPB0ufAISjI7SHO zrv*-sa6P05*>rPrx^9uq!w0`OMi1<8_Ur4-Zce+g)r`_eW>v2u-^7%1T@BmT4i-L$}oozb)+krguM!%wViVJS;Qc_AMUYBLO zB>m{d*Zgafqj~hovkAl`>Jzp0PR&WpNj2TavEu59b23`8Y{b2jYxs28sU^dU?yZeP&JoOJoeofgh{(2{PywA3(jm4mSKN~OQQ^orYIuxj;C#c55T z_2kyO?Xcdc-C@;O9WFc6hN#rN7(R$aRQkBzKmV0KtMbavwpU#&zI5T$0Gh>qMLz_F zJq3}1<-e&r#P%}ewjRL$L{|y-?1v$uE?}w!((~Y&xX^Y^Q(;oNVt2v;j-b3 z|KQi9PVH!#4WB)Z ztXfp&@$CL8^G`KB=6*%7b}V0fXlkCYQ9r)VX7?BDV1DA-AoYgxV?TQwi`f3x`*A#D z{&}?rdx6=QodW;F6!_8lIqEGh`n_u0(e9FxwGMTo5_T=RUInmF>xBghg||#&{a$3% zUJ_ELT^LpUP&W^I9INKS+OhjFUoT;PN%yuBvy(k>iGAXRjH^$nrk8lw-oG!kztDH} zsoBYt^8NbO{l?YzOuN!Iy&mvhf9C#yH!luP^zZ9=JLvJs&S1xe*R~URyczJ`a6D`{ zUVAni)}B2t8x9*jd)`bt?|CS1_*{_ju3y zSA5b0Ve4@Xf zVvOtpK=v;?1+r5hI|Z^+AUg%JQ{dm70z>O3*TCOB*Bw{_)?7`FNRIG>O|;}H0P}-| zx(Q~XZaXaeY;d?>>1UT+s|W3RBWV4APPo;mb7~D*tS<6#_#rgz2mI4QS*?7Wkw6>oXsYVnXpt!hpQ*3&p(pUtUVjf*(D85L--UMJ{t~1A-ZWqL;|*d zL$Tq!IB|Ii6F%%mKbG&!FADrC&#T?PKdmH$Z=${NX*F{&#OD>o^6YtWIDh;G8HdA5 z$M%1GH0yibY|S<{J^mx`$M~k!!WylvZiwposY|uU`MD`uuLO6{Fs)~R^FXdv$#tlE z`xOsz&dp7&efL|wv4geLM%yG}U&gNE!=H>Qy z!=X6gc)a0wIN_w}yNutZwm8`S{2vmD9Y1+@uuCSSz1dQ|{q{8m_P8YWz{gQQb^Q#+Yj(XCc9-LGX`t`DbhUDsh^3tn`w^Fg_%vPIK z3-3%^I;+!Vae{uhOK-9|w2argj`N3*qNq{G0pF8QKb&Xv-4)Utw-m4P|< zG`OE_$J zEaGs4|GtMxKMI#_$S2h)e&Pi+|I^6|>k>vTR#r1C=#!aQUOvssd)OHIWSUlk)a-wn zZ#=5c0z>U(6Wh|-KB@srl~w7QB{o_cs_m)mldJvtTL@T!1--8{xg1&zEZPF=t(Y`+ zv&Ln#*mY)`nRzkca+>k$uXimHP}TuvzJ;g`v1R{k#D9gsM-t}f*LgJgWp0JwsraaK=1jSD^KG1f8}NE{t|tQp4Ol-|Fqx<=o+dowjTx zN*+u0OXf(AD^r!t;am5!{MG!Sg6qP9BBewmz9HT&o+y0-U*1;~#}Z$N^Tb+WIN>A` zh)~fh(J|3tQId42q?dfBe6qX~tOk&WCzsUrzvsVOMoUxtXZUybui;<7pU*qOTgV&0 zw<|5YhSH|8oYLbwo#YY3E*fH>RxVfcQPfowQHbTY6i*ZfRF_p7Rijmjs?w?mRGua!=(GwQ82lfwxaVX^7mus)5(rh{1pTC+BK2)5EV z9Bvax2E!ssHj5L~$6!HyokgQ{IvrZKgE;23ve~s(oxx^tIbHD1$7!&`?X7{g-XPGT zHS+W1;aBt58a1#Wj?rMZIrJ`je`*~)46kbgJRy8JM9*uTVv5U>}H1p z&a1OJ%o?55ZY6RcD}&puGnwFxlEG@RX!K^Q+32=Izz#dqHfB44!_qqqa5k;Z;dW_U z5NW#&v`&mxgGF!C8SPfdZ4?#=B|G%+Uf!WMLDWr7t<9p>8?>-wluK(h>*GG+cSAqx*&}lcC7|h05)r^HjOoC zF1FG+9WK2Vv~09s<<=YEb(s;O1W5~`ZqA4WI9@Q*!wf%{(PV~hw85%(o6L5z$!^wb zTqal87;NRV>Wyxv-RiJ|38a`-=LB%O^){2%?Fw7twX#@kPOH_R(HY!2y~$*-n%riG z&8~y!+N{cLI9>*gQLDEZ^k#=%XE%X*f(Bj|oAi3O#Rw^qV-B)1SnPV8*=;tO?Itrc z$_7YErxPqpPMzLnBx-xDte{7ywHuidtF^%LTd>z?aBD%a&SkPk;CLCJ5J2A9OjeCi z2RaQFD34mB$p8X(VCR(M95PtoP`%X+iRQAK&3cQDDKr`wXP6+vExD`VGiuxhyHjg{ zw~TI!!C`UO+zzcBGFPY7*tD9w8@yIJvr(hb>g@(26jiIq4DFCX>vVurw#DI&PW4)8 zbrw661SmIl$SOO?)@a>2heqdiKo2Btq1Q@dH#qcgAmpUp1oAdIjRSL9|pI^;e`7or~?iQ){`^q;8iKSr+1h@u)^ksX(M(E z6g8*TtWkW!R$8rAr_np@W{5bn-B7&s4x!)*k$LA${P_p-jg7uE_ES<^XeIy+n(h-;Y45UK{MDI=_UW%k?RwbFxp6I54_ zA10&AsI%FjMza}gMgy#sZR3@}R$4n$4myY^;I2DsNW8c0&OQk&KSlIK=)n70%fv`~TBT%bl|1KD%8)(Z87(_k_=tPZQ)nIBiH z2EE;FHW(q%Y<9>LmlLj+xd^kv4Yh$mJP@DJ1=Xb01a%n1MGNKAX4BhXz^j4TJX)7s zdKFtaZBD4opvww(GRThD^mX5M!0`<8Y|Q} z2DiPtG;CO81aB6f8Gdz3OI&IJjKskfE11}H!{uNiu7{h*?66wNt=J0c4mVUJCbz=~ 
zq1cQrIIBT#aO#Z~o81z6)N5sdTGayOOY3kz9R)X|5ek=01BKS2wF|#`tzgAPjm`oS zSm1D*4$89@N-j(>fcDR7lUBu6Hpnul1`Q7Abvl^(0!e=oCN_y2ajn zG-w@A3A&)_a~Lfycpw8!qzx))E40668|f`M&@(q0pvi`6(rR==69|`J*22AL0^Kf4 z>_&V>m)ikrU}`k5s6A8-2Ilf%^-HK^ZF+;o88cjp%|QLs?KHU{e688!azZl#bu5#m zc1B}1vJbX0+1xrPfes5S0__5wFpJr&vp}x`3OLMFQbu{LjBrmtyJxlOpvr|7#0`f) zRbbOY*Tf<`iTV>zg*tRlf5WnlPK(B@b!v4wX0cAESyCpmz#4pT!|SbjgWX|< ze1I#3TZ-w2!2Kbr@3ms4P(tki3F(0D02B^rk)fKfnPJ4Dk>D}`Ma`twx?vmxg~|#| zCrGhEg#mRsv=L@~K5zR7i499!>kJkrwEQ4w-di&F-|0P~ zu#6hB3xt0mgQ0DAI*o8@w~3if3fFDd8uQkZ-|}N8wEemx`X9Rore9ZXn!a(71snFFc=s`JNi+Z3TLJ9d12juUQAZ zS?FrOVzN;53{bU0&&Uq*LWQZy<^KKtW0UrOdBku&TAzz)6+2VEw#`yX9XRlBHFE20Iu;q%_W1vACDZ>y zBWC_T&%eB%Xb(8qzyD?m4DXd(>#qcC*w2TJ7PnCc@0j3GIXomb!K-nT!EJ;mn`S+{ zNwhl9gq<%d3$d6v=Zcwk^{YR#5JA@e@rSP)o;~=UN)gQ{HfH{Nszv25X9jPS&=-cO zj*ee7u|$Ezo#V&MPocU!Jso>~Xdtc3Gd^L=e1blxACr5+{1gv0X4Q^8t3Oa-59$O} z?*E=za^`va$J%g}Bj=$q?5NYCv5LOslLt0fc9S*3=RhIo2DJk0xEnJ%Mo zIPbmTz4xeM>fElp{S!L_%a1+JU*#7k9LgJx$9sG_8xH2B-t!VHrvHMcy?pvVZer43 zC0O?U|5*zBm_t~1(YK9xNBRqG^%|d^u*rkhH)xY~P*yekSdkr`l?5NAtd)2Yb~kDK zq|f+G9`eDG2?v4yIOsA3M}W5YK;* zR;0mInKIv&g#Od^=P0VYn^3?)H#(4hIBmk=ysll>SEm&l*K+x^O>Yva9dB5zSi+&S z-kr;xzgT!h0yR4S-I?7#CKRdGukVM|4-#60xhlNrw*6Mbwh{MA3}0uO&@yM6rX z29xXX=;v1&PN=Z@8@26Hp|sqRPiLUrJfM0i7q(o#Uq;lHkL;} zCdRH(6bho*a7uPgI57A%NryZ&)$gHD((M}$98xBqKW)3(^>b2kAl)^ecFol~YC6$c z@!rBhfpmC@hlAeR1L@F>+x5hPXA~R$6wUGN^qWw6N5lP|Mz@3Ld$QPW&uitPH}S&u z4DTqU51zPi%Kg%xeyeNLX+q5yTDHPP%9LUB)P7&9zW5MCSALSmJw!~v5*+bgN>(E2 zxqcfC*7iTHSBs{4&WBE_d8$Z~OwZe}u-S|8H3Wr`NsLRvTRMHnqOUmDh!y zO6VLTYu_uiCxR|0E?GZfpNKBLvq_6SN%qT>?UsVbe|)AQSo5cCAuHa0#EZAU}p=2H6g z25Z*|pC?i5d1F@ZIvSC2gUZz<;P{jm1kGNTYDkkef?lC?sd*P4j#xE{IzK3%V(P?? zRPPz%iT>SQQtyI8JIxLZrCI*n@>zX|Cks8DpPb^=8aU5mu6bitzc!Koz8TW!R?2p= zQy@D9{;#CKkKRPHn(=gvQ#BfQNjxK|v7VY9BI;3o(Q0bi^r1_h6q)C-ObY5e>-9L# zlWhU>KMXxWmCAYh>y(Zeo(;#FJnXvb6g8yTvUyL|Wl*{nM*phCHhNm$fA#F<+i?`n zQptE{@Db0VJI~H9?wCQH&s`zx?Y6^|YW(u??FX&!NWWa_(X&&A=kj5Hvwi9^s=;B2 z>|C2g)T`LQ7j5(G^PGLqr-H8LdQZ^Ay$hTRS9=N)#}=P7&!Z|_J72W;*2A7P%}UM) z9JqpdkXN0aqf-WTB4+!#N4rjWF4PxhG}(~hK^m^bHd5L&N_}t2GLPX(rwwP?Eb?5u zTJ7NRn0-_QOZCCqKOLi16#6v#NvT8B+r9O^tk`Z?9R@#n*_R6~#pG?Ec9o$GuY?)kq{NDZ4c46@eo&1h?UMAiiy6MBl zgy&I97rTe-@oWeQY}9qyAnMVC$N_ho|L!@MS3hvpkUdoITp_B7NrODoPLvFq-u!op z*Xd4Tp_JpE-kSIPufq;|LhmMhigoPuSf#aNgil6$CRhs1kL$C?b9Qx|cS|6g#@A1s zfBpHGr%>Kr{@+R;@^o$Mpyq>izX2(^|YFTZ3l~;{06Ae3C>PC}8qjFly z(4B|&+~Gfa6g{a~pPLm{2Q%Y;0spa|vZn03#04AKzw8vqPJ#c8DKNZ8a=pKLNX33_ z4D;aiI=5YCg;}i@m^f^MuLK+}4NPxzxZz6`G)$87MonObNeupUm{bH(@z4I9S_MDp z?h(_}wdpHMY*5jLzV0Rqn@7-dpKpD&;5Sfqzu3NNxqtI#0lW&3A8P#wWV$ zoy2SP#q`jJwj_P8PZS&fc7vX_el|xyS6ElI@$I5vnc**byXk$;Eg94ulcCS~alFj% z!;C8mEFTq2o77dl3I>IE`CosbeBe&vLs#ElufnygkgM2>jHvA4!2ia(`G0avKf0Aw z`j4)(Z-0n4u%rPc*6+S@FNM04+QvF~a~Jk;E6aPLY%?DW-Q2~)wcj1MpkLoV@*XeG zhU2$>ZY|T!5$!#LUiGqedf$@`$1kDLrir^pw5P}wSEqD;-;=dZV(n|?+NY)7{@_{r zYv=NfE&KLII3vGzuXb#~SWo|r=PcKjPiDhU>N9lNyrT=oQeHkYeAfO{>Q~8|<&(KQ z<>h_9#@1R)zj{P9Gja}Z){Vw=D{0>fXWFiy$1UT@D{XHMU-j1gdtdcBoKE<>!R53% zpsLl{O)iVk0blGeUkVukoT$F_FXYc6s&}*2v0BI)leu8alNJ5wK}%W`?_0%>_U2Y5 zusn;bJ!`=7te=b6{@3*qKK`%w|KlE>|G00o$3NMJ=y!NiCW(_mtp8WYn*Y^)Kc9!A zdaYjntFL-%-o8<8GP#288$o&% zy~Sd&!#6T6^c?j2mZdDB0>^$n&R}8qw|#cx^rC4J!qn6FW|?4y*Dl;feOlvjnHY6D)`?XIRhkjvK!uY#Q~|K(tj;bGOx>*nMTR zr{5UYkg&-i9{lwFTiWy}{`|KEV|TDu{;}|*Uz>ZnfqD(-&TBn)GPV5O)np7 zeUgol7n6R0nPxuqgjS=`X|;x*<{OVL>Nldc{I__z(nnZ(_`p(T{4eyY<|ij4BgLhK zhhX!+{~O*jx$$3(Fkt-(w4M=svJT(9n&G2(Gkoa*U*}oO@Qnz3hGo+_p~b@^4CRV; zsHFo$C4i}H{_L-Z{aOF%g=<>(CQ9f%x0)Yae*P)-Im%G~lRT0>*VP!8^5O**c05CM zd0QZTZd;zkdq3Z#So>H98PWKWl0N^6zP!#AkZGTMdv80*x9?Q?(2h5gGh{StzxDl^ 
z65?7JRQ&zLiw?W^nf4nB*3Fl1IP~PpDJ;{zVutNQ)G;;PFsNYf_%jii;V#?E!S3&Q0>&Yb5dVziz2JFSoW{(=7-6t0FO@ zNk5l0F8ZHWvHt3UviW~a3jC3+ONW-KP_Yv=chsKQl5oo6v~ zh*1Obko~jS`Ey*mv#D8ppIqQay9eekw#eZ}vwm#9b|=eokFU6}>a)2HHEW;F9>?J` z_y2;Yy?pw=t;yy_{9on&apM%&ESwd4PvQ+Yi9JE78O1@9g4p? z;*DJrCzeS*_p{}XooH?UzWwPEzkeE;KjsqMbR#2uqfPCZ+~CjeVRqd%SdavkA%IVz zVI3T|0~QZ~Zj=L7RnQuZZmUCs7XOkV~eUcQb`4xcGxHv`BTx>r0`7w|I9{1_gm9yPQu^A?q@Fo zA73xQiQpn4$G&t?a^sS_vHGx z=U#Q7oLPxfo-Fr+O}rAR97Ujxx91L}4iUA^A&9JhrT}LBmp;K?5uYlc%jEtTagiYDqyZaKtIy!kt6Mj=-fR4rI%=_g z8#du7b@}k4dOI3QY1Y2(2gmk|<)0{5;Y(#VyuD7b_G713zgj3tPB$QV+m;Ov$_!`U zi7sDShYRVWaT8yR3;aT{;k?_Y*pt0#DOA%BOPd<9@D&HtY*fxY#nYt9mMeFDgp))U-%?yFpXkx27I?hj^1{q;wh9Zj z3YAZ!9(af~%`Puw&zqzuwQpyCuutqwJsNy*A;nfAjsi6&=)m6H%HTeEaiQXtQ@v03s->#2s;;WKsv@krS3+e{ zX;cMOc~p_AP*s3Rt`e#Il%JGul+Tn8l(&>umFJZwl}D8OlxfPX%Js^X%EijL%IV69 z%F)W9$`oZkWe;U1WgDea*<9IBSzB3ESzbviizyAtBFg;ASY?DVM5$Ivl>)^##RtVJ zSoQCo;)dd~;;iDh;*esGVy9xWVy$AiVxeNTVya@iVx(fQA_-Or?560TNL1JrO%?SO zH5HW=Wfchui$bS}SHvlz6*(0_3Z+7<;K{$p-^pLdAIa~?ugfpWPs@+V56IKy+vOYO ztL01O^W`&Py~45b;qp}Z0C_KY7kN9mTi#OMSYB6NU0zXMT3$kKl56Az<$2_h@=$qz zTrL;M{bZj6(Sjm^{DPcl4y)*m}sD=zo?U_jmRl#E@~*MEvhOiFQP@oLQj4S_f$*E~ zgYcE`iSVBAhVZiRtnj$-5UdZmQ@B~UR=8ZaP&k|ag8ztrhYtmckcoQ8Udx`y?#iyq zF33*G4$Jn)cE~o$R>>C2=E$bX#>s}u2Fm)$y30DqTFF|<8p&$Qs>sU9O3F;KqO$z5 z7+FqPpiC|kO20|pOJ7JIN^eQ8NY6?$qz9xP=~n4F>2m1;=}hS)>1gQ?X_BFP75f2q7i~ESXira}@;^yK8;+o=0;?m;c zVuQG_I8GcT4i%GPshAJzW4c8cP8f*7L>v)Cgc2md7kv@E6+IK(7u^tD5}g(ug=czv39svX zqDL2hZ}7GvBh8OT*KZslpn?T=h1<5v2L z-1Epij@*^V^=I4)zaw`baswH+e0AjNg$y)2fLyfQU@5a*|2lF9A-526Q9$+3X-eH^ zm{Ro^mu|$k6p9d44sGXS+yoTqg#2imVBC@|7`MbzjiQ-$5qCuB(`+5xnsj24U!fp(_SAhNc)AVJLyY zh#?Ar|1AuAF>J=L9s@p+e=qE7i=iBbcnskfq!1OpF2%r^`VG3>yw5Cb}yC}u47jl%F729$iF zn0DCL27?Vl3k;Pol)+FELlF%5FhpPwB8W!GAc}s5eK<hvI#$gzZVF-qS7?Lmyz|ajtXAC&4qpD+H zMGOfTG#F435JmB^&ksQ)&WFg?*mn=ZT?`o*aIQqo#=bEaa3mrdV_#hixiAD^z-1y* zgnYU18FJx^&V|pA>ki&`6~h$_$1v>1kd9#+1{{}MQ?YL%hLIRXVCaAW-36jtiP+Z! 
z1CDO4de~P5Ln#b7F!&>gc!dEM=m>oFh-H|>WxP{Oe z3#?|g3!o-GKWbOwUNcM_>aoP3c*Z3m#*SPwa?yG7?q#;~qN~V*nuk27RnK#U;q!Dt zu847SpF%EbqjIBWBX?1TiAC*j>`k>(PpzVRk zMdyk^sT32Bm_WvjMzM|7qzI-wb8bmaidTiqEMS3g}OjdP-z1{7`Z9PZH(Ni z$c;p94#tf{v5NeJwx1(+J91YccQ|tUBexfFQ5+&s%13GtBVgQIb&(6r1Z>lc8_^B9 z$R;8`vmJf|xtEYT3Aw0$5nc;zqmmQu$86_xAQz=@*j8pcY&mj~eHhBe&_@gtx(~T+ zkc;_Hw2e9kInWj5n8PqRrXm+zMaV;DI|OwgLr~m;QT_&_6brU6dxKHTgPt(kL8u!M zgio;rNer(f z7*~D(xtox?61gZoGL(-p)Ps|vkfo^1NJ}#|l4*=fpsOR$8Hogj;iEJ1>oMEBvB-5H zH=1$%uOjy-at|Xn4Y_NPI~uvD1p3FJZMlFi@o&mFq*Qs+eqbyNolQovrkyVwIlO@Q^ zGL5W&ELIjS3z8{hBI$SO2kA@cBjqvWei-p@QLa@kQ_fe;fbsq)o2sZ`3Be387BJd@m)+>l(7oR%Dw>?6!X z4>3>d5pNY87abI(i-(Al#J$B`#BIe+aWnpN{saC^{$>6d{xSZ3ej0xZe=UC*e?IYs zcuL$OekU#xr-&oOUScP)iC9f6A?6a(i1EY-7>BPHtq?5~%@R$9QTR|%vZ#-!tEipG z1*7l=qMD*gqSB(`Fa|FyiW5bNLPaEu!1=;2!neX_!uv4(z9c*?JSyBL+y$fWHNvIB zdBW+!3Gj4bkZ^#or?8W-H9T8rA}lD(EewX|0Dgj3g2#e8f)j#6g584c@HAnOU^YBO zND*`sxZ(LhLqRP;WkDH134sxwSLB1I76Ag8K*0aXe@CPe{fUl5B4HyM6LpBHL^;rM z&=3WPaM2@i4N!R~B`zk`i{r(4#gXD1VzpQzx-GgYUM^lBo+LUaUMD^)z9qgQ2otmy z^c74Ij1{beCmZL9&*J*x(c+on4DkVppZKHrmH07#qTr+GI}s}+gb_lmWS3;KWQ}C0 zWS(TYWP)U*WRPTlq^G2lq_xB@X(FjBsU|5ep(GZGR#H%sTM{7&mMA5J=!57b5kx2m zk#wjuS=vY1RoYJKk~WvFm#&a5l+Kb)mX490lOC7emR^OYIBn%lc{6!^c@21mQ%YV; zu9wHl^THFH9CEc>BIn6I%ihSI%I?X2mtB;dk{yxlmF<*mlC4(il!cUel)2!Ulu9X9 z`YS#uUc(cqyNc_I3yPD9!>W&}SE|SGEbW@=yy}GNkZQN8o2orLF>9e}sH&x^tSSS~ z%Z#cbs(hIktqOyuW-^sP`ITv)iGsxu2vCoj2pf)kVJR3;|Ah#vjC}$OsFO&9K0v-u z)ITFaPhubHfDoY*un&hBg9jZks4qpt6h-@D@?*eZ<|~YR(HAkC!>}L2P7EtBpxz7- zjrzMpbRSH1$IuExO9c7KVjt>m5&2M;n24^1NgS7aIIw&@G3kv&UVQj1Orqfc5gmek zK^XiIM4{mTkv|&w^1Z?2N(^%_pgtH;unhJMMNn`#_Te#ER2NL{!DM?3_)-c!M?Ru9 zhVmG2xI}KmKGYv1BGE93h{TB%*$(fkhoL40oMw?a?8A8! zsYbqBpD}#IfO>#LuE*GS3WE(pQw$Ua)T<qqnqv042>}85$H}~;33c+z<~2wyAJzsl4#~5p9bHg znqt@&hyfp71YdR$TpJY0f%X;tfZ;Xc)TxuAR^;&PQ(|*K74l;!tpGG zMsh@uq-w*`G78vkl8mnO+&T<33R}JG3X~4zLgexc$PF7P+w9gcbkHaT2 zV$0xOhzzce0Y}g@2p@oNFEg&7%=qS3pl2Y20$-5=AE2m%4p5ZDAVwg6g8|n*@+sJt zgdrM%4DXZSdt7<~ky2cJNO5_Ud_|<>HHNDg=3&6;A}N7=xe&MJcfQ4dSftQAQ6-}i~*P35<287{uskO47dyydxCt$aG1sRA)gh; z%z6#`PGeYvVKN3>G^{G*v#i4~9Rt2CEcoofxG)4)Mf-yB9UqK~a4><$Ae;n2Yq4)O z23)d&aBc>b$7DQ)2n-SoLIi;aF|5Hb2?MT@0_!7R0M4oaobLh8F?kEaY796Z0`S=b z@a-Fbdtm{Uut^yVxGV)&u&*eFf*5eo2#_NmnSo&?27G-azA&;GCM#gT1%kXS734p23(eu6R;0gNJ^ZyN}Pg9d?E#|!4$X* zDkh=>6gYF#_%hYZgvEcs!geU=N@Fn8MNsi1f(kf6%Hvw99Il?q;Y%r(7nzj9)kWD6 z$X7NBLpTC~3jrUOLB0|DLNMUEmG=?(c)0E0{f>QSFf2w;2Jb6_^RLVSOm4)0?|{;~ zkgqf@XQgpPR=OxN|6kz0)=ybme1YE-HnM;J(J3%6e{%Cb`Mw6`XrbAbpb!L_P@p_! 
z21ON6@H2wWpw?vrc_TAOs2H?pZdPz;YZlQQEoLSp^Nmg0N)@bA6`v{1tkB`HF|Vm< zew~)@YERI=N*Hy|w*_^T1)2NR8R08-wSQ0Ha2|WZ;nOV74JX#HZy6K*e`zKrH;_N` z|CsZm%eN`CZwx!B_E*Owd3R)@&jdTG?vK+sbuhV(M{D{u?Mhu00AtvRKlL+>X}dJq~+ zC+@xz-ShDos&1wDdGBV$(8B)q+hrq!w9IXfzrMzwu5vgyzPK`)?mKMFo4Qp3>AL;X zroJwtq>C%J?vO6aLoYnD!rJd%NG88idO@*2Z!Y?_rF_`S^7dd#x$(Oa%XWS)1QI4WndfJZk;v=t~hN1Dl(-TH=naXf?;bxf%~GB}uqWJABM zr4BaS^7#XWd7(G_AbPSYwEmXQ52%7ez7{X?q%%8Z%ddR*nAdgd+WHw^i_|Hm# z9~DsL+$SD=ygs0v=1!TA;Vr2UL7uaZr?;lcBrl6Ebl66fEOd9|grQn0e+zy4U8@s4 z)!VmtH*a@4PvYlm#^Rr&I6Jmt0bNMF!or7vqd>CbdWx6B*w@!C^QpH(yLGSL9>>ZS18#ORdHgSI?_Fc~6l!b8>xs2W^!D&h zW-J&|zJy0|t<|Xfz|R|b=+?eo5DvDdey=le@yy{8etbi2;`lTh_G{kv5!$i?YFq71ahKy=P%GcXJC8eDb|YlN24U^k#D zm%O)^MO5_`fdcqve{Pkm95AH95X(#I>ZA{6b+7pJz4bk>wD=T82bZk!_{doay&zm$ zae8hkJ+jTT(T`2vDb~LFmILcX4hW_}!>`Ptb)lK|xpLe(Hf#Sys&vCs?-$1NGI`yO ztJ8|YV8EU_vM*SX|Zc~eA>Cp=kZJad$&6&Km+1vO*6Ds^G`Ag?`1|LVR@zc_L9J?XE% zMBFDD(?W44`%A=MrQiRz{Ew=)(%04R7w+83rbPYgOBA0!m!;C4i%YaAG@ot6llGZE z&)xpHtS48QCIwm)nx8pOdvLmGi26%eD!x+F69srkFKMe4nz&W-x~Tbp7+n$24-c4(e|egiu%bAR*QHNgHY)xLPu-=YRS z@P^}0vpo0ss*RT9>9730xn?kHfBqF8k1p7*W|O%OR?*ci z9qhe$lZ+Yv^F{0ZWQ!z)0kVJDDUh84*(s2n0@*3>ADRO2m9*{eeI*Ur@LC;6`kAdx z4SXdHa_BD5tk%2C2A9Tc^R9G|?~WaHF#u5)gJ}?%Kf8>~GRc#^k-Pk9bI0_==6&B# z!LNFyybM#&Z0dN{R{ON6kAU7`4ec;AXD&M7;oSj@efv_k=63-5&aaMzk-fth`-NYQ zH~2s7oe4Zu+uQh$+2NSyp(vG>3wv|VzV*BJeLwI2|9kK6Z{NFn&iC2tJbUfOK6|gV_S(M3 zX$~$Q!F};a_<Y zCUu>9SazLyon!BvZ?TZ;?>s(AKGFAFC z$LS@A^0%35y{qm=fK5U37_63PAQiMaWXeLNfAXm}I{t&v>(;NN2PiU3y}r;a*?Sm} zzD;wn${F0#hpP%~(E8{-3=#UCJ{F{FjhOzZ{NP-aK>1vhw&}K zF!K3k96pj2J3rj1do*iKWM_Iy^wLWm=>MGS1f#qIzp&z&i6$J(u)I8IygJRl}(6`|c|y4=Z?>d>h2k*4Vh z1!&{_?eboCuaJ(^_r3 zEEjxkksWhsq$~1&sA%}fVItC%R$^N(%SEs6>+bU&;RT=70ms&yM%_8jF^yHtL#H?nVJ;dD6=tWP`$#gPUJm4F=UEA5T3hKZx2dKX@p* zD--z?`#*hlH5k>6ReW7qeh{PtZG0@YD-(R2^r>O_*I4jmapL(=mAkCB)qKC2g-@e4G4tM>l zI3?tj(3rrMobF}SL3ckdkjPFiLNUlmR(oEKgJ~a z<7X!JOy}AdvPqimF9y>SC0m

F~Ph#fT9@>%j$?+&kRG&B)6B)X`)sIcT(3f5L=f zI*d>FQi=6|s(07kZk@Zi5%>$Ay%cfgEgnvo6<01@<_F#$`gGW6nj6_h36o{=($iP?q1?B#T83G<^^_m&5x=a)*$+6BzjYp> z;|2&j>_K{mLI#BUA=S$c4^z2rJ#Cr+!#ywTobj6PSmPvD3 zVT9-Y?9w?`qm;wB-Hq=-o$Vi?F=J@>Lst_Bi_V2?e4BFd2(mGLj>mK}wQ%c7^~QDK za5-ismRvWcS$BVL*xhkmKYT2^e)zK3z+W}IZzaTE82L~bIxI3{j@eubpx5_(S4&F( zxSV!(ao90%Q8&SL-FFd4>ctNnwtCXh8x+W)jb0^F+m3A0d~2HJ>(SzEBKb9kr68$S zJNkOF(UJf(Sj_xp$o^17TYn)&FI@hA$B(V|gk+liJkM3YK_LW|Fp|3aU;ad7Iv+j} zxQ2*YAoO0rqoiY)2&<~5-;)AZh$)cA}tj}wPL4!JDtFv(aKbOCs z|BuCS;V_wQ91hFe6h~KQTDX~*aX6gLtQJN*mU;J0!NBZQ*SUdZ*SSG`nENXx&z=8@ zJkvs3R9GSdhitXD`9?_++KAqE&t6rJZe&*P&AId(Ai$pXL6i=)5@sbwWH*4ZSuX=O zysJk;bOLhpC#plUWfc!ZO}-)1m+AX{c&b9tTm##;Ee}9TSYZZ-tq$KQL_8YNDh+Lv zCF1WI1ffQ!Z?Q|(N<+(c!;9W+dk5Y;w+P7cmxiGS3nQ=L`7Z%{r9p38@I0MHABl

_*mJc?o>2B;YECmdC@~be39;J?aAQv&l{T!3W^$ebn zjP7sGM+17kkEa@xg4WyZ97`wLsEgVQcx?53P--#xWt&VU`NXBYOGAd*$dW9d-?*># z0Y^^u91}`l(82J&XZSnw2M4<@BvyYL_Q5@Q>m`jl0^|FZ^?IK zIDPi%XMi&)S6|Iv8Hq-}Z4vA-drN))pp&X16=D{9%h;7zyxhHrI1ex74FTL2_*5(Ixo_L^oTKSNVdx z@KAJB-c!AU7ZQNG$=%pP=55s3Rr=o^285!iA`Yx-m^_0g^FK~-oOLo32tE68Q4y2> z+srG&zlRo~&AgHU6~~L9_~Ap)+w*bVt+qKZWopQU*8WGB!MpebIl&)huW8)FL-}e3}(b1 zJbU#6D-^zxW?nlv4hm(>P8mJv1h~9??>6Jwp+Qi#)Szk6M{Hp=LtfG9+?Cj)LeNHA za8fpi1>*7}mtLMMl46Gn3Rl&48+-@2ykGw;wkMC-AX<7@ymhW1A0e)YMbURK(5L9>>S;P+*SrqJ*-1yw z-be?V_n<>D!s5-*FZyqR+IapiO?!~UzJs4%zgHH&V@&+ha@h^!u)dyG11TgJex@p| z{*VeZIQ}hr;^r8z-Oe?tdaVo8^v%1fnm-19Y$7^XZPCyNXjSGM81lU@ji#qdnd!$( zO>S5mU%HkJn@)hq=N8}HKrYTNdcdSJ0#LVQ`|SkfVaP}m-JZqot+U!!j)9|dF1GKL zmw*kj%Ts82)(;*DdsQR=7j^oaJ=Q3U>8WOQ%QYw4i!P=PHa!E#U-k>vc=1EY;qT+LpU@-6N=xd_Gbg?chls%jbN}eIcj+s~N)|S#Q0E_By!0z8 zH=M=1X1>EkDfsL!QaY{NG3(6YYeVcXvfibkJNFs=r+CWDlFBuCcGjokBJjlOqE?kb z`pTzpuM#t|;>HNt>+QY?}Qiii9^( z^L#q(K9{0ELwuz_iEl0(%BY~*|N4aquV@$(Ea&7YQXb-k=gvIHqS+(w)Tb6VDFMTu zp5FfBs{|Zam5tf?_mx-euf{Skc~rP--`6cZ)6-BeKm3v&Sq^6~&z?I|nf~IH88G94 z|6&hJ-(2l-`wAO}K4GC|IDN+?QDu`&8Fg3dfW$ah(U$QXjT^hFd^SP|L@ah`TAx^t zrY{k8&wbm1u3t2vwS$Vks^zKcEcEV0uLhlb=)=ST#tc$={PhaJx&9=L zpnD~tv)H2k)g?vLVdbpa)RN1n$ZFmPYJ#6YU9QibM8a$`a)Sc&`O|_SPfY&6 z;cdPT_=JIi;7zZy*J@GQ+r|6=LpQ;;wDwWGON!v5j7a-}lFL9}Ywfw^m>yO2%ExoY zB|)GdcUR}tI&{Tup2lX(e(N2*_eyd!(2Lrew)(Fr0DX~jC+A{zMzt%idMYH08ag@) z%U-VqNY9)(HJE)lIDU0|x;;Ud){`|AAl z`#VwU(}c~7CM!V&Wk^mhWsL0lDo{#6vk`PlF_Qx^`Nahr;_tueq~1x8n0TxF3N$@A zx#UUL9Y+64G1)Equ#n{bPLSTC+^CJ|aj~CVoLtjM7OMU|Y zEy(8|$NxpZC!Kv&3%0~Vw;J8np|3k&d_jn#(0zLRKZGrciH8mQHOzF=96o`fWscudJH(+`Q2Y`TaaO3xcXC7Her|YB z$xd%E$0Q(Ec4V6hrlQn^UJ>KymBm3a4`i z!1()vKqiL^+Z&bt>aAIQU`<0`G&d$cFjO#mXde#@eHonYkvjw?C71OdJ2wD?EX|A8 zbdVrYqZLxw#s&cahWW zn6;#xl{;Dv?96p&3`Y_{T)D*@OZ`-kwV)!(-8>t7Sgq0&vOfWhI<_p}O=>F2J?0;O z*gPBMThQz%NAd@-f?J$f?O5m2PvPYYWM)JpNv9M#wqUjVLz_tzJSpGFUz?`9L4?*qs! z4^q4hLqPFCuZwfBc9asUtX5l+4=Pa6)Z!@=f;vmx`l6s$fDUi%+&-*-4(LQ~aAjSa z&WPY(L0#dFs+Ir9MXzsZ5T5fRjS=C(g8EB4KwD4i&!d1mpj1zb^F%l^-1&&*hzQ$B z8244(=*JWOFpm)Ce{c8!U$SfHsHf?qucc|Ir>pIxO>x3DgvA!?8X92BcfdfniE}TL355R)AFy4rDD~~M$mKjIzAst7l63DSGa;N#)IGa z6L(`rgd#?PDz8-JdW}ASm+S4ULprla5dO|%c6td}+Eeo)k0-6y7$@u*FxI*zC;Ruf zt?e(vuVeOCuMseQM`jg)*ysP4rpm=;!u|2x8S+2P1JgB&>ACTivgnF8+QOyt^WKtk zW{G_|cDfl~Y8i0if#Bc`o2OKFNB5;g*PakFJ~S2@+B~}ck-Fo4DmK?55KJw}w4<6F zyE3v%)(?HcX;(Vi-ksj3i&-7;*`g);;VeJUtiyL?7>nnK%P*R*(mE{dhZc(Y78GxC zX9V=OBfn5N{-#OB5A5zeImh179UpdJ!e=V%Us$CEYy%1TU#@&D!+%(E$pHc>5;wBF zWT9Q}slg@3m&3ywb5w6OsL(D-5;MYt{mrBq5Bxvqfn@Ix)~WHgAf71N*n0?12oDqO zm_GQ9WweCPKgSl1Q5+m8_6`(m?zxenfupm%gQK34ql2cdgT1acJ@O$x6MH@F?nle% zcfWsce))g&{olvq{yqH8)Y$lXhX7n774hs+z8HKyOPiJ5QW`oO2-_Jf+YcPQcs)|E z!T3GK5uu4bvtaVe+K2j@2*mlkfm2)eV(p#x&6fbXh!~+KH21yLhGr&MqP&P{P3|zj z^>2;T2nh<1fvhU_r^w1egr2^~C)lMn%D~3{#1A3&LUE6NF&$CgE;Ut_U;{+kJfA0U5YsFUP4}J__*13uY7R zobTYN!pi1PmM=kg=mp!`e~CiJQ!9k5)_ehT%j?fxFXVz@FPwB7O}c<~?VFUEbwY5x z>;=`uuLR&Z{)Zcf*aZlAT+i?Fc)7SdZV#^KcYE-1aeK1;f~C7IvJ(E;y7aR_;TB=o zD=p&K@lc%b!}l9tGYhs}TMYfjzX4nx*YmqPUM?<= z+k@-*-5$JL+#c;KIfYM0|9QM}>UU0`FaO61Q2aLeul9kN<>r5M&I~!@ff*0Xc;LUo z1JkXyy}GBZJ+7RAM$50Av`+_&ZQH2pEmk(YbF2njsvq^2IHcqKBV^9Kquv5e)o7t% zxmmG8I?9+QPL7ilIbe$MRyNPQZecu6RY!Q_f+%VOZ_$1+CoG~S|MPbX1rv!@+s!Pe z^6Iy_=so%5-If^d5zIUMMInT|x%#!}{CBxPy+rV#qd+1We@ofvM*B&!?E2gMHW>fF z+h@-g{zP;tQfLr$Ui*0s&Ud-!#TCL!f0R}Lyxi;e2U}i86{A%vV$b$q{8Xe+Z*se} zyNYjAF<5`JG4Wk#1#TzVp>SvaT#Ub8aL!V>tO7lDF1c_^z;S9yLbi_trstTM8tGnp z8490LZuU7>qPOEmFKv)4L)CnWQX4SdXzTSaADt_~`2P8Yev)N?LtHvMy}b;$X@2~! 
zyt5Kja~ZKW!gv#pT&En2H(nRMj&o-v=w%URUW@55{-(N}Chx5#^w%7WSN-Uv?WW&m~38(G{5dYL{8+G05iC|6v*ZR(o zFv35l9Fr}$OcH_7T_^G8*f4xO09Ec$p02!UBD!{i-H9)(k-(4r%bR!U{@|{7PEmgr z1yvvCtaamz1Xh6u^Qoo&XlLtEX3I+yAX;C~PvMC~&9Z8wlq>x~VelvG6PXmyz~h*= z;d=xsA-2FeE5#pmW0}9UBbS02mTlkZh{@-9o%$hC>5m>Z5N#%>Q^0Qh=sG)0K5F~z zZ)a2d!N|syr!HhuP&J2}dc&AKL+fIXb*B5H%U&$C>dv8{FEZJ@JTZHghCMjgQtA%^ zB9+DGR#1MOe5i*_2-f{`X6%y?kN12Z1@-{*ly%MkXdQKCa3KD0$DHi0%w*de{249jQwgiDp2j!?67^gm;mItT9Try_!{8+@8zQJ!hJg?C&BCWw{2XnD0-4C4InK8_8iET-8rGh+3-XJ+uf?1Aa_oQ2Pm7T?R! zlL?z1sJ4WXgR1PCJr0#o@%6mH#lDMnja87jcB_fIO8bJkgt!G|Mc2rs$|)nkZb!j` z+4Hqe?4qDkJ2YG)i*`=-e01G|>OYCs`Hb;gn=9fy>pj5|CcV*ce-u5v=)tLq6_-)| zS@RloOm3sJJ=kI~{#*Fx+t)CDqfdvcmdR~~{Cyxg_SPK5c*YJ8)G7P2s0()?VXMj? z0DN0-F`FL)ef##H6Y+;~oLngAy{E!sA8I|(F0~7@Rn$|czRN>(i{?CFjNZjiwJCn; zW?uKux|HUulE@0cXg5;RN+PVG7ZpV7Ii(k2eABTj@nlLfx^{bKv+zX4$h96PP&@dHVk^&PLA{%oui`;&xx9y*3|gDW}k&|wAUxC zz=0RyKLD$6hT@L-%E6v+d^*J&%o96_Jyvp0njt=V}0Z} z7I0N%C8_(a031tNBqjNsRDHhm#?jd)*kR|rlf!eG-vTe>{^R?NoN#o``3J2ZMnUR< zXXu-k6M#u2S;4CYTQ{G6hClDjG9a+$SPI)qb~qS)+((v$3x4eU=pbg_&XDJUc)9=d z%U-`HtNU#-Z3WR(!tJ+P9ai(gU;v#uFJa=mPjk7d+18;}(#i>xqcga&L{>!N%KgNCLdSsKQ#_6l6*ZrPzAE|9-?D>*S zE<5nDi)m{e`f$+MY~mUZVS$o0wI>|dPU3`Xn2Ls+MG|dAm6IC=4eVOnjCSA zLxcI9(`hGhW> z@t?FIWi?gJ@>KB@@SNp2$;req z#POEn2}cvhRgOZAG>#aKAdW*E&K%YprW}hoG&tlrgg98(N7%dBpR?a#r?Qu@XR#-+ zhq8OIyRh4^uVaVo+U!c~V(c7jV{9MT+Swkk)v}edonuR4i(vC%+s9_dwvlZ)n?Bnd zHYqk9*6*wXtgl%gu{N?+u@nueq87y%uAuJv&yI8ien6Z#qG+7i`L|E9EzcTkSzhu70T+Lj@oWp#IIh@&>*_C+* z^9JT+%(~1f%o5C8$OQ5U=|mnP^~hx;4@p6y5I@8XaX>aBD-lCP4Us|kcmjA1@Hp~V z@~q-n$fM39%Ok*pa1V38=YGcB%zd4^h&zKjjyr_ggL@bER&FzHGPfqTBDV-P8`oE^ zUaps1_qeM0hxp&}KjClUzsg_8pT-}BZEi?fh?Y`L4paiX(A5^B9Qrj5fVtCHR&x#0D)HTp&)io zK+C#rBR&LL%DNRfN+2-12k|D*5_wJJ2!R$!a3Nj<(wpUtcoImvP7v`Rkd{m~a+p91 z76&4S2sA&@8aYUyd6ikn0RqkK5JcPwG-oeB+z6y{lL^^RAfn2{h|` z2;xd0=@+|@Jp__kCV;pQNFvb<*-ap^AH2ve0*R_!MRpQM*x3znCXnDo3gSc{{u{lB zBY}8t%|<8$V)5LFI1mV_yNTEnh{RTo*x`u(hes-6OQ6ZYGGqsVCJc>{?F1UzdK|GK z(ASea$Tk9vG*lp43G_vx7}-Lg&$gCWtZ>G!m49&J1Y$*?fn^I2O9FjTeTP^OsM{+N z*-W7KSN}pb5$K(oJF<~LZ;YvkIe}iqYa$y6)Y17DvYtTgD?cIY2=p?i1zAg=7lJK_ z8G-)VeF|Aapr_T*h$(@dEc8NF6X=m=1+t1j51SSvCIq@el|)t&sQH}{vVuUjgxiqi z1iBfWix?BA!95&VMxeS-e`G0vuDtw;Kmt|vE=B5ACJfp$ZUxxB1<6Cl^)0}04YdE zLIhG;a~%;RkYWToB0wPdTt$SRK(eoG5IzFQ{18KU2_zNhhwu;WVRyJ~^y9BZb-axuTpiK|QNw;w% za4y;s$tO@QmjaSUplmTw;nb$5-|sX9yG)-j1XY=-7qx zNHT$fU!@~S1PZ)=202Zjqg|cIDFS($o<$M~fbx z=kaz!I0%IG6C>;d!ul2vHUeRDED=^5@nI{y5EcSqs}B%n0%0MM5QIQjj1+`KAS_Za z!bBi!qcPGC0%4oTkiHWL+Y*2@Ng!;GYtjUPu-zs};{DwGl6QNy+}g@LI?Jf1_?x6Uql)p z(Dn1%Nc{vV7iK1XB2ej9C(=g(l^BeXJ`kuVUWU|1pu)GSNxj(Y@iM&iFq8jp@Bse( z3FIA5M+)fU|IEjkI96c)X3~rYW<2me4)RjaXOHLI_A(Zjc5F_qU3o8feM85#;yEw0^7q{& zeRUjotGCSb1jYypw}V<+9H-u(HEoV`FJ(#s_b_Sw|K&M_|VC{i@09zk>7B z(UM>zQJAz&P~JjN5vDFKC}q3G0k^-{f2vt`1ZeCw9#~!Y8F-4A=WPZou;g-^8dt9v z+`6vGeLjx>^k^1H^~+|0UL(cZ?caR@igWjvMw28V$55|*L(E4|eWE13%YzMaZWze< zah4bEQSADV>BkF~w0TG#6XS#y}J22zl3;n(4`3!D)D+9emB-f^JlE$Dx2llNlp z2>7_gfmJ{*vGhO$%1OK<>(?bI0 z9?oy}<=nG!DZC`m>Ily2CGYjqC;@eQgui=oMWDSLHX3W6I-)rlwjorF5;QgE@Vv|X z5x}KyFzTk|aa8r7lApss91t21*mKIQ7#!s5RF<;XmPjs?~75 zHJ1YtKo-C&k73mm5SO63<#(yG9Upe6RfYM3Mw6#lzNp z%;tuzt(G~jNRz-RUF2}Wnx{aW9LrMR#10>bhjgq_#Xi4^Q!nPE5d58gyM^D>+gAQ> zdRlhhHCtrL55JfvY?$jO4a*KRnLyo9U|T2N#BKT*Xu1WtJx}aFB}>lhm#0WU(W6EM zp8JIUCclw&uY+p@Km1+39(qQH?w5xz-^4Zct`~)O-Wk7F6X1lL^D5Yk7B&DYnRk)F ztplLrFVDwC9m3Es?_)(?ogn<3*HKI5{8ayUJz*9>%B*ez@Q1)K6r5y%C}-^phi+NI zpYN6{35D|TL5il&U9pj;0QYA=>Y1^5wrOBeZI#WsJ931)MRcsMtFt7OG185=v9FGh zKUg%TKz@qZm)0D`Q=aGUQQ)Jc{dK$ODEv zx=!S2mweiF^hu#m$x7}t@KE(uZlzr{5Y}!MQb@ZFGEFWPF6T}|zlb_aR!#FLg9i*A 
z*8>=OP=+3K<4dl3Tk}*j{f_L1C2Ec6Y18BT*vp&1;pS{dh5`_2<5SY%HTo8xtD_&&seIm zf~<9`__{%p4Y&uT_rKbYPZhGmd6dC3;E|$n6ST@G~)MDV2yz}Mb=;$a%UxnNR0NZDK zH@}?(HHSsa`d3Wv-ja}6CU&Pl5Za714Jgg0?^oWA>7h1)o|qTPY%u?QqRx%w9{{fB zq46d8K6W0coYc?TX-40;vXdsiZ0yVmp9i9_Ddy3Y6Skw^3!~iKBJ%yQlwQy-+_oTm zh_(oZZYQn(}kj#{8S2o&y<>12*KXEkf<$8A5bboe$srSshP|;C~b^t2vbvlxIKE& zd%T#f_+eU_uIH+~2xO>XVo@_vo9IU%$E&h$hf_r12;F`j;(c6hil0E4DpspEvPeQN zWVb}7%qY=YLN<}zDcf@e%|n$g8&GBfnc#k#$^VNzFx|Iqy-eTAw#saYe^QiB%{cj5 z!L{3Y=MJD+ogQWH&Z&ZlfcWBx&TrIJHcwLy#x|hW?=IHcclHW6x2YiX%lrZIM<3RR zN8C+7eG^Q2qfvpby#02S^70`nQ*xB#X}by(Yq}eriR%D~U$*QOpZr3;ms?fv;aV9` zU!hwjw4noKO!KE!?_%Efj(QhGqF$)Hm8n5<*5v&4YBLiU4#~G1uDJ{PA3dly)~W*E zPDdsx2@a9-w^tt@SmlpC@IBim!IcbdIrLUuvKCe0UG>%ZE z--zkOh?bypJuGC}+fi_6WB%;en|Gdm?|glqY@spQ(q5 z)foHt|K}8L8NxGlkK7!*n0Akp36x>N4yVRf7t>FjSiDNn)uHHOYgaWjwVZT~^eGf4 z2W=xGjLzPHZ?_H~zbAn=v= zwQ}no&3qCl(mN&ha6c?XZFE`1S$5CDBSqAO4PTwE#_wjs65BJO%D(;Z~8}jM8 zdz~Errj*b}7kHbwr7zqC9MvXb@fY6XAyJteoc*qV!W*bt*!Gp4rMHOSI(OLvNc{yj zP>0+$<5{J*@Ly4YmV(Reb>G^-c+khV@BSP_C{shOCtLDI@1Y3~Z5EF_e9j1#bAVi3 zT&6wv?H;;#$0N-{tmJQ7H}bVz)$C?E{3z*0tDs+LCv~=BxZa6JNx$}>>T2s4{%!P=wuFikw14Yl&ewH> z+dkE?4J`vOa97XT4Sg39jY7B?qn|d%X``RP>cQCPCw5G)Td<6l@aJzMLnmEFXDvNL zJ!gucy{5gBAr?4N$H~AzN7ulCqC*d#%#Te}rq?Z4POn>Nwe)}H>*u=a=VQ91|H8jU zwLRAdmeK2%>J#ZYwAdU8%=U9rgD zcpE{tgkZZ7)U9!do8-hp=-F!eE)nCw@B9vpLuM-$@WFk^s&${(Ujtk}X1Yea28Fuz z!_uo#pIG3T?;(9Ii)O)#AKI@e$zkn)Lq4TapN4_^!!)UPI^3`>@W8yK^2pzKp^Zz7 zIb^?rfxPBKpKcLC&#diREHEDZoj-m5r8&)>ujzu#eOvgUg}qjo#^O%!d$~COyZk@d zgUkPJ&&e9;1>f@o;qT{*^S|4P^O(GR#%&RDWKQ;c(+%GM{(XJTe6In@v4;XW-$VFw zKyICf#D_27cfM7sSJ23a8OE9)p7{1njQGBkC9ex$eD&{qWPBI>`}*;+`trbiVdD3& zd506m%hJx{m(&(}Wew)fYQ@I6%b1m<;4g3f1g7klo4IfNm%dYqMORqw6^YC2pjqjJjgOmQ@?k#lbrG5g#O`c-i&~ME>sil z(76E;o@i#1i}Jz`F2u7_x$~*!2{}(NLar*VX?7vxE1#!yj3|4ebL1kVrRrSJslb6Q zWVf2eORaM}!533Qul1K)P~0!Gf%>~f@(}s>924K&*S!h(#Tf(0)!h%NM!SWwLq1fX zAN}+{R=ggX#}X&L>apD#KuFteYt3(vj*>a zjQ4icxUg@s3^Yt0uND263+l@Cj8!l_Mg1zSlbHMhvCyWTzFfd^EKuy@CD80S_dNXorXMZux3^!1=^;lbJfYds-KQuej@k2Q9@3B5gBI`SI|MgBq<-Kx zQly=)c&~>zcD`tDi_TJ-os9Fs?L@Ow#(WY4Z<6b@-Q-Jqm z8G5LfcB;v>Ve&$0o0W$5Ud6w!fQE*VA7Y#j$!~W$eU{tbOvwKl{$F2D>+g|2w6TX* zDbT!M?z7*{IOru-W*)zz8AhrJ1-J~m(4Hz0q?w7Q3HvvbW<2n})&ue0A$*KxnAve2 zS~H9%n1?A4JDzHW@zBodnQVH=1!8`?V<=r}m)Yg2Uf?XhN=k%7Iw-c&OT zTUknPhG99q8770N__tm^clG>#Iqn*MQ8P&qhuP`IkCD>@;HuRxCN}@L5-Y zHB+20+!3er={{MBkjHsk4=#`E!OO+%!OO+<;P&8l;^m&WYcMaQ^F8RR?RqkQ(IY@P zJfF1jH51g%dA!L(ix*O!4sYD1$qEZD#t4R!W<#0EjJoBiB2ccWs7VE2`=Dh+D0anS zQ}u5ck#%cGFm4k?lkboitY|n8x8i~vA&>L89$X&RgO`ijgO`iz!R^8A#LHbA8M7-; zm;^O06;fC%CxP7_RhMVnB9Kk?j`g0EBGBT)NTsjj5I9#5bH#3#6tsQfH+OCaFDw*! 
z#npt#H$wlXc4EuFyqkyVCxJ9Q>8Ww;p#937@~(x@J~ZudVe`OQc+SlQms>ne!?K?} zlJV9hPCv;d-ofxu7~K!=q?IlN{+5y1&$XrCKcpus z(4G@)CT>3QP`~w)e!qZS!~*+ReMP5471A#Kg8F_+G;4fBqpuj0`f$ZcdVGxjvF(ti zq3ya2vj!c}`RNwjzPxaSMKl{h~)8Zi4jT}JGzK9JCWIk}8QAIFZ#4nY@ zMclq>xkMf2=N$^i4}APoQnZ=Igk#fmg&(4>@c*u5{bxF&)lJ~C~ z*kHfHx8stpSkNV!v(s!-VBc!V<N~uJx1qXj?3_$H3@*?TX_n@xIqPZk!Zp2?)J^hp8vf&$R!{gJIsAdT?LZ??6j}8?S|9?wo!VY|lmfSF_3ItC zs;?MKHoGclrE0JA%DB{IyxV)c%d1J6RegWp<#9bHi0ktKq1R`916b`D7qob#3BXtE zS;@Hg9s3$zR6RJ42l9?Tp6b%+8E&#+(WV_uYOrD>TfVk+-|OV`iUL1TLEu84*P6a2 zAlq~JG)b(;pbyXAe&=L;Zb9r}6;EqF>`rpi4;ggu$y@$t(mYF$cA@*0psruwh7mQ# zh9;Hc7k$5!r%ZW^w{p2WD&Ncs5PI>(oUJG8+kyu3m}j;R?4>0995R{_D9$zoSvTzJ z7eDW)+}$~&e~0lFU~Kc7QQ~Z)f=BNIrf9=M=$!{ntNfC%0l$^(tczF8@xxZ>Q)u8S zCuL^$&TL>YO5!=d>@#N2o20vY#=pVB_eN~ghVxXmX&Bx`DqUi*|rInCAWJKa#W+R|(B#RvD5N%@~z zjsN&eIl|TNj^mqh<^F}PjTatxU|7=kvX4h-w4g}2$w^eTc=2mx`ekm-*}%if?aD1{ zZ+|LRj#W=r=HL(Yn@FW9QRD86GR(iQ-iRv?lug>^ytu7+t}I?(Z|?nJ-!uXD?r$3T z`jfKL`XRi#!@nvYG>8ur*06A1;c@Hbn@?-5og2jK{N;z{Q0C>ToN8li7<0XA@w}Is zwDY$-3hlot%RSe+`ZRv646Ly~yZp{a<*ZHpB6=P(#z!c(AMft)UPC|V)GcZv9d~+n z!^88y4`uASHJiI7yi@LqNS)lN4+HOYWKna6u0J#%tp=U63%GzkZ97|L(ykQmk!um$ zGx7wUbA0d^n}9M5HCD5gw+1~L5(xF)pTbsX?b~E+87OF1ih1QtvDfOoVuo}NVHYYl z^oY{DtjbsNdu=c`vb>L3^sU#Y@IWq>$~LjIl;>m1`@K+{-g_47c_w~Z_q#=S#sHTh z+lu2@TD{_{=KgDO6OH=Hm>p-aSzqq$8!XSq-<+I%?Eho$EdZk0y2bH>1!06pK_l4D0? zA0!iZZHk%DY&F^SdP3uQO*WA+Td(on3r~?2?o-o+rmIOorQ)p{x*R3c{pZzaqq-^gfd12-O50Tk_pZ3*r7S`-cM+I-Tk#<4SYUAD*EFOD`E``^Hl9 zf39mRN5ba|0Tet1Deym-0+WYE75?+%-FOj5tu|RyGNs;X(pYsytIBLsTdf+K25th7 z!C3_Q^w6lL%N+ASfR1?}RoE&2-;`rT>i4_n@`8vVLvviwhQ+t%?mza42p#V-eZvl*V06ZgM%Q{~YMt2z*G^dF7KL06 zw@%n>P#{e*vsDG21{{TM1iFNwqwwK#!5HGjg})1zi;l1SEOU2G+SGQpYTMgZ49+F# zBv3ooZyMP5t!}!n))fhw{jO8D&$7?twb=tA%rk@t%co7an+1F#qH4dv6uTT6p-ER6TxAMDrS3c2~?GQtAc1i}dE8&3o$abL;AZ9-P~* zl zCUtc`)Hw@_4DY^IDg1mi7p1hxIO%=u3-PseYq)RSdu3)>H_*w;m&NF8+M?Y zCyLrpsmJrLcgbE4E0m++-*|VVQu+8jNvD6HGyNMl)1R9`r?Yzooz6v^>CDaev52>K z=1al78n`MKz<+28S@IaOxqQ@h6?}C!O)f@g$w` zB;KwOw_E_<)hzvuXQ3BUN&D_Hb-FcKNv=CkWqK;`E1v0`;`w4K;p7ue{*UG_8n}B) z{_7K9Uc{D4Ena($6r#bRBO{I_%|f%&DrXDJ?t$GmpTE2Nrc&DAoR>`t{hr&PLmsbA;bK3Drb_CQtmvUOkJ4yE1A z!hGL1<7K?7+u~N|KruRee9Np!?;a87`^WE@qm!UA3u{{QUVb8!@f8DhNjWG!H-3VD zyFKKam7n(x%~?RU@-h`W*HDatw}iaPSR+Btn%%!tr(>2=-V5ElU-NOd=dT%a7N*EK zk&Yfcl9k9Dxp<6~Pa|`+gc0T;e>HM~D+F^Ph5&3r2U=@$ayk^9Qd_*GqV3eqOE>T_>oId zWK!qqfy;9j67*e783xRs)_KsX`cH-sx!WGjf8Ku~iS^KDME4qA4LVxn(BK5;8Pfy< zeV5ZFyWhJaF9W{pcAoi6z-LJD8B{}%Dx~ekV+qdDU6j);-~xvK!vcU@uT%c)|Bo8@ ziijm`HYW|PtwDXK)^8V^^94=FekA`8A4g4zc$_O7iB;TOcpi}d8_&$)QAPiHJkPkO zv*;9R8ReW`p#ewfaEcuU@?c3|wwN_wYoKwoWl@{fVfZf@&##Q{9{e80^Eknat?{;$ zgYi73F)TW3ND&>3=V|%RWbsmEqkF)(9m_lU1m=IbwBH7Lh$C&Pb24UqCZeM1hi-!L zHn;xvs#V5vGa}>C4u$vgMvEHoKV2=vL8m1(o*a7gp6oT+*L2cdfOIdv3>EA3=-@4KDPU^lJ8y7qw@2o zW`6QPtM&e~4dk{3oLOfN`5LVf_Dn#@;6D zVLoz@BVTc`SuTm?dxl=S9u4F9p$*~=>XHQc@u?NNAL38Y=@!UJe{O~|Uoz>Cl(@?zW~GWu4BZ)>j)A`_x`Gk{Mx`Gk{C%sq55_W1aDWMF--85QR&B09`y7<|^%GA0q*MQo<9U_(*VgP_#HR42 z2W+-GFqq%B);#JE9?Zklz5ai`hKiT5^a_m{Jfs`JZbq)tgFgqjAVdj%UBHjD#;nuO zZd@kaUFqod05WT+lozHJ&folFt@6a}KM-s<@zT(<#|dx5hVW;Ct>I21DsTukJaX*RgOZ7^BpWtP8zvRM5)#Csn4!rp zghcDxmUYG)A@Oy=mK~MC`DES@GT{6GKFLpA+I+$w{*QY}I&k|X`^_RM;hS3JgU|Vv zsCgOJ9f#$c(AZO{Q|56yO)eZ&gf4kyClmuiu3EJ?fHGqta`&T8&i!F8h@-+UM7Vvr8Q%51^yuQ7iTe_)*S+c1;W$b;`^`ovW!hPcMBT z0={bok6K>{!K+_Bw0s^x%V#^~UC_l_&G#hFmm>T}9UjgAJuawfEC11_%vfH6Fdxfd zc*p8#tO-8O^(#<0eTY1HzmVK8-9PYOs4X<=5BK&#T=ps>mbit@UF%$~Mm@ync z90daa^avD{z1r{iBR8pY}LUxN7+r*4sieSNv@oPQn3(t>FQ{%!=+`(~fDul8>I zX?^KV{G?XE$8$Wahw~fx$lu5_Bd;GSrd*~_D}HgI3N`;15*zovTHEJlZ`AEu-GSpT 
z4M1~LtHbV;4|B}_c}rpb@0KlS4M@RLkOBoMP>=%u^C>V%5mo$8{K(?5q{66@X;pHK z)~waZEoPHNX*KKBu#jibXmoO=+A&m^F>3&Z%q-^5EI8;qM*t)AE1) z{@*rU%)|3Jg;J^3QL|9EyVhvaDz$o}&SU|b9!G{vyI&8(AI5ss zp?tD`GdL>+8(MJ?hQBMqwYQwKTQ}s7^{{;VoOwggg#57{=6kDMq7L`={YfvD$8tD4 zERSh6q~(7<*88jXbo>j(dhB%lTXcQ}L6h_D#0|O3{wunIv}ev?Ip0TOZ=IONf4A;- z=_W(2CgHJOB<8e+ulIU{ zMAFrvAKIn9K;4edS-=r(MN?w;jGf=HFj&_G{`txfZc}Y=F$;_(jcT33CQ~Y5UanVa zz~up$*2&@WF}i9_ZTQqtH32ehDs`+f!1-gojD=L0{V5@4mMfZ7`~BOk#3y1)!@%q} z-d>32zaeP;Gbf*eGOoRU)BK7%sxn}t?%*~q;ytTq3M=N1zK|_)DLz(D>*1iChgyPu ztVfJ6AIo8g!~M6l#W!2{?hcqQTcsrf&?-Ty&JstmoK7pOCe~Zqq zP-yq?{!Om;b^cW(hR#d*aG!vcM(*xi`U!D)g8?%z%yEsh#}pdNZu>b^xIwK87aEI8 zQ>0Tqk|mG%YO4ZM%KlJ>6Y@rx?q-zce4Gmn?sQnWZRW9$P zzIsV&{_i4==D`02PeBS4q(DIm6r?~w3KXP3K?)S4KtT%pccs9zp;0Ah3-qwbln0fF2C_YY~{I#24-bt_yeOq;F5j*>X$hyWdP zM9Ti)SHIt#zWU_9bNb#czDR2>f2w3H543h@gqL^!sc?4SZOWQ1JoIGx{R5p3elt!rVf@zqU~#|bZexPul5rv?vqsePT;li_^@GfQ9w>5bX2nTgI4Ev@mv?bNQgo!# zp*ioZUy0%yZf+_4F^8zqdT{qM5Kk&&e)YJ0+&^?#AwM*d@&y{{mVsqvE-tg}eUgzp zsJg%M&X}VAI}9gqq|!DMdb zw(msXH*j5i$GE>KF|Sv5Y1%6tnP9(LI9K0^FjOh2E52tO@p5rw>dmx4#GspPmfZq= z(&q(jKJ6YyHVtpT@>JR&vfP!%t967@|lcJ1f)BqMpA#hYKON`6}MF!`K*AmJ%3 zq8=#hN!*=!I%@IRs>IwU&oskRQ#q)Oy)Zra9Dhjf9B#Bu#sMdoZ$f9n&viMPW46BTD-9_+e`P&*ik^@au77 z0sKpzDVnHK^m1RViB+L74-4Y}LYwr8qT?aFWN{Xj=k4$#q*`+*ATM@60bwRHXf{H-T*-#wnoem>%Gvev}!KVY9Qt4f7M z>v)Lfza^$_%$y{@?}9qK?OAfcx;!FOy76E}h!u{>K3maiGVyQA#>&o*f^-zB4} z_@>7EIp6k-9eG-R;esX-Crp0RP|xLg+QZ|=oi8F;5X=7rr<`7%z24+P&^|%)oEN<9 zH8}ZX%<-q?)|SmP(93Ik?FY!F8va=hshhqtD|&P;<5JJbC!Boj3h&2ta=6q~EC2Nl zjey_%xCMLxw9SU@%r}_PVyrL zt!A~#k!Nw;4`S%ZyAq(_|IZJfxBM&Ued^%H(hUTt_iI7jc1?Ux{reqieHUbtUZeYk z@7{NcNO)2r=pnxlxX&(CKdH?jLN&R|k;)4s=+U`-^UBV1W%%!MYtLykl9y3N)cdq= zS|M~>HMZ2cTX%_CNn3%RKWfDMl(n6nscyO;Dxs(+vF4-qd9mKe@9?_5Gsb(NXI>$% zCVk_gTP0i`d~YX2k*^Lnd{RD@!12WVg)QDaRo#3|rhNO}q1mf%1lE%?s4_AC$wd;! zC;Q`&8yimWG9Jv7t*mHyPE-nd7qlzoJyC!8!9QdIj>LMf zJpOwO>sIM71)UJ0pf?Myyzc*wT$R&m(xxyjQtQqLJEXiKgNJxueceAlT+F{1a(!a` zFI;r^Lx~ROS{6dno`+Ou8gw{;>?3XpFx+|PqHs849_1W>ng3W_2ylF6m?5S~kU+r<09rIgA zt;ZM58BTZ}Obb}Q_NM)eWLBEjp)urzRfglmHFe063iS2Bl!>HtPWa>d9du+}-uSQ( z?s&4SZk0aO9KrCrpa1U3UD=C#<3Fh0WLpH``!u7k59sOl!sY(fMUlkZQZ5tsC5Mw{ zQ|_St>viP!Fu%8bzjP*|b4LsboM0o)2bVXE>E4EjNPZi$KCcf+pL1Vo_fl2u)vsL} z;!Wq$C@{A(alIEueR-6P#C-aE{A|0tf$z&4#R_|qt)3eO#q=ZVj-47A+Q3YNUaF8g zASs-x;Wc|}kbF2|W!nHlqi$Hu_%XE4+H?1x4b=e_*qOy_v zlwplLCkQ3oGA8UVcD*gxXw&GdV7ZZOwQEpG-&b9U&CmNhdwRDm(e-JUphsFG(XegV zFSnm`CC`3eUHsxXGjXFFQH-m}G6c)2tsgE9B}7d#R#;D&$(S=mUvBGp!G5pRii1Z$ zKX3W6`xke$CC_cFnN~_+B>H-%mzoLkv_rsn=Uva6?TJ^)HfkHcn%pz2QNQhxbBIQ| z^Y-q7c(P;dRl&R}(}<2m>WzL8xr%f;LbPMP@x^GrF%#wxV_aJwJ}QVOdaUxy3Dhw6g&kf zP>=!zDe%7_1*WK@%KiB|BA!%RWO4;;|EOddxlyH~?gr4+VYA|M!EmIj!5%eGRDr^ntdqBgVOuU zht^-Pj>sfV{-CXHN1r=2J|DqXVGOAC`g_M4%j_qP)G4%PmmlHe+ns!alaI~%BApsB z!~tL$L(Y%qYpn*)7e{JdZ2^5l+~GJ1?u*W>hM)aGu6_oyPL1 z^Bk*D0Dq`B)P5?po*$|59JoCLR%KWaUj6d9b)K8)rw`Bu_;8Q2K!qEUThF-X z-b6yMVPN5%@g~y)!-ts9wRqhg7%=_AXi^7vDrtXX=}i=SUT9~f;<2b{)p07`v&N|J z9-{g7siUSSqsshwlZ|)r%~lh*n1G94Vc8RwKy7BNS#DJrEk>J8rPf*Llio|uPIZ(# zfR2)f{m8X~EdS&0L*1)g<6_?uY#6M#S3kS57h*$Nj*l?JB?v>zXG8XTOtT^VclrL{ z{=dHe$CX(2hyJVR{3cteA?DPoRt+o0$Jv~8B!*5t+cF!!cZxASC@~*H{2oKhXG8p+ z^Fz(itA2fGPGD#-GWMQqMldqRrGG`6qtLXK_aAI}v=fuRaoNI5T$l;Ce%R z5F0XI$wICq*RLmj<%ZZ0i_!Ys5F7s8_y4%!pI=O?^{e@<+ta@EJqx!DYkd_xh8Mr> zym#TY;oOIuH}lh72sS*nqx!v%zQ^qtX8GoR&nAxBD~0StZ(njr41=m2S-1QJ_s7kD zokk7LQ+}HNQ$qzFQK;@?bDM#gsI_RsFbTq(HQ1EBJUs)#pS9rg(V#NGRL61j)pS;kF(*3UQ-|L4iwHWLB zxKCw-`N_ee7cyTyQWxEMuT}k$$n{#i{&l8&BK)tosmy#zhJ0GNeQE8*UrM_ z4xRKzL(e}Mdu`7PV*c3bm!+Mr6JKYKd{_P6Qv&m`9xRXb;Bc`#4j1de@kwpeg!A^? 
zE~2ITz_FrtUMRG3W@9%~1(bEU=zwTf9t!ghvW+R4OPJrZ$#z}jibDI9uqXE5qaI&I zSE{u3E8!y(?&EJPjY^Jsyw(@?pT*41PY@6jJ5!R3OV0o;kDX-zy+(kY= zg^S~JsDIi_QMxz!{yCuT+0MR9xHvv#$F~j8xojr-e?4Dp$k#W-`RR2Mdcif;I6k38 zjt^Gs|4LZ&bIQ)`R7-9vm)?4~{?nUi@8>%d@AnZz(}uJ4X05+U|!!D%6|(uuK`W`@-e@A0u88 zO^3EGoR)Q+2vW|AsPv>fTG#OIt>O2)P-BN&9+zgM0oBU zQMqrLJ8E~fZtaQVc?=)x!SYxS4j0SgaIqd79~@5{E{;!$QpNm=Y|r2KcGT!U{9_fL zIohU2{(k=-UOCXWj~;NvqN)Y^%F!Uu#t~Py z2F!n`hJE#VYumO0`IjkyWx9pNk?x{1^Zh~3`>r2V=+t0x?32}^NZ@ZGw{Li^4JSR$ zFCSFp%wXbc%E3km_|N5w>+J$Pi=~|73XtEF*J9*{u5sjVprdz%Nrj>)}ucw^!@Ti!q>wnwI*=qRFp& zIt`xPW+lFQ(RL;2Wh{I?_S$^njo+wgwIQB`5}T~u z2J+hmRJfwPJfBn^ykDIsk0xaYuMf!sdCtRriCeDBCoc_scHkYzXLmc@%(LA}qF=xU zIp^wpLK7_7T?*uL7dPr0&~_yezpPbi5Li}?x3cb7o@sIjeXyuCM3B;{|#-*WatCLF}KTm9vwu(3&JU+T-*|p^RF8y1t zUs;{(cJ{HfV)`ob@}p%{W|Uh?>^PRuEOmHwLjAFOv7GdFD&S^R9|t#0Mkk_Th1rY%`mofzM#m-ztjYleHRT?6v#0;dOv*D-t|bW`_F z13>K1@ei_AJymq@zLL_eRyQ+>SX_5*ObobtBBUsXZ9EY@%drh zNg-G3b-$ed2N9dmW@L`FqtVN>0oSKLM@wfc*j}uE32OfDDsqSW|KOj3ryvCiQlKCO z3R0jT1qxE&KP?5KhecKVt6RYE2_&mlr%~#RHod~8QYnlYGwj=HtQwsj_HLC56MYNV zT5&1IDkwl^8;aVB!auw^i6Oj_Y--fe?v=Z9QO+92-SM zjz*<_3YV4_p~<%S3H7@EY=|@Xd-f`bKRIjYi+jDp+|chLLF=hlu*&qi1o~I;^JkGw zuT~3vmE-+&UfawH=cZssc=mp#2iH~;+X#9WIm(WGFs5!(mpTJ{r#!=7a;I}h8)6&D z)??sSeKrdC&WK0i4|qRr>E@RYF4@`q4@379&+7Zsskg7wo}<0pBwSwUO3ZC%{Q5c_ z^Y=7JE=|%MUVQ1t6KrxYfct3@Kpg@WLCnvT(zJboj0Z_kcY7=vmpr0lrQ}Gs|F6Pd z)qK4Fp;yVx8if+}JB%8I&7#&Q^*WncXOlr2pjVjayRufp*(ga#s}kL4rGA9KT6y68%8g!xzx zo8Mf1dj39}6dmg@+hget9>V%D-}J=T`1V0RhEMCsBdRUG@L^q^grz@^I6h(BIe`4`lDoM8Vhm|yc7PD&bLZG%1i;qW3c;)MSyIx-`%ON+Cs zb{f`{T~x7Y*J!3in3!TbF)U)0A=Cf%+_FFO?VBq%s8yzGG&VBDdhAX;KhFQ?5-Phfe8(5SKbXID_u&om&P!4KDeco|SMqkseqhR+F_&qX-=`81z&%9B_g^RXO;NtaZ- zV-Y-5MxzYc)R&v_NWX3M-b^o4!umk9VCZK&jKkubE6p~olP&Uf%KISv0?Rw~Agre( zt%nFoc=fUJv0Spsz4hCZOSzyP6T+5de*Em@=f@xOhwBgd1y{+}uM>@VdMnT!(R?mC znp|n@vBh3UPxCXJ`U#eP(kY*T<*^>2BOi-1@u%~JmqGJAGU$9P>daqW2L4VA>3k_s zFrmQ-QUD85;J+gU@+Y`GCmuL4YNluI6mIalo>}%j9hYyO)a0Jw`}BczigtWvsJnZ8 z`P}yR4H`>Qyl(MHdpG~54PK0UXdtKVUOr#<+<- zc4)EoTAIB_QEQ{TF?$TkVI%DAXC5+Kcrr0%RIj6kybgi=7V*;U#Uz=9+C8~qD16pu z>&c?8?7hnCZn($T2#%Sg$x(!)aQMUyU-_ebv;5>Kww9ayrR zpJCTY`aV%Lh$hCBSY0c3a2#7d7zmyM#ydns#mX+z?O&l6wF`38Cd-h7zuV~8fY2k+Y_FE@^e zSu>~5#K37}!@MJF0$N9r+Ropu_?C$!_b(r2&!{-U@M@O+Xs=dLWRW^erzVw&C7g~8 z3=alX+?vxWidc~0-*If^al{f^c+(m|aFG8|y8J}DD6(>oW9Rl(A4g7(eO@%7%rtVJ zwBoJUj#0$o;BF!Ivg62;Zt?nFfzya)lUm6yheZ9j0H9E5fA%;=E&cft2}1^+tIqv~ zhH-nX`J&53@!3rtuB@^_429Chxycb;=06cm6Ivwc0+m3@KfqtcAI1-H8{yhXG>>0d z5Xt9xRCiD2-FD64rFg{i=D1aLS;7nBHFR+mCJ7C$3SJQR6ZfKI0e7oMJC_WXirg5F zBf>9S6X&Mu32rUPJ+7boRnY)RQ_t&eZ9I31N_)n7_V%pnSxov))L5G7DiL&X6L^f0 zZj?@zb{D*n>ZIN-k0fPX*9iSZse(lJ8SXZBU7f%$2y0jE)#9nUCL|JZ! 
z+*Y`M^ANd2cx1bd6P%KS<9b$(+rkwF=+wT1#jLlfO&Xh3VNt5&3YE&H(%N(;wYf|J zEu~PHEM|qqsQ-8fDy?2tA%&LGn)FtcRtC4@fDWC^Y=JARU@WGv zfFCxcwUj?4rI2Z1a-cBllyWoNYh|$jrqP=%O0`C(hO111rqEI*xa~@%Fl+SS3Cw1- z8WkF&%51jC^*XK6swwq~4pDBkC@p#ugs0XhWHPx)Z82LkCWQsA$ug_`%g|D4jTOAV z$!*{fSEe_qVQ&WFr_~tY18iD(k@Iwj3M-tD1}k-$o^rHkF{-U97&2fusf zBb??dcEcfMwHUQpqXo_`#Q5isn@`LLu$Q6 z3j!)Qf@;*6;VLk>^aCxW)XQutof#!DeHnv5`5H=E^3lgX$xTH)Glt4uA|$rLshIW47A!fnVV4Q!!-Cu6wE4vM4= zB;-o1!l<+How*GOX*TJs7M0p4vr-ACf~9weD8-d)`1Cj0lxDTwCWDd)2Nbj>_yn6p zrZU3?Yw$%{ttUZ;sDuM#aCIJd6olex*2z_7wMA<-!D%wsMbL<2u@qd(Y>~-qa8ejD zOQVGXV$o5mRZtK`ezcTAZm}t~Mkvf0qZW>+QJy`)L$eMh#ZX3sPn}XSjou>Dnc?8E zUJ3pR&5-wclSZb{=`36vg-Z%qAIJK4^qWq*U0THfumop_CbAaMiWa=u?=MGFj9XrQ8B{8$iX?c3 zG+Jyjol0DYmeT92Mu?6@X$DI%sLfRM)j|eAaWh+NC8C_VR9dUa43%20l&fS4DD^s- zOr@tvhQ?}kaY`AXW@)U@ctDv4SKL~K4vGzwTAM;;lAFESP`aSSf`S4Wr_`CC{Zl~G zpoPG7CY!}|7r1tS zY_yqmHjM^S0a|OT+@@BQ-%3lFReJEOXf#5dS3@b*nBbfav|nnfeCw^{J!mONW;4v5 zjVcA)Gzh*Tpp0m3CYu(vuXS2|x#P5y5^@(@Z9<*bfy+&!O5>=$u<~Pq*|(}fcS=fb zQ^0=~ivmhM?1|Z+&S{J?Ei?;qP^eUuTj`Lp8Z|m9s<1!+X{E9$Z3Z8p6Qwg{Be2~wqpqqmYn;A)l3riLcR3XP^37O}Ke zty-znQth6)(!7*`UtY;Ce8bQf1O=ZP2DD%vPgJPqi`ujy6XjgWg;R znPXPK)u3{yuo`fg3dy22!HF%Uvb3Y7L0N?oXN4>8bT*|9Uc=>M7BvhNbPBZz8ivxF z>DWU*poVq}#uLypK!XJRAbd2G8z|OtbLqacltpbespLwV6*^HpRo}qBf<qgRiwgZwa3aom0^hPATYw)fPFGI8gB5jz7p~ zi&hJL2e@F?6m#~qI_PWF;1I$jqdEm>nQSmx(OY!TjvD1^Z!f1VwL+(cL|1|X37rk% zt}t7uYul7i+sqolSz1a9zF44csMRVXWI4pyrqi3XHaRtDfZi$dYGTLCgrL?J( zaoUzkkt?zjD=J%zJn$j#tmwf%HLVHwN#s9Q$y`H%XL&U zVTQsF$!~^FR>)k!oVuX=>7mO~f+GSMG$0n})UO}H1*Y%4azAqArxvk58%G#zkHf)2VYBq=@5+? z85AV7PA@mXm=UTY3>LI%g&Niup^Ql#J+eXu0~_cYsYB*E;M$;1x0#I4%}`#BRq}wT zbcis(pd8%lz)ApmW(eH~!%Q1|rCbk588Dm<(F{Gb7FtsnZ&{gT{2`fga>WEN@-R? ztErGzrN;m^h=l^azzA&$^x!g?3^1JyrUKw^Tx|*npyLHq)(S&YjS5;diw$y61tV5D zj3(rIXwSV|=n$`3SM7Ze! zntK?4LE{f8thHGD9pigw$yFc(qj0dxu$ko+wH*2d=*&Pf^t_r<;dF=)pw34aJP8SVwA6trm?s;z-kw8)@WQBuWF16wDQ+a^E9TmeE)vJ=`ysv%WcG$L187Ys{b#0p|iyQvuy3~$shD^No{hoPazRay$%DyyL_ zQ9z3X1rY8rh5UyU(<-4{n6w`ADJeNLe-@1uat%fb7L(Ep-wKhlKx3`88ck~H0;iNr zW3_>69LQXlGC{Op7_HMNw9tnr%tjBoOxUa%nHp*-bXd>`=!{Uap<{(%H`H!Oe37Fi zm)qd#RI0d{s5ut&KG2USs2o>AZH89ejUGMPEJpC{0Aosx942)zpi@|&>}u3%nG!lL zrQcvWUf@3(7XI~G7~Vi)Qyno3yr_XK6kDU#wU1Lu2`xEzSg}H9WQEIH_0ZtKKvH3& znpGwLL&5x?PN#zTf5H5}VE$h)|1X&T%OMF0=KoZ$0At>Q`Tuw4IHF+wUoihKnEw~d z{|n~-)`Ize!TkUK%KV?Q|L3`U;JBFt&v=6YDR}-PQUE-TR{wKzXY3-{st0=qSa^i> zGNTn%zHP8iqJTX**wm-i&FRbLcH0vj_ND+G_NG*^qJAylN4bdJFCTDtPh}BeLwNPe z=TM&xIjNOD5Nz1?EK*5=yb&8>CA7Qk8Kiy4bNRX zu&~weXLdHMpPbj@RgrggHq36j&NfN>&d!EEI2-|wqk7%XCyr>3qyAUwf93Umf|||N zHGKKR8x7sb3E3CDdvZinwLdR;*wR@A`@yiGZ_&eIJ#2WYz;eN?Q)%IU%4$Vvag?~7 z*5~EwEuTlw^4U)L{Hy73Ny4!9#9V7U*|DcXIkXSN6erBnb1d z9EN&r%^6gD{=p@$JNGvFg|w1wa)gOhJx*I+@7o5Oe7 zo&28;#bUiblsu(I^=nTzJVWNs(-KC*}%L4 zikeIY))QbK0lQ1Edk72b7BC!vZEE^f+8uS4I?5eDrp2QUw&NqNIM!@9vhq;wgOXg7 z?%R9+CHK!nvBomp`&nLy=I0VL|GAUTLA%Ks{L9-UXr`yMbyi~@suI@t;+re}3?J*i zYrL*~Ka0cCdF|KM^LTyC2G@*3yZF2FI(b^(rQeCq;DTMd~RuEWbl zkHt>&@!@K_lmFuZ53rT^*|)c&y)#T$*SeRfTL;vnXz9Wy^E#v6epy?l6|&6?h^pc| z-NB0rh0`4q#!MK)nE<%sbO!+H!M!bPvTMP93QQ5qurH*7%|ftvGs1Qy7&Pf%E80=! 
zws()l&{6jMLBa2SrQzM5{VpE(G*YqHAIXU&-GfF*khuKnfx`#+q4%TaEzg)Na`L|r zug?z%|2S$hnJhMTShFRcpTwPP>c7zsbsReC#>2s-QMcy9UOu{47?EVk+@hC)&Y#6$*MlqBjNFVWPpckX3Sjwe$ zi`ICdq2#iC!Nwql|8Z{kllF#UNO3qk_UZ0oD8T#Eis@Cp61ArV2Zs0ZLdV1@#?hmF z8Gh!nr5WQ!7ejYP)t*DiZ%Vx}tai1+sNuGaB`En38I!G)Jmz2B?|z7qr_(e4yXf>x zCk3mFUN;BLBk=bIewMa3-}gga8$ux6@?DZPlzepL%RjEZQl-1#`M;e4`KJy#6q!BD zw{zPD`zNP(o;qkqSv)0WjV#NaGNta~NW(*eea@1qYm%?n>1zQu*{^tUKV>$0ZJ^r8{*RXjcqmW zE}4-Y(fW&=L!7K+>3V4V9ir(b(Z!dGzZ$kGubes)bcJa3L|gyh_&a3p&hKYMJ>igD zvRrE(3c5ljpZh+y=KZ@whp(J28#=r-WFJ}`stmnP)baVY$p8Lba_*tG%aT`pH&p9d zZGDH(`(*1FLxszdZRFxNrPc@RduEp`SpR5x{8{o@k&dMYxo;!7@+y@(5c|RIKJHqB zXYpr=@$X!ZnFM5;0JX=n8@!aZ_ZP9xwa`OexrV#xzC zWnqi^(^bdFQkRp8k`XtE&K?&;jX=KL{#nAUs$)b_nN#TlfIp?1`-HmzBsrM*ItRTnx2F902b7kn}r zA5HyZ&=k&H{q+*BJs+|8mo)huAtpzBLDAnmDfyi>MQ!bQ`<`yv z+=GK?J#UFNZTuqSp+Ea-q4{6PwbjmkpEyfI{rxW=TF*c5I<@>pT0gw==%dZHIzKOx z^_jn=eZPMmQ)$>-JN9za@@?qMsXR2tO~>CurLlFjxjR;>cL}XuvMp42Ee+}q|$-y zF&M{N)L>0US%+!B#Me>W_Vrndp`+O0c^WQu_-d)g4Kw?7EyhQaDmC$*-4NzayX!X` z-bR4#98m5ZRyLd1inJ^IqjHIq(pS2%z-~1@ym#}8)B3ml)EYa2;q*C{*EKP_J*k^I+I`@p z-#a&(729X%^dWx#y!8b~7XKUm$*%^)>>*tjwjV4g+b3k9z6#m+=H&NPgI5yAgteEp zUpbw0@`;PX*JbX|tt8tSvxeVTF`XoKy%}k$)Q(JedHg}hf@MTzV#u=mW zW=mW2W1p^8!|@)?PklOl{a~qHwQ)RYmhr{Px9oUQSv`;u(2~Q zXq~|}63jB;R0NonH#n&`ap-Q?81juM=*>ug*5z1la^o4`Grw4M)ys4#c`z+c>N zN%OywquQ5S{4(zMZuMwA|3K$j`6V^@Y?-!Ux)f(hN3v6#MdSC8^h!sheaEP0{B9=( z8!|sNy}Yoi7vba^oP4{J|6|FM!`Yo*`;p@jUR{@r3vaGOZHjijBCpXN4RH98n*bNK z%Kp5`#sv?obM-c8w&C<3*Z{-POq*^Rf3Ld_`tPOncAAjq< zJ?fholgBiTQ+&p6Z)$!-d>Y#&UUyoGaPq8aS&~2E1P8UKF-PX$U=zwdCk+QvwhLhfXdA%{`x^?ccb_fvB|sDbBf=a zT)VkUxF4gZ%J?$(JPQROIzIW{-st!UP>GXs`enxOobrXy(9M^Vo0{JdN47p3)ABUq zbSE@JN*L-EKcZuHO#LFiuh;SQ%jw_628&D>yh^4&7JB(7FDKGPDtjTqYS zOhVjDE9R3YCuAjLS%Bch8@cEyk=)Z(aCm;Dh=@^*ihN| zqO@3<8+JDQVg3&nw!$&-Uyc=cM5$fJ;#c2VfSOki=gG&uM?>%4nNjNQc`-DPN4b$4 zalB`aXO?HCXNG5zXS`>eXQXGiXHU-%Pvj}{4D$5yo2hEy&Hw zjU&ntWr;FH8KNXnyeLi-DGC?$6orV8NG1vrd5Ji}9ATC)QkkCuW5#$K61et;iL6RU|5GRNfgbR8KLIg-469fso1RQ=2KZ~Eq&)_HV&4}`=D23LX1Zp$Cb`DD#<@nihP(E34RJ-T zGS?tiFISFBj!TwHCfqWIT#|70DuDm{Q^1p3%5^{wYnZjabwI=ru9!u;v1k#C7P4po zi{`Uv9*gF(Xjc~P!lF4p(yrfG^fwm$l|_GH(VtoLCl>vYMSoz??^*Oa7X6k*=dtKq z7X5}rzh==nEcz9Te#xR=u;^?S{hUQVW6@7p^b;2Sm_Fq`YMaQ!lEy;=u0g6B8$GjqR+GFb1XWOMW1ESXIS)U z7JZ6EpJdS|SoCogeT+pPWzk1i^kEi#h(#Y{(Fa)ceiogrzn_2WG7QK;0Z(z~uS@b#< zoyelsvgkD|dNqq)#iA2fbUce*$)Z=V=;bVW8H--ZqL;Af#Vp$RBh9VAqRX@BaxA(m zi!Q^WOS9-wEINoqmt@f;Safj~U5rHsvgiO7U6e)pv*;o$+K)vSX3@SZ+J{AZv*RvSxY{5+%~KcdJ8G~9(gpFf&+45g9QLRlFA&W=tA1|+g#7QBZTF8zj6Z@8D*O*0$B3m zqe?LWh0x{+>e0~vk2Gjpm7>=a_j?(&@VX&q!~1PQ`1|GD5yvU`u7mqj3N}4BCaw6f zD+X<%pai9VdA;TLsPH-n`*fw=d!Jdj`B303!#{W{pjLK?T_09HXqpw&ucKnWVRAwr zxbs;|K5J9OI!1EH9@hO`FTV@kM5m9<=dShlCnl{vcr@g`h$pbnE80Mvu`Z^0)MlsRzT zMR`jC&x0m2IO~!bZ5Hrp298Z>lbh4uuQ|#dKu6gt0t){4!=^VFR)hC{{2kx)zJ6Gn zSH#!WDgEwv2vO@@J)Z_>rf@TW&G>Prf_4OE4>&@Z0|RdHH^UOgzVR8C|mZ zK2J2As5A1^STCm@PXx*K%jaaM+u&(`Fd=6P`8B(+=zW9Y1*Splg!xaW^S3eECDiZn z{PZh)bf$xy_i0M%MzU@|Kq-bFnOP?0`S-mm$>uE@&j=`!#>D5*t1d;!-7AUkb_+`#3u5@3j-L~U zt=qGND6upd-~FGX-kL`hx*wXu3Ce`jrnjhOOUMQ~(uQAWSQ}HT;&4C|J!gJ+* z;J)NO;9dvIgCpE@ZZda0cLjGIcN#a6JB%B~HE}y}TXO4jgTa!ZJhvFvn=5kt=9=gF z4D1OmyPk4A;F=;?BUu7A8B-)? 
z7_2^n)nl-_3|5E15Q7m6W?-<|3|5Q5YBE?bgXtMe$6#6p(=eEt!Bh;UWH1GT$r((> zU^N)5I)hbXu&NAJg~2K_SS1Fl$Y2#1tUQC2W3aLeR))b!Ggv7G3u3U63|4}{iZfU- z1`A}c00t|{VEznNgu(n6tT2Q5GMEp8c{5lc2J>PtPX?1Rm=T21WUvnm_MXAsG1yxM%VV%y z27AL`uNf?d!Co=gO9p$vVA%}zoWY(k*i#03!eEaX>=A=KWUvPecAvrSG1y%O%VMxQ z40fBrZZX(R2D`yv*BR^@gI#5?D-3p-!7ee_MFzXTVCNa^9D`*t*jWZU!(gWw>=c8Y zWUvzqcAUYEG1yTCJHlXx8SD^)9b~Wr47Q)aG8l|xFgt_oW3Y4vOJlH92HVSEDGauU z!FDs)bOxKoU{e_^hQXp4Yzl)#G1z1Vo5Wxf8EgWBMKaiU1{=p2;2J6LOHU_gY zn1#X23}(U@m&q9}lQUcFQ{I*D~ zVGf9JsOsR9ivIrLn9yD=GffV(O{;5|90(d4Qcqn>=3|o4HSd`oP_EQiM+5g3f^aZ0If(4T?)9~JKZU$~M%^j7yH z3tp$j74v4~++LFg%J&Q%Cfd6kRDbQXsY2{hCN@J65xP&!9BP>9^6b@8;J$81i_}uf z!N?I;7KZIpgZ;UszCC!glnDw6|Ag?&q6c%@o?DZ~+#CPldY!$?8UE{-hCZ=NK~>qB zf@3g~i3>V%>Dk`D!D;EibS7pSnBK3!ykkAuF-l_y(rwJ<)thq9k-fpr!Jw#nk9J^{ z=a7T1UvC57wy#O?CVMlT=XrlT0W(2pZCU4wd;ew>W5mk+G257?8}??uz0yD`Y1D77JDkvI)Yg-g+_HwNrDp$F zy(b#x9n&JyQ1o+@HnuGY7!JJu4Mns|MDc1+$ebj3Vv->;lSqbQr14S2JCRJ4Xxcb> zu4$^VZ9&l3wuB)8|C1kXgD^C#aQmP9l8=1Z8`7aX?7y|B#?oLv7_foRx|Q*To3EAq z+KcTA&pjX2h=_5A>Gz^%JoNR0Z_7Pu*YGt7H%+}V{eDj;7#x0Jb5yP~bg!|iV}E53 z+&*gY!OLIV;qe~Q;-*W&VELMz-^C@n!d>C>=QJC^g?%n6pC<}@9oBLFdG&e?=-^fKsALU&gD>fksP`lCG+&!%lrO>vtI74MQ3qOiDTL z4M+Iru&c#}Kv9?ejkHoU=`C1Me_i>F~n=aPYFP?y5zB@VWa4*!2EeaIHyt z+lDy=ym(LRRwaTBvnqaJ@1UKb2MfmMrB{O=RFfaFqsqdz`}Qn|i!KEt9z@TtEvO2g zRH~KjAoPRWWxH2}wd2CWeL8ozHJ^Y-yR7g!mRuHYPIg%~XdoLlnLR<7)xJFZkj%bW zWx;3Q-1d6l+^1B^x4L`=1H7uo)QLF^;;w+)D<>#8T)S=Ez-DZi?r^>quUd0u@?mwzDn8kwGPAu*NWf{2fQRBjx+b;0< z^9DWiyGo;tUhg}U@%Dx5rIKT7*7?EAk$Fk&x>teYA2wFDj^eq+q-GZjVm( z358Q8yjp!DwJcQZ_h)@(vMVvYj0?d zGq;Puj%q?>*K`Wzk83>StF#KNojW`F`OpA}r|Dv~(T==t@4$@R4ZUiQreL=#BR*~4 z;1B0VRj&DI>I0x~@EF>m(ITKYlM_Duy`E8=AEo=ej`v63pmyQU@uvj-l)(QZB~W6s zb!ge8v@oMwySvQA>g_U^27=A=<$W{3#&P==>#q$2!6WC~y4__wzRFS8Q>B-1DNvz%=ux=`N8~*nID+> z8DWKp58ii^K-uE2-a8%Bf!FmHYhs($7p}(l0FSTkGW(5c#U6 z_Cm*9%=+o~e0`dpWjYqr`tbJbW-$5i(8INpbD5s&S04*lvm12f?9TW$dJmXXNjv6f zzg#fz$l9uXr*2~OeLrq3oIQ)_b)~_qf{weu`__x1%M_jkS(j^{L~+vi-LC186-;X#CF%y`C($IlBs z2!@j`Rnr;s^8#}|fjOUyIiI!*PsAwtq~LomTZWYGb}oXfUB@utW7TM%9dwHK; zC!Av z3d)UK<0PVRswnbsf&|5fm&y`R%(5h2(wQY7HyIR!V|Ap>bh%!;Y%{4YmJ@+tUF+HL8mu7SdFzOO1YsNtZ{Kx zRBbe58FqM;x|t*lmafk3l(3Kk55#;qez*n!Q`f4zy2&fR^Kq%SUal(#TZ!}mYbp~k z@8SEFNdv0Das$viN8 z&5?^faMI?wesLQ-VY8i4HRgMA;G8v&FI#qjonssY4(SF#`ZNUqfq`;&TNhe1#M|y`OjF_;I>D+t}!-?Mk zB03!zEsgETBnG;33WhHRZ&u7Z=^Q-@3|un$RMp6p%sEdo^33WC(CnTTy3}3F1nt!{ zZ#!iZCxLvkL)p1!O?E3z1qK0GPQf%y*c~o9H!{%;ChWt4rC&8y~njJo6L-OxU^^X=P4k$ zGP173VYMTk&DP+ONbtCkMjuSLru1NC8?5Tj!sZ4*v3G z!gv%{+Yo(TDNd525Z%ZLR=ik-e9xj_$te7|@r!?OXcvaYFW+F`V1;A!DAzmmK@jn2XI*8QFR>UT-de6kHNNffl+=%|iySl>glvh{@d9TD#Y)a1$kx#Hm_PnCr18)1T&mz2aqF1F)QXb=>hCQ-tds}d4X zj3i@)yEwA|Lu17=cx*Lv^bAuehTqgJqbW{54#aSp`COi;|1xmKM}VQse4Y^BIeVN- zU0@Hhhn)`0%5;PFklTF1e%~!h@HVyrEIr)~+QVO^|He&T{c6($^V}8X){NU5^PwWJ zhaoke9$p;zL2nP&1ojH5my^a|c-qZrpaPxC@&+nhN2UP`J6-kM?S{^6Djtf)B@%t_ zvene&O%1$M!?ROw7+D}7p!)?la^S06@7GU_{eQ;;4xUw58(g+J)kVoa$8S{vsO@O* zYi)-}tV$9h2mJ|BPo(Ki>Q%T~LiAf@%vCVk3+Xq8q+hM36hS)+i&A;)I zk@tH&`T*?V^+CDAIr(g84}Vsc{mw=?^?l?D?IGqj$#;eJ@K@=-aoeG|!aC6M$g9uU z@xyDaCxAVi<-hu|a?xwOJsi>lR6E?(9bmY`|8b=eo!kNUOQl)mu5lO)SG5_IHR2}6 z)=-y81ZZQ~kF^#k*Jk;p0UaN~X3IB3-3)&Y`%k@h^=SfuBC6H@=bDFC1SRpsN{JvL zNt`H=D--yM3DP8`Bta<<3gZQ6iI8d6+Dm?GW6eX*So83VyP@_O%%y?HyB+=37e%O| zO1;SQbc7xBZ>2}a)PiohJLR_zie zd|=U9k3d)>$?f%;pikgZqZPF)KX8Q@zj)ZVg9|l_8O(pASNjBZ``>UoZHc)g3gldV z3zl5+`s^bJ)`FY2#?;xs}Vzo-f4b_V8Efzg-(^|DgZv=OtgJ z;=!;jC(4xf&p(>}*U*j3?fC;fxenb3k~`GM`!-}FxJm8ZitwPFDr1QOe{f@{=;M%$ zOf}UEaTjzxwNds^glEoHIakU7oiD7i;f|FYX3xg!i(C-hTp%fUWe7gU1!aeKusgS0 
zXTrDBo9POCy^Xyp;Gr-8KU6txV6SFch;OwXP&Hx0xdxS^;n2A!Ldx~qg;WmsHI;+h zpd?A8V!2c#=PPBrc!^4qkRX*vlwuKIm?TsgKlr>elQFayJNTnF^wRB*_VGO?SDmoV zuy6e6M?tjyGr4xy+c6HXRKt_V`Ewx%-&?1hU*;Fkz(qai?U>hK>wtsRW4o7v_WUay zrY7zKUa;<=uhD*)9tOJ2(W1ukC?tmo8sBH@bH)wY5Uoy{%G>kTUn%`N_t1HzR#jfD z+Ha~TgUl5sQIWGSu*Zb;H4WNuYYLavXj9vi12k`XEL9- z_hNu)No*#bkWlpT+w&>7aEYz4BbCX1T<@F!)64xy=7-g=O`YgA((VUg_d9L@ZjVyc z?bHfWy%VAEok5PBJ3F9+aW(=0!+{l!3fVU@kz62`#3!LJvf>1hOp=%&k;{cVxl)=a zFxIzc?prW4);q&Qn8C!c{uzY+=y=;IX74!o1{~gUaH3)v3P|LV+u+<~5_m0wBB zf}hy!a!O7Roykv6XJUhuG=LnZ~G^p9>2GWYv5NfG}5pBlT;2oBtBj@ zZ~HUQhCVhVx7ljO6jY?EK4sl7^XIdvvW1Be~{_QtU2H*?o0- zc-j5afu!TfE2C$eU?^6yi&(Id>GV$VY(bsvpy;)`@UP(2VCaquty|VT#*}3Xd=}Q( z&Y1Cx84t{I7_%H;mSZ~rfU2A1`-*K?ed`QP#@ySvPNRbDn)?K|8nG8nY~L5O{!-b{ zy>lY{9XyV*qFh>|#Gm6&3H&Jm2aQDC*woKwino=(ShL8Hh4fVb86~wO``|{yg zXNX~n^B10XZyI7g48uE)eaghLEqs{&qIYYVB=UnrF2>_GjnAsD&=6tr}s(SPcl~Hp-1`A4=>?3|8~#oq65{Kh3!(lF1q;v zM6Kt%owMjQ@Ncyx)@f1!cro}z#sR%2oW>*b)*?JRBD(qEsRhhtmn{qaVm#rY=L^3w z3txkm6Xj<|&vAkmB+{BAb6sJ2(pq7aw{B41Wmtabssd14>G&o3<`Xbvv8;43!sowy z$Ey9%3-06QuX4Hi1nA?wz$pl?p4g!7vg=YtR(0FXuBTn$(n`yo*Fg9UsZm27Ao*GB zPjzIcJ^h^eh!E=is53T|Dv6s(=+l(VM4Vp7Bn zHf(S-kK5`2EqZNiM1wHRKWGXKf` zPRyz$4|*na=*V2lcu;fdMiJ9CWyIZ1pc$A}IPtHkQCu)J|BC)grEun3D79oxVL4Fv z_#w&iY5-O>nA|Zoy)v_J;rg_F-MQex%eZ8hca^}QfYrst^=sN1lX9s9^;y2_o*S%5 zH=v5zu>Qt6FWBK}Gv2X@ZQ+=%URzZWBhXw#_&;~g@s}?X@RAJv%XnzoJYFOaN|IzK zY-^HIDTq%LCB>V%=kFE=8^3%6En3rHu;RIhC)aA#XKu1#{xRO^y$D_(wIpmef_ukS zss8Tl2jHj98T|o4_nC);Ul8oLuKBP}XZ_&XBKGZ1XI){mbBm#$5gh$s7x@K2-zdpo z1Af(#rXRi{Xr3vuWPhksdE#Go>cF3^Nb*&nfUYGHHQEq;B}n z_8Ojx5KAQ}LN#9|lS}0yRlHIrR`HbaXyO!2fW(W0CVTD8iCe~+hoG_MVfzg07z}^b zw4B=QGZlglVBFJ=hik?=!>ogTdv*ku4CGwjyQ}$-r7zJJTlh?xP%E{3N3 z{Cwosl+Qn*CvkZ>lza*G9aJOh?A~vom=Y=Gb9-n;m#nZ&TyvtDBYzlkz0AIQisVJu zE=w`vff2TfpPVe%NBWfJM892%C5gfgO4{ zd>O$8aM*U|6>cvc8=6zn(c6JKL{329|NMalm?QpKgW+~Wo|GU?jOQuk$m=E_8Li0e zHV;iT35CcHr${1_8QYI9bw~`2?TDe>Ff1=N{KEdBu!lY9QEHB}H@tJG`;@U`NVvoa z&Yo7QCiM3cIBk4X2+pLb+C5*E2i6}gr|4Oq3l%*E`?-#BhUb^e;&J>iG$U;73hU&0h9>51|KesDS|3RvOv83azP->StC7R2~i?&Ph{8f;`R zKjo3yImrJv^ej;F-Bz!+M$~+V*G~#NGa(1s{#|OzG{#%GXj^5MJ&YL- z%y?kNTfSSfyc8_gW@bYq`svccy(4(NB4;&*?XzmGow;y29MpVUpS2nnolM=-)N2~4 zu4!2C-g|6s2b44pF(Y6&uYn;!1BfvjNY&O)a=(4k-fXSw1M zu$R+;RWW*XEC;q;w{!1$l>=N*+@e86l?Tl0H9$JE)(230HLiE5H0K;@X}RDgi#RZ8 zuePfJzi{`yWphV81o*x?7PL7j=>3hkn_A)Ik{QK7aeP(4s!CkgxKZY<9_?ImhRt-% zSv}szoX*x;itFQh0#jSfF;bQBPY2lw36Tde+y7PMfuHiKN%dd1}gg57>5cH0FG{_*|^t=Jk z_(sv+()U!om$w{qDm*&08kJ{`DgV9@<1rtGUhDgodUP`IPwx&F$RGIcmOx2M8dF93 zaMY~vE*n_W8kboBIxwq-GjCTjzHL|%wL=y#P04N7AIzK*P;Pp=8r&Y7vL+G9!M!e1RQepn?w$<) z29h(dTbUJVbl>9Ry%yDZw;Ig7u`E3b$tlSC*6lFDd!69smU*|D8CZO z{!RA>6SjRkq=*YhY*H|Cg=LZPxlV8;xN>PGDh?WrZ1uFC}LA~vGv|Ep_;>n~! zRf1e@)EOT~HNeoQI}Apm{m-x-22Yj$44z9r{#RftPuSGE_u{$G8!kNQp7`yv3rzcb zch=BuzVL&z#nVbV+~9TLu<@Hm>H)@g8rbgEw?=OP=5I3nNri;ULEp)7-_DNS{;?`l zk6BNxxfBE!>%vzrRQbTCn&tTg>0dy>G9SB4y!!$Szzyr&^`&RKIQow{%|2f}Q8fSDv{UZ19eHn}hhfY1H*4 zK^vKKt)!2_vllbE2kpG?M`bgQ>%D5wtjr??m{j%A+(FkA+G~YlPF}vCs0goJZZe6@ ze4OyPuG3#mj47@|S3Lp6RJp9OlZl|~Fn zImonipZzRp)*3K!^oZsk8nD1a_MY0Wi@z$^VY3G=+i-{(a;osi;rltvvyIQ!woKas z`hB{!elX%Ut-+T%o4rp)P`yZ^T#h38pqVM|uhH*15 zaA-@r^AX)A!hW6(sTrfH!;yVnchfnvH8ktajw%Na7dH>*>Etbx{B!*8mH-+QkNW2! 
z1>CWdi3O-%hejnON@+q88X*_OixU&&swA{}8jVmIJ9ccU(72P?04*Im!%k+rc4*J6 z+q`ah&Txri1IH%C-@vl84dXSRIB__Gc|Zf5F9fkr{VBt zgT*`hIZFG$`KxaOUgB+=j6AvxsQNKD*)Agg0;7y=$E& zP`9Y)(8AT#FQ_*fQ>ahT|n`La``5iJ!z%$;63h!J0%YmLv(1glKvO zxp0t*js7~>t}l%?p#fS(vJBPkXGhkYMkBtBEC6NPOK+I?DIZ+jLAQ+5JHoC#p6(pA z#|z@?=AGLWZNS^|dq9lGd>H!7PegM5MIQ_$IxDDV)r8ZmN2bk#$#`j^-n7syMQ_HN zeIO(%%y?US9t8v9{dh?hp-Ai2w&%L*vtiVYr_C0xj)UEHtUg$wS~;`}HS(X^3;g0# zf&@OFpMW-2@RForp-QSmGwD1PTIimjQpw|u`=8k^jRzZyc>}Z<^M++!rOiKFu=cRo z@-ZvgpYTKRg1Cp?;-Bk^9= znFXh;ySvDAm%m{$2j_!rPTQ~vkI1`cq-y%+lWQRkViyvtwC9be2W z+WWclR!0^*to&9~dWAcL?FNOM>v+@5?+x*NQC5F=6rGFS`!3kH+}lXU`5*LgK*hOWOF_Ve4uZv^{6G zyUk-Xs}&h9wk`aSyHHHGA&jye3K*5XqB>w$l_1CP30o5$SWB5Zs6&3eE&Bn{fIkfTk3+c`Di3`kQOIsBfZ!`NK7W02> zD7b-5wPw`2O9#WA4~}-Jv^WEf`#Ao~u8K?0m*4DHzkI1glq44?spLtj`1p8auqxx_ z3Bn{1^1mYxiBzW9hi^`|jUz}18ozwQTE`##@N9#)#<~XA2eKdwo_rU0ARqL*vEby) z2mbKy^O6gFzU2Zp-Ze?)RA-o8!DDdx&Zpq?{KW@ueEbG5e^U7H`gKoV1W%`ynYaAx zM+^UmJrUBw>jI%_`^XOk^WIqaJ@;JlT>qK_tJkZrHP@GfSbmQPzZ->I0$#YiWl(gV zdlvqdrv`KzxQ~F#7Y=A$E8W$?A2@8qk(YBg&^>Qt!C!StHgb5*&Uup5Fpt6CfGM9P z^mn~{s_>sIv(O(T$6obq_{ZH2F0X$LmVEgwCcawzPN;Yp{cgdSlQLjdy85!)hc1fR z5(nCLD{dk0C$ogI^gb~@NYFBlnH+LCs5m_f;2hR<3~|2cvlH}w>YiImn}zum=kK2$ zRj$-drt_P{&rV!EYI)Purqms`VMiY0TK@5totKYdIVoov{CzKbM;>UgQeE;Q}R&2cqJ3BLFWGsw1Aij35 ze+xM3gO(dR%-_`iKjpyExOuq5IW=)Sg&*t>{Ow8rjWo3R=bi%|X%I_fNhortJV78# zlAtMLp+J z`dhHO@Mi7DtMWi^#hKFI+Bm>ZtL`o@Y~leUcUD^4yBIB{xwXC8*HQjZe{iA^k7cb4 zsT5@351wBCaETli#CXhyAwOmqlK)@QFD+4Pf;ZQiYHF-9=Q0IDtln_&2GeBbP;)Sc zsW_;E!i>k0qjrNCLf((3CP_tRaks7fLsMW=k2cdM^|%fDYjqS2whlMaCEL_-OcADi*|x(dd&hLBx}yflsusMPlkr(lUjyQXpuo6lKt}83sM$ zb}Lsc&iw@Jq509j117$bl`HlPze@ktnw655i6WkE4B|MMbC_bEQ@e9aPqFC@@FlYE z=L8Hk0RiEzF1~1gE$o-5)xOd_N9v>Huc;4Y=?i3Ho{*1xQ1K)JnMi=#y~_FMO$mfj zfy6XW?KyIX@qHm^d0&Qi7K(m`_x7{nWLjq0UyUhf4?~YJ^N$vApgk-fe0e~(#-D&a z#PX-dufEU~$+L&QO8?j1GtR*OHY}+>Awyjc_0^OMdR0RiIQq5%L#6AyYF8%QR$OyF zJU%DX1z>pg{^gh5Yq@|Xm%^{r-g{SX4|_%J3LbREnX!j9^-V72qj)SooP}}%T=AqX z`o%slI-PsUbLm1jqC!l}-q@WJ2dkq^{mXi*@zulXYwg>GQ|Q#nLczqFBn z_V94CrvoSS1zumWG|Db=)|94BEr0t5XnDubWfVkTI zM+|QhIstTv^EuxZ!9K@BM`%jU88e<45tmT9G{W2QGXhZ>>4D8)v zP`?2m-oY7@E*%a{zG3M9uXjv#K(X2{(>e4SdImj)?uBBrL%KTc<5ldHhoZ6Ty;gb6 z@JjLO>DA5)cvbiE^(^+x^StP(_gw9n={ef7hi4nldY)B0xgK9Vo_L)1*ypjrW17bp zk6s?_Jb*`a4pvdtn-KV;ba8E+f;%mEyxO=fau^zI{uy(PQvL>^Ju~e*< ztlF%~ESf5!9#Lng-PAH_GBun^qFPdQs45iO?X%k>w=-_L+!nh{avSEB;MUTuwp&Fv zPuCBw_gs&=ZgXAWn(mtH+Q~KCHPki0m2k;-x#F_lWsOUw%V?KgF6~_!y3}&;Jns3%Xy4*59c<{b)Bm^bDfHvo;h7|%5hrbl;xD_)Yqw_Q)4HdQ;?G@`Hs9p z9wWDq^T_dJGTE7ILJG)W(v5gW+#-$;n~1r@1Y#i3g=j`di1LKHW1-_6$77CL9OpSs zK!s~_a5ZC`e9Ibx*ZoXcPbwCX zJvo{l)<{(NBC-cp)7|>^$S1W#LU!k9x>>Or3~4FZjjKtrVnuI{FXxjI4s1<( zE0Iz$s`Kr+ns!zh7fX;2AUWBNt!Zn;$(M`dWLu7=jg?4L<~%vshOKFB&B&9>$<{ua zR!+X<@x!2di+Lh3*@~-aX>}(FdPu2+Y{}I`TVbU#kwnNRqkS|{b|M7m6-9A1Ev%R& zsFDcyWDBk)(i)3)&Z8F}$<;Ks!iv#eTE2vA&e1fpdI;2>2qk1Qt|r2Y6?rM*p>mC2 zYnoa;niSPivME>7#F|sg6QGi5!qqgk!lEY=B5Y%CO(Uy!YG}9`aWvspcSmmuWk1|o z)6j}fER>TCxtazRfrgJ6MUNDd4SY29?FxmD>T-Rq23nZ=)k1E(q%tuHxf)=FH9V6{ zL;{XRVU+m!h5ILb57XQ^g94+*hE% z39<@TQ`rhDL8XYAx5`{iC2Ooe#7FPF5?2#q!5SV*gmwm#A#6=WtBi@!I2Bouqp4u^ z1Ofqa@I_YOYRX$KH)IQyfS4@L)|9ilT!h9;$Z{M_S*y#@Dk0RQl;vv5SYM7j%u9J> z8IC5{iq+s716dy&O^_9mk9Hgi$RMt!wAJ;fJwnTZ$kH55pcSj3;gpbp98G{V61C$3 zGQdaUZ&TO;WQfQ~f40WYHvc@*kEs~L)5jyZ91X`NQ|RL$IUEh!3W=Oti6kVOtMRtF9<}FkzMS;tXlN@|RHzae zNpmz_R!BZF4Me0DN8@RQ6!6f@2kFVxcvuUS@eHk*2Up{6g_R(e$;jkz=W1A1SR|B( z9+t(`P&QaG54GnMN8@I7Gt_XQhQN)haka1-Y8qO+%OhQVG%hx8fQKSz^GO%3#@WJb zuo}@0c^T=<(KuPliI;3OGFvU#LP1Xi#0#z(S*<0v5~GihPvmpe$ZRda8d`TT@tmW! 
zv0G8Ck`sAcHL_bRtOlE($0wd~)W~qPO?%^AxDkORtqa%fLcuA0b6ZtwW5$>LgGG0jjYxZ zMpV^B#67MWnXQ)Wi^a&-qKvr9RU^CAf|bjW0mmoqaMZ|hwH6?f5VtvMWVl+8sJTVR zTU<4=TrJldOhvJTxXD!`)3pSP8eyq`xWQE;>(zoaR3SN!xXw{q+plO@oJdGqW2=$< zYI%VIzDz(|<*GZ_T#g+0^NA~5HL_nVLQ%g5wJyYEjv868mb@b8?GhewiK|8itcBG$ zbtfb)a@EL$wZ)2w3v9Ku4U6=ZfH-fQ|0ma?_dc&$m*wrqrC|TI1ynn1kjMf2~_}>8&zdOWu`&+aeQ9jxr z=Uc;Lo6RfC3SVZQ1H1DS^SK4u4zQwD`-cOp1dMR1+$_~~AM7rYxa?S68BL~k z_{GW8#CSd$I7pJK&{iLDJko(;nFM(kiC3Y85fY_oGBu#_Q{($W(D=UaS{%b zkp7JRDsuE;hG0zjHa)xN0nCSC*U4+jH#maAdD!uvuk-HyJBBvnSrFaohllC9pVMb2 zMiX-m{)R5CN@|{XZaFxue!Sh|r?{MK?svHCHax|YVy36$EPC4gZMkg>KF8M;ek;rw zbzvIDW3)s5N%0op)oIq7N&D3@S)D&@>zu&0a&NmoU6cb`^{H_4{^r(jboSa=j}%)p zVs$%H&HI_tT)cBh9?w(p6NLuLT$PxpLWdl62gE3nnISNY#OQ>LUCO(MvF0IYta)K5 z$M|QcdD!hhQQN0eH1(f?^sD}B|32dZ@wq*mV5a|!?`~S;$;3A9&>l9|_E+Y4a-lu^ zTH|in#B>LU;WzX7zNuR#&vqyUzm=M?r+1U1j6M8S`ft}(T58n)a-!r*;6M5DV8#=- zY4MxGS8c1p?3|srdXMKIpr4jqbLr+P48~(PP!yAT8PPEx#$*2I-k$pD2L~}2cDr?P z_eB5sYQluw;XXV4E5g)9F4k|+|Qg;!{}0gcVXtI*yHnN*M@ zOAsQrhQ<%Zr}YaAElsweqlcGJs#?dS`l9%M_HgP#!L7b1{+~Vk+3Q@E-(qR+8E()X zVt$hxH)s!kmHyj(03{W*&YP_3k<+T1R#15}_K@Gm=jCg`e7!wf8U3Dl^Xk3=L-oOW zou}vBSA>D?A$LA{01Tzd_WLA79<~+F)cgNo&2s`lUs8%%JG6o=oOX0#`?rSuCuR(- zR}2i*f*>ddr!@5U=lH){0;mb<_-joNTHq&=$kA?36iX0|E%TIU#b=^ijAFSVCqnTF zMyK8EN>}{-Kn$?!5gE7P~@wxN!UDNw*dwH*a?E?|1FPtLSHZ8oVGFQ^Y!y~5=_M9oa^;>m%zfo zGm5Xfy8sOH+pI`RW4V~!W)&-*_;qt4Y8NXmpPP8TwY1oFg}7+|^e0Ua*RS2yj>|(~ z#PLz97p-xG1Kq0)@3yRZ)-ZK!b)%PpvAri6R)3~NrbRj!(#GYV2pA4*IVKt6RwhUi zR4BTRP!=!cC90J1e5FVwlPmcs!jrMq1y%OP&{+GhrTDAxM@vNiC%C@cwZm3CaD)Ld zX_e}I@`Lxh2UN(8cnc~Ar%Fa#3WR6Bhz`Em>H zSEaHc=D)Z#Q$UR^4_}G2$-EVT@XnmEgSX!(2OHgPHNnaf+gJ{E135LXjicZZa)Z^-Cap5Ln<$I6X&j99Mb!8i|GNmH)=znm6d))vF|Jf7iy`g+7=-Mx!`Ggl7 zOMU1(pwgs!-O9l($+02oRrf6QCxmIf<_J{;rd(S&lV22K;eYvl2)|Ve7VOzT)oAt| zu0_7vjrcyg8iCMp!LUh15&mZW^3eEY{ooHipOhB3s^U-S+xxwIcFd>pzc*_4gFO8~ z{V9PzCGe*N{?!sF>A-afPMbKpc^1Cgr`rLMO{!&q1A!_1Ha997Ch$9o%wYr!ZYuN)i3>g<$mT%V%nsra0a}6wy73*I@N+7x2VTM zgl9fpDXOD5oeJ2`k2oWKkdroemw|t>Cr@1adpxt_8GFv)5;>TkG0O+$`vS9kV3yCA zFSf1-{HpMmH^>_8E?6-8P7E9)8(;ReG=ob9MJvF!?Or)yK~pL(BM1c z+pzvW2%JmmQrvfj51iB)={}$i;R25ByUCzgG!Nh*5Vo z?H_ELR@bn95bZ&CKuKc<5&^@3T_32D(83ux3fLe@l=09>l1e5~@`Wlsnu*|}SqNjt zv0AN_7}^Y97=|&dJA!yb)c}X=`5;-gxd1c;vE;#gN2tEigmvxA7jPuCSGl=K-iey0Qy4ty!om@mP=L zoA6Z5d*2b+vE@povL#8^`p)7VW!S(_xZK! zoi}@HAE$_Td~jGS!ap#$v#EC;n8W-k)-Kw!e+hjF+zGxi>GQ5jX77hx6%J|I&8%B5 z^tpRE{oN)%(wjZp>Q6D6LBZ*t^*w5T`Vw*HsfB;w;47oAcsPR{af(=e=Tm0yid_}= zp1$T^lr09g@{{#hajz_17MHR5E?r(1#pxK7x_a{D1!jLZiHgFcX$JG6qfUsP1GQo)V28 zi5i#G&Z(Up#-g;bbwR*z;MRp7FG)gc(WP>+T!mKMh|$IYUVI`?k|mNG(;W6X(jY?Ho`NC0~vxK>=ivh9j)0OuA0Wdl2 zc=_^`T`YLUL!2ih%ED9L^wQ8h{&03G=y!f38`f3@oHgL@Pv@246?s_jBjc<0cN<*c zJ3N+ee_zbsE@2m$(x?oa_V834K=7d))%=EwedlcB&h^ zI$+y@Gi@DV-SN{JyqEhzd;FwF4UfqZL!e-B$dto^U<b^Wbm@H{6nbsaOOk8bcyKM7-mfPU)x6ZNo9hLE_qfN;hWtp z*Q)q#A~%dJhNN~#&5dXJ)Q}j(&c8{LgWJb+pszc{yP20wY}rLdc=da zmv$e#xirS2)jh8G9ckIE6*Ia;ag|+*V?d9KXKpR8+y#U`oO)t^LMx`Qc`ESDjsdMg zR%PkScVW_|?HtwU?^Yn=y7Y3y@)%~eD7g0!UKil>rr(?8?6FLX*rG|#Dhy@1^oi(C zOwC|EZ7b7ellNF=!kmCIL&^_j#DgkpnoQ0B#L-bbV!fw=4!b5UjYoJ>%$qEQDQ3+? zJuCn6mAKN=K&$sxCrv3cO)CA6hrQAT#3X%%~f^;J~bXmzGl5$n)pg>oq!7Fqj>$9bY@p zFX{XAS^5yYnO;t3(i7;xbWb{lj-+eTp>z=K?e)#;t=C=T3*dm)cCV#gv%EB3!@asA z|3uBa6kavG%6WNt5uUF-A9|km%=O&rxzaPsbE4-^&)%LLJ)=D9dh$HWcyc@)JPJJS zdtCN7%{8-X2cwZ`~ieUvxj}zTJJb`z&{z`*8O@ z?wygp0EN52y_~y`JHdL7dNvHNT1#b9lcs8m2uDe~=yUul;;yN1n73k{P*0q7F#5Kg#-_^yX$mOZaO_$>? 
zdY82>3tc9;q`LHTNpNZH(%41hQq`rD3(NVVbH4L+=hM!6oi{qqcb?{);yln<>D=Bq z+*#&a**VbJ&FPC%p3`lolgQ`5dZ)!sQ=G;*B|9ZKwRLLhBypo8(zi zPi`g`l9^;GIhaf!W5~v2ZL%sEM6!r)L_TqsI87WNHWEvTX@mxOBIr)EN4^MTL`@=) z@N)cuUa2as*3j7pf=hRz7FrM9LrDq?DD zI-`OuwH1|79#dP<8RcxLEvbyMnA(!gC}T^FrZR#tHJZ){vZY2*8Kp5bip~hMrM93l z0x-1&o#AgwjifUCFg23SC}m4+PG$IFYI8cn$ClcR%HU#ZGdhD~OO2p1*q9nYXL#FE zn^GAxrZ%NBylkmWs0>d`Z9->w*isu)8Sa?cn9g9?QX5ej6s9(!Gu&*c;Z%kzriRlQ zF1FN$RE9IAHl#D0Y^e>X3=&ft&>4g+wLX>Mh^h7I3!uDMeAa0sWmBG2TZL=>)P8= zYf!p&m|BC@wY8;Ir*v&FwK}b9ZA-02=~`iGHCorwmRgn4MPq7JS{G$YtwQNqU}_aw z7imkaOzE0qYGqp2%$8b-(nVluC0f_imKs9onqX=Ot!r#ctw`w_VQNKM7j8?fKPP8nVX7akt7%IuMd@l_YAIS*-InT0>8fF>FRiO;OZB027X9l(>+JL| zm(p4EFPGNY>0b_|v*=$At+Ug=Y)WU*zie7(r+>XEokjn8(>go-OH(?F{-tT1o&NQr zbQb;VMeFSJuP3Fm=wDA-XQzKXD4j+BdeAyM{p(KYEc(}-*4gP_7NxW3Uly&i)4vp@ zv*=%n*4gP_H%e#Gzizb7PXD@6I*a~wrFC}t*M-tq^sfu8v(vxMl+L1mooSt&{&k{s z7X9l)>+JL|N$D*5m!x%e`j?<|7X3@mIy?RANa-y4*OAuQ>0bv*Ytg?BwANPt62+7j z>tCXn*4pY{;v1#K`j_}dYi;!}@s-kI{Y!kMwYK_~_(Ey1{w2Q9T3h`~e5SNm{}P{R zt*!ngiYP7CzeEwOwbj4GCrXR;FY$@i+Uj58Bc;Xqm-t9)ZS^nlfzo3AOMIZUw)&TN zPie9KCEn9oTm4HEQd+Eki9%XytAB|CN{jU`Q9x^L^)K;`(qjEfyrZ?Y`j>c1X|et# z-qKoI{Y$)|v{?TVZ)mNp{v}>hTC9JG*Rc0Yi;!}@r2T1{YyNdwYK_~cuZ-r{v{sMT3h`~JfgH%{}PXA zt*!ng9#UGYe~E{*)>i)#4=63xzr+JtYpZ{W`;->zU*bNkwbj4GJxYu9FL967+Uj58 zE~Ul#m$*x7ZS^m4htgvGOWdKgw)&U2O=+?IC2rGNTm4JiqO@5561Qlrt^Or$8v6f) zGs}UcMt^^fKPB*|1pbu3|H%@VI7Hp$Klz}>n-Jn9@j?`84@K!zqFpcvLgb4&K0zf@ zCdJESiIPOqEN{(YuEtqj1dX%2hBye;KkIcQhBS_bhGF94**(h?P6zgINc!Y6?WefH zzbY2JXui}Bvfs!fYTR|TKzq79AEv)s66R4R(-Ydm{D?8S*2jI|jLWk<5^g%ez1cO2 z2lRacFtn%J^I`fKNoDs=ZycdLyc|&EG^_Lzu)ER4X_bgQ9D|;_FJ!LtQv^Q?&&qpt-7QA*9ZE)K-fDnT|E?G zmq*(j#&s_y3zwgoaU0=xGru@bq%T_D<<3!r>#zG~R&^^TI}Vs>nveEhSpI6ybc7!B zgGTbuetEUE(Q;J&<@RUv;7L#Kzx@$P0`|Arqq3@B#-2SQ1{~;gxIy9B+h<*Y3>Ns7$G};_q?fCv$_{`S;+@A7qRjMHjFU)__!{Ihy z>=T}ehVqZgU@mF0cHAzx*8cjn>PTl}9oXK)^;!NF0y6d;S8TW%YF7)kJX1Y&U7@G7 z7c>g{aBF`x*j7;G7=s>Q$OIAmlX!WN{ldNu9m_RF$VDhjkeXXDN`zqc(9dZ(^Jmj}rk;B&N z2egrXT#_S%%X(c7S>*RY)1IG~F4iZCb@y4ZW^evX&G9cBjuGw+54x-$xBvF#pVS-N zCDA;s>@E|LRUIes}8=ruDLpD`huWb}F8=RS0}noB-;QXs5M%+_ACYUdccGyZBW zXda{I6Z;qa?EUlHLayYJ*}ctozDUseNpT!EG2_9?0`k6oN^;knT*gnzl^*Tdn6E4V zciJpV+_LDWeoePtr=r#rfUnCoxBIXtmuXK*N3X_VvsV|8yPu9US1kUiKc8IW!Rj`L z?8I&Sea*!6TK^e-EAm4_=8(1bj(+={V&u@=P&#N^=<$`bKTqP!x+nP$6iWaL1k{AF1hDBQuBWuZYm4?HEazL zXox^V1pW_4U~y7<#DBj1pSix;JI>W75@n4-_ijYR#JR=A#`wB<#Kgs+xj4#G9OF)Z z?Q1b;5rg#CKPG9^pO8vO@z>WsO%w1Zy9ZZ&t2=M21LZn>`AP)=KQ3$&FjZ*|n-y;Q zJmIhjEWP{Q{aTS8{E_~27IBM;?Otd1~dp`j;0C((z z;|~C1-!Z4oE5hR~82^3E&94%Dt$n+s{HOk;8JupK9d=}ywbsA6QRapx@AaXf_wDal zR<(d>&)Y@T7Nbge(0k_KFI!w2Ywi6`1st(I&W59Tmn&Yk{@I@D+S=Zdp@wi(zm$$f zvwyZHHpio_ZC6t$H7+{TKK(EKZMym4CqXrMTsMF0GzVi?5wga{{k}C+J-HGV>vWS3Bw++4Nyr@PyUhbFs6<17s1U`803L4>Ot+ki<8?e&bSiriw zGamLC{c{{==?xC8O0EI@E{)e|WNV`x7uNx^?mN6Rfy>$-c3(b6PwS_-=lWves3APL zZgi9BuN!IY&xO7gIZ4=Xl3nx)dCRYw_SnEbo#_iBtan+Rw)*4#T9Kvs;66~B-D+e*`gl8a=7eGt)P% zechgZMQv_xtFI?Ddd5%6*d+P%xZ#FyWpHCYr!5y&(i3D zM$hEw3N{Ujs7?Lf`nSZq|w@Guhzc!dBt|5C(Re6#oI{F zlkq(yaRqZvlOX}&4&ig8vpg}NsWJPy&C^OD8mCr^I>1`owrgp$BB3r+=Y)%F$y{ z;{5O=1k8RxPeKsVlMpIrGLyAC5Xn+=)uBaD2)ngg@*yALj8&$smP0c*X4a?n3sL!{ z!>)j(2s;TKbygvqu*lC(1#5vMGchI`;dGFeycD5-+8e9*xB5`@b%x=1gudG^yq$z_ zC2{jq?~O0ijbf(EDMas|SGeXz0m5^|YpnAS;vA^g)Q+BO9xp@X8tc-rRK0ilZ~B^p zFg|LsVkKFr_}BeB_LIaxX-w_qLwmE-T`sL0`a^wW{??VNJ?qqc9_fxvaJr#RnjLYI zcYsgo9Jr)6z2v^S`-9~p2f02}>pu}M`p`uHEWbX!;+7)@Jub~L@=p?g2%UEk6+OOd zGT^;YpWC(SiA-Mi>rFWL!6tuu5^{mj9M;mc+{LhG!t~FrXDomTJr3>O`|^C6Q+o7& zk=P&o>P$Z+JvPcS#??30Co01x|7pS6f~{q2VB|NNA6?1zAV`(~tM_Wy4?e0~nI!>r<^ 
zPyF-hwIt;~m48_}rJp?W+o<8kq_@osUmc0gxzSVKFRt$8G;@4<)PKzr%%iH&Go^fD zVxuGdV%(yy@#94HJ4^Qp{=p1-#-|&MYz1g;@U{VEuEAo&|%)o}3(fQw$eRv2OXKgaaS(oH~hY zOrgpA@wA?CjQy}+So|I2pYbDtjGnP$kg4~7+dwlF{jbKK2aq54<@NNZN}MvlU%^yV zubfg}k5eIJDez5zvlQv;mKKGjEY+ko$Wm+cYJ78G-mg4CJ=N&9Ju3{(doW(b2BnQJyHV33_m# zM|8Az6ndOUn;~^JvIdE~xXx`0TU%{9r>%=9Wbs^S& z09gM*qh~>k-+yF`yDNRj z`MQ5*PlwM#S_bH;o&pH=jG3ZaE;%t+j|DH;qfu=K5yw{4DXZ`JhVG%3;?6myHkZTrmzVP>J>zE=KY4wpIp`9Fol&k-dk(d$Y2(+)5caXGXg8p- zDeUhN82cl0KA9BNwQ@%nF}yHy@`avc9kBUuf866U#*oq5ckH0+HaeU1@h_ErNA_1P z8t;0(550{XBa*vKrRD0DA)bo&t*wxtw&=r^H9-s+=Zg7Dzn zk-DeI>x!Ny`Xl?1x6*=(bY_DwUWzMbN0Y$x+^fRdpHGt)8Loyu=x3hf6MJsa4~RjNGoYEjDHb4VaTENaOO>u!sK>M z(YH+<1$#D2{z?8q{y=_Jep-G=zFWRUzFM9uSIK9}C(B34qfsV+?(+6>AGx#KPHrrh z$l0upFkToX z>?`ahY$x;CrbA<8vyMsQfLN3cb(O0YzbA($bUC>SY-6od)_1yJB6a1z)Gj07Tn z9seW$8UHT-68{9sJg}X=fuGOM=Fj0z1d`rF_pU?Zsd&_%_ zvJjl(9pRPnw(?f*Q+{Oa*o9kL+jcyX;Hs<7_p1J9`6r18!!{*25k#gPB^q-n(UuZ*xtu{nrVS-pu_@7-674yhfr3mcu8xpx%Na;yT59$q z(Sj0fIh=4J)0`5`Y0->+7nd`D$TX$3Y)UktMS|0x$TX%zR4)>ZDA9(^2_rHMHCiMZ zP@*lL(~rp1r$lok3NrPmCKywaE+si}`VyIP+LBF)GL0BYWJ)O!^#+L&TFd2x5Se01 zG-gwxh!V{RP9GvuNNc&2NKj%ho70=f6i}i)E%GVRmdgnyGI^9}NsC-cY{BOAA~HFY zi28s;HYK7ekf=k6x>8OMk;$S&4=xf_$&_d%ro`!#XvyaE6sV?A{W0bABvex=r9GF^ zgHTPOL{m1Whd`BtdybToDW!{u)16RFqC^xVBu=E?rOW9?s1hmBNkWMeDAAlviQ_3z zz~uxIssxPz630=Z1wm=YQX*<%S3)(06740FIGPeoY3(RV)Fn7w2vt0-C8DOc7pMkO(X*zM;gk|Z zx1B&WfKpm=+7YV$v?ZGo!zfXg&w+%hA0?vkMPgq{w57#R+CY~B2vrCrnu;i~4<(`l zKtZT_(^@Vi22-Md*7l-A)Q7f&Du@!D_>|a_60NwD*n>9Ua@q(~-EqU(5UOsJ(t^!F zVjv|t>vCEXs;-o1Yf6b-=ywsER)i{m5}ml5RsvOL%7G1~>_jOA1jmn1`BS1PpAtLL z@8WWN2~`JL%jWnBRPCvEnNiAins@mSDyVrE5& zIyI2kni5gFk=Tk7dvH0PgvyT+QS6cEONr==LZXjG>mg8iQ!VJq@gP)QloBxz5|~QYjrtpgvyP!v8Ez*>W#-S;pqAAynONlh)qUJOes1W4}*#eHEK!q4r$nMT@BvdrvnsO+SCR}tV zHX&3r;i9t;i8SG&vjvGX;i8B)CRB)U5g&3X5iu?uB_z_6i(1f#P|=i&-iAb)a?#oC zK&UX~(jrZ{h+&YHrd(99J)xp07exVyH0A1|o5~3lO}U7<7(u`}v=uIYsk|tclFG!>b z7exk%G~uFv8WT#IaIJ(KV}TM8t`JeH5uv2{7DdBIphSF2eO?+8N}6u11(ZnBEjqpo z2qjIoW=Iq$5#3Vkp-(7jwneA0zCelCmTDDJ(qxO`sz)ekvPJ%pNRzFJfTK$&5!vdn zr7o06v#qt1BNr%Xwzc5M2_?<8h)iXKl4e`v3yCz_qRvVQCC#=-i$t1jQ41x6l4e`f zZzR%eYp=@@6H1zG(V2xrnr+c1IudENMYbYBNwcjrpAu=dMWT>U(rk;mi$uh>hzKMc zf>6?QD|Mtqnr_ke2LYj^=@vOdB2BkaTBPX~wTv%N(sXM|Nr-N#PjQ|=iP%=imU4K6 zk|tYJ6%uK(MV}~Kfl@=ZNJ3;wS#k*_&9!#A91fwRxz-w;uY^)Twa-$(VG~N4Y^A!C zNRusMG9=PuYj4WYA(S-PqB8-BG}%gJlt_~;I-gmDk|ta9A&*2vwmR&Vw3cRDdxBjj zP$ITP2M@cBP|{>8>5HzQ>_Ut(p0OFNi8R%AXR|*El!$7nb|VQfE&6a|esdoBdv(L{uwe%P0vkE#?0`p`?jcz-PZBlr+&w*_22VtuaA~ zG|>hN*>4FYO|+<5B+^8yE2l)7XnP3QZwMt#v{GG4q>0v;Pl+_qqEUQJC~2ZaK}I4? zw03;e;l_C2HkwFu!n~V}^qD3Aa6B#ton$aR6S{+m^`>`McF|Cjt$bKZqpovz> zene!@L~G2Zl!$1B?4In0f(*p7)QCSMGH9B$V6!U)8Hi>nC6Z{Cm9i^|44P$K*z5;_ z44P%lC<(DFHL?$g44Pzh3HE)F*k9)eXEBG*?ZTZ@ka1scmq)$yPmuNUORcM{5@JbcqG4rRu9g}kIN6r_sX}+ zi{xv$d`=zbGv^)WIp-nDVRr@15|44pIb|r5-Db`j&I-NkKBIMn=c0$A+oCI?v!Y|7a#5M6RJ2*NMzlh-Sd=N6BjrhcNNOZ+CC^aa z!&{QeXg%YoghVSEC6Y~OO=GzPt#L_aOHw40CF3L`Br%eKXl+L0 z=@98)w5}2?4U~42wv~FLwUwq)JE^(UKq?_32uH$}FeCH{FD;vSnyxNs01JCd($t#>$4v zqEXh#5Lpkj!USc0GIv>XStFU1%vdJpFW}GPr}3xqC-6t{hwumU!_aC-AipEOE#I5p z66IvH8!ea3$==u$V8pBQun=zo&ew4{*48a(BVrYrM5kq4dx_rcN6vKWDc^Hx~ zjHjXJZVcNos4ygBc!yyK4Lxv^dQ8Hl;TSs5(ET=sA`HthpoB=M)!pY}fEfHRxYE$m z9)lhR9t}aM7;poF#^4feL{KkW^2C6>1R2q#UUCdLoP6u&mC59CkvN4RtFc<@Fy#IN+7D^GQI}G0}NL%;DC($L6=5WV>p5VA5tUN(WMc%@x!s@@R_u7csvFl z40^(g^q|SuGbn|OJq-hW1jyJixYQd1eZj43-$^5kdKCaS44) zp&E2vVxW%>nGW4>nGPKjnNA7bw-p0@Ov`lWk(B8yz)Jdrl7?V*6bvIV zL|_QR&=&)J63BFLr&(`lC94v{eGK%7$XKPggd4%i!6o`Em9f%!9336mH2S|G{a;A! 
zmkz??KFESD4g8AX28K%*@DLB|j7tg(nm6FnA{@VO&{2Bd;OiJtFa%+s2SFO~o-Re6 zq9Obl4Pkh|!|-t(Mh99NwioZ~kD(KWmKdxt(8DDS!)Hgo$F#EF84QOp?5Cky8w_R` z@EZcZ)1|;#4ETT!Jb_F5Ff77AA6C-9KDgALh915cY-#9zkA|M@XozyBA@U7|+Zb?M zBJpVviCY#)9}v9jE|7u zP+S^ILvI~i!s8x{;~AWV_qD}fgFzPqeZEM0C*u;f48hzJf}0dFhTazvk6{Rg2n?+; zcwxYu4#7uxh&`?BgDI%bM_j_8>w~-52RE=!Cf1aP1PnBbNyp%WZwx-b#`L52jjo_T#-RsJ#_51d^r*-E5(4vsP8wr)E5tb-&eSVPy4?2aS5LteGlW(dJK3R`gX%5 z-1ok2xa5q%4ub^-J`JH)G3>*DorUJ%(tHd!JfS!|p@Xr~2m_0T5PaT-9LJ>B ztO~;;445v4sc~s11|0HXD{%>*#luG7QV0fs!4m^^J@f%x8j76_#mCanWmt)WHPjWC z%xD;L2?MTn2(EU>9IV7VF=QZJio@NEE5{{#WX0jQ#NnZeGpF~(R%5t|;S2`c%~(AA zu|-(95yLtRg&45wSWF(VcmQH?AY&(ClL!pq75$_Ac zfcdk_AiC70ABL_N+F`&)PZtYZ5@X=d5O5j;KFR}*;u7vrz#{5uQys}M7W!-08Y0jT zfrbb)M4%x84H0OFKtlu?BG3?lh6wx{A~4G^^)h5*$-J&vbBnt`j4Jrtzjkz}} z%FW%wD>6F9*VWa>-OtTCI@aCQJ32Db6Qz@k^7O>_I$@NZ+7hf*epU>b|_c zJ_1@07@zfI1`i5j+Klkn@{+kto5hrw{pfA<#^@Qm|LRy#>*hL;v14#o?z*dA4(MwA zGWOR_)O_p`p#Ph_!X)s1E0=Eo+ex6Ny5HbG?O&HE+LGJbtl{PkCUQS-RPbLN+$dJ= zpxKea;QRIiy2cyIwIk2yb<`=GI+iY@Phsxe8sy$>g8AtJQe($JO8oASn%MmXT6@ON zE1vVs+!kMdv$t*AJvpq!*I#b{a7WJ_cKh4w|M}o+i`<*eyjQSK=*j9y#$_;Z>8jW5 z7Ua?Me|CEo`fJ!4BJe*Dfn}}I<8c1ARLhK!h^1SN`AIKZ7^gVjEwGCbyeSiRdBbHv!MLlUxwa@D*e%|jFSdp#$A-si9fNAnLc}HA z4Qs%`VsnQ^T`gdlZ|l60DqXnVw5Z?sU=K| z?xg>1lr5YvJ@kjPxiPHi>*F(bJrBN?o&U6Pw=L`vOt!cym%tYHHzmy}wuTW;ESfkG z_HgIpJx`K{8AEoh+4yjY4czQ!FSu!^3om}pHM(G~2b(ANUUuVyAxv|OQ}=sN2gdrI z&RKO@43p!_*QBO>24#tclh%*=4hk&}WS)@Jfb5=N&YJIL&}OOMv_7>Q__l5Nf#WX> z;Ox0vv-`ycaIDOyJ15Wtj;i&@usUJ}&8LNIs~)2RS)B)qC6+vBkQui0?m8Yk`dOxD zvri5?75Dmnsmu;OQ_X)|w%P>Nu97V{a={XQY^#_t{L%-|wV+k)_4j5l#kpvqER_el zn6*8)CDsULt!|d`ppyiqwecaFMcTsd@^@ZM?y%t#=gLk&DL;U1oXdzlm%o65;|c2> zBX!}q8&BVxIlTj+QEq_?f7F3VTdI#NXzc(?J9%8#Br%8YVq(UOwK9O2T{=~VPnAGR z|6A;Fo`$ez-!8r99y5ZYFZuqcGO~lssv}Ilge|u=0Ua0PURtz0lDTADQ7()Ik zxv-C=H9Tu~BK4}s0?s?&zvkr|V`$y6-PDuYZi9-GZi1w+hrqjkkgkJWcXIND1+Cxq zpGX?%^mtRbULQVh+4>giwk|AL)izspM+hB1wR<{qyA1MlHcj)Z`T?c|I24iwGH5w_ z@qq9YHXO9UxYQ7h&s4o5jT?Ec0F{m}yI!7Y1vfr-)C)7a3zjcV++MR?53a2++jh>y z3I^J+KKH2o0M;dl72aufa9+R7qZ+e(flIII?^ZsH%Db9y%Gx zk{j`%ms6XY4=PRIjW-s%TI-|q&YN#?s*)|>!8haWyv7EScZIKC=V$H#*w22AAA60T zYE6F!YWh1{(_j0BuOo=Z8n%WAG(?~w0{_2?z|ZMbKWY4H`H=PVI=zVNQM{F0*4*@5 za`r+p{@!Er?x*GhsmGqHMe&?EssiR6G1Hu-Y5BXR?e!pWL9v!=w zoK*OswrIdSpz#AVet^agIr_`Il{Hm)WXY_e=legWkR$UxME_ValazKGT>UMn09Zs1 zyMFa$7U^ibYt-_Ii$I@_UpAk6y&9;yrYw9GKN}PrSnsNykpX-vPmGHDSO7Hj0!_VS zQAQ`lwn zP5XhS{Xo-xpsAPC)C)BAk{8u&KU)u-N$TD28@BZGBJeT)!_ySb8q#axE&Kh+S)fgi z?qPez4I$I5y55OKUmCszTSvBQ=?yX_ew+r&mIJHPhjTt<=aLIU)e+lkv&l{YR|Y2D znn;GFCOjP0x(DMQX#A5J|6q^xQzNNe7TBXsS|z!#lC)`cvG-wi3aKRd>r0e$yVy7g3Bh8fu>$kQ!lBh7iii^YT5}j?F3Ix9kieDE*<1l_8ndkv=(%CSs@s7 zGJ|O+(6ke1+6jsR7T>vJxC#t%TB_^jvV=UdQP|mQK^`-$PSKoI46@L2PU9o?6UkX4 z4t(Yf9R>vLhL&~sG!kgqPioo^H0>uf?IAVoAvNt`;y|`cThtB30chgDw1?ER2WZ;E zO#gp7$u;c(UrNLBSVM=An)dvf|Igdg+ttJGxA{Mn#L6BN4~eY`*Qtc5lfyk`I|RUW zQ`L#bclc8Ce~yj}J^zn02f=^y4W$4a_Ft!1V&<`)em-uIZgK9>93J+dZJrxy8If9%q+Z+hPt<8=@EO#{eib*SPLS!t`uL~O z>0eYcsjC42>my^I7;fLIGumGt|LOjJU9Oj(pDQ3cF#OY+a7%;!2L>ND0`)Oye^yJY zuIJSC@s)qJe0ZNP3I^A^*avLs@<`Q zfn%v!cFw3A2DbC(><*m=$If7TZkXIg4TD_3pzal zO~sd{j#1ia^^@v%MHW*p z`KsBP4uie}?@hzaNS$|p(XZNY{NuA(Qn>ZzmxN)d_S$-b9v-}D5UUS|rk}}ji_+EV zXGah5&)W71q~1)m(tBvCttabQ!T|$419)I&#pP{PR$BdRw`(mI^n42X?K0Zlc;C-< z4dzTYxPXO>i3sbZ^Ql4)xNLZZWi`q z{Kx&+f1`%kbp60Jl>c23_{r{lBk#FKoL#{*|G}Inl$d%xc#t1$GObhT{7=@?IH>{Ehs{X*8BoKNn*kUn@+f2}=hxKKyG_~d-By1=SMyNI9m*(Vyu z9=NMgS2=BVXyTlwO%!QZ_jPKs#B8->&%HVGTV|*k{~aAfAHalckQ!9sLR_*SdyJsN#fbYg;A9c0*l((H+ZTchoFXq0XrhoBse8k1( z@$*uF>9g6wUIXH^e)dhgEexBNO14c%i+JBJPFt_j>oN9A=BI))D_T@|_SNcBUaNFv 
z^XI3M(Yx&ZN<*}MQr!FvU%z$FS8EbmYWt;pwR2_7(UoeP0CVuKi7nlHQ8wh?zJyYl zEpH9E#GML6d2!V zFE3vYSI?-JNVh1>qgQ(t*w7z+2MNLL>sZZ3VeTiu=hnfDdZ|XIkuQ;sKxsG>)ZWle|$o>Q%>-8Dw^Ae`jYD ze?aDaDKq!)vpd&h1895G_1z8|u`nhl-@=t2~lc|AkEs%dw;~%)a z%nm~SfyO_n@lQ6HVBtg6r}59!2Q>c4eYyTcuhITr5&*ip`TG2xRG6CoOTdojqh%h_ zRdDEweRqst&DAG2&!^`9Z1xEjuLJtmur);B-x>k5bTi_gE#1U;__=xc_{Bu~ z#h{B0{9-&kJR`m0JbgXlP)crBcMtlT!+;S94AS2qOi^5@7xRtbpN7fvOb60TkD45o zzi}7Udn|8W|lK6Xl8?;NTPO z;~C}V5rsC7xJde_>-&5sgW82dimCr+U(^CG^!{7A2Useqd)|bK3o0*9vK61$T59-yT&7#O^ zF#pQanvNYLTK{tAM}sN*Yp>mi{6h}_EeACZ6@< z!1$>jPexzA{fr;fo`1CAxXECf!4D&*_2$3}*8|30e`~Dmud_ry66wq9>upk}#8Pqi zxG-ovdDTE0=kvqviZ)IA1fI4y)br^1r`q_e3vz#0Rf->**bsq+2>g2^@blZs;DJ@g zL(Lb>S~R_resBfgH_6*EB!3T?e(%euLsts{qd(O_dG9ZJ(nj5!?;Bh}{;+wqQkB04 zM2$(ys=Qc8D!0eCoB95XI_bJW;*R{|9AB*3dbN<;mIYlO-hpKNDn}denrob1m0x zCA+?CD|9L?WR?h1PVXNzzWutbVCoE$^u&@vt-X74c=s5=LG{3I+eSp?9|s2x-bp%v z{O|nnu58rWtswp7on^a93YjIylo373tR|ehqS$%dV9}$J5>ivI!t7Dc^WOz$)rx9< z;a~a%!YoHz+_rWrx!dpFQuES6t^M`k!#$~fxtrg&X^;9fH|oM?)GzR*Td%oP9KOx- z3a8@GZqcXxOD-0IKg1KLw<&Jlu{@~Gy}KX&rTsv(#%AbXa8KRwvRgq~J-Fx+(_!qBtK`cz_R95RPm=h${#do&*RKhqo?Hdti$&?tNY5P7 zCwFXm$TE1LsJI-zZ}HfhBz_jqaP`}a@v_~>9&DX^TQT+|(C8J1>_nC%@?Tk&ais$3 z89%vC51R1&Z!7v6oQOymdlP8tt@msfFJHIcfB&bJUd_SHvHOJI)=h>JCv;lxpY|Hg zZ@stk#>)NFH%c9DDhvHJYz+};h(JRG{triBaZ>t7&GdQXyl&L=xm#K{)(j-m(`N+K z#$0#f9uphm7U>!r?dRp`>gDMh8SUv76B+C6?GfXPZs7Byr_GTCa~Pzj&&;CD|0pug zocgCYs4}@=%GfWU1v{m<_c04-v_G}!p@U7}*s`1tFNYh$1A^ThgGSbXMp6CsMs$1$ zmS4!b!nHNh>Zfi!+N?SX(H`J`f+#_!UT4-Dv3r_#QBRiY}df_iU3b z%%18JnSS;OVC-`ZgO2TRvV=@K8UIW>o##4FFqvqo_205plY%KD<*?`Ahq8xHINEl? zc_PEb1v+r&j1eZ5E9$iId2bXyamsf?_&|r-=ca*y*3Wt}r;memBdt9X2gZKMocl`? z2kL443+C>xEZyD&miA0sQkZ6<^;5ZjdfBZrrf}7&2No{vL~si){aNuBGx%_`?^@U6 z4sgst;j-ap=@$mqu;GB#d)`pf8rr4w!i3alV~ zX8p}Sd^|M%<9?s0+#AHKrVZ2XzlmLgsv!am5%`xz;OBIHkkz6MTj#~^_7T3>pMMMt zdbGIhx^{=bShulbCrn*UKCuZ;KahWnWc09SWlhA?)j*@y+IQ}9hZz35p48Zrb85<5 zE;;UCrgaZoZhCzT%TKKNG9%%7n@k_*ma2U z&*&NdWW9coZ{DOkC!=<1{4nidY5dgJr}%dHaE$WLv=b)i_NL;a@l(G&DWQ8Omm+&6 zKH7e%r8T9)P&_sLV%o{l^b3rC7~rz==22~Xx=u8!j;h#BPI&Zc%d3S8$md1{q!(7v*1d|;l%x9=(t1e|I*GBjen4PaTf2*!UarwfW|+m(KCL)jI~@@De}YE zGyV_E)_Yj;mwsvVpnOK96RP*upZ~qQd_3HLzx0aw@yZfh$Zt1&tZ8$onxc3%wQF~n zd*F6Sr$KY*-~TytSVAuP*RVB2pdkYPS0XTPQu?U>bO|Ig2abtzb@TJ_c8_!Oh((X~ zj*9j0^Nx=5KvzPdiy~b;@VzGq0U7ih7~wA$FfbP~*rRU#SFhr8rEYFL2zdJQnmUV| zl_1=t@>Yeb5!^AMxS8I9x8PvEh=IZiJ{+`u(AptL9}g0Xuej(!S&Yq$_>+9tzS_b0 zn3pB|U~)asv8gVUm-K$AKFo*U^6BQpFbO;}f9cSWRxD^G@Om`hdo>`utDSm}k-(!5 zPA_M*GKJgLbHaoMda%PiPpR27OISX-PC0!M0k3bo{?bmS2Xk|5LcBNH!J)e16#ce_ zaNx8}+Xsf#fC}I3?HeO~y9DQoWA129DZ23PeWM!i=1BDI(H=&yXunJ${4jr zdSPmoNn^PBwWZ6HZlTd3S^m~nfa{$>C8CbU$%AjM1-`v8mEdLbp0T(_d4CDgnxk?5_~;*s`4z^AMkD% zttY`z<5bHRzyby;+y|Tz4BQr&Q{6#Y8jiK)s!@gv2sA54t-S zH&1SX%6`=wo(?Pr(hKAF^bj2c8Rh6<&DQ7t`W+}S+QrSy+v`u)K;Eys6rNJs8IH;$ zn>{$t8qQO2pLfkLR!&Nf|Ihbch3M%BSONjz_6e3Rw%>t9HNrL^x(C|TM9=P>%&f4Lc#?vxzJ_N zq!oM}KJ1$L#Yhszf;)Z;Y8>Hb3GX_zICo?L4}`0(QK6B(E;~ z3#e-w6;>^I3q~|sYFcH(ha1~^>>kn8OlzOrQimKN9OOlRQ}cw z1}^GI_ls%&;Pv`+zjnKfvYxCm)cQ#oVOY}mhc&#v&!mLiA7#9mI_%8sDeu9!y&Hy= z^fiFrzpN{!^kZZ}W!W8#;Un`0wEoeqBVC%br~^zp2duv`_v4WtU{btQ*+I$A%j;f; zoDUOAK7y|S>hsOFzt_$;o*pwgQ?~b}Mz0SU*1-BNVCGbPWY5;L(;QaR9ywRG_c@p? 
z`NUHMZP)r~bR@9Jz>N)T&&00*|D_T5`RO#!{jGYYaPPOZ{?it=o=MJ&vG`)sc>~$E z#j~i=Hp{_#|D8k3+s*{ztY3H~ciaG6T~4HbP%J0=jqAIrw3$_9%GhqPwM1@>t%dl_RV#`!8JiTAt;M%ks4gB(#a>US*;O$xV;Ws z9GzmcyUk4C|BBsXCaQPm3%5(Jk^iQRQhb-4zpeODQj;9n`n=+OL|yH==^NC$(vL2g z3@@qEwoPk2^hYEiul%)VaUcw-%T@eKt72k)oUhr-#-+WsNFncW8V3`@cPlOU(ruwuT5aM4%x8 zzeHe3|MbzCZ^z^3O{IRH4^Inc%|J5#eSRu!&8$sCMaD%&d-=t9czSrpc*MlT_;`D{ z`9^!YMtQ_W#(Kul--@S<31g7{c5IAX{N3w+_Hy?g$nT$C>z94qmSCX^uk4%C{Nz$o z=->D5Ci^^e>9War!-5Q3m?58e>=e=`9p97dfb?CZ8MTcO?i+MP5R2Y7>5I~W(z6)~jBZHno!HELb>^p&++D#E?HS;p%SUb-+nkJZ{1E}JErQ`pKD>aGayH5KWvTq!In zM0lgS%u|K*?q+!&7ZDD+Hfu;7LUqA5y#Z){pWsCQN2r{UuvW-Mev+Qsgswv6{a*#F zDun$zrbMqrcHZR2{WB2ySW6C^NBYfI(sNg$a-*!6M9Tk@A+e^bP`MU7Y)<+AoXdXJ zAC;{I9j_%LjC{Yl^4yjhz&`6g^h{9=7}T}Z1qCV>vGyj{Zq`Rn9YwRrxhWDHJS@m!RAaUZ$LWoFaDmxf^O^~uvKuIrEn?`;oGD-8!xDYIIBS-%gQ zy*16_>S`k};pCBKUB`!mtZf%f?8@DNe3pCbraSh59-em^2L%iOX3r`Q9WL5OW*jXU zyy4MPMc3P(yTc`6VChoD{U!l+!17Y&z{@+({@bO;JclkM_jS!0pR_!OoVf7b(5DuQ z$g0QUojM*1flF=kbtms;gOdgs3B$Y>k`J3y>knwUkUTx~c53^q9N@XXx?h`D*}%D9 z-^1M_76N_$pdKbyv&oulH^u2e3jw^Ik#gK=AvioUGb`^#Hb~oaqE(suLg2X5rac+G zkkrk)p)SqMAxmUEkGt^~fqt>>gI@S8B#*Wme|AiA4oJ+_&6d_=lZT<&?tbt>ARMWC zmsgcd!c|Xs$72_gYsAUrl^+KIy=$g=rY=K)|FloBj`L%{jp|W1n{A9GZ+~%k@v&$a z>DR}upQl|Mc*jo;A2n_;7~E~WN1e_va^uKPP4hR$kXeoUXZ9$FB{whbJu7e5FcALc zv|?0;ShB$5(_@C z^4#*y1(5NfpW{{4J>(a(av&Ht(E|E<8jiz!Wi?D2=wjGW8c5844&27Fiac-s`u|I=99wEr&W zenUSSBG3?l|A!*5cyjuf|9k@gbN{?2`rR$oHOkw^%hk;{%FWw1)-}@GJ37iO$}KX= z!`GW$SV-P@n?ZVk;h$c(|95uvf07GEq5Ds-t;WX$cEMjruYF!k4tbbEC)j?t(_k6Q z%h)z&WgnC%U!(8F$=cX-3{f6kvC-?rcoR6b%DoLdE7IC;A6ztCki>@8ZB}=m)Z`Vg zG~K84o%4_!I_52T@!EC3=m~ygn<2N@Flp{cZ>7^~vI}U``A!5IGWNGxOsp(BuLpZy z`3y%r)`LchmLVWxH%LyNJiZ1!IDyf_v{|0l&2?c%y*Y;*S{?#Nc6IM{TVbZPpZj#u zuuksM^6r(ZBxXBxVdHOGlJX>N zS41|fyyEk~q*!RW>AQ*6{(D09=glvkk@v@6TKkHn2ie2sw%d1t z4f}bI5l;L14KR8$^Mzw}d3+@UDuAdzR|;3T%w3uDfUULfZ+*oS)|tR+{^6lpn|uY& zBBa-^#GAqbw zOGrzn_6KXO>y_8eZJRwK%cA`GrpF1{3oId{A8)i~>dwfSBxC<#Wq3@t!;Kp*Uip8B zMjM*a5P^mW{7**U=fXhvp=riO2L+cDLo7XcE6K!meY%bKxQ3iWvXgVpjVDK04{Cqv z)&i2g6btDKTo#GkEtZi-tLuz~`YB}k#?X%k^)_kkcc@xKI3GPi7UaHqcTc{bY?O4* z?3v|a(B?z-lep*W$xRFOPgf$nEMSg-q45!Nq1&9W$p(esYZLL1^V^SW?OiUecDXiY zFSvI*qV4Njhsd;9Hl`+%50C{zzE4!?7n3t2dv#wS{bZ}{kFJ{>LJugY?Vfb#D5+=A zuh~VP|VXuQEbpucePst+Ff$tJfwVhY9|Btx7ZOfy(m4o-Z9;*rQtz$IsW%3g5( z?5iFbhsjwxoAHu7Yt)Q=$s9O2;OGvLn(n_vewcc%xSq>MJiH6EVLKO}+j$ZkS zFlh^jNNSt4I`k-5+U1j(65M|S2ewG5R|IC z&yOFt9ei@gd^gVivey3UaNjQ%_Z}dp8n4K_x3n0X8j<26GTsSHhh{y#5V#yj&N=PA zL+M@4F4}S*T|D>si|Lera=;0yIkx=aYBFnXLO;sBb^f$e6S5H8c5nS;iRA_mHh@c* z+$bdr7ftEB)ua^6uzE7|!N){$$q*OocKO>txSb#+-LaUwQ}=dG$CNFgo7?aaY{NCc zz{z*Nt^H=wRk+`GG4dmG?tB~~`_rG?BjaLc0=KJUZ?#AEIc@ds7$g66y;7H%Ufv0g zq<+Xciu^Nve$D^q*yvd+zJBf=KJHObD3v10 z>FI`2Dbfo+S=n0{q!)mwk2xywA@$T1>h;3LetX)U8o=OlO?hnG{o?~q>cRT>R_A-0 zevzJ_K91`-ntyw@0M^IRWj9t29x$1N-?^(Vs|Bz={?q;cx?kyket~W?CqL)fVOuBf zClij?iAj&J%Q8y`G0Bv_7A$D*`&vx=l)E9!Z+&94 zC&HwWEjEw(o(}p>&*>fi7yRW5kgvBpIwbz~8xR*{i(5?ypY|AbZ5`Rae>`lc9fk4W!0Pn38BCF}Hxj6zvsqfr_XA5ZV7sJJ*y`uSPw6X~N5 z;eUC36LZZIbDa}4+JEe(FQ`8k67Zbx9nb z*7_UgH~VdZjh9EPcK)qB*pK6HewLnot>0s<{cnCE1`JHQ(#Y#KKP=7G#qUMVCX3&b z?PC3BGDjZu@WOrWAK#DLmeRIj|3#qgcvGJfT8|1DMOg%^b%PxZT{7?Y=HeShybpkA2g zyr#5Das7GHzB$pi)LWI@@HzXndbLKcIB2kX>XcpM$cHxP76^CMs#gg^&-7paN})a+ zFwbH5PN4Cx^)u$`wBC6YT6=}YUfuhF+4LExJu$vIUt0fv?41cbRbRXKIomvMG7p&( z(J|9ttp*KJsf0>pPC{e|O~#`_QHIJqWvCEMC=01HD9!VrloUlpi2OI5t=IPJ{@(k! 
zvEY(gcUV{yo>4t<-7fG~*-cAzVW9N8YOuwSr05sI&5FMx`};lKHYqsg#bLqHaG5dv zulwN7flHhEPdnDc{C!zWBKFIdfvEAa81ApXmc@Rw+X*K=!;vXAG8+r5&Vo1YB>wBx zQFvL2tCgOd=3{jheJKxL(Bu3||66d=_iCpbH=HWngTGK5FvsBg_^_G^G?ztcw9bLQqto-s+_Oh6vDf#y%!O_b9*`FcK z&LZES?Pj4pc~iGT-z24}EApsV<$u#ZPB(bvAKNg{z=TA0(I-(&P4vl**o=RgsUz9g z&~UgBz-6@V(lxnwB;8rG$BfUFfM?#1&(It@7syWS&`}G)a6C zbZC^5%pK|n{GN@mx9)uaA#r?774idMnwOE!Cv0;&p^!Ow7Kh)1oN14*KbO>oP-CLp z+4gT>L%?ZLcEShbQXY_Tg7xy`3U8q4^_n(9x-AzHI<~><0!<_7PzJ zmLEG*YqLSCc<;yD<@I3K$?}O=B42@DvD#D;5`g9patDU>{kLsm)@lr*p0lP<&q1)f z=jqe6eRYQ8MI8JG{YCBb9>z9r$sf)*{~ChEK5$ArPLO<9jLf}&i%42@o0V{HChC$q z>sws$4opor!k<`t0bO3B<6;?*iKb6ZUK&Sa>A}2M;#Eu!#`I{+4meOB_Jh!y3C8TO zH=2=5uq7t{$fW|{-J^G_x~#szEh-^%md1y`?Sk$z@~RB5T8zWLT8qJ&#!SdAhE$RP z$)2bcEBviM@*r5fu>|7P8nIDpu^GVn;Mm{rvjqGx*O- z$L}^$eE4%KM(@8Sa0Zi~HPVm+)l@fduH&xJFiIc4dxv5E?=;zf1$zNSJm3Ct$fA17}795xJo~-_-38;fCt`nUd5128uYq$ zZqz(*^|t%|!h~3Z-*lA~G}}CF)I4&i!^8z-LM$okX!WMlEmN1pyXY)O>CeCh%YtUa z8Ul4>>#TlM#yC&fq~4Io+5x?|$^ZbHp&TFiXV* z?D(nsNapc}kVPV}Frm*nljIjLq|ZXRz|9CIRK{w4EO$b|zJ~qlDhm_r%QHyor9|)l zl_63z{jsMq&;teFM&nTAp~}$U5KsO4ijw&8H18rw^DgtAkEyCK@>b)r*nBGtu57!m z;M`ms4lVWFog4qHs$$0eJ=8s%?(X}#0@QxJyVXi->#FoKdjCjkAMp~T$8?%=aXl9@ z`}LX9)IVvVDV+QLquuEV)zJkSmM(r=nk|4OGlO;|Z+Hy;bX>h%4Es69y{%(o37a19 zO-niBTU9ZQD`=8M*!CqiqX4^w^K7e*TQgMFa0jY?$DXaW`nUI)VX(&=BqJEimK;mp z+I#QVUIzE)!bML)&sUXC$mymqM9&fM%gPplk@diJn!4G5qdDx5> zHF4Ch@&EKauQwDfL`q8|IIuWGx84}dVf~UGD~sCyM(w$aIHn&>Uz$EJy=HpWRA;)! zG~IL^iUKg#G|n^%#Q_*@>TlZ36q)o@AdSL$Y47 zL^4+rE17_z0t}V-Njgi~NMw@65(kO7gfFf{u>qclZ;LO9PlykSGsT<5tHdedS>h?; zv0^of5ztHQEmn$M#ZF>-v8mWt^hH!EdMdgrx*|F)$`$PqrHj^z7Nc3AVnq{C#DL+V zzM}3ZW`L*21w{?86^TS#lM0j9CXY-CO)i)mGs!X8X|f4L4_IK5U=nQ-VG?N4&&0>1 zy@{8Jt4Tu>I}=kAW8oKJsql&Lw(yehgz%s+Q@BaELbyOUQ#eI9R;U&Z67~{$3tI_W zqKE=^LQ^46@LBLi@K|tDa6xcPZ~(;>*dSOYm@kMIL zi0{M);yH1TxPoE~jIB@&5gL?jVP3`UU#Iufl2Dba|iO_&kJcqLwnKf&+d z1^6laFuog4!&jr21Bv)Fd>lRkABy+EyW#+M$DQ%IxFt^Tzw^uZFZlQQSNUg9{DD3E zbpAU2Vtx{TI)6NWB!3vc55Eh)E#HmbgzvyN=i|m-jo%qRGrntl+4wYyMzGsB&3KKm z);Q64nsKDD+IWz0FXN8Jt&L^IO^h9k&5ilIN?s}N3GX)V67K}>Aa56MGjAm?g*Tfw zl{c2B<_+fg@;dTb@mlhncy>Hf9uNDBy~Unjx3NpuN$fC|g>AuBVH#{UHVuo!)Yu@b z7uFHQE0AK1u-ce8#^-+JzT-aS-r-*6p5h+iW^=c4*P=p5FN8LPXJo{+;bM1jZ7|PB zz?EZn2yGB$tj)#l;MzdSXh&%QlqSJ$6WZa_W_vDn8`ln_j5d^UC}pg}!)_7UA+%3g z9Zabbq~h8^R8kzfNoWUBMmr34R)xin~Z994=F?I>pLTbA`c8Sme%81VSBB51LDmrJRwxz9n z>;j=}LtAOJHElJ<@(FD#+A6^EajlZ_<$|5ZwO*8Fhn*+13TiXTAEbIxstXr8hig5k z?T(b@PHDE-IYKL^Hk%RHSwibZsm=JPTB# zP^yfFohGy{l!}T6QkzpMa*Nbvl-huYog%c(lv;;NsZD7s7duI4n@}p+?If;kOdYoY zb^_Nn(w{5RobN6>Ts>C{tR@4*2a(&sn!NH z7uQ-*+a0l7LTgDGQ3*k+1*M`4ID%`?N9 z)Sg`I5TO+rtVlI6s0RtHkWwXF>>#ccP;TpBIk=Xfv|3mWp~b1q=mL?-r&M$;2MDb( zrJ}v}t z2~7;8+9MU$Or!RyhwZ^NQz^|3+e2ukP@833ESu0wruV1SXi7!56ImzGDi_;LXrd_9 z4ym|iBIUpj%fdAiD9w!0#_MgnaLqVMYk(mwlG5s8nYd=GK3yiE8Dnt16W5HUjNP!E zgeHPAqI^K=C`v__w1d!uQ!2`+9k?cpN@_@RA%^CI<7&a zN}cIeLPK*ZI#Z<5oQe*LRK%%t@k%2!G^wJarr{czR2?y7M5Kz!6ozdfG&HNC-zriO zs~U0ZbFs~YhNe{;KBXdBrSIM*LPPUvZ61bHnpc}~v5ka==GEGKY$L8gyh`Q3210{q z6`cV_sWh*mY)d6Hh*#+Xj8vLfYw@u4gho%Sv`Q0e3of<}*U-f3Mrky$*22~i8k$%U z9j+xbG_ls>Q7R%H8j84U`q%M&98{Q784qpU(qc=D$TET zFl-U7LHtTl^&&z;(<{3DNTul&-E%FWq3IPR)#4gNuT)Z`A$FyH&>CEW$d$TbNTa#c z4$}}C#I1-1c~}aeLF7t5D3FTSmA=4*godV9l(R^s=@tD5kxJ964;NcNXlQyxxrJ1k zUXhwiXlQ!v&c%{(4Wd`7P|qhch+SzOnvZJ`y;4uodAJ6#E5(yYL*z=;Bjynr#I1C~ zxwr-LCi|IParfju_E_KMZ}6K9~d?RPeIH|ZATg+ zRspv=7LTVOUZrgDL<&u-Xdh&xY1IXb!&7Klt%b!ADKxF3&B%yom12%qJO!~TwRbF$ zLX#@arHqJF1&A1?<0&+!*1@I|DTq^PGC?XLRUhFFz4_EbCtkt)UgQ;8IsQ&9mz#xS11h|9yK5GgdL+8`BAp*htB zn~bL*PNhz5GLeEPm1akz(wvHNIGRYIITih!k%~B#{;eUECRJ2>n1rVwQl&T>X*8!w 
zut`J;&8ek(x-P(3FbGKT;8;qATWM z6NnU=Q&F7;sWhh|>v$rC=2XO3NToRy)ilNtDKw`d)u68NTpd7r5%Z(L<-HQsOlPuryxe92m@(|P^qeOD3L<*DY^-2A_eg&Dlj5Sr3n?) zrb6%(M5t6DL>l5#>KPnDq#!yq;vy0Y##0cV3bdswK|~5-Q}nB# zRYa#oT$Js|iue>YN4QuZk%9=7ZY&{{W>i$!LMqLuh@}IF6q->H4+K!}|1q8!N5JFH zL!xG>$$^?2_A(f6ePDFijslkTPrReOe@ca^eGG=ZhN9F4vV@(Prz17#s0 z>R=CjPx_A!JLsNtB>c4N>remak zvDvr@&9>?EA>gl6$>ox6{dzcG`8j9IojdZGLA0^jW%k=G9lH zVSJnJr?$Rbhe}@X-z#})nU@@WaU%Dm!Ziek%0t|wZZf6PBP2+U3~GJMhVT8t>5_*e zUGk_&Ca8xU6-S*~4;A?wnfbvSvSoR_uU-S*mIJm#yZm<1r^6vzGKtxC2LIjd|KO4b z)BGRS8rG@R>@?4WbvVgLx1gN0&DP@NncRga?AJ3DTkZ@C%5b%Rr((-1m8S~V4ScU+ z%b(sIdU?7lJ%0UmSYPt2CJIa7_wBIT>C%J8?qM+Isl`dVsZI+fCk7d2AQdH2;m;F9 z2_nu6q|*^Ykf0W3uwWn>V;SNV6sidE^axQ0d4wnxp-Oj|(hY?`_Y4izkKFj$9j8kl zl62{#?&7~%DPH{R>WJ`PS8iaORnQ&|yxDDo(D8r}mg*WWv)Et*Bf{Qx?(o$DP98p> zd|;^&^bafBKY!tCz}S6PHH$H6U7DRjKPdVwW(ZU22)ork2=-f8dkiBocYv72+uy7Dmg8*g`-Yh z8aomi!@P6HK-2M7aMH|D(~M(Sm3=^7QtV<47cOkjr|cPg2^c>!(^4&c?CyhMLa}km zI5T)~UVqQVVLiaAEz25w>vRWP&QS-gMsHMm6&@2;b~1(+!>_vbSdYGoUYe8Yi{5@Q z_9#xqkb7u6e3PM<#v4ah`?qKXJ0cI?3bLWG5n))pt@esx69;({I+vahi}QZJ$Zm^J5YH+ZM%o; zxFK&@_6QP;jn2JnkL)WwM#$UR?g5rPLv2TlAjzV3cf-w){nV0r4MUNARdl--St$K$ zk1NSe$UgR5W1$N9iK^=<4n*k%TUyRqQ+@zUc=_RJ#(>=*x|pnqN5PL42f`O1`-u+a(#->QlY1|^g||oPD_R8KT#f95CR|O=LH6XL z+r?D+o#H!+3H9ca`9||Hj#+AerAhr{Z}Bv;W!KS_Gmw42q1VJ2OAWbXLivOjkEfDD z_7=XLg6s?G^%0#zcImEylbQT!K+((IbTG1q%MR^D>A{!qZ;vP3n+iTBc|2b*e=ZpJ zC86c6f|aDp@};1CyJ+AuaOlC!$lg=*vT$d?O7J?+x4}vnP3EP39<(0WW1{`fZ9(=S zQ=AU=N9i4+FMMD7E91Te zjHIjcM#u#<=vqTeJl_6N1t_}t5i_3u4p?@+J8DpjF{F?1n|~4ftbE~%N(|-hd zKhF(*j38rYyI}nP*BXV@kN2k2_gQ^xxtx6l1|0tf`SXA@H+MHL#c#*Zb76CJ(%4hv zMc6Gq5luw%4z>P6|6c3g3wS9=5&4_#O`EEI2%g7DuM;v})4%u?qhu4mrdpKlEU-vg_^i{d>;&&BW#-ym{*e&A&woE#bkSEgFl8lUB zxunUyBeoKFo7~I%&979#$@QP9}CTU3g z1t%LQ?Q=hhjX~_bgzRe?|(Fc z9k-f48|GLWe&U|qF~6w|te?y_)z* z|7o(R@%O(u2dde9$jZF`Q+i) zc7?4blg}F~r!}`hVPfaI3UF)c>>pvOe_OUK1xH!1#`+qLhgd+$-|7w<+9H_~G-{nBEA~8(=X>ZJI z7b>KD#Eb|IQt1XI64b(c74I%XQDp;Blp-&s+ABn+4s{C*M8V{PgV9{g!JcY5vX>Qc z>lj@{M3Sx|GM|{SBYUxN3v{5ru3OATN7OE<1CKnloH4VhHGD@dExGx^8Ctu3Z0m0K z4t!mCp-~Ul=fJXSncKXz-+;y7+69wHBRK8!*!`pJK7;<_!#*q!*MS={?^^Ekvx9Zg zWjXJjnL~$5A7?thY4nTzQoXqGi7o2FKC|isT-+jn=N#Wo2pn$?8_4&hH}SNAZ2xzk zeLkF!D}loTN+TIn-Yl0rl5a2NvI7IfObN>PAq@MNdTA3I1pLI3pr z#Rfk{hU3;U9QW6#8qE3p;~H+v1=q-bRt{8G$$H(W-Ry-Tx6lmxQqXy~>izH^pVMN~ zRr*hlkEjfGox!d%*md>4haD-*Jp`JLlDy4*o(;MRLf_#{Pm^}vt(!f~Jw$#;_}c8u z^K5dKYDUK#WdC5jW8PI{uQTq2)nR0x^9payYkC^+wwsg% zT<1)oZc~9cebJD7z}TCv|CZl1_Z+!9t;}18{4oB14ng11@M&>CRY*sDsQhphb6XkA z?8n#{%!HoQOZCqk3^ET-*{?M$>J5to><>%BW6YnX9m*s504@A2{8Q&3VDcH7KFxBDT6#i)iO_aFR9!!2$|fJfhf9) zzJCF?aiSj{NYW1vDqQ{VRzGBCzE%E{R2`gswE1T-{Ise^>UpmUkn!MIPSiX;EZ8nC z8nW98c5d~y)m95rm>zRz9p9RO^REswtr*FNi)(ZH>28YQ`)|FD^Lu>;uYZW5lJjuL z>osw)QIZvWa&1V5B+?YFezO^#F0xl( zd(ZW8fhR5Dp4xGbw-uCtRI}sbN2FT9&IxuS-+$!6Tx`wz=viMuX4?1=e4rV0+IJ+a z9F24!%^&^n?`{Qud@5^FW^Dy~3KYYx&9H*?rrf%Js=Y0owZx*KdtoJTT4{aGI0KC= zc$AtfoMsQ3^!n7mVf%cbxiLs%<+=~VK3wW_>Em87`9z<-bEn!v(^?K93uI5&;q9`y zY(DVlP~t5`=@-Oi1usPV*F0E$Q#fi&|7UZc`XMxEOpi=egpc^*=j0q+F8QEOc|m`9 zA=p)Rr80Qu0pRSqZpZ!hg(P(JEV40xk5%L2zj_bR8u)L6rSX#Gl=*|P=-HlE1O!n_5mpr-6-@Hc$qIUJZU*uXy zmYnajrKo)&i2GV!e2aeo9Dd~SjvtpxX2>s^lQUPVCe0L=eXKvr@Q{eqr8%tj>@++M z97-5{V)4BUa?;TbCqD)41^X9`NMC(D6KqM$K0RqxCh!?!HE>kZU0}>wC+^h5y`)o6 zH>b(sq=ub zGa>Z<+A~e*5l-vV*PgJ2XC_A0n{v?%c5&*c9kbL4p31j4)^05a?(&jN#^TDsW%-kl z-8$EXYmTUk$4CemWZz|&RVZrg2lNmRxo!m?Bwk+utYF8DS7Meq zeg>Ad8-k;U)Q9)*v=@6k1Xagn?Cj$vB@09DoDxCyd2$D5jN){5fD@-o$hl$W0GBnp zkURdSC5*ZF{Rr&Y6prY4&3nB74Wu|0^G%ug6ojAd9Ny@3CD7Z=px(}ht`FaZH9Bo# zNMCK&r>_lNhR6QJ45V^Ua!Gn&!~K03gFN7T2*$&zuphWa9o?J<1+g*a?8%O 
z=sar%A~5+;Bmc`eP~9=mr(Kt=w=T9_wPEXlkIqlPmd+J38^5^+T2I_N*Vg|%cwe@~ z;iXRCOQrKi z5IFL4Ml+ON->aRYI)5j#q3_3RUE_FDhxOw>k*#O?^ojqGqbsTt@uA%LBYDE>Y^oDF zt|7h7v@u>x9rvx{`_V4Q&zT)T_O<=rlOESLzbZrL*(utueN7qi$+!a<(R9@Q!zvKvqOj`CR zyrl_b>|U5}z`_T0;rJVElV>77wh!uuG(q{%%W_KhPsq=5P2w;OIxb^x8QI|JRCHV) z%cdKOk)OdE&pS?N^BolRdXq2~`5EoxW^=dqF2LAx3i4OkpyO`3cSZcDeQmgP?up1} zJ8Z%JvPXT6ju63tZML*1dSOy!UsPw@;o(y%!1s&BUI|va!Ha&Q<6O}FYBBuacY(VD zRPW6DF~`XSGIoou&xV|JY5>>HKDVRZ#MEg8@c9c3$ZGsUH@#EDiV= zxm=M?>Jf*t^R&Npz{A*rDGC5KsYjU6_2mbLnP~Bb~ zzUxrRnWBOI8_qxR^*sy54}G?DLlFT+8Yk`;^D&v+SvG77vXj%|ue*MJNPuw8HkXAT zl0o>=jPHKccJjr;{%^jOC4<9nc6RZp_Cwwa3%>g7Q!@Gd)Oq8w$LBx?H}QZSc}^r@ z+S&So!x8f8l^MO?JU&OpZ@WCMAF_MD>+>?-;RxV+`IUY^c0+pL6z{dV0NIB~Ms7vv zfgwF8Y&_f^*~yzb7w+yMNG790c6k2Sl@GeTOp4fdG>24F&Z#q4kPK4%PP^G;<%5Ii zAGvpr=72?)hj6fNiYxZ0?98`Y4{&n!Ag@Cb}rq6q2_H;Q}_w^Y6q1ATs(D^Le=E&YO9KP*S?T7rZ zr)0Ao@}o}sWFOY4J-D^%*@!Wj`@!MlH$jUg+mKg(bj@tqsXgiSM%gzQ+4Y~P*?{q_ zT@HWi*q$_`Cxcx#gf$*z1Im3i&DnPa?k>{B=DSeRO46{aUb5`C9c9Gjn)l1?J}c)CmUWTd6M_8Nrv+Pe!*AR{(A2Hqw(B zwPD`SO`jS~C19&Vo0{Y;HiLr4PmXVPvxdg2waFv)SyrWI?Dj3c_?}Z+!R6yJ$JXO~ z0ez3ot(0ffhp%Q&jC*xkP<33!&OUCPk;OZSC^2N8XZ^al^E%&kfUSd$ZJe>t0p^dY zgY}YG!B~e2TI<72;kP%9(#G^I1*!c~urrp=z^&|_a8^-CPazuphWa9rD}<1+g*GU#c+ zki-rSHH|MOLu%xIHwUUK#eUm*Jb0FPpw=?*z{ceunewT#Y*YSjvbM{Qi+l4=lHWvD zb;h_H1b(yct|-di4SH@{CmDk5gT9za0EO2hz zb9@#$E@KZLv~TAybllxHT@$Y(KV4>j*zi98B*;G5(a{N==d3A{mhC|4r*3LnSqIri zsCFLjQRkDYePUk!rJvuZnh$-s&yt6MdFRwkb~N~;3$>VX_`~-%y8a?<_E{lD>MLz0 zb=}f#niLP!>A?2iQH={3*Dmw$DZ^;eJl&6U%XfZ9uj)=6u|OfUudD2edlh`c zb%vKX>Z+GE1^3b#4|{h*rGGgT@N*?jh6ZtZ{I(J&0q6FBm#t@N;aJzTTN7U{f{Tof zr3`8M2|YR@{_dkgCR2MU6f(~sYVOt`PcJXGKu?+64FxU?R)z-274##+<}^J4niZ1t zqoXb=V9d%6WZ;Un{b_}h+b<~S@f~O%^>k=7zX%M&mvIhBc<}CwF5i4hzmO~B8`Fpz zJm~nbmS%wCGqBsu%db|+cTghRG2`6ZO7L!;Z0!W!Z{$bW^tWU7is3ES)sJ&789}4s zGd`Q@7lQ^VkMKyzB``ny^p})&1RQs7%hz{J1n^;TLBa~r1rTCyXY{<>4Boa*u~Qx@ zCC|hK?psqU9gG;`^%pb-srv`_66GnMe%^F`rWthb&mN6+`UXzS zFxqFi`332^ZE9c)>UVq`*f(Rlr8SJt(bdl3refxC zObd#j@0`g={Jg=#v9zxp>M(`HcDS$pZGyIewn9C%gNc3Hmf0xQCT?yN1$NSY$>9 z@M<0%|6RVU@m`e!)sK#DtC~&RBlQj*RPV>Crx764UUI4R<0ue&Zl|MH*$A@h=?ncs z*G>R;O(achIicX#0vT2)9uLBt2Ioya5kP(qC^}dkIR)(LRoHR)05$MyAYT5mjg}0n zo7aGh)d2I{8*i_2XM@GBOP6*zJPY`IHO)EgF%NvcW)-!WyM$E5S^3}eSPWc;Txpu( zK9ICb$jVeN(1KN!*E;QbG7wC5URGE?Y&7tUQxg-5eZj{^-?=Aq(n;SRQ;jb8#gk6` zuH?`Ek_{sBS{1bZngFy3-l37nGs)Jl>5Wcj(#h_?}01|*KvSJC}vwxK=(c9`M7K`K4O0SRhhz5$lIdjz|?g@%N>d&onA6+!MYnOmSz zA@vCMLV*cG=m_Ih4kLQfRdpokDmoS4o>?7{&a8i}nvdz9{rGg?53(7|86DLEy{2}Y zraW%@6i!4>)_g|3_{VWOCprR`M*A<|4h(89~JTB6W z$*rD${qDsyfZwmIO2-_(5pVm;C*AEKgBh8r`0?UNcVS*(&Xc_FC;uFsTYvtdnjRG< zGK{xjU-ruI;Kc)BS)gFK{e3Wb|TAPRQs;fDA{8tA6< z2&Fm2K5`Z#X>M^u<>7ZLHT>gWt-}`ImhPJLmE^lE^X}cw9G-Emi27_-M8=yw`Y_<+ zC(viAQSbRn#4u~L?P1Eke*TO7Tq|?w7%6HQ?eMX2ts0(VGNeZScXObc!~5>qGbD4yo?#8T-r$z+ z(FNgaoePt6Rm13I9j)G!x~)?&vklq4mJb&t98%#|=X)GjyIZ%dO?K*qZwFLfORdJO z{ji(qaaApNy)U)=RF~G7~0~ zcO4#_T+w+2Sukyj(-o9{McvRv#>gJ+zWp4h%L*`Oj%lRq+Dvk|-HW&tM3tR18s@(4 z8A`uw&j;}VbX-GR0^O;hOH2l%^qU6eNGp+_E5i6)rJYv*pK#X!jjPV5udd_KSvjWp zn^esC{PgVbP{`c8evN+XMn|Vvn>)NyT5&v7yB_d6SDFoH)Xj24GXPNSf6QncN9e?V zg+$Hr-_C(~lM=`4AJ!4GsKL%Nd}jD?kV-cMkf0W3sv$JT5Q^!520SZd>YxyHh^Lz( z&@E6Z^OOa9C=?-T`XSxy(`81|4{LVBI5u`c2l(q6A^M=*(2EOYAKI7I^?L@cginin zqO|@6GwGP^3}*J@C0dDgSBN27GU?coy&vP3k?ie^U-ou7$Iw5ovsf=@Cj$OSveW$u zGj>Mm{Z+rNW$cXnyW4BV$}%}tBk3HdemMH#jV82P*1&V(9^gA>6j=Wt_r$f)qsXta zC0WPDjsnq+b&74qjv~+ZGFI$FSXDdq^0v{VfGjawW?h{gJj9(NS4NH^mD>iKi$Lim z`z(epLUL}CRr8-mRM~;S4h(k256K+Y;DoNw8F|R(g5`qj@b6_fn(k+`Sa;>Ol8Er3d=pL?6Mk>4ViY z?;=U_t_^ZPt;{eQH>6W5dpR!k?)rJ?uY9shde7qQPxg&uJoz#n%!&0L 
zsCa7uqg_h^9IqKcd~3;fw`FC3v+}Td9*6D>`6+$pJys`TAC?1l`few0M^0>F0S}FT zGt}{#Ib@%g-fomP-&;G==b_}6?M(W2((Q51!)h84Onz{xf5&YHF4^5AE`N<#`+dIZe}LdD@59FXOc=kh2|oi?~13QX~^DwB`^Bmt6MtpuFB6b`!1=*zZg4l zaEIAI|841QmBq#7J5JQTpz|KKdEU{^vw^-6eO$M996!3J?|xOi!ZT?dyhnqBnvKrO z_8(Vu&PjDP@;jhQQ3ahJYCihsAW>+dOfUIwhZpccvSykX&tCxt$qSva)gNKZ@waQU zTS(>)NQ~51Stibkp{guHW(?sVRbOR^p)-Q%UqAy3WL|-8sP8hw%_CT@P@>3%p6)0J zt~@jpjcW*@e=>DF!x%~bbf}>P)O0F;UH|kc7Gqy+``V)SDD>&f)Iog%KTjkTo0^{4 zzil*G6#V{s_zLf*Fq)Yf)z}T-`=y$NlMFhv+y0EpeH3wEcylj%B`vk%+ z-o$z?w}QvYdQ4rk9E}>|N4*))(+o29T>*VfJ$hPJrT=(tt+3$_dzcOEw~5?1uxM>| zm(XXQ!0fDcc;Az?;YP*T^qfdr*s85m*BFhWo7)e=m)g{ac0*g{eay9lbEmAIv-V3l zVC+j@8{M^xCZIfGWRGEM&0tsIs@qt9CwMe`?6&4{wpDh0zSTD5+bl!AO|Qzgh=RVy zTz1()`mUmXOn$b!yaVOu51`Lao0>iZCWC6^|2zk(DYJJ*-bROd_68U6Ecrk8tlxw2 z!|cz-RrbgI!%zCk^2v{1-t6u*cn9#7eQO;_YzEI(x-@;&`UE*Ma>kvG!w!JuQPUiY zxyL~FXT?WfBYW)_{**`ePXLF>`|hq3YzB8GcWgGQ-er(6b1HZ0iQ^>hSKj)=r^BSd zP8#gs{MeVF$jgQdhV%UE>;Fl?Eo5$T_h08* zqQ?J=!L^Ezmi1S!ff0kBTpV^H7{)I=>VI~2Db@bxaffh(iG+xE4JkECO%D8ra$wfP z#7X~pPA{gTBRE*;=7mPFOTE+~p&nkIC^C^puqPVb9^~d09HP|E>E*UULf0CQq-zZl zbehx~Pl`OKRr~4ioc$hL_^$cBp!B@YpiXJQBfojoZ!b}jpO#ziR-V`Tn!t}mc(+(z z4BG5w*;VI$=K2Oy4*NZRkgq!&QO&ZJ}Hk|QyX zsq^{Abp%wib=$$+?$!Br%cHa+jcLyyh%^$g9;!?mpcb^dDQA?SeZBM3!8R1 zEgke4gx8(4bTd}{Hi^N^cC;J4Fqo0`U5@puU0G4JKeJy>zt}^TN=wM3W8{(^7QG&K zF@sDxMhMOaSaSi(-4wr%RL=)%LcyiaTgzeJ(Tks%?KugjA2|Ewe5DPVgK^@&=4+;v z7ZT!y+H=8yUh1Gw88z)tFp9+y7#gSuaYwV^x$8%$xR(0SzaJ#&-%UMq9n?zowX~?Z zreBp-ymzkG{n7VBEa`IG{qcHW0ohWYt^%+nBGLpTW@yp)M+$mUe_t2-keE28HPWLCw*cqw!S3MGeu`}}TZvV&qwwMd7 zk^h|>sD8%w-X42{e0ijg$#B)Zut;*uhi(bR;gP^QceZ`AQITZDd*RR$gtxZptep@Z zNfPafg2E;XHxiJW0cOGS6xzx8e;7BN;neGTYhmmvldIubWgZ z{iSbL|57Ao%)Ld%JKDm~d-we-Qj6iNG^?o(J7-eue`BMe9MNfE4}Nc~5UDlGe<}x1 zg=*U0t57Mt(7eJ*5A;+EQOZL--GkMkfgz|l;t?cOqE@l~gYMQwN9czdl2sKdsu9dQ zyf*OfY&h|%9I)kQ?~o~1OU)r$>eJznE&tu+v(qz}*^j|&$!!0}k$yEPwK~(!x&Mfc z%$58>#vANa(h&Vq#mEa+&Mj#-Elp*RRnb8i{L_ybO=|5a`~4T8Jk_3UFNLPQcVM@} z@9LMQIl=K{bFQfCAyo3F{=Jgt85F2SeJ??ZP>(>hrz|u?F7rY|O{5`8G&EH0sUHxk z3@D~c9+Gs)V+NX2)%k8{6^+<1zNeofV5Erwzq*;Yy<#_-{@fr-3^LyTVJD~Ta=?~% z0y=Kpe$pJWWgX!pSHG@Z0ncn(%LwF;E&uNJe{jiz;=T{;U3xFwG+4N4uOG>l!#jzV zbv)t=7&-aCRX<;ZgU!DkEa9{O8#2* zKXee$zYL}s>8=h&;VJ^v!OGxZX=o4vK^`)%prBy6Tq*Z7eCVLqQ%RRSBm z5pwz0)#Ci^kRgo)@W5T|inb?8!9v^He&NSWsxY&iZD-OCKlJorr-b)_NykXWu207? z_~SsmoeSwJ`^`Ug`rjm>O^hFX`s$A!*y;b>cD)~qnr<6*#{H*p^;4nK*@>&}rsNn7 z5YLkTUysZ76aQ}Hxg+#)Uk@Q8Pr_SWR_tf$5lm@#gP-|To>J}1+ zy8Y323GV8kAa}XiJt#O(N&n(K1vn$C>O0gErw3Z8I#2sYmiVa=1YGo~L!F06W-WhJ z=PQzpR|ZsCk0juN6N;$?Tb#-7d9rhs2oH(f<9v2R1^D28&HpZv#l1KSv5^%Z$i>b1 zY7Yyze3IY05+s)v-S;Az!&{YV3~oCr3{|E=00!>!X1-NbcPX2?RI*O&ur}lTZ>$AFdzxbWqNYB^P z6!XHnl9CCwt3AGUCL2`LAJHYd8|dP;JvygOH*mTC?upj9UBU5{$I6eq{GzLCbwfAe zC=S-}lAd&s2*}qR7n_Fre^IS>ds5VBsyV42)?N5>hrgGnr_}w|Va#*~66m)xZAQ?T z&Co}^{?&n&-f)`9p>ANo7BNx#n56PeEJM6B1y43&W-?zuLoNhkg!M~W;>X{2|2kPb=d3^lsMlSr? 
zw~^enRHryoSF%I~IXKOOXG( zW?iOl8c}BQ$Me(sv8s7?Fn6t*&z>#s;*PFpq#bM z))uNbGv{Xu@8#+Y@%~lBy;mpmt&4Qe>z`g?o1I%V55S4}nW>xHAE*w`(XE=(rckGk z?{`l1Vz6o1yu<>PQEJN|@6LrPgIzamT6Ntb{A<+!1b$9|=3>W9Nu6>WWge1Ug& z=%*I7(DpqDqq__xFCIvu+W&l`794S+iHKOtx5L&WP_z6#2WE|#WAtyIZv+R&iO^6t zG)7+*jK;tz-GV*PYeiJIMdS9}f;O-G*4$Yz+P&6S-!8VZCe7je+Y*OUB~C6=qpl()n%YSb=;?uG zdGZLBd4`0BAYF$)oD>&)UWG zHrtOwwq!~KTkdnWA8on69I$25)~niuXUrj6GW+Y(>3x_L@s_ z)8mJQVCDPz>GrwRvQa~xV#$>+Iz?E~$PU-P=ooC2d(}L4caiSCw035_9o2H$+9~(l zeBSGlANMtT)U#UtQu364DtWd|w#>6P?F73CV$1EO*M(#659fbc(rH#yV(h;zdCYej zXmq(}DC#Og->tjL+=EcBk`Yu14 zUwh)2S_!&3btM80mx7JA8}Z&wwu9b{wXM$vmjYK#+{>ujkHL>!b)v>AB`~UMUPzI~ z1d9BJKe5i{KqIG^rRTz(Vb=Vy%~#GfgO(#z(H#gLOjjo=Tg16S#p-3__7CO5=|?AZ z{Q;JM#dT`Wm^{=HHXbp4+G5T}z#nWqX+DWT#_q7DQ_$)Isenn}XpN+M>X3infa~hI zbBEgd*B2k?rFlIoYkJfsvf|3Q3GzkD$+E(C@viTVl73wWhxo^=0Xd(vYj;5{xKL}y zz{ed9fgOt)R{{m(RWdO0Naq1Zc_A3rd%HEfhvCbhiymxd%EBuNf*4jsIXBh^?^_iwC-?TD9h|Y zK(<7ie!HwsG6~$>qXMwyw@-p`9a>pHwq%^>(-V*_|L*pGFy%EjzLkB@w}bmI9*T$m zMb)5I(=E|MzNn@r&ZrPNd{L$K$w>Uzf}RwxwG z5VgB!pgS6F;}sGrMO|o8wcJzxux+!`h%R|ZR+T(zge=vvqt>j3C8y+{&0&D=`rc9` zbK@gsQZg~B|Iyc<%0XmMQLkr6+P~s8e2(OI*E`$ae!}6KzN~97;iJ62WE)^Gv3&eTiEODw>BaRnGiUY*HVknl19mQs1 zj;K^rBq|W)iLykgB8@0sm@3o=0b53IutAEJ3P3BZwD7 z3IYVa0w|CQ90g_q4pB-J5d}mZkwv5u8X}&EBmxLu0uoZfkuXCo?^3)7FTnHgEIbw0 z;PH4Q9)SDe5SQYPxEapjm-37F1^hgI7C)7*;m7kM`2l=iKIBXJj(jsd$GFtE$hg2b z&p69C)mUR3ZyaeHVC-uQjittp#%9JGUMa7LSHR2TW${vZ8eTjvk{7`9I_5mU4@@1>8Jt7B`iv;l^_#xdB{X zF62tNj$AV?$Eeh($fy9_OpZ|=^}lBMXXbz@R-Zd&xH>{TT0Lgs1nde6UuNL~7QV#7 z7g_iM3+J=&c@{p$!e?3d3=5xT;ZrPpl7&yO@NpJC#==KgIFE%%7S^$FE(;%F;lnI^ zh=mWba1IL}VB!5NypM(VvhW@j&Sv4=ES$x{yI44rg?F;>4i?_d!Wk^QjfK-$cq4~Z(!k67GBT7>sWX#3$J0})hxV`Ok7MCT79PvOV_0}J3rDc9Q9 z;V>2+$-*O8IFyCeEF8kZ!7Loa!htOOmWAK2@M{(>Vc}OS{E~%Vu<&yhe#XL2S@;PH z7qjqV7JkIS4_UZ~g&(l+eHOmQ!gpEt4h!FA;ae)K z4`bn>EIfpT2ea@X79PmL16a5}3-@E;zAWs|!hKlSkA-`)urCYuV&R@F+=GREShzb2 zcVpqMEZl{KJF{>n7VgNx-YndKh1;`mI~Im43|Lsj!fjc&4GXts;Z`iHWMMBBRQ6D}6U2*ZT_LLh7|v_X9VF9p{GIzhT%kswwOE*KzaFK`jq3b@2; zqL4UBY$ukX_J0J?k7!3UC#+H1zXUHtE&mLB2_A=!!u#XixGQdlWBk|r+x!#!O#TXf z0zZP^kKc~pf^Wk&GA=PLG(Kv)(|Eb@OyjY}1C2Wvw=lLd=JDR}Zt{-tcJP+*5_qF| z{dw(p&3QIFBdi1~#ExT`*eYx`HWnL*d1J0vEezwn<`!~~a(8f7a%XZQxc#{8xvtz= zT&_`xQK8W>qfDdKMzc@`(~D(NOP+N-?&!#{kkOGXJWyyUr5mrMRgx>ux;|r-N!>i$ zi=^f}>pF~0CYQQNdCe`7n*G%% z^-{`t%`B6g|8jKn8d~NqHT{c~dV!3rO)Zm}{Ke|w>Gi+Zdk?56mZfogVHS4D zVL=d4F=yQzM7P_V6(a^zHtIDZ<_w|?7*R2!V!kHCfY}{X%n8FaTodMqV!*8YYj$^* zWt4aC`=9Tf@4oNHo--6xQ`0lk(^XyFU0o%XOSsj15~|wWUnBv}sy+!->}X24QpK$z zNT_UgH3T#k%2nLTehHOW?+kSjuom^C=+u71qlImR5`4Y zDrMXN--J?j1;Dz6T+A)yli+VxuMk8@!u9t}@Uvs2A63lt^GOicGb$t^uE00Jm&GXK z`uZmL*bzlAQLWwJP4KcKieUeNgzF_p@U(k&s1l)4$@LT@cod=1 z+kv?rf&}*>RGAc-s=IH3n|-lziActE^GR^EdwhjVDdM^c5?t&qm(yXSlw21<0^g3R zgjHsljLR1!@QP3&GNy{l6C^knp~BlrxXywECp)T44%re#TqoZIuH9paMN+wf%k@bh z>>f)Zl}ovVAi>e@a`00WNyS`8L4rdOstf{Ha2*5*96PFcYF5nU_>hzBN)<_zLZZNj zoMcCn38gaPhk%@DcQwTQluA^@cOPOn(AF`hv4ZNR)#1miA zXnz+FDkUSjDTzk{QeT8BlqiJ6LqAei{)~pL`p>b=}Yz~Qm7d2bVERPw_^pjCR%!~ z3&?OgD#)ZnAt$a0$ZmF2kP4+xN?i3NyA~-{0;2MVFWJSOD2AB$#1&t%vmH@H7fW0g zke%!bRVhSDv4XfHAUoPoK`Vh;ATD~5VO;+JrdO88h-@FSgWcOhSV=K)!G~;bM*}Gl z66XbEJG=5=t2-?55$6PCTRW;;39qjp&I-sjc2v6cz=t{`AX^uu%9JwVw18}7M}>{^ zpecw`0pnOTS9ivzX<`iKPkPYq1gqK%HmBb+d*}z7nRfI$&kq`%c$WXgNMRK8vIN(jz zw|jc2R8H*oCRKJMh z5*fav)c#dK56FmJek2$IcGe;|UP2td z2$AkXf?;YiEX87>h}h;!f>~Ny3h7G4g`T1Bk)Cc!GT8qZO6q|i;Q^CiJ3weg8SRf&kTz9iVBHli3Js40jwJ|tMA zw!4T#P~d6-3HGS%#XfSeN^dV367hfG1KADZV7wqlM22f+{AK`H*0a+8DtYkrGPV zd6Qs|+P)^;5a;-jV2#?}T&V;xn(a%1Icj@Rkx(cR6SMqCutx28LD4`Ho9RP>IcmF; 
zNDA*X!;b`O)Q%Nu3Y7A60a<8}(wYL=iIPb4A;BQE6(CbVp$Wbun4^UcAOmM#g7hW9 z8Z9J>!I~uE{YWrI?QTU6`^3aFKN76bB4kiY#8h7r?9sv^KvPPII00GIAeBi$(@znQ zEQ1u>jfq$RX>X9yeFc1$7<&BAB?7?zANK$6fPYGUB{fh|17rZ{WO2x9Iwb;jF;AGl zfqyKq!Ywf}d-*t+{qChwgHWc|NM*2*QLogfv`Vc}BQ?r(3Y|fs*H{7`cRDlC?2rZU zC+}x=eunpI%RjeS;cf?lCNa^_1h?5AiwQ^mOT8tQeSPEGGEae>45zB z)WR@x?2lIIeam}sbKj$F7f>2t*Q zH^{KeXJx0S-gxbI(fzsZ-Ao)^ot<;S3aBYL!V@FDd_if0CH-jrf~DJoUOV_>=6)6L z^K@m(6z(aDtiJT|s=KMSz$fYVudi`BABbfuy=Muh1z_*-ef3Zdbss-^F#^Mk9aB&V z{QEUf=v&*&rDmHtmA_R*`>v1M@H+~-aHV~t24B<#V?S5u8*mlva;bhmzTtODY2&iI^L%&vBj^dqx!wSJ+s^u zD(BktxMQRT|tKqiK=)CZ=uUir|ccq?}TaVv~i=d!`Gwz4Li*i2yanV zx>|W7>*A2`^(-G<^Hk@r0}Sxc!$dS3v-#QQnd=$2e#OzL&vRX@<^Q}5l;N|K zQ~Y^LIVF)Fnfm&-V4-M+HmS2~h)8a|I_57RCs8}({JX*HDc0R$taO_~Wy)dkGvzbo zF#I+;bDb43z;?cf7c;#I0e0t4`~PVNdLCLo_}%#-+C&`Da?!D_FK6Qk(dVP;f1MmJ zBMFOW{?#sQrBb4Y@V;UQ7irWP05KR~+D|B#7-4Lo(n!oA*A?+p4Xn9I6v}V?!rzz|Ea5~MQ7dVkSC7V!v7v^Iosso`92(cdFrZTAy3|-zD+u` zzn$cX*>r{4<;xf$G2e#tIuratU4Y&4lS zHP=w~p!G})6Nvaaul&Fx=aQM-*D=g_OS-h3reXSFjnWPrS3jw_xl+~I`%OzrSAF{1 z${p%Qk~ur1>yMiXW(;jqI()x%H+HP4%R}#B!)xxdrjOgI9y_er)$4UOtF3hP${%Ax z)uOG|G}ROFP8-jBL~OV;W?SdX>pqwbnF1{QJj{lF_xyjIX;`TH8(*CMa&hSf(~dTbnE%|7 zTI3QmyK$}~H6gXb(a>j(RI`=o_)HZy)b7ZN8x^a$p#~FF!$em>&N-ah7x3 znmIq`UUJ+E|Ne7+Qv*m>i&#{q^JP0UXuyGg%%d;>^p6>Z8FYF`(4-UzwMwI&UhNPW zVSAQPtAHrpItW&576*OPeGIgjdLd9qd%C1ze!O!dvR6TNY8K2qwj?}apQkTo!Auq( zW{PF#EIgBiGxzytjEV}Ivm`e3_QfqXt$NlpD$e75euZR{C6N1kYYOEAwfo+?2{Y== zWS}*DW(<>JS#8nRBiZ`WJjwP;Q-e^i8df%>%EAq}P2({dL&6h0d|#X68(y!NT8eb} z*G+@js|WF2Afg>;Or2Du(2FHTJuIF`HCmk%nh7j{o0~+Bg(DbfYZ~70ZRn%8pR{i7 z@RboY@N&-O6=S{-n0mOTn_<&O)OKm=h>&X`II;1${hkS5Q0olu{fh^`Mn~&s_89oI zES@=WPMI%1E>eT94yx9;<1y+%R(R-iR0hBChfO|5_&8&SB(xFGd(I^q(DY^>2XweU zDjzki&hb9n(;5FGv|?e?5N92(x<2BAwU%O7kfZ6w%kmr74^|8>h?qW>Ee z5dB9B9wRVi1G|QW2C-175NVCz03#L3B$n}E_cFK4Ed!vfWzYtw#QLJY0&9bvFSY9R z4xOxeow(Zf1=`uQ;~)Kwa`LmF|rXG~K!X6~8bNKf@pVY`a}KZkVC7?`ffX z|I=EAWD>uNLVtYZ%r{fGDe2a14;$3)yOkNZn0wF~%4U<=Iu&6JhPm4oGUtso+*yv_ zyMmYKR8gftPqzQmF6hAtp}tV{ZI!tWj<~hcAJJ>K`{F@!bIW|(+BPAazedZg(pX4BSt;wl&E`dK6PNp#@87Mrz z?#A9n-_g!{E77FBUy=Lc!^b!6;$enfxe@11H9sfJ=C6Bj0nqcQ&qq^2WsMwd{2j&q zTtm38ji0%0;n|b5_9i|-9V#UG4c0=kLF?~+lSRPqm>IF8)R>J~UbgG{gowN@s{(B} z!+-VVEuix=9lO?Z$ot~=(|Lx^^86nEj26s1)c@`%~QA3d_E=J<$_7@ zA~fR1w8z#URRh%hs;X|4dwtWy>|V9z!BB@t!#Zy|c~PC6={UMFnXBG8?zvO%)6r`6 zrHDzs222?nd39>iyR9|bb~96@kDdG7L1D6Se>D^){MXj4VO3#+lUOjSp(fn~7xGfxFA(T4vP((5zY* z6~bOCs0&5gAKZ?C-lCfBm}$8;ay_R~ojy{8+&l8j@wYZH9zD<0vmo#;)xQ1Fn*BKi zNHg%^$$NVQxMCa5#JYV8sAsLw-Xj-2Bag@RPhH5phN>NGqmQcPh36KChpykAjVkVH z5kBDLcO)BI>g3t#57F;0m#)d_=!&CzwC&q6(G4f(&3Mx0!F$xH{_u|uA3SmOHpfM6 zqdp*hwSAZBZE(W72G6OdA9{s)yRzAW!mK-JQB?yQ*bmP#M| zVT*S)e_Y>n=GobuoHGTR=ehP;ntToPwUv?E(Bp@05)StmnTkHv zS@(W$!=7m3)zc=|5zVPr-F^NH7@10yIvw8NO8uVHpR-nZQv>3uedFt_U)6Uu(tUj2 zMC`PTqPU~lml_z4=B{jm4Si=*nwac^eoo6!QPo8|7W~pGaz(p23ks&i9KiZRP2Oho znvJL595%Gc)cvlWn>aeoEeD4V4mBJy9lOJB_nP1Xe}}u5JC8ev+uTv?I0<$DC_Q~W zUV0oSuE6g8abEpBx_CAAsO!OZzt6ele$aiPdz8Zn_tuVi?v>qsxLtSK!Efm{)6EDw z+k;%+yPhW+xc=@s#WjK(;wmQ+h%cT~UA-Nax;%GW>yqX9!DWTZ7?(~iwH+K?oCz&I z$7?o!KR=m2gkO;;#rwv)>hKtL^H1mLy>{``o_!oo6KT9a=eN#ho!7(u{hrQJXHTc6 zPDdQwol=}eIkfi*cBsxY;wnTtM{qnfYDHQIpe{7ZHQ=QsmB@`ojYbS1tBiV;p=wz( zmr4PSvto@LqPl2wa){{yF2~@a13ueYBlvh$x#nOgOsr8Vv`QVg?SuERUSpIf#I$1* zI8$iBNGx}|GSSXKtc0m^jZUI6=pneMP@@t-${MBI09kPXXtW|rQ5u-|HfWV%J#a$M z7@1rHv9R@egKT!Ieg-g>^NtK?5NU z4GN=CFREDC`dmhdPH2$Bvbe!$6shzoFstDi!23=DE<~lLSj&QR!FqUD2|T({Di%YH z>-16$cI(Y>Pmr4TR>{NQK%piv+(I~Yr$qExaQW4anM60a5-}3bEr6mdj%)rtP zf?Du0((B|xtyBnZr9zFgc1Oz{br6UVriKhEDR_I!3__&@(t5)RgckBwY4o+LTFTN0 
zVU(d1Lfr$8L?ooAm}Dv;BxTTub<&#cEM-9eDwRYA*^a?Y%pigU;q=lX6m5jKEIMt? z`j)bw&)0&#vq+*5D)dGPL`j3zsDj3!gfFD6@yx;{f=^+f7by(}iB=EM_jM|X!U*oO z(14Ws8YT;uR-@M%U=9Q3GNgKldZpLMPXywXtwPQIG{*snfzX3y@Z5fO?3)qmx1m z9+65*SR_qJw*+VcItXnq)j^P9jTQ=lj8zJ`7T%Y?#9WpJCLD}Hy#}&Y>2!J>C?|zM zr!&F~1Skxh%=>pUm&$06>!G4V^mBl`(Nmlni2+uz^}wVh^j0pI?NmX-5Q|~|jvnex zqLUaQLzYGap|!>CyDVG=kZcftnDC&#CWPPD$+Zxp4{i_Er_wjh$_10D1`vM(#N^gU zB{~o%DY(BYMPe058^`)wdZP-y5mdhl;wOWWrG31iI)POR`pbExr7Tcp5@_f$IV6gM z`$8odlm?{{D0;0>=ls>m1ri2|QKW$yg`irXy~J9H4!(z04|`++j#;>LkS0{C6zMcD zTLThi6oZhpyNGDb*@&us)JgR>E#N8 zMkCbdWf~>K&y*@a$$^rQ3B5a8%aXx%7M)TGYD_CP(z~Ew9#JoWYv>l`GS9*VIa(kB zlU%OQL+nk^6FLQyCDCcbLL<20x>&jnl?JA#4ML$tq}A)BLOCqt=rsyh@r3AMO0Ce< z(krMmN}&>@UZF5ZMRGYzL&y!F5)4AIQXmTN}1lp`i>e1OKZ@8VuOB>b|IDNpevF? z+t(?DB2g({OIa!-^cmoatuksrn}TkJ8qgU;(7~zT6`aPHxs-a){`AZ$BoWc-3>qCc z$b+Vp(OsIvATNK)T9#NU0l8P`LCqLpiUqzaC|C{X4Y~n2AS)L{5!9j)J|}0Vg-Z!@G@#<4OMuy2*!iO~K(mJ6-bN5$qan~L9nj5zu)s4ZG&-F| z3-u1lSz&}5i}fO*XPBidunwTXY4t*(7Mi?Htkr6vht|tMLgXrq`)&)D95jeh16x<1 zlNBjI73!2=2I%0%LilbDqb*!A5LTHEKDQFy6#P!0NQI%5=Oi zg96h1KvSld-E~TdLTXfr9Uoe`q*93W1qubU4Hy@oM8K9&!8)J}e1pYa)~_uAO{0fy z1Ns#sC`srs^m-NOFd;a#t8~EzOIgqujCxS!B8b@uarZ$gK%ddg-k_K3bU|aSTvA$x zf;fN#98$r3i7lWjI(4L59RxW5zLNH<^;IJ>!LkACc(`unffKj2;x@VfX zKyN@ZhM14wv<`m7U@qyz&=G>Nmw-dAP%yy4WdIS@8&r_eMhV|g2bLEMt&Ok{q}9PT zJibK(KHHtu0gL(l=1q?ii!$oUZV7}428uaj9u(tyy20{M_VUAd* zlj(J3cor^D{_yQZ3YigBVwF0fP7YQUbdgXw&~^D(Y!fgC3?PWG?NSL+0|tT~+>(Sw zo!$V#tI)f~Tg%c&Ad#ll0IC)8@aPrvv^1>jYE@uIX$^s|EL)hYATS za46`$S+4~JrY*DB$|W-x^{}NwFNV%t27NO0@DLPO3%wtF6o)2OE{GNkd8eS`fQ|>6 znF4a)!ZuELcF+?-cWb{5Hj`eW1Jw$17czK$Sb!F)NQ{#%AiUF7VQh~470n?#mJ%a&>Fx()Pv5JLYJ)(tF$`*aBEp0onV23 z-zW4xLQqK(Xg{!tLnnuh$-ob_a6to;>7nt$iz%g`Yaor65+oLeXj-sX-8fb*Ewo^r z(Wrv{Q4ifQNB}4yh!+msxmfS#ZRG+(TmkCP06m!o!u)|H32rjb^o_JF;AiQ#Y2zRI z5)o82M1qEH1*#4d0PI|aJy{yLOLt3IpdWN9u*H?&E3Tq9VL_!ry9fIU20ua`wsMJe zDkVHAZPn=IBG_Xh0kx{s8NnhJYk5|Agi!&Aq+AJ3JP_xcw!=UHK|c?VFEJW;t<7b@ z7Xe!ZHatPUr`5>ybbkw-oEUO{z<|qG^?`*83}hqh8=-A0sB9PuffWkv64HRf&_+@9 znpZGH!v4#j<>?`z0|==WOjNj&UP6b#2JLEqRX+)Ah^U%mxg%_>0E=0vg#HnHkm#lc zG7Wt=RHRO;sXEo`&%b|1P>_&B8Z@N@3_`G5)|BrtEKhZ!S_>(+!O z7tO9)0B!!lY%fsSoy+6;4_&Wwk5E9Z>VBbr+(GpxzPq^i-dp7MebUEm0s-DtWzF{a zUtIBgkBmKrX(6~g+Mq4u|A9W#4GcAOa>vd1U1xB6axnAN7`ml=Cu}MI2P#;&9LjgZ zY`W#XzSeRIH?1-Jmiy*gpVJdFbmo2x9Hnb@Y4N?1w9`y|*lk*CR^{ltZzqh;JJm8w zaL-a(HczZI9bP`llv#a@DVZ3OCER(~x~XEV)mw6+$>sEh^}knIXWc}RWc6+|*t{aY zCwI=Sc5%JQx(QBg!Q<5yZ^%UJCX5uTm*#Y}#cMLj=5JlZbqHdr{Ny-9yV*Odv$nNp zCa#l>J1yGOZgC`mN9-B>Jh*>uyn^)n*J8u$&C$V12zb80f-fvH(asF8LI;bNup9w1 z-VkrhEG{F(Jx8J z+UZ4qd^qop-}S1ntj>IA?6a}in-v9o-14>ajcO~up$U^deG#1ef@*A99$oXu2jsqV z#aBAW%vAa?kttE{d7r^=-j9 z4o-5NIXyawkKxhGzc0<&l)5wJG3wA`$fIx62Xy({!0y*XzIY81Jo*~&0?m6o=eNN@ z1JRNnH_M2xgj2PqX&Usb7F;4aOa&H-PLqJg5B8mJJZ)GckAG}B)ucK<)@A8#6w^Mi zfUDe!e((FakIS*?Xz6(8TkcDDQ)?P*@8+!BN+m4VTdRfZU{rQTxz-;zGtsdJRp&ol zIfv@Lv&IgQ>tM>MQTVJEoSD>~*TP{}R?b20)0U^iY#Km48FRqTgFhUJCuRS3_RpE< z=4#JM12+voE2|jpyyOk1mRub5Y{#E7?e}&?)iKBB`bluieGWQMZh1hT*KxS}ypK2L zUwDY8e!S+|FlsIIIqv^jaF{*^961fJFs=m$3n^`?sbF;#+&Lk>B{-W0~x_9V9<@hg`kGo_FKILTBmp(%e(4qmp$IIX@F^%(SdfRbc zVV-jc{P6mM;3ZZ3@sHErK9}n%$Rww=tkqfY9_^BLxOGgx!^_L9_;~sFXDawb^V^Tt z7N9keCjL4{SG=dxZF4y{9`B7^koc0yZQLimMw$T6pZ(%9T|W17PjREkg(g~X`J_)Y z9Uj;G+cv&!$Tzt+t{bm3mw#q}+}L!bL*|^1QO_rjDWuQ#K^f_X{7VFfsklPHY3lsP zAAJMnPs*Cz`p=5XP@TxkrNOHns7E@l3TRa-4xMwVJ4aS&8D+_``@l5Alocimh@(DE z8n?o!aTeOsSKUJtan3ZP#X&=G@Aas2*A8*{jj|}q2AFf|r)BTWzZ9{as=j67g(>wG zqw-&P@8$3BnA)ZGQ&b8}L@)8cy&LK+ri{L054`(vN8NdLdb2)(iEP0!$FD0!bdC{E z_65a8;T|42^R`!9h^OuubLIQH-BbNYH;d>Lx0RaN6{#^8h2UWT4zds?6FeHgrx2oY 
zK%z?_I2?eNtXX7?obC)Xiw?6_wkA~RV(yiAU%mq#moDb-oc12w@Eqmtu{i{naoOqK z;z&NKdgtS@H;U(I^9yu*Xd8b#W_$VjE#COy)aj8&gB!Tub|E3bJF&VPCM(1IIZ5HCFIMq^hVX6RkN`uJ{EoI?!%e!nqmjL~KP z8O^+~iAj?^Gu>NzXR8}_4s}{ZC9hl<&|*q5HL!o`wC2BOQj@sKl!>0J(BeabLI=-E zMvt#NIoEZ2CVCMuBY*BxUur48!IlX2E5Pp_d^*9A3cOi#mhoHMOUtHMfM$h2PkH?6 z=md27fY3R;HPy%aV21v_@@;2t z&S#2EA6fOa!etFjqn_u-8e=P4M_-81mUp(Lc8B(F*ZPi1ue~a#HtFw`1rB0@Aj1*Nt>ue#v?7~C9klJBb~R9 z`TY(&GmSOPQLac!v+;MjSAXV!S(~Wtp}!ekq^+=(KcV`ZQTekrp-Q8hU5NX)m)MMK($#N0GjNCHssqpcBH@7Vt$?zy>MU8D*4$H##^o8)-)o zBX}8_jeyu!hZtDI7+{P7I;bq2^qM&iUx}YzaUX;$7~QM?+C%SAaX3R~L*}~TAXBr& z!MGjEx6XK4xJjPP$DC*2ug@1o!2JJv{vQ#@gZ=RTm+(T_Z_=Ro)gJ}fe1Gz@z~>}d z*K`jNOgXiPkdFR9w^m-v z--JwE|84#vvQNj*{z>xbi6=g~a&6Oj@Ahl4KJmz_{TU=4T{d_NUsR18P5S(mli&E8B_E(!^4XHk-mlKQaB=WY@AemW zTq-`MEpwiM>~dz-6u3(RKOi<#G;ei&U0WZ_z`r`r=3{WneM@2XeHr}ko-dJnrpEq5 zuu$?F#eMcG!1I_$fed8s%is*O(ru9iS#%DAGmt4qd&H^s*Y!uJixqa@)c|wihxdO8 zuxieyZa_y1(7J&@M^zpl9R3*SO*>9*MDsIr-=V#9&)_`6&y>T^i%7mXQhqU_pIxm| z_@Yc5?x(C#K40D&#}3Ru6LROl_}}|)j{n85>r4l`O{DamNuf{&bI`EKK`w^92C!ww z63}|$<%ed$2WS?2dZ~t9)uf{$?XF&G(7khxcuc$UFaKQh1$7yIsn461-q=Eai!Ai# zR(j#eB~z6QyxQ$dg-g=HU=~R|s`q`j~YM5kE}^O?#kf&XhY z0QPynU-i_xK15w=nI~ zH?{bq>5cjavrTc^CWiL^l3~hkr?{0k{esn%;&)pJx#{0rIB4gX(hIm5V z8BG?%I>W47slSqZW>yXYeZU?uh@l|X=wREOQVe^`VTlX221Cjoq172_?nz7D34pe4 zn(nG=7jtZX{v}X0+zIdL9lZH?)+aRU`kEG|r+J8jhmPFh7DP3h9Qs3bjE}EmjT~6E zw+nWxv~yL{FJ3sPnpEIk{~2QF)9S7ikvH;@sM@1f&sBx=?1XMc1(g4Bd6+mSv9O$Y z<&8f(d*xH-8lT&ZZ+K$nzU$9l>2lpYAKiP^ZmQUh&cV{ishxtT-L>kyT~hzE?Kulx z?ROnG+#h#5xbf_vt|DeU#!*!g-MK&9DbxGbl__4yh4G@;!eqJcKO~SM`{DjC;f0dl zvF!3)7vl!rUluBLQ;tB&$eU-{D0NJq6Ej|awcz8nkyPV`Id%8-FxbARF{xf(S?t*_n~onhlIPX_N!h#QLXo0eYvMswVH zUqeyJx1`Pbwd|wg3l@deshWm+b{g|uydo3FYy0T5>z;t@`~O|p*Tbey*wU+k2>^O$ zE`&OeD-}`*Z~|dGU~|0C+}qD-;>tj??9)qg^iiDbj~L#iR0}6(oNs(#2p{|zou9P! zGd1!Je2I|T{?AXD(swU?)Mu~@9#%y(Em`P_CH@z4V(mwsFvsV z*QtBY5t-t1_Rd`9iaN9}-y%6W9x?puh8>iI{ejwbyqgY(+4ZDBR8ZCeifV~KN4du|oRIfs;^5NgZ_5xT(KS3;}=g;6Xs zK%58-tj|NF60^iloA#7}X33}5;TY+sp$GB`aXIOb3I7)PO`e#4xzmTr>bZ`+&(Sdd z)mX3c(;PDIwb@gi=2yyl2950ZnVMYr&7z5sj+xBGbAnyV&~yf8poL!OpJKVMkL@~! zZ>rk?`g!og3{5bUgZ3w8bUy#qC-c9~{w{3UO(wN(+PoH@c|$9-%_`;fiTkuN=CS(l z{-7aW=G{||zq}-5&a`x7rJJmDla=$Al|8x8}?oNCct{40RKbR&i^H*IFGfgam&~adViXdDWgdu{! z7!bz?V!c3QB5?54i7e68msPPeOn~MFNyqM|j}Yt89-#dZA?~;2`@O4F^Uv*D3`=y! zQ;wai=QW<2S+SuwX4wfhT>bUq8bMQEB8JY{`(+eFPr;$T^>yyp&kf({yZ6e{>UmVV zoImbqn?FHSYqm`Lyde+WTt6hU+Jb!KxBEnJqTT`PM)czDe8|VQ)emPpSm>S^JfPp7 zAKJU%AKqQBRk`p6<(0XSCfRWexyF6SY+dFR>KXgz(!?40sO{pO8%CNwQPrlLKlD6* z9hIGb{muT%u9)FJxgk%c_7vdslfJFXzKo}0txMOowOQL(zGp@VYdl$9TG;%4k{y}ea!NyxC4Sv2MpnGF;}UsT)*(XNcPTiVWU zgM0hW=(cM351iEIm9G4B7npkr`YX}5%{@WTMu^d1qR~e7$JkV7z|ON zW$tN-`m?z&0%#Wf5U4l%qb}g~$Bf1CD|zRg@Z;5!fBaTkfG;PG`hwHT|Q($ zfQ?^TIcj3BkpXy%ch-%yi~Vfn&%3>FqyE4W6ur}7)4AU75L15mtmA>Hy?8iv^Xi$C z=lIz8&p#csdDao|>1;vX(M~C_-nTH6KfGR%pYMUv_e;$Qz(t65NW#RXO#`D!Qm@%n z$$#eGLeX#1VY}{YHCxnJ_k#nndrOv}?*~>bpX)uzChB8d+%@ebOQw;Z5*{w4+#ax|_&ab?g7b#q9E9XF% z@5uRA=Tk9$A;OVHWJ#Q{sP#s(+= zX<6+Ga?y$wcQt?PeS=aIf$PeOJTSvwd)C`?O(uUw;YXTu%WHKHG5kR-2CWT+Z3qX? 
zo2KeAO4;}wI-l3NootP6&QweIUVgUn-(H>-E?*Xaw?C~o%%FaUnDSc(tPI?@oQKnU zyH|d$_ObED91;gaF6H4q>t1ZVJ?V$_zQs$v;rY`=?kUAfKCcBiXVsm+lJVz%M&1ic zEtLFbK2x&%3w`*Mr;KAC3ZB$m_Z-Nk(# zz%Sese5^oSNI!qCXRfB6lsX)CN`&po$zLCl*`e7+lolTQIx1+Ct^7GXypDu5+el3s z`F3QlpixZuF}>bzG_?o*Q13_WM%mG=jaDvG>f}~$&;@17 znq?p0FJ-?Dd=2}fukVYyU#~nvNBhMjm&pKc&_lBQZp~aV8)x{K^9;`L)mhlEd7TB` zm<_{Q9GFVA^TCID)^GUaG9Uj|d-QDmqVI@}GknZ>250zu62HIr&HEu@!?B<0A31xW z__?LOd;Y&}#9Js;{~iAo%6^N4-g&Eh`;LnkT)oDSWMqX@+TlyJY5}e!dtZ3K`8DMSvMsjycwSMrXqNQHg<*Yc7jOa zc;$KJcx8KKdZl}%dL?9Wn$YdBk``c|>}Id0-ErM~H{F2gg0nJ;y!UJ<~ni zJ=HzQJ;pWFHOV!`HOe*8HOv*e3SC27yAX~45-)}q z#f#*H@i0%w3*mY5IL>*_InLS6na=6Xsm@7|KOo9E(mBi-I}4pdoV}enPI*o_PT5YG zPU%jmPDxHNPEk&gkP!hp37tZmyq!4QJZ=s*o14i^=caO#xG~%)ZX`F1i@8E>2-lm- zA@YbEBAdu0(uq_e$vw(F(ml)_y9?bz+`Zj7Zh3AwZrN^`Zs~5RZb@!2Zc%QLZefrm zLFg9Z=IzFD&2!Ch&34UnO($ZAC?b*wBQPN(LI`hy6q@A>X_sh;~3=_ z=@{mS9fgh|j^2(OhdhTIhiqC1f1cO-?-0$Br=)AZi>Tl@q_1I+VX$Gyh~d2ZEIfyW z-(lgmSoohT{5lK2%EGU(@JlQ_n}wfe;b&R+X%>Exg&$|(SuFer3(sWXCKi5>h3{wK zds+By7QTyx?_lBSEIf^cZ(-q^SorTOd_4eF(vT$z}?#aU4S-2|;=d*BU7S3hijx3y06z8+vm(O}%KI?t? ztoP-!-j~mMUq0)7`K4efg~S<+I+G&w5`z>wS4YioQSZ8w>x!!auR_ z4=nsW3xCVP^H}&R7XFfjKWE`jS@>fX{;(+S%z8g(*84fL-p`r!e$K4-b7sAtGwc1F zS?}k}dOv5@`#H1T&zbdp&aC%yX20Kk*8AqL@H;I077PEAg%&BAxF@Et5XorR~d@GUHS6AS;H zg|BDfYgzbe7M{w&SFrFD7QU2)FJ|EjS$HxFpU1+J==FcXaTdqj*@+LJRpM+pf=Apeq53aMNmClX|O)LYz6QYXimXQ2U_XQ2b(&BSmp zGtb4n=%Cl)7L#SXO!j#T{UgP|{A~f*we!E(a4Q`ZrSme`3sOUwH)O$>VsIPX;`g7#xI-}PF;@Do-5$UX3Q~9eJP%55jpeG7j(<$e$eo4T zkFL<@#yce*ozUEMeOuwIab($=oOV23K&*Q_9T8)S`xJK${lD1_4*>mR7C<4j3v7&o zbVL$5{h9>wBFP}GAtd-QLJlHG+G1{YD^8^{(A@k)P=NhW9&r1Ey;+uxm%XPv=lLxg zO&Te>aOiLdo-jpEW<98an=gBoK3&4cFIsOI(rbD>H1EdIxgtG*-G6xXz25UPy5xMd z>59YNcyjL3fo~&3*g4}_@Vsu`c*@}>LxPWZ~gx(?PcI~l~mW)VRZ<0 zIN9ofrsD@xV^(^{hrmx9oc-fewOV*VW%p_|()d`mVwcO=3}@`$$ggHzUR7Lo^&b`T z*YI%H@*no^3=YQLliKv_en5zM9rC{RukMArkGgbZ3-E6#HEGbY>hd2!>4ro97<$PfkEK3J{{ENNK;a@<>m}iHKkm!uacxC3-1CM^-wr(vh_sWGx3-%R%dR27Jk%7!CpP z0v}%HPe*S1M2DBuX+qWNvuHzd=Zff0YKe^NG987+&$>J_SkKTYE1j~^nR1Y|9Lib_ z8c}uj-fO!kN*PGB*X=t+H9hu0+o)+4B1<*+yyGB6eL6Rh!FhM^&H?No_h}dCVzlvD#(**fv>|m7lWmBZt7W zdBBgX{8U&yQzGyqD?jr*s9R#95}pTHpNFzO54suQ9tO{Yx-X=*!t)ee1cazf2fzx{n=Xd<}%1^<-;0OR%t2A5c=6+^~ENTLbp z5a~>TkfuxuDM2Ckw8Y}}m-2D5x$gsL?)#X2&fX2({{P}6;av5$=g~s~{~8u<5;A1;+9ag9b?Z{A`f-e#BVOas9J~P1~aW!@C{%0{GoV%V5ZZxr9#z=%N)*JbUwYa;YT;ds*oT6R&)n&{EUD3J;3s)O!{8$qO^J$c8UoWS801 z|2t)c$Wo3MW^m?wF?5Fizj+d@Lc-zLOqZZ9{ni{mzU#YbF-Kk?8SacWHrSDR?)yg@ z&YWlHH|{O0J?_~Z69cQM+Y@J(J6ZSs+%{SE0Npm7xtUfeEdNr`&4A#f3pt>F+48u^ zW6Y@I??=SrHk|YI#TAnA(EH&_-y{ryJ}2bw_Bk4(3g*CI^N$8HVCx`PwieRF3t>jg zs8=W<--o%)ZN2f1f#$YHPr&}_U`7Wyo4BuXN4zbRNHgw;!#KQ0`fYBuHfG^-{0aZ7 z>zV6{E029WJnk=lOgX=XzWA3H0nx($*XK({0maqe{}A1-&zavPYhv`3rGsJAo(2k9<<>S`Ct4{_je@NTgBf4RT2MZV5B_>q~z ziJxMJ)izs=e!_X7LOZ0gbG_i5{>w5sn{{yA+Onzf+M!DW-yQO@nL5;j__(R>!^L(`lJ?&T}KTv9{DYG;yN@ZYtymp z$?MP_*SNGUQ`VvDQ-!m?k?YX*rPtQK=%0die;T^4>%bJse_f3Y_Xnn+CTTm@DTbv` zLz<}O%!^7vxpBW=ij7V|EvboRmHko>FHScx6|Q^MrAh5Ah7`*0lcMVUK`E5!-pv;2 zzok$+kDjip8jwF z>HYt%07`!U*&3J?KvuCXGjgXz(1G_S#7^ME!vE%FMga7WNjPYfDaBf)#wdg^++v+x zB-E-@N}W=!(ufTznOtF3vAgpZG0?1PL!f|v=V%A~%+Y7i=n?xn6i~YGXq~3CUPj<@%F0E?@+~7`!WXWyz%)amD+Xd_7U|Ma`@`1 zuRPo|p?<4oVeinXwSKjJJHf$ID)(BHp!CB11&6E8UwoJnW3dUoNVY<+Y6sCq5SsYPMM+t z{*{w?{&-3~#~a`31mPbpJ<31SKckxZOzxUd#s>%NoY}RpT7Xk)v|i(*;^WyO`Ig{W z&iJm^*{UfEO5skWxreuPa=@W)Jo+8_>Vczpc+1k*^0@SzXMVWM-ZSG5l;U8={n0*UMs`Jg8dP53zLtv(iZ1*Ey-H(CaL=X8(2?`Le0#-# zUM`uT>xEA^AN(6 zRWDfk;&SK~KNCQDeq; zDwj1do%$N>=9qnM8G3k7yZ`*`<&^8{S47gEX;iN+(Ju}^mZE9Nm1eEvY(%98H1zpV 
zc^-Awb=Snq=c_1jK<3tRYd2H17L^|J`0OflDgRkn!TDwA!OYwKwUi)rVT`zfVM@bRR8I7GrqSvsoYfxD~1UyEX3avjUaLUD@>7iDk(6Tm1FJ$a$zjlbp?`C#*us8PQo8vcc4x z&qpFufeX-_k8?lu-G&cTB%Z2Sd{;SdZOhBq+Bo7hB+a+bjS>pS$tw z9bI!z%(U%)kB`f`?yv8|+?NfR`?H~i@8=n3hVSp5FH!1Djs1sUp@M4LW@Fl;_}a{< z|8~^_WnHD0)J*u zl*s}&TG#rPs4X*jCrsn}Yg(M%dEVr{_inSNAXNJ^Z|>!?6*jT3KgIl`1C4 zW?flqTKx2IfK#)x*4c(k>ukVWQ{~cY*H7ws(A0YUb+?DTP1f0ix$4pnJ7j<7nb@O$ zp`y6q|4pC3T^IZXIMVaSqv>5X;PGMmN>^>P14O^l-xYlb?;#Nj4Mx2f!p}n5bG?za zQT1Y-P_7p$-&)p_TD^9?e_0BO*4bQ)xz-e zdmV35h6_o%T+caS-!?fP^#@#W59Jl5Jl6rAiCt2AdFVs5VD{Y0Q+|9#irl^Luf6+$ zh^G#{;@U%$o= z8!>)k0lFtR;qp!W0Y&~ucI~-q9rZxuG^4-g2Q)b0SVZ+@?s!z})^nflc;gBWZ+nb| zl&Q>nq*jfu8hhm}WqJOK*5?;kpZ^8Lb?P~nXs{ZUy_0fxz=U0>+KbrK+)E{~KbZRa zkHJFGZ_eK_-9Pt-Wdbczy?fg6MPu=0t0>=4TSV5RW<;*E!~4mK=xgc;-Mc9UoLGQX zKiKtV*q&)f7Mt_A&-!#Uv()6iZBH(s>IDXckK8wn>fFrrq}zscYNvX0?rJI;o#`KY z>VkS2detIpc>R4d(FcvHG?^JqO=?=n^&OfeO}Gk z)TX$A>!zC|+CKtk-zYPby5G_-aDT1YXeza2bjVGzXrC_z*(_%LA8p^C`7}J)0rwg( zZ?LSvcN`P#v~AFDyXo;i*CB=D-j9F5Ne2Iv{QfaDK>Cw4EGvOcr_gf+6DDxrAM->3 z0Q$$+V-mSRp{FNfm5`2FYE-Ep7^6ZYmWiZN@P`*!!vCA^l38n;hn=Ra?wSK!I|?6{TgRNMgJ#`nG7t5DM&t1 zswDk7-I)IEU!gLs-}ct*-3J%Et`%3kU!(EV-8@h6;qYN7O+MVY{e`+VoH@_XH#F== zEYk0#Qun9!3Z1_j)$h`aFm>2R&6_d4;YOTcgH@83J0>1Dh!{TRI)=ah*mAd)XY57{ z)E(`gq*<~H9lBx~Dvlg#gZgqSPJ1q71x%7a%$!N`vQPLxzt{XK?0x$zX`7iGKqY3TE9JIjV!6`miT00mwg>`eHT7DyKgAR8qvED7IB?X zDa68JTo`yLpkhXysKkD__rgZeQ-d7vq_{M{arat~eq^Pl4;*f(*H9VEnCC?6U#L=?BnF4Pu0%c4?E?J z8JJma;Mr$~d~EpcimUpkJ$jEAI)gL(OgRjlfz0<`AN6j+LC4~AJ2w<1l}y4_jI%8#B#tNh*rKDz7ZMIGThQ;zdP>Id*slt{ff3?TDtOvLb_ zcr&YW+yBSjTfjGwb^YUQCT){8l__n3Qk()sBNIb11B<)cVo}`Pi#378-QC@tOmTT{NHvr+>?`&JCl3wIrrST=bmri7aTsyIRI?Xf~ z|6rSupD`~!=+eLcJwL|2XJ!RI|0hhpuE|3qJ5GZuoBz-C(&x&;dBt%~Y<1~WNGl4Y z3qV|1b-3WMdo!PAI>rPqI8ef@$1g zrgjJFuRg`nG7=%uga{)JM|OI_xbTfPigP_%Ai?XVJ=*|AIds=qy|%8oku)7E7z zo71x z`}_7iSiUxyF8Cm*>fy?5*vuRDR@n8}#@=BH$SOAXquXq_(y&jnO?21DUl&h)xP$H0 zhE6z8FB`k=Y1hRc+ zwAw$2HsuvF`DTtfxfI)&R?77*x#BaESVEQmaqsi#*8jSU_5Z&xX8v(eEHjG!xzY43 zGaN{`t~P7!R)-oXn6bLhBO9y5pw$5g5U9QUGULEtna60N^cc%#H*`xMSq@%Jf7=I`^R_dYXu?^BXI ze$(8|ohNRm2kpz-qj1(_`c#v-tKJQY%`lqcj{7_B1*2K^QpND*F?W(?Z_Zvd@Pv2& zP(dzI*NKCLZ$sp?j`j?~L^UsqO>= z)A(TW5!2&=MKzN%t)`8GUVbW2eFJ^C;-ZxN2d=Zro6O66;^Y~&?DnN2i>|)N-q`cn za-ze0HmXRaNSSRvo1C`#ZS~a`>2jy5ubTbx6#a5R()^jtFVpR&_BOPxwt@XcStM=} z`00$MR;$b-JglsCHDnFlO8{ckZSmf2Eo7wPV9;e5A{6n4CKG%vU-( z>w}-V=U&IEZofKyu-pcA(U4r{v(%R{?k^t}P9uUD;$@TBI|s>_@dJJudoTI{ooio6 zLg#XF=D?u2=cnBcVX{=6`QCDmWI7iAx_e$JBDr?{PqNQu50uA{qD}S*5?{}?6UjUdU;!qlsz(8%iMQWX?EMq17GfQ z_vlpp>`~Q4Q;$(;L!`5Axh0!^9nwj^-wTv^!L)dXG-damdu-NA7kh83@RA*Lze(o8 z%l6T=_oY-1Km3sGduy2cnEn~vrfl2t=56=r&r?qBiQM>#*3D{vr**H>Y~`D~1Jedw zrZYRMH7z-IKb?H5<+#q|ZTjR-m$p<^y%KF_DAl^(?_~0d`6fZ%ckJmQj3fKq2fJ!F zUx3B3_)K+<0f4luQn3ZOkX*(;n(Hon#^3qb~#n7|Ckksnafc-KY3fu{L<&dzLRs_(Ougw&+;+#6J08J z-|>(pBk3RYx4eBf*`9}AY30db$4-5fFw+*W<@yfxPfj~>uhx|QH|TOZQwD5yDVTgG zMs~b$FO?oTb|O$-fF&x=}gxSN6cp4ymWUMlzSjS9frH zsyqF-6gikaeOy_l=BHuwQNQD(9>2;!DEU!;{3%~h%+kx*rt|kCQT?2~do5W;4=+6J zW}xg8y*mH&2d$#+u>n;-7|-6gOW!SRsX1lyHnx7*d52HbJVn>Jly6_`uJf$ce@ufc zHLuaLXX@@!q`}3I);kC!Kn_dBRA>vYE+UdaNJQz0D*2J9j3m{|o)OUiv0Xv(9X`YjCiL zqAbYcqczwlt(wxnVG29S2pKCDK9oGI8RS{WWM zXNnbbt&CPgGe_jDj&C7k%)^&EM`X>hkJb!YONEC9FsT)5CmnA0l07~AS<{Ei2m1WB zYW=H)k<6TUjX9{)2ih^D`_!PR@9A7Gn&%8vzog#`4$&{Y_JKZ_eOI?%4yUnYyLVFU z%0x2D>UMd(S4J`=i#Gh_w31}P^z#ifYdxh?9v8S}yt9uUpYLwb339mPT6@B6SpYMq z^|Rr(&&DwN^FuRlT{nc?)M(_r!h6oLQ~JK?A&(o%5`mTHHaH){G(RG_SNHN+w(cO~ z$Qv!B$xZJUnvi6dCP(G3Qf1=sjIFFc)xRHoDCniW>X;{OqMj}Iv}oSv#=p>U$?Eyx zTgTGV@^@cn{#cDYpRg<>zP6K=FS@_$Lc|dE&h0zZ^Pc&cJ&`?!Dfz%oHsx81uWN5E 
zq5a!4!?tx@#(tW)vdpVVOW3G|@5@F8ZDGxR%|h-6Z>5VpyI&*Q%#E~Uecj5bhc~cq z2JA{KGH??+abJPim6Yq~*9()bR#>);UOS~}nH3Wk(uK=si|#ti#U5JUa@oBsE?PgN z%&SehYNh89(psderkGcgbk#`DlSpfj zuIgf5b<$N$%&SJas;1`=(yFAZN_w6|T7`5~7V|2Tu1e{7gtQXrs+gW9kya#K71Hwv zX$8_%K0Qw&El;}2rRNdSa-{30^gN05C(>0`%qvT}%B1HJ(lVrrNzao=8PY|k=Mhqx zbeY6F6X`0Qo+pu(CS9e(yi%mAq?lKdbd?bEN{}w2m}ewihV(o_Y9L*DF;7prbm@5# zsg88PGNjjEE$O1t^CVJ=bZNvq4e3&gd1}(767y7~OPQWWNR_0kczT{hTAXwhOV1;u z#Yk6CF|R1;DkA0;Azg*VyuzfbkeFA9bR~#+38brFdY(jDkaQIg^9qoz{9;~y(iNYc zM@ZvISHAQ-i8LSS%A1}?Nb{1eJn4B7X&%y*Tg=N%x^jtmxky*e^gKeElXT?}^Ky`` z?CE(DX?D^TC+5YGu59UfgftuJicQaxNMlJ?*7Q6=nw4~A5%aQ;uFPUyX3`ZS=Eab% z==3~^G@5ipiFr|^E0dU)iF8GZd6A@xOwS{vB1PfEH7 zF^?c!5;0Fgy8OjFf70bA=J}D%LFsuC;v4B4n4U)vUrFZxG4BiM?4O<|AwHANeq!Dy z(%CmXk03sh&OTz^2h!PF%zIBddx?4PNM}zm?=9)!h)bkX?EeuLNvGKVBQB6mk^e^!=Sip7|0B+kPLcmd5NAoJ z$p0gVGo(}G{}IIL2q*kM`EPyn6w)bj|0KjI(kXWTh?AsK?EVoaNT=BSBaV|!vHM3H zBb_4mk06ecPLcaZ5JyO-*!?38lTMNQM-Ycdr^x-25C=)8*!?38kWP{NCn5HePO?56G_m9|1Iz{fEgxEtmMeZL#B$H0D`$w>(Q|$f`Zqg}o{|I6?=@hwt1hI>BirqhA zC+QTsf5Z;bDR%#e?W9xe{t??qr`Y`?wvtYf`$rI4NT)WvN)yCp(kXWTh)twZ?EVoO zNvFvDBZv*8Q|$f`>q)1`{gV*uNT=BSBi52mk^3hh){suI`$wz}4u0d;(7(IC{{x9y zGEllrIzifAT0xqN_(+^1)+p|T$_N)>AxaT3K%hG+pBuIUPXU^T8Nwn%pM@R_ofnoO z4+JLP4%wvOX0nc<^<;VcXN2|*`t095=t5{QMHL|Njg&tPvIUufPlcBa$`bfeUL^3i z{D7i!@X){|;bX$%0uzJvflUIbz%WUpkgxJSid_MZ0`@BMVeMZxU{*j+#gu?rvPuDk zf|pAal12W>0aEFW(0ZZ8LS>3Oid~8+K-sGzpR0%u`4*BbJqitj>vpuPSxm*W`{wC@FqL<2enFzL9f$klyMJvDVxKlK>`3Hz?OADUDj!B zHVdNrjY^|luQudP@JLx4$o{9*8;u&H(IcVMX>lBr(%bDul}Z&ii?2(ivrxF4gK{`@ zHnUl0FrW&JS!uP~bw(J zhsI*jI@A`O#;W8pK-pAkr9%@vhObNK38&PnEmng=i)xpxcAy6Cw%O)TL&j`Z8o{wU{4aW?a3oi!GE>L^X;)j+ zW(wQThy}44l`5lAD>L#^NEK&A0xulg1#ma_&|PKF=n-nCg^bOZubofItVhOQqgJD{ zBIP}0x2siFvqA4L0|uB1Sml+n;5;C5(jfrFtg%^CR-;a3HyH5^gV~_W65^FoQ)Y)1 zne%mK3^ct;gD)6tTC|o0J962+c%`r%r_^9n=+p+iT}e?&ixt}fdIUJ353*a7cCA5azz?D{#AuI{#o<5*jMbsCW9!{)x2V;)rVUMJHsZ=wX*sWy zL5o&!K+jY*hef5bX_b^78enzcfPf|N4liZ5SRGci)#xx=9ZIztBMoB$!%b;dp@@9W zOIazK%8c(~1X3F8UZ_>jGBx@dW7_D5xxq^z&e37enhg%97+M@>cN}V!O|7))5a(zm zKJZdz8!Up_h|!?2Yjh5++F;OH)D&e^0(DtK?&YPB1yG3t26mg$YA~Y*El?`815pQ7 zmD+5L_THD?ZZp~)T08bV&3d~^O`#(E9JG(pp|eEsqY&DqRq2dqd1wjtL=9GK_t>>& zjTvE|IzuF{Ll&LIrZp-p7}6Rew9SU&pp=F~gT^@|n<08Pe_uL_-l0LdC_TWTF{bSr zJ-Xg*#r2eSy)J8EUP^1WK@TX4l7b~Nc+|*lz*x59f3w}s`x_~vMXR<^8neo%GHCUb zl``0D_|eF*h^?aFZG2rS2ZigK^$w#Iji5CcZ8|jqqwKf|t<56s=9RKxEa6;+PQ%@l z1IIEfT0PoD1M6yV$Vz#okXuoyLldZsT7%9;VPG2!zD(irRxvqP_>kVhNpgG??u6|7!#8UBh&$_nFb(O6aZ zX-Zf?BRU8_oT4ZVrIhn^nH@Hx27$7GKH)%6Ffm~4>Tsq+sm0K+E8;zMnH?BVW}{k- zFj|DpB4HPz+Y#r^je4`*8ul|Ug*(^VC@7NNp`@VNHit?>nXLw^-QrMbLqd5e99(gz z?PiNXV>2UM9wr6$238L96D}N-o47RW!D>E4q*Wtl-Vr1&P!ol(Hd1+4Ms5rgw|oUTcQ09gGQyomBTeY zDGVDuOekdo42QvJfW^0{ESMT>27@x@lUK?Nv!^y_VMH-YLDet@)hbwO%n?Ro5N{{U z7+pG*!E9G?IW=K$t(;xKFT^0g$(@{AJ#|?yijiX%y{*)$F<$Jb5{VDcuu8Sf;fNpY z$BUWK$rLj2!j@ysKuh7r+A)CjI?Q9{_a1N*ktO%4#%X-ZfLOUq)@- z8N8HEXNKRw&BICuv<~g1N6ukr8jPpekk_BTd#lQB(c7#lqXXj%4h3?Q+Azu6plUk1 zDvzG8i$bsJY+9UOR3gE=!)7(Yg6T1ft6`b+xo`1Ob_`>SQU!-a537lxXR+G!Rtt=> z*`d|t_8L|kgN3cP;C8r}D;0*0gPX{?OH&p{Zdce_fBFBu8~7h^2YjQ#+ptnvEBaV# zv%~H4m_;0nahO$zP%FHem*Nsn>Ge=H$^k>EQo*jn3ALHgTUw*FaAS`YEEA=*z}JHH zMUw-H2WHx#$H+Eo;EfcDvlVIVcFsCUI#G})|1I6G|TPZk&Mz~)}hsA15@Y)Ft93eH# zs?p=2=qW8u#+uDW3TN;vYD+{d0EpI^h6xy!?ugQpFL5dk1@Tnt<*V3tObWBGV*U6fIW zpM;3i&O}NV1N+mpEXJ<~3v5=1z6y=*df|G!Co);B1>MIy1t_xWvrpS_+FT zU}WT|9=_@CUlESG0nWTb4Yk0Of;drxz}UH=gK5y7!-+eu_Sbj+_arjb|9`KY{+&|% zNX}UQXRQA-*8dsn|BUs&+LE#U&shIwtp79C{~7E5f2W`M*LCr0myGp)#`-^F{r`Jy z`MhZCCB~3swid$|0?Sx?TiLHDl^tc0JjYLd{^99NUM_(7QFH#)=Y0K&f9@ExaPld>q@i5f$g-k4P4vDOm*2(G z9i~?)l?WjC{TU<;Y 
zk7yGLkIF4(HR2@rbX^xSKk;-L%0E1=>&tkd!p(E93J)*sJZcJKW?wm#W!6rT*)+6} ze&Yd2a`(^v5!tfHnfR2}l}(Lq(uIqci+MIh!L+*PKOv^;TiSFwZJD**S9;uVm!)mn zEp+s@BIS(DCa~9EX=2As2xe;Z&Mm2&7{VMJP-|DG$G5!lVa)rD1!lCB2&lu~+t5pt z`dsu;lZk%J?Bn^HC6$pf1R2-UYLO(Lm=kTGKPi|p@+nhy)O}6+>N&o9YSEl4gpA`p zc{*@uH2HHX%hwb2y`GYzewpciYyf?9VX00fs3G*XC&l+oyPI*wUfd7=_V7!%^|nl10ENDmU%>fIyB92Wo5@UtZsPOz27>GrQ7G}p&2=( zKl^cPkHAFvINDq^zIDzH=iI9znxCr;e)dBXPdsVZh8|Auo2l(RmR&o{^0=z2KfS!n zoz|C(?@SR-*e36s_q?dZ2i@x(h3>1aoHxCw`D*T<@@7=*<)5rtb7V{rm za8EcRe|@acF)vE-B`?ByE!3?a~ z|M~dV-G+5{s`&H3MFv-ct*%zh4}`~9qu~C&pVK#3v9F{AvMoYiY*y?*+w|BcL|}>< z`_(q3N8!&~XZp(T)|yMA;Y({HT@{=(@W6K3ugswrxs4m>Rmm$&7qiAOC)Ru_xMbN1 zI_*%w#(OR=pg)vd(|3I)1+(dB#X^(!rP7J&At!a4AJYBa)}B6c>}UG*jPqNWk!f_h zWp}a{J^6^v+keo>P%4^HmVDc1b*hZvZk+o|dir9t_4sG{Vhp>u+q_g-U$R}SYiJPT z<99zga$s`&3zmA)Zt1(Xfyo#APd3T>JnEmb^89W6`~MXNwz2)hPd)r@l#_gxXMb{$ zojK#`uQ88P*jw>mE5F}-kDfTR+`McTF0#}1S{l8$lESuc^nULAMfd2YX$u}7yKs@V zmg=^s!_^eJ?fRo7OE131_M0?2x&7mdbV{yc3#UFxp=T;bZfv?4{5%~xx3T3Zkqc!} zmEQ3C#4X%ZBc&fV8y!2sR+OoEu6sYjLL*~mZ@x zN@hJaEV(!yY=a_@%4|X2du&E~2HV*aJALK%*`^Mf_-vzBM<*O7m}_%xObYS)Os|+w zxXsY`A0rKy3h1D`wn4zG@3iWC>!I=f3}4SHny*K}`1s;pe9(Kar~ZE|@te@exhgfC zcy8l-(H$gu-;2xdV(B^!?f3_Wlz9iC>g6&Mvi0c9ba+(z>F^xeh9)?R|4DDbM0k3O z!X7*qZpg)58WCB*MXTr$bYsU(J9bX3o?bdrdWf&|^cI&Yfa|R{b)%+41LwL}>vm&y z`+cFmgoRw#wD-rowQ1_%mbvzYF}@$i%lk9!b0wpm*EC;GDC6V% zo?Ifc@5(%f3N_sjeC8mq7{H|JDE{C6nL`kklUec>7vr~5TeoSlI z)?Hgd{q2X|w|!p@qM}p)j$Nhu=8;cdkDT%Gy-o4GaL-=4ri42DpR>z?YGp}TRitC3 zpn{F3og&)!(Mg*V0{T+#O!SbBU4}PT&|W_4<-5Inkv&xEjM_iM{FTt8C12A|>IY1@ zx1VbTJvzWNYFhBk-my)&^}p0FGl%~dA|*lae*gs4|CMX*o1qbmi6}ZQJ{MuzdW0(4 z?7*Hk0~-k8js_i{eC}l?@{0E{M$^rzDyo7RzF_!XnhFdXmBu1?vJ!m?UEc( zDoOI*4t^cWUJ7FBzd2jELYup^?Uv(6-)h0k`;TQF*PZf?c6JVIzP|7mn)zm{JHNp+ zw)Wj2viVbnu}k0jKZ&r(m^H`dI4_!lnedrYChUK3!z&-k)G7I_?%}AkAC9EsO-HXN zHvSv!>OO)h*`8o_jLLjx-%J1GX}w;|OWGI0e0}$!&a;89XkR@w64KT$xF{s1^U2SD zy1Z-AQmHIoPp0qn7@J*dMVbfFdwLA*oMZhE+PWfCQE)&;V!H3D_)ign2GXN9?rvJ9 zdwW~wcMsl7We+VsS^M6sCvI|RaHCxdhSNzyRkx@q-PncmUtOBhbt*mWc!k`{<_u@g zE^)rj{(36S!^a;n`S^?SNN2c5ccb_A4_JAp`&71Ji&8->>ftf}`kGF#NjvKlV*W~N)$)(sGj3jYy*+x$#OG_;;C58o6Wi+SY167g zPll|Mv+k`eOpR4dPndlCbmm*D*8Cm>ocGLURLPRN3cm#^RFGYD8NL2eSEyR+Fq94b^XiMjSn+4 z|3~fnZ{rJ^Z(4aWkKWS3Ht6M*thOq2r=ZjCmK3f<8&9S@>pgP?-6w2r-Kb0R*&cMc zw?j8HWZ&j{S^sj>Lbi2@TP>EK?MWBBIBsmE`SV%XEVus^@Z)wJZk7MSY`We0#5H*; zEuf19Pgq`Q$yEC2rWJSnGG$}KlJ9Hh%Hr6L%@6dNHCRix$XV7uQ5M5$!_KWf6q)Ng ze|7B_d%r!6VV@LV@o7s;9Qz{6u?@vvYU$xirmt$8Xk=@f&cs?6n%z(%b=jyZjnex9 zI6kBPoh!g={)*YdcCYUDn6d3T8*qE2gXuR?OZR_10h+JaZZv8Esmt z4LBD##en!n3P80K|2Xd98s7Cr>Ct@O<2SApXX@fKzg*`5=X8rgnJI@uFB!JJrlZ#F zJ!4<;l77I{K25*yV=5}@B|O}Z8^qxasoD)4Q`EqZ)vlgUXr!Wo}@rjWZ& zU!A#4n_A3k*zD?kI=031{nzx7%>32&Q;V0AG6&^ndPQ83G2ze0Cpgwfn2^RZZysM2 z!NeRJJ0fzP9}`!V4y|`OjeXZ$llMgaS9H>iUFR43M}Fu3Xax3d{ISDTm4MpnyU!|? 
zMO{0FA?2m#{-eI%f1la;r^q6^u`MjlQ41`+*gQM0ES%9G?w|8Ee{|#j*Z6|wm%K4> zE0cZcyvPzw+vL9Dj#H$N^&VYx5A0U@@`Hq%?ix=&f9|&Iv|Dk0sNul$^X{3iW-rgZ z^P1ZW6zB!yyYXFqyRrM%7`tljy_wbx zDy_9HKFADRP-cX_>Onk+r~XRwed{+&`8p*I+TcMvg~)xtOxbZ728sF9IER7QbCyaHjIiIna^d}NU#YLV-fdd_9xX19>r%2zpg+;lz898tPl#ZyPtHBC7v^$b zDgIn6=llF}?>o!t3=C()WhnV9f7nbJv*Pxwq^)xw(w&%+hip?md@seHi{*TuFSt~$ ze6Ed=41U%h{{7w0|FMSxe|T5_2bqHASG>_OG+URR@20+07978UeUPir@B%1vSWZN) zLAhpj(5JcMH_*+iG*gd3+2m>0htc2%M!%et8D$+y)=ZVbSG9gWxC_dg!+(7<3+2OK zX32Zv^NtO+HfHetUC;K3%TFI+nUrjX^(eEK`N>lJ%n|le_TrU;PamQ4jn#~OopOZk z&VrjYI8KBkS4OSUKq>7!Y|4kjqdjIBrN=C1!LR+J zS9S2Y*WB?{maFH#{Qgjce=lJ=UitOQu{7banvd^mJm34j`o2%@NA-!n@2g*2`s(@P z^2cVGm;dX}Gt9EM#sAIk1+!dxRnpa3ufx7fs+&^abdqUSMA^na$8T{LZnG$%aJG4F zK27mF_tNrHieKxs$-6(f%zdv^w=zHF+Gye*hV1qsC!6@}^BcYUmOFiple^r}))jM9 zSm#bjmh{T!ob6trIZ3iZXL>(h=2n%DdXcJLxBTeR zCCl9u>nhF0;AAyN_>~;rl8* zTAKZVvxl?9Uuk|_(m#j`XEESNrb>qt8_1)fw&VP^-G~GXII@U?m0zOjdGi-p;kE?LPPph= zV=C70T1Z%drQXM(3%uAN$tLgTZXU~>HC>D#KBiRM<3%!9=fzClgs<2KJ^rj)~7hR4auF^#vC8DnWbj~Sj;FkeV-DHPxMcNHJ! z@+qX~LD~m1(mvxfEOx4KirQ}FlHwq*g-7AvYMS^;kK%`+|G3v5P5T_L*67P4@5Mo?c!s96tX0x)o9y6b|`bukXFDUY{Ia3g7>`a_PVSW5wp>{`&I_#rO5hKUDrk z@i&Y!G;MXuz1DQ?dfa+9zdth4#81I3NNN$!b9;_bU{%yBEW z-E{p@e3q&9+e>4{<(OdN`D@(#Gm;_ST>xIumz#XQW8|I7VOMLV_`dJynJ~8kAY-6(6~hex&$Aj?sQcuP_afP9|G1 zBhyRLs88w3gAfp9_{dMJ=zS5)i z5%~4oi&Js#@oT=t+BRJpDVPEiju-l<@@KkU4qDY^V*)ef{pht$<~3cn!;vz@Cp@A@ zkpq?Io8r-Dm%Td*Ni#8X&nAy=5*frK^vSFFTw29Uyj?n%O80=i-C-qr?&TG_TLD_S zKChH9->t7o?G()PdRn1PV%!`ydyV{K^B#z1DyFfgrb^z^QKP!G(TxdWeEiVYJ9~~i zy^QtA`*qoI`FgMaLXZ(*SW~BG%&b<8+HKIhY*}z6oA@-W&hS3-*rGea4D8&)?BvwN z?OSVSuqhpMQ5BAlW$zZsq3pVEEBkKghg0-}WO~2tUdWBaP4v6Ry^k}CZ_pPyw#@vb z{z^7(;DjmhljpIjiPf9Sj~o^~^v8Dl@5UYX&|ga5$~v}kOr;x4&7~cR<$u_Nar7wu ztikz|U_U?qOw!_h;cvp9gx?H57k(t%9lkMqS@`Vm3E|H0{^4E19pOKR*9tEmULw3W z5CO7=hlLYi@57#l-3hxGc06oP*p{%BVe`T!hm8yy6xKbg4YC8)1x7&WFm+ggu(&WX zEHLy_Xlm&F&?}**LidMm4_ybeP5wZBRh}Y0Am1ThD_;cEfHCr+^4{_ea*Mp7yqdg>TqiFq&n1tR2g|<6 zUdtZKZphBc4g*JEgKVj6mTbH%N!Cx+S!S0tk=2xylNn{jWO-#-WT7%i(7T{#Ko+41X)I|J4Q zEDo3vFg9RVK%an)0oH&&Q&PL`& zr?fv13LMg(rM0Bxr6r`r6*Cp%6vGvL6`d3|MPo$`#ZL+YkQnkPGAk4c|B$yKPeX2n zoDVq~!iH=LSspScWMYUbWI#yQkX9j0fz41Mq-2OPBt9fIBs@eK{2};-G@mr9G)zhm z?}_Kc9pWN!oY+HbAyyLefPpZQ7({d@+7Qi&x9#fy^h6c?B|$K;{<6TmqR>Aae+0c7co&$ZP@`E09?QGK)ZF7RVTZ zj26f!fy^Y3kpf8yWQ0J53uKr;h6jL_F9h+0CAm<3= zY=N95kTV5xhCogi$Y}yORUoGbKs|#c`fvhT!RRprKKvojSiUL_dAj=D6If491Aj=A58G&R3k`_plK$aHBQUY00 zAWI0OQ6LQhsTW9{Kxze&5=f0es=t%OB!QeLkP`%Qyg-f<$gu)BMj%HE?)951hTV0b`r>r0@*HCF|4cuHLv|dGyJr4}uRs1T-5#)v=|9Iz z^=2Fc$aHNJp&Re8T*BiJ;fziQ*zxYbe3eI=Z!uM#I$h^-vEp~V89=gyy>tiZP%E-Fr zvQ0>Q$>u#U(LA!%d%Du|EU|^-A{h6OeRXP$eMdLBaKo$zDc8J7i7$j z@_wLCd@C1cU7N;cWfS*~$r-`)4}RRyWR76Q<*S}_c|ruUvuW-EIch$o$JJZas$z>48PZ63LCV!%AeHL?q{{wefAy`Y{<1m;Wz%B|PdZ(PCcv zLj#8ujEe1ac)~AqnNkCe)@e1Fe%EA7i}8E=u*J$dKbAT=f-aIaKRL4RPB$y+pJ#n|@3rguVOr^m+G&plKwp)WVg zADw!02;23Ntx&BKqv?Il!bh`K%9%c9N{js6fB48Wh~9a2kVbthCu4PMB22>+O!w1a z4WwBn0n6C*=X&Xjm9%Mrp#VS($37CpA5;c1%RIZHK7B0BC@ZoKX8F)D3{bXJP{ z`2Da=_71mAF>f=!i2Z!QZI~R@W%jE*Zt3B*3uhF#ZIVo_dRBYliu?0yeG6UHeeU$J zjKD?teJmrlmk~t28<9};{i%zLwP)?YMUF3Gx+i>&9=plpjB%F!bH!}zjhW%1Ca<5!?`IOUFO8OVSJ$-HnbARuL z199eX#?fCgBWPf5CX8w4mwj9;!!*ruJHBT)Gwx)w&0%5r8FnxEa@^!#=1@RvzMdn# z(SsK)p13_YhAHhhP)7SDf_X6M!tlh(;fzW4^3i-rex}In=96b;iC`Kmn4AB_szr3> z4nN1n#zrzz?bXihIvc^{aJHW_z2P@{%g~!-)2=y~cjF`WC7#R9RLk9A!m4%O=p(Ze zlcneKGVaRL-`rT3l{ve*YSm3&#?Yt79`3X)S0oc5ncDt#QV6qZkd@roBb>ROqssZ0 z{c|&Aob^`qD1CzNo?5N-ghpNIC5CH*vp&pFObmg4{ZCL#W8MqF_v9;<2fxlAIc5Z# ziJG^2;QDcNzwm7Jvt6*#kI2BtGkI)mkzrkn1$A}O0V}4hZZm5<+gx7oOPxMd*hh6P 
z#nm}8jQtk){QRMx#?nus_f5QC8WU)(Zq=^^9AeFxk5tSxZ2?P>4XZ5aF`1sWrq8zV zsr%?@-G;TAU2Q%)W&F%$Cok@2;|5eZ)nd&Y`pg=?H3Mv`**oV8UJ2RhruXc8)WH@o zjm>-MvZ7+y(iFa{TS={+0%}$Kg4w30g&;EYoM8>{+?!P z%vzu}c#eZxv^XMewIYg*(rN+hV+H~he~9?g+y=-Yuj*|<4@XtYx&uQe$1wt zWLmCPQs(RV#7VJhB*}`oh2yB00Om`<631U$ku!y}mHV*h&^LOzCNK({dhu+|hXYRGy zgHb02-8Au2Oa zL%xLk5^^zQy`oB}O#WE91!#NuvhBfx{r!S} z4%R9<1J$pue6}Rm|DM0wf4X#BK*0cs^jeSw`|2@)sewlW7X=OsY!s*tRLCmIa{FBg z`XrkuFANmI$ndzZw_zz^E5cg?X~FZyWpC6d^#JeDA%&3HXal^FPEF}K_#H~A1X_^I zTDX@d$0N{YfJOo+84Yl=IK(WVj2P5_SOSz7P_7E_urdw{5{)P+q#i@0z1e02(umav z;3@#j0TjX*$75_cR7!w0r~q7~0vHs&4qPQW4n_je2RIatf;^6e-EMGLID9s{Sp~=> zN(aCi4bV*hdx02!rQ)_UA5q<3qeOxuz=2UJJ>VrE2BZrd=tfX4@VHbeM-%=xsDb_M8(4RA1k^a8XKfKNF57#vW*{b_-ZMFjFv92gUt1^`KV2Vg?bQU<-n3PeGm zb~)4`(LN~-WC*}692_6UIl#yO5yrv80Dgp$7v!Y?CT1VB=NB{SNU08YZObkqlE zKBY~{<7YY0iU3>#OAFLF;D_n$NE&0Y0T~S_A698QzAin$j_hW%C1pTUAjO3dn0>fW zWTHc2py*n>lnyvC9H>v3`5Xyx{x|8c_}P%=OdQ)($2 zuo*cFLtqW+kp)Z-or*Gf>av>==niZvvj(__Xm}&g00CvA*8yBc1IVb!oD^Uy0eEIM z+tdz=2530kr~={-z=DjUKs7ppx25yZE{Q zm4-gES+$%>Vo?ulAV9F;$(s#Fp&jvi`TGLsAL8!;A_Sl!J#f2#m;=Njjh=&P)LY~d zuN1BZ5E(#J0g8)jf8bO?qpV8g#Zg1Et1)R*hN{JIpHu#7rxOj2R0iYg?Q}hC|(s2-PYCMF+vsj}pO9x+FS{%)?12j{u(s5{IfB=M@ z1O6WXOjS9)1aV@p>OfGmVqju00ICzXZ#t+s0Qn3`gm)Uls(7Rvn85%$r^8`C%rmel z7=x5qYXvZ=3X9pCcCVDt46{dBtpMx-FsmJ?i?|{ziyn!WtVKMKeHx%=;)grz2Bk)6 zg{cHu8XinR3jmx1xTGQ;G&>9m3TT~5KtXZfrCMk?tiF}9>oq{G(&+=es-_2QD+Po> zo7$?u>fMa#2PjrR$5R8qPM?E^p~Vpuib57B06hZC5mB(f&Vpq$V_pX4kv&&FdEvhe zl>{_6VEEw*z>PzLTJ?5d6LNsMDgY!J)zKIEmauXk0e2RlWSGy?1~sN)K-U4K3@}T| z>|T?p1TrS{1K@-hrCPH}2e3{|;W~_S0M6x03!V4(?+bIe1L#&jue2F#co?te3>{!7 z0S}4UIbS9IzJRF(OeYLMz=i>6Qwfkl%xXx2g_As#D%U(t3P4{5n_dn0P)x)|4!#rs zUj}$F6tGdXlrbT~Qx_tDRp?7N5da9qvdIQSVIah+0T_lcZHVO|ZE;vYZ!mDSm%>yG z#6k;TI|0LpzO}%B#2(}C3-(y2HDV|Oh88G%zkD;ki!)cFtT_Y3QG|AhL?hmql9hd0B=>m zfztq(P>abKsGY#()dX(krO*h_T%8p#Xz=GKWbNg^CIL;Vg(;=7p7BWGP`b*3jFfNz zfbm9|EoLYzYyl>Dpt@4gmAz7!e5`s0+%q&KJSX5Tq91_ksDxqBNt3)+1X!94+6^dM zJS?){N9ZuT;aC6}(xy!C8fjQs?!YD2A()P|NE`^*IL`DyTMR0{b>1riO&OhN(E@}F zRvm{n0Pc$)0w^>Le4-;SrPp&e46BJT2;DbWU{^S>Xd9e2ttxvIFNMhtvEMco6bTl^ z29?Cbq1RyIMziXpcnCUB08@h*x2fPB>S3@sivdtq6$f*v&ve6kMICz54tt7W4zB^C zl!1efb=dW4jh6Vv16tbQL1DB)XFSsf&?2pBbQx#GFuR&_dxs&qj5~J4#BG}OnaE~y-YhbdXdG8URn!tI3spO*FfrZNX zLl$_a004!sqr;v2vjG+lPP7&S76`Ns%wRgKQyl0iD%dl>!v+CVlj6rZ z+7o(&+rl)@trpP;F`kT8u zjmn~h69yn*8}Ji>RI9dHHKA?!R^=eCzL`7s56oF9+5bn7`rBa`ysAO5h1wF>_jVG3ztd|8To9*8dsn|BUs2 z#`-^F{hzV^&shIwtp79C{~7Cl-mS-KK4bmQui`S+{~7ClVJXdziH!BXAOCa=Pg?#v z*Z*Ane<0#Z6|Lk&171oiNTzv$O?@3btcSBaAGq@)oE~4SD;{-oPZhLy7Kx{=R_uRZ zj}#dyl-OrC0y)JK;~KTn;wwFYJHDKtzM!2@{Rc;*_3m>8$M%)0#n^ znNZ!$)$@MqhKi&2lT7>=iT%{-P-ao~m>0vR9(B*oC+V9Meb=NO)U>)SXph_RJonNR ze9q@rU1l0_sn5Uzmu{P??CZZX`{9cw+nX6JmALMIU3M5Dz(sd`sCGrbd!y4Yc39~c zwshat`@0N(W~w;zbDu*GzqprbzWE!^XQ9(-b()`a^rM>|Q}F(*9v@6wh98{Thsnf_ zx)ERLV&p6Lms4$u2Q)gzR=Rv>?fzvEbn}-jgBH*DW~z}l>U~V3bLm6I)YvNiV-yi7 zeXdbvzfR$^nR?l)mMhaSg6VuEZk0?`06;zE{v1uj1O1}5s_<+XTNp@zf(Pp=wN+_$ zU{?>|FW9%!dh$<4M{bdLMjcAesLO@X&5gJp{aHW0TH1b{gn2;ZU8(N)kv0rh-|ARA zf=Rjl)8_>;A^BPNb}sP!m}f;RK^}RkJ@Ubfk1wwO%baUBxqAMw(IfP7jAyu8Hw+z@_ zv|-l<;SqSWX;A558IU&^+o0H;#GbvUm&mYgzS7fMT>Sf=^;SU5+zp>J_G22?d0{$~ z^Bo<9R=fhyMD;cBmt9aV8;=c^mRVdynHIn%Lg$&zPC-j7vs*WJoWq!^j3Vc z=f~!LmVdwW9}|gQ_V+SfZ($eV2Yp4iXf(1{o?%SO-7$B&)bmTyJInGWz0{B*fL?3g z-VcNadJP5l2cS$(WXpXPd9}(ZR0CF-Nj)l zaW7wFw6EkU{_S5{{yd%UWXqiVTOLPGy)k6+rBh$o&ULJ#roNLTQ<}k3 zhJVk>pXHr$Qk-OMIS^B@JhLV@F`}nk%PkZ_9$z!{=tuS&I-Escf%A7o3U!srCdik`MpFTks z;#Cm$@WI@R%cL%wock4%9=>D=|l?d~`}g1L8X^rPXUQcNF*t|K~F2;b*y|FBgl zdGCZX9ey6ea`GFjS!CSj4_4JV#_@gcJ9@Atm$4cFa 
zC6cMg0FiT98!i?Em{5_#XfF)Ee@05>N4aQ52Qxd64;rnNCg|@Z^++xteUm z81i1;b`3dpYF}>v{fze+^sWD~K;KE!QF1GryHK2SXNxjsWS#gcuuygjl8`mr1@)#skqla4djP(>T1-Dkl4 zQnEbmJXtz575JW-J1CBtccbY|SC|hLy`{Y)#TzY5Xt_dTzRm4=q4w|dEbR~}c>ciB zb_=|aA?l-LW3kV$Y)AUC(>be(J74`G{@*d^P0emS&VmVkOAm+pb_A=lM;;tB;d62P zPbY@ku;b`cp{V%$<2ev-7uTz_o8_-oa6>a~X?2>QAf;RZm#2nrd31)55EWc1Nf#=Y zhiPGDuXLb)rH}hcs(MhC>FeeL52WvlLm3}T`DC>dFDC^octK{Kn3}Zmi473kcV3#` zQV4q3t7@`#Fu=Nc7p~;@{yzIr`8+`ONRRxK_0I=LkL-{v)&Iw>;MSmNl8^S)MoSZ% zcj;QDGc0S4uJA@pa~8d&NpuCiRV_ysgO0LHQ=CHci*$Y5 zz%aMiv}L&qCy$Kl`Dc^xcD76b5iHlMif3HYDU52kbemi$*TSW=;Ow6|tzK&gQI}j1 z_S3zPEV(j@JF@=KUt#M@vhf=(ljSAh#hUl5fB|A7n)gp8D*;0E<;G+e-gjAU`ww=% z*k0Za?HBD2OUwSyb|lMhC)UL1^;0VYbUw<P!hF*vERLogdIQsJt+2Vza1^tp1|D?q~Y4J~4{8JYHq{Tnp z-(F>U8uU+D{8Mh<#%?K2kHvqvM))5`;3?p83LNqj7@NJ_ekB+<)>a&OtN{4aQ({>) zZYPh8>+$E|KkEOi)oX$@a0fs{+E~^lxR;a>s3bj^V`ubmX^h(K{4`s=n zuYgy?f93C2Gd7M}ocDR*AvQSuc8})5z%L}Y7fZySvIUw{*6zd<2C&ny>Cf}(c_+Gr zZS{7}qq@w>zp>Nb9y}j+`-sO^FYk+E!%jcf6jE*7g^^40XLEjZ|N6&aNaIyv~2Z>fnw1{3h&iiC@G74+8HmACld~yOG5{ zC!jA~Q1p&OeqQgq$%THX+*|K8(P=KAw6fXKyM5+#2{V{wo^ht$>oI9!^OypE<%NM& z0Ma8nBt4wPuT7^aUjP567sM6m`+qD4te&iME{_^hy8*f*OJ(;ARF@qrHY99GTWVRp zE+WIsnzZ`crUcTWC(-M7i{A2@e>e_1OPAZ5g4ew-wGZ@ev-RM@x7P-F4;j0W`zg_J75-@!sS$K;FYHv5RG5}Tj+3Mf?+fkSPOnP!*qlIGUiW*W@Opi{&_=JDF1Li?qb%Wb zOO$SJMV0pNUPC{Y{gQiat|!-ke#+)sOG`{J%iDdg{fnFc6IsRPo6X|PWGao5+jK<8 zH0p+t+IcF#L~Yk7Kc_;y5zcv$6f%vp$r?B38PtE27}qb%OeW)U`+uRhRUqPXp#ymNT4^F?rVL$eGYStg5^0Jlt>n(Wf@S{gSeFH_y+7 z_GMxY4XphCVlgK>a(UC&jRy9-0>+Jq%~iSCf)&%=tpD?`W&Y2zGT(oZzJB;By>pk&oI1zhpnXb3gHTA~il)E%tS z1sSzQ1#D7)V?^XZrQ_skg_PD4p~P(mPJeLt>7DAd`0V_F{JCUqZ*F#z6d{u$TE<}$Do`% zEjj_d_Nm#-Yn0+M8Ik?^k{8bwl;Rk|7x)(I(SFo#7eYTaAiYpIJvssT;Q^#ac1Ui_Xmz=GzyH{IMmzfNmfx=*eHGXF zHNVQr?H~qyw>)RjbJ7NVFYQh@%kq4j>OIu!O;OLAi@i&qSFH zD098Ri$bsd;esn&lYJG9J>I=dRnY28oe|k#Pr$%Ro+Fh61HM!C`}3d@^(hZkYeRy= z3?XnmcBML45gHVvQp-b?s-UpYFs(vWdhxkeeo%7pL0NL~Im2WA*`Ev4|5yG(bx%ax z#C*zwm*DU5+y+ENOsQ9G*%z`@PX{QAkzVJu<@3jyaAr!&r%9hZhe9`SL8+bw$m-e{ z@h~y)D%G1U{z>G2RH=W;VsA$F$j`H$ZgJmoUXW|$$KQ^0lY#@>jDepI=2Op0^n9S2 z7w~eCCkN|MI*@*$Xvxyii-h3q)ozjA!zC8`kL1BpKV+$&7Zmc3^sk5oZWCTHus!l~ z?U5>4#(zr&`;Q$N4QGjJoZh>xe|!#Es;2?8e~X^7*i+@~f9Q+v+2W{6OLsb$5IQ+? 
z#3P!yN|E=h@&2};cjHbO$M<~x!QK>pKy-y`XR-gD|H$6r2OvLwrGChql6?CE=>B-&~q+4wn3+o6+$gxu7IJtN!~64|D((vLXlmjXCiB;nzjjNV6% zsp+@k$<>S*vtz>AWyMm8)IF(h>qJvc=;_s6PA5=n!SiLKPQ;SKo4a}Ks~b&*-JO-# z=X3&jt$MWMv=gzEwDAe=3+~Y*ZFMJ+4BF4!@M!R}Q?V3!w`%D-j$hzd^xmbx>H=@K zdf~?w`RuYhbmKCRzBE|eoncuk-trLM;%({sjb8ZJl)eyJi-Jh+jIG|cZac5YKo9fX z1FAnGukOpJb#~E0?fMjob>~5g0Y_F+K`RXXHmpdZW^t5h5$o6BpgXzFGX1>ld8PkaHK+-TY}SIiFU4@7VQg$iY7Qs*XCpCZo`O z%(Fmf&pF$^a|GP)nfROAnnOP?_Xdy+S0<5VD<81M`R8lO!9L}mp01iu{RkKlcQ{rb zQW?xW>{frYVy4L>uJ<2=C~zM0k`SeM#UNEsC|oTz zNo~U+b{mqo%3Itl{i_fEZNuKJPk$tTE;-^5RhYDlyqRuqXB$IwAYThRhxaoTT7Tfoh#Q%&m8&P|1GP@FlicdujROCQK z4*X~3K>Xmiz&~zCK~EK}Rw>sQ6``1H~B55i2RjAzen54e=pUG0n(S3e|7sm_B1M=p#QG#-=A{YIUjou ziJxO}98+!wfAu@ED=)FTvUM5#&)e-7z-MybRxx!WfywPo+o5-E#z)2V`{Tko7ND}GG%hL3?nE%C> zo2NcL_fdjfLtVQt_yW}33KFQ^VjQ-WGtS$j=nsB0a{#ftT}eG2Pkjfe)XE)O{Xh?^7e`W zQ7!>pt3iF)k2fo6OvKC|=akzLKge(H3wLN=cE3Rjn=afws*o(d-@)3MGly1k`@w$g zvTwiB-%5W-=i7@dYwkDK0)FRSwamUvqR$B5&TtzZ zu!~CG=g6PuS(LG^Tdl;e^{_p)qUfDh%Z5eX7Co{jPv6|)?)&ayMrpG|rll)*OK*$b z(p-^=nj13aJUM0eq0L9{J2tAjhdbqFtWUW7__v3t)XBYlH!wPX%;<8#*SAfhT<_b{ zqWc#w9mEhtM$t>Pz7@zb(MGw6KOlU#CiO|>|7*dx;J{B>G0_9s+M|8UW9Zas9m zzKIFYhG~lu=*9Lo`#WCz?uP-89@!zegw_E1=l*kz&fl*(&lWyaGj-XP9u)YjqX`U9 z&kEEt0rhk!^Tbi;hy^rhm3kgcNCGV;Bo6sl&xL1WLueR7ZNwkN zuf$Kpx5bymr^QFa`^4$uH1Se#ig>!%Bpxe{6b}&h6Za5z6t@sJ7AwVd#IEAXVv(3B z`YOs7Jr~^j*AY7c8WHN;21ekvS_ksf@qW|TofYm6Ll7~5s@OTsJ^I%sESA` z;)-m9AA~Q3kA=5{mxNiu!@|A7t-`g!CBnJFX~H;Rv@k;0U)WdJUD!d`T-Zpc5PAq* zgq4IsAw%#*@K%r`xF@&<=P1n-92D#jY!Ivv%oofQOcIP2j1&wO1Pgi#d0;rw8JFTM}I1-}tr z&acIH=1ch;ULmi5_ndc+ca?XBcZ9c>w}rQww~&|2o5UN(8_pZR>&xrLYsUjTEzgrz zo#((4@fh6C+}GSE+*{m>+>_i4?oRFo?sD!t?sRS(cMNwZ*U0tb`f^)wy|_xQ2e%s6 zj?3fPaNcurIS)A5oO7IGoc)}1&RWi5&TLL1X98y=XAmcl(}UB2(~P6%G~m?WIB{$_ zEcRFSTlO>d9rk5*7Te6;&ECXb$)3+nV#l+`vLo1GY=3qab{n=gTg|S^c4b#$3)nQ) zN7hT$Bi0Sp1=exaLDqKGI@VIwT-H=p3@eHi&I)GrV)?LIuo|)CtXeE*mXyU|7BUN% z&zbj_SD9y+N0@t=TbQev3z^BxNz8G~;miTdzRYgSc1*z3GCi5qnGQ@5lfn4Rc+Ggi zxW%~0ILXLh>||_UEN9GPOlQO~#xRC5j0``9FQXO1i=kwAFsd=^7(9jz{XIRG{(zoM zKSw`C-%n4cuca@h&!#8RC(uXI2hjuRJ?I_i&FFf119}a*6Wx~1qJ5>kr9GqFpsVXLCOjR&BVRFRefT=R3N|@|1*3y$#gy|us2bk_-x`*j5 zraPE!W4eXuCZ-#hu4Br^bPdy0Ojj^n#&ikOMNGeAx`63CrgNCiVmgEAG^Q*}r!bww zbOO_HOqrODVLFQG2&TiB%$O)l8JG@XI*922ru~@qVcLsn52oFic469yX$Pk5n6_a` z$FvpG7EGHlZNjt>(*{iIF|EUthG{LPHJDaoT7_vPrWKf$V_Jr3DW)Zu7Gqk3X(6Ts znC4?j#WW973Z}W3=3ttQX%?nrOfxYhVVZ$yI;Lrured0cDG}3TOp`DrV2a0N!W4%o z7E=s|Zch|H8z&-|fM7g=aR`1xFcv{Hf-wk2BZxvU3c*MOBM=Nn5Q!iH!7v0v5ez{P zj$kl?K?nvS7=WNZf-nT32#g3q5CkI#LJ)|cAA-IJ0ucBk=!3uyL2m@T5cEXQ13`BL z-4Jv|&;>zf1ilD5A@D)a5kUt8?Gdy?&=x@(1g#OYLeLUH3k1y(G(*r70YE?^@J7%C zffs_t2pS3z!iZDf@%n=B5+1f1%VR+M+6QCDkG?bz#f4e0vQ4+0to_J1Y!gt1VRJ? 
z1bhTM1Y86h1Z)H>1WW`B1at&6Hr<|ET-xviTZp=}y*A?a;#~0qakluJ_?UPMfkw3Q!w++{utLE0_x^gRV1zZ~EBj+XO5$6Wy0_QmAAZI&g9cL+LE@vty zh7-jJ=LBhAkcA1j7Xb1bqeF1nmSspoQ%m z)dda$k$}Pf%zw>)!oLMuIZpC3_&fO<_{;h8_|y4u{4xBYd?VkF@5^t+_u?!09{g&2 zJ3bGt{`j7k%X`4f=AGjmRJ3eE_Q2=)rLptu%2H)%XfTudBHY)mXnOiT<+bWAi%1STAt5I8m=aBM>0*o45b z34vo10>>r3K!bRQFrO$Z#D5I8m=aBM>0*o45b34vo10>>r< zj!g(0n-DlQA#iL$;Mjz~u?c}=69UI31ddGz9Geh0HX(3qLg3hhz_AIDg^%(SrjwXX zU^42bQlwkO$Z#D5I8m=aBM>0*o45b34vo10>>r*cTC+db;Z;LQ)f)Rm^xwd!PF5` z2TbiTwZqgFQyWaJF}1?f5>pFI%`r8@)D#oIL}K#B)C7|krpA~WVKQLSW71*LV$xtz zV^U#KVp3p|V`_-00jBzxJTcY7R2NenOdgnOW2%LzCZ-yg+%Z+h&nHra zQ#_pYwAr+)1L!_jsQ|LvxK?o?r6Ds*~{p>&A7su{cFq(ND7vi@i;4!Z`omd|mcfs(zzf4`e{ zG;6Rrhbq;-rN%h0;TkG@Mz8)?@Zz%0Ts7Sh5l9^DJWTy4n^TmsQqmN6?Wx2~26q3J^d~V|hC?F1PH+ zMUkBQ4$yEyVdRoa0o%i6nG(rJsgqu(Q0 znjZUKZZA!L&A(UJBIgydA_x9ca^QQ}-mU84j8<+Pf03}5@_N*5-J*|qlr?ipqVF`a zZqnh|$KLs)4*W6feIBf-zSAg+9u2gzvFJ%@)z4Sta7q9-kSWFNEjL{2h*_I@u#ZYf0_kRJIVEqd$+J(J2GCpy0J+wX~w>K@Sj zsAFFTjEx1!*w2ZjU&dae`;its+HVZ9A2zX*N0p|@NN;1&lNNp12>=yZjq=C3R`J8n zmQ3z`cl@BpTVTkS9h$^$lfWF0KK=Uf>l8nYST2l2G^I1~dp|f*^3aE}y_v&2eTD`o7BS=2a{`pt-x}Gb-Nc7)$QYv2Wt%oOAlWg>^VFHQsdQR^QcQrd)OP;X-{`gU$nLzMvT@8_TTO=~I_-*Wo* zJ*2)DO2BLE$sIGMyH`)t%(%SHGK2rxj4$f<-g`SeqK-}5vV4_#tEHlug7leCW zcq4m@pA5^yFGyc@q?8s8UsC;e*A+)N4DwN@z25JA8-rez5*~EfxE^#Uo%0(8jRXHe z(5TRoBvp@}3|_5ZDxRN#z}i z;t#64Yv3&DAz=z*un{h!rhv0y8dSkSA#f>m9h?DAZ3r#N(8dF^k;EBl9X$Ti{C8(+ zw`AWB;`ft-eezGvrtZZzV>maPL+zaGk$bd(9oRafzByn`5t-J1$lmk@1!Q8c{2>Be z0hu#nt;Bn*1b7=ior`{16~r{2Q!{t62XJz&xBqI6JvdZ7uy1o4TaXmE|FG`58#psX z>-GNZ3o?As&ZQ4NNdaqmgNOA7!^PmA?mKv*?sIa>RHLyQoG7kK&8(2#m#c%oolaA| z;9_kb!U7em?hC;Lnt0i>k+#5hjB)pkc}y_CsasRc4hLX2(PfDCSxwM;P*De8xe&y+ zjvak}1_KlQcykX+*+ zk$H?`f^KI&EUR>l2Bbm{Lb{I*My(IN+@_EZy3O9zO*ix{d0n1zBNd^b#+B3@a^Y}pdca%`K>I%`Y~)DiJf_szEqzZF9;QN< zHyU<)1_vO$BRx@clnkNl3w@@ot7W7n7&(m$MgNb_nD@POX(}n=oLW~G4WLAF&Z(NA zhwdb0xUCO==dpAe#kgRqG$dy_C2soueTI8FwfD}7mo=77Bga&B-P=EBI(f=?to1?n zbaL{o7akuj%%yH#a@n{hD}|byL#uJXXD?-!(rNP73v)ySt;bi;exG)efE}p zC=}Pk6hCc-V3PZWpZ(Bh(Q?pvbLRt92YG{WSEGHL$X2j6686_>Bf$!V0WJfiP=|!6 zb#U~oS{J6!!!|{gGFTp@)RsOL^sLemv{070u!~FI;<;iIJ6#J~!)1f=5?yTN<=Wre zgkG&Bpu8;o{RgtVM87ZZAASF;+yBMQUpm^tTK<6^rOjFCmbuH*(e!fYyJc?kbnnu+ zU$@}7V=XxpgL3K@-^(^!>$OU);m0elbh6k3U0*C})piDGeSi4=S~2k;wDR8Z@6Okl zJSHym&sH8nHDRSrTRc&5aIjHs2vUUVv_`!;Bse%st<(q0Lye{LN^6JQFDX=^EOYHy zH+>az@wHg|=kKI{D$8p?ZmVB4UzqlZBp*lStxI`B7WC_K@(ukRxpC=|9uk%q)cBb9 zE)wcTJeu2gFYO&QEM$4(R#5+Y^VO`nbKa0SkN0YdmNG$K?ohpK5eL-I9-PGt;Q?U? 
zVUBJ5p3FTpl6_-U4%xNA0n!cX<0>ES;$v$I($?j-eYY})y!Eu!(pE+um{zA#`s9$7 z zeAxLK)SK|Ie&pkBWZUK=*SuN6G#?++l;*Qn3M#)kk(b)32bH^~ooxcqndAh|B-tHW zQ6MO~COuX1_o`Kv%b`O2jU4#?uxNIv$?l;~2kz?ic*)X>UZl6Q_MN38R*_LnrSb(^ zd`Sm))trg*ms1OCg_DjC=THfhJvpvZdot+il?Hnmwr$v2FiFx;2w8^2~25w?sJ1^W#7dU`f_U~Es#t1IoOX+1o) zz1p#i+Sv0_-le43WcE7evk{ZS$Of)Et7UXhQ_=dP%*i_q^p)33aR&ck1m{US-37gC4%M`Y{qIj{iC2s?k|B4z7&^ea_CC*y!On5b4-r z^1dZ4i!<+}L%>1?xc|a!#}n~aa>az^v!b>Ykoon+pN4XrKtNQ^MFV`zOR0s7NCZ*8Cc0j!5SuZ;(pGq0?q{3As|vx2P4ZKNZ|$d| zdNwH>NV%hoq@R1D$(GDk0%@lxd za~{p%h1LMQvidoFJ;wtDUOV>&rrLo{?RJhAr13xv;f?`Q)|@1FPuPE1a@q+T8MR4$ zZDuX-qF1*(Ci*S`>8ciW2(9}Kkv2Zyd-RBwNZ+U2<61k|f=Si(9hlVE0d%eI*g62vUD@P8)bb0&2XgY1=KOkYsfV=+P#qDxmuawr&#$0Dt7V@gPG2W*oe|Fe>Cd zX*(yW^$LCs(EHSY#^Xgi@VRhOP+?oR3|-dH+rj%>fv@A26Px#PK;H>fbe#gZz;$2d z#w#1_z~j5#w^_%Xz=)QEo77!2ih8#Ee8l|V=Vab6qSJ@7mnd4Ns}nhM+(6?w5jL?; z8K5*=T4@QFR$0QO8Wn}|-zZ-zOe=DrA_ppRpdttUMLB>g#fNQMhSl5T|APA=Su#C$ zAZT4HJ1i#D-`u&OWAo(8exQEb)ds4Xe9**W+OSChAs}9^YHr`5u~}eq-7X;V5v5D% zTR3`20EmfB%Bk5n6f84+UE8p86ewIA_w|gjzxhDxOP+6C2LX}rFcx&xYyHi>{QO?Zut8w^Wf#^mzd_*Www`(F-UG~$DUb6*ZbX0!;|)8iP91LU z_v|C{#Dj_EzSr^&+`KZ#T$C5lJT$Ajd2R0g>2>M=*8BV6*x9VuRna zVBofjPv6!d&fIt1rQEcvKrpR=UH7@X2=kc7LEdL}g_=jr+u7vop;&NyWTX7M)B1oB zuiUq$%*f z%uj>6n)7u0;EvTMn2Tm--|?y(0CH8WhTYF=1(bQi$9+B=0*oiJdc|e>f%HAAuUvf< z09LU2-S7~02WFAOm+_N*0XVc|$k9dJLAQ_oji)6v1*_J&uUPNe8t4}UEMBs%wfXe0 z=VAGjtsA)gE^%^LVmI*o*6sP(!AkR{b@a}6x>W%~)27oM zZUlgpQ->BEVFZ{jY@JK1Chcrqf9pO?y3r4OwGG|3`(bThmJD~;*u@7BL2n*+s`;L3 zG;UbGw);DR+&xut6q%ht;mm*?wO)1xGd}sS)=E2>cZh{gvO+qWckJ_s>pZ0!V7mOa zy032w(B7PFJFkB?vmvSWqIZ1)LH2oegiCb~zzf{w9<-V?uMSq!VATu&D)-JS8G#yb z-g8FcXwU{6-?jeIqs8^jC;R8teP-wbj(1|@J?_v2l<9n@RKRISf7%l10phFhIyCoc z3gYIZhS%Nb0M>2LkmK@Z|BK45ilqKu=0I^X9D_dHhD&4nFs?$V_*CRTMGpM0GG|`mkU_uv%?|ZSoqO!Kl!~e$XXN?=6Q_MzW*{-nz$g`GcE3 z;NsHgBnCkG{Wbj5DF;80pWDROTshzc+3Uilu5m$hfb_@?$=h!?C{hl>IXIBwuSi_$ zG&3D{4!unqTkc0O=zE!`nf}LD8zsF0lw*vx*(0lf0ZU(2cb+;Hb|oM3$M4cmS8`pb zP7ZtF>-AxwAxgDE8>Wz}^a?E;NCA8N1Zztdp=o~Yc}ZqMS(2Gbn6~1-;!G|6yDJLm z-0nO9{7)Snuq3&F960dpp>8fEb4kk|i~D8F-}rjSl4J(Z>6(9wxcnY>=7K*h`IPFv zcRxjXBx`TX_Gx$i$GNF1wUGP5{hC`Pcd4G6Edt2Cye#!!>F=GO@=pFYzJI?+>U_%` zwR47TX-}MaXuU-At}Mo)w=DHE1$PZEm*^PF_wo{(mATjyI^EA}P7Ln_kK4Ud+q?k1 zCf4=Z@Ba*#_FrnmV4pF`K8F|A$+>|! 
zUh70)`l+Yy8mXC}c6QoIQB!B|u3_8Ise~9*>UrMdzH%HDLvLF3frJ4b)cN-G_VIV* zbxD)EOpXvB{oHm7eCG$pQ^;Qa`rDKhqox1c#q<3nS2MhM_2zj_=-&38jaIE9TTJZK zwoctel ziVzG-w_kSwJRoa}8VDRei@^3B_NI)v_FzoG<{nx z(tkS#+&eU(i{k+Y;Jd79_O?OJ;HhFuzoo65fWx!=D~_+LfshsAcQNh6;I3U`<@1>z z$qo(TPc;sB0axkX)_1Fd3#zr*bojUZ-$>Jg?u4J0Gq@;MD2~^+1Lx~~i+jfWoxGR& zjkDee)46f+J-;dvU=CE#H*@U3o%~G6n}M&%Lmz8J(QZ`%yE(f?l9N_b%DkQPJIuLH z_6q6{u(rlM(skURJmaY9yxPJpXmeUKcq%1`|^G%`G86lO&{*^>IAu^`IqSP^Y4>|qf&ck z2rp8?&d&x;U_2z3)NR)H?38olPzUDWhgBX>cQ@^u8eH=sb)>m#`jy$Y$uaNGZy(<4 zCb>0q#+9ay_sFY^P3a@|J*4KaRO9bM`-`vVw=VYYetOQX)CuRPc^!voR*(-!fnCkJ zBccDGHkv8WKV|Vx&bM_)hW<&5f6C&YviPSg{z;2}QgmsJGxSfX__z1N^k93}`t9z) z^iWs1-zLH1B|YhWU*YkR(My<#R`xjk7JHNr)Ql}Rd|*D1nLAqzgZV&RNOyhmZsdKk z^O>{J#re}^o53|4rt{geN%!D!k(T44j*T`wg2zQA+D&``?J0{rxuD}sCum>x(Wik+ zHU4Cw=;EiJE%{>DtF%RoL@=uMn5kXzK7sJkp2)@VKaD=shHZlXDn1oCP>}=wOF1xk zR9wWL^}<5|gh8z{gvu46At7)9RlPwDTL9po0N8^acEQtW6^fEtXuD?)xj zo4VG{Cd`)1esP3}<^fDfn<+qo&L)#-oh4Uqel|Xs9(|g^3 z7&8|PAnvdGZvi$b)c4+(LYSUHs7gh$pI)8t4``>ED_|?uT^uNx5 z@3jcu)+yn==AV>L81J@n`9VsU{OHtjC_9DcZruRo?{({`H!nX(BFRnOZ?g{SXZpDN ztb#JiZ|jBCaC?VWL(|qksh*Jbd?A!iYn&ShCG~O9LiQlB~%q*5o`xUAj zpxo>3aHlfdUb)~9&k=qv+iIZsc>u~GPi9?+RKD2)2B!Ns-WqWV_?JF{lcVE?Sprbc z$sLNvpLdAsU=s_uWK;tb#UC`PAv83|s8Ogw^$NKLcK24R;ZSsWm>N!lpwx#1sY*NG zwkEPlZha_AZvDSIqy*Xfz#aeW&!5%Bq21zFq~H!?i9@0t@O-lQrnBFBNFr(XdP6;_tJVn z%CT?yF1KmG&!RU97D4-WB+^HxcrJ$a)W_vM@I1O;Kf4`i@cgpOv6olc!}Dt{J&!OT zw&+nhkQ8oy0ne{%MZk+Pq(WBY!2d)Je1EidVXIah*cOS#-IvPlmLaa8mO&?hsG;1Z zbUW7LhINY`{mvr0qwV;2$iL+vW6*bt^hSP!H@}@oIhanqiab&I*zgtJqKo4~nr%#_ zE|_VfYfPTu{j}~z_gbC{y_F~4ZW(LdMowEcm@)a_aw<1-BfIvuO=LOy42yk+#hyAE z>tmaAa5?Fw7&P4$+S9vlJlTF@D)K{G{7@D@q{W`H*i#mJ%92jXlFkfdzdH9w+h|1s z>9)Am(rCZg)Ykz)kCuI$L7l(H+0|8%KoxGD(P|9T-+YrRJNR)1dG!YUcz_~>Djfur z?2YU#deWlLu;@vPf6BI|-&^R1M0$&V(&C@A_%B=aP!_*f_-S7cJ93-(x4UE7jRE7j z8#nf9e+sPGku&DM~KTY35 zUqPQikEVywed&$iJpEEQNB=Xp3S9cKFxu~AYKyJi4(+jVlgop zW^(aI7VpZicPUu~ZAe6YjashNYG|&~co&9!RistG#W(dd7g>BYD^slwzAlo(j!5xU ztxa`s)CaAqINq6IUm5>iL35VISFtiv>h&rmt%@w(sjNqXN=|c<#5*$VtDqmN4SEC3 zQ5x@H^>c+vt~1aar16!ll{$k?N2@G}uVnRW9UOm1t0alHx6-L~It9&M5^ravQ)zT+ znw>0ORyI8xd)@{+t1N#cdpmMWEwCX~brtaNaky`CnJ#`CQYTnpdZ(fHDMo|O`gA(yLZ zJXt)~dOvC<9BNGC+QxGj_KqlrHF715BZ+5Q?M9{7Drjs;Jj+T4579tl$>N#T4q?L8 z3K~-q&#*T~6nJL2Rxb<3%Mx`KLOHIS9OkuY?g_?LJH4U{=sx(Tij(90C4Y4|MjZUc{@?@rP zE1w#zP6x9+S85t;^>YQB3C}>hurm!RbCj?z1Kj^}nQ5Sv2Q7>r6*?kEW*T6ng-ys> zxsG@yGxfLDs+AgeTu-H@Fzda+`4iN{6PYR0+ElAIsIVtO^Q6Ki7RI8Q5O_|BhN~=*T z4GIl$LuTr2t%ZlLQxMlBre0RN(!;qXh-|5;r`4`h8of$GT$7r5SSgjow>NQBYU*yK zEFMd4Ag;(v-K=+{Q^1}o#AT_etCgur4@)NElEl=-YVR6&BNG>8rp{JBheeA5rv7)C z$=6D&hgX$ePF#?fI$8Z*VbG|E^AeMfl}@9AIdV>F>S(=ll~%7J&Pq)ktdvSP*MfmK zBQdqN`Za8`&=9Akrgm0_DkV%3ktH>?wNfhN28EnBB{Q|LN~K1lg_HadCncuVRz6`O z^~4E@sg;#Zt%leAafzv=l};mvw_T>x)WT{vaNim|aZGAzZmonjh>AFBXKGgFN`xhw zQcfI^nwnbMD)m}Daad*oR_TPrhzh3DEH#nVraA-sJtZ@FTbaT`GAOh}hSb!=+7v#u z)x;s0$;--Ar%@Z=HF8jDYHV$)g}uRu0}@jst3y@8;*r=dF&V6M27_Eq?30@G)<1@i zd?m40YSLLNm0GQV*dsM*t&|GA8s74|B_@rPOO-*XBzDP6YO7Sjxj5zU*mufIDl4s4 zt%mgwVu#G6w9;yH3Rnvwwu?;)t2&H9O>7gJd`UuP&V!53O)|hvgFEGEHlBH==U10UI|xy zCl<*}Wvik}_|PX7+L~Zh6emm#A8`vLCRh{2wNtoMyo#7FF~N!`)~R8kOQcFn*7Z{jPh_f}p$|zRBIg*K3naR2)s+DWBS``r^je|8&oIWKy zW(_e>6bGxKrE5yVXi7;;kj7cpLyK=a1u@<>4%S1h_Ms-m$>Lxo6lbBqV1Q*H@tY*B zOf?i{EHTzL4pu|)kKwb2h?c~`S}4v{1-yQUF_Jh~3B@|4#-JibOXJGaLE%KtS|Umk z2kW5NPy?U1#3E*a9JFzf#M^9g|-G3 zfRVOwunPL!BYa{+NaL*QpCxSt#IWM{pH4X2Fq_d%LQ(Ot$^nx{+=xH!%~>+Fcd#xr zBvh}3ld~D%++qqi4G)|UMH8yg2gB%I7gE|E+qY?6$r~9cOWw#7=ldVcEw#1d?xQ`M zeI$P_zt0I}mE006wg2<&|JX~;?=6t^&F%6XLcdv37*j5#zx-b2Wg5Khf7+f!1C1Ya 
z4^&BXU~sKtxgmqOaQ39(f8666_u!OkVYN&Vq%f#eVS1xp6J*dE6&krp8KhRIf=ee4 z_nEh;;vyncYCCIHnasT6PXtj903^Cxy9)o z7wLdHPdq!($^mp=e`=O%It}a}Vn6HDkk916n$v!70PWY`UK(F5{Vmxsd-~2(L!OhO z^z0p;4Q;^6kYJyB(9iYz^E}7q-0?P=oIc8-e#*r|7pp`2U5=AvhllXM(N)|R3aG!z zS~c*%`3K(m6uZbJ`S-ntj~}?GI6eQ^X)bp*|J%NPzkF2LeP85nNm|`#{}PILlyT+m z(mLUJSsA4bvWLCX{j<6`2S3e7zL^(yJxjKEU-;baYHA;I z#)QoYdcVT^-f&E6f^>d?_wC%_8T0y`_ii{c=0NLNHe{L0t+Y)2=@rKyS$R!bX6}6m zS`TJ5SU#5x1_pgP$~f~frBPg@<@K%qlyLa^*5qmOw26gW^7i)PODQH zwQ#wHAeBNLsti*F1;gnG!$O1PYF)`y*WvgIB=OZ(Twwg2zrQlAczK(3RD|C`_!{%1 zrr@q?pSxt%N>7Iz<2!0)Q(ybWEPF6w*1+d$IXPstSLP{=-hCx|e!cWG=@ir4{lu_- z%kHqj!wZ9F)M_XOIgfgzdgTbrqU$}6XI``g18xRt#|7n3erJ=K7^5V>z0-}S_FNm_ zne5)=(}i+iLZVX?D`cu3tnu_nc%Py(vV?t66mWg6JNp^;1T zZUrQFZDh_lcNUm=(r|v}E)H;N6I|7N@)|iTB0J#8Q370_{3f;Y92;}b^^3&qTFF3{ zwEB6ouiPaAV={Ynt(`+H>iheWm#}NTN8z^H)Kwv9J*|DbeSM*sM!3|yy#EEYOfJ9l z+^vALd2;IZ`ZOu%vNLa*-$1VUg#6;j*!L2!plQBSt?aL4T|**sZRmT-eQA#xb3`@( z)ZM>FV*EzV%fCHtRL@dAki$1kZ1u`>o!YH=g_9 zb$iK`5C6U0FPtan!2#XJ>9MtHpdP71QH{xZ~+9OH3M%9`|JzbdkZ9(Q-YO4E7P5oA>)QK%Xd$`(4WKZ20d0GVR zNsB#cu_rC|q{W`n7Upd9Z9ALnx}ih6)%TZ@>06}}d(29v))l%|@*6yrbgTFJD|urD z)xSfN2S-{bP&uQnbR6NeoV@9fy?fC45#+ZcX||vHtspIaD2pG;;)k@@Qx<#5Vvqb& z7XPHh{|_S^(J8j%4`s<8%91}+^xlGnj|R-6+zN;T(G6CUr-rEugxyvkJ!#QX7CqVQ z^w&j&Qx=drqMO*=8oP`-+I;b71v`cch|H^!RcRhMBhG(c+l%wa&7#CQ1w$7iJ!R2T z7CrJqS^SU|KO_;lJ9}elBH3tmgAH_#In?55jdz7k+CascNB6EWZWFca_#O5KO$>Em z|BCp6VZ+G*E*)4sH_oT#DsHY=)iIvZdPF8pNLof&^rS^kTJ)sF4{7m3TKxR#2^wQb z4`oRYX-QAH+G|Pd|I^y56S?Vj^`7@e!!cd{8%8Z?I|VFqxZnEx%JAYUC4+s-Mzl-N zgx3K=#it?%DsrGA2P$%)A_ppR;9s2s@!@f!|F}&Q&Vo_Wgbm-5gzAIzLB%g_Sx0_0+R8*GGzF4{sAxn~_ z2%*)ML_3x0PBUaDWJ@WOB}8PKd$VPWDA}@;M7Att{XJ7X<1-hZ<-7d9-_QTseZ6MR z-1j;6+&gFP+;g7o9D7)MCBVVbDJTGz=y0%df`wUp;hYU8e>=aXno8q0HDzFdRCdq( zlU~sOtgN(B!XIA-aR1LiV5*otv%Ju@43i?SDCTXYQUK3s%rUrnRfg z%vN`n$szfGtQQwbwwM5?F4VpL{a=w~hx=`5E7k-8)t#AJ=Bk5*cTbjU1#@J4gd@39 zmjj_2M(fDr(LOp3$+PkPgFr(=J2Sx6(|-`W;w>=EPQ+h6U>Y-?IKts|1s@c{#=?8j~m&x6VYAS1D9(7?x&Zy(|UZs{U% zLG&jwG$S(auAL@m!rzijcug~|0M8H>jKMerB7VJl>O=4nl-9P zu1Vp4k|=9bbIs+z09(gESfk85gt(|D_KU!z>(gvLRQY>hMxk;Yt&sc<#G z5DjMy3k@R;4GpgPYxT$KSJls`A5hOxU!$I)9<3g%K2F_Ty|=ohx}mz3x&mAq@KCKn z?UY)f+BUWIY75k2)uyXWRC7}UYTeaL)HK!js&7@RRLkKyfrF|$RMS)!tImaM1tzF^ zs5+}!s~V|lt8!I7s61A=p>js$h)RyiMwJwmc$Hw4$tvzD{ZuSfI;v=?2(WipHCBP0 z#|p7s*m}5nAQlV7CSoHofZ1Xum@dXwu2-&7zNLIt`Izz!<#gr6$_dIL%6`fo%KeqC zmCcm3m6eq~DAg$4P`apeL@7^cqtbGvc%?Z?la)p*^;5D}>ZqirBoKTOR0}Ev=LHhM zE>}e+z#(U&x=s58{u8D-P`WUHE!@HQp!QOI{_flqcck^D=m; zyd+*EFMv0eH;~tp*NLakQ-P}wUU2VoFL6(B_j0qjE4d6!w~l=E_RdWR$=0yh+Bn=hsblEVB*1u`veyc zlIK3g#IA_@7#9ze=RU&30}%HSF77YSeTa!&5ceT2?kCTEfQkDe?gLy57Ty^V?O z5cf7Nww34J!o)U+dkYs^%X4pHVk^YGiHj}exi>Iz55&EJi@VEnuVdnFh zJ&TJq@AC z5phrAV!k~01SaMo?g?DXmFE^=Vh-XK;bH}OE`y2YBQArB;^nzCCW=E`8W+XNb0wH) z9^y)H(Oh}%aZD70xW{o(v^@71CW=DbW4I_%o_iD%MIi1`Tof+PJ%WkC5cdc!nj_CW zjEQC=?qOUMD$gy%M6(dL5Eq5Wa}QynnTUG`7X{064`QMjhU-%B9{Nn!9+6un}ds3{&zbjlKJ25xQOL{w_zfg z|J{a*SpIh_CX)Hzt+%>QQNB9{Nn!bCFvn}v&5{&x!|lKI~)xQOL{Gcl3O z|7PMMmjBJbL^A)Ifs0uFcQYoE`QOdBh~dVHmjB&=iDdqF11@6u-}RVC=6~1YB9{MMhlynV7p~D|`QJ25B=f&%xQOL{*J2`> z|6PlVSpIhnCX)HzHMof7e^+B7ng3mli&*|Q6%)z)Zz?Wg`QKHTNalZ6;Ubp*U5SZg z{&yuVV)@?{m`LV-SKuO+|6PuWWd3(KE@JuLWtd3jf0yARmj7LfiDdqFDK299-zAtx z=6{#qB9{MMjEQ9acQGzv`QJsDNalYR;Ubp*O~FJm|C@q~SpIh*CX)Hzg}8|2e-~gP zng3mYi&*|wjEQ9aSB#5T{#S&FWd2u#i&*|Q857C;Z!#`o`ClO>lKEdDE@Js#3KPlv zFNKR({x=B|$^35;E@JuLL`)>}zlper<$n{fWSRd>z>``2cRrRZ^S|@)WS0Mp$C73K zHy%%B`QJD!S>}J^@MM<%jm45>{x=p+X8GTFShCFj&cl;g{&y~xEc3r}@nn|&jlq&- z{x=3sX8GS}ELrA%qw!>x|Bb?uW&Sq`PiFbwNGw_AeM%49(_E8rg!lw+bKXPivJ96Jn?S&rId~o};$Jjg6M>2Ms?edQ+)B)ln 
z9LY)6RNa}Qr7e?x5~OM8a*hCN-jf>cqaKjUZmt=+b6smttUO{Ob@?k9u%mLw5v|q$ z;pljD9;6@1A$>?5=|%bw&ek*JZN>+)lrDgMod`$gL*EPGNH01+nSSr{#ofj*$B_K@ zLO41eod@Y*Uni1BdXYYaqZU1)oi1p2dZZ9PbsZm{@3=1ez7jxkh|byCA!=?x$Dia| zKc6r!CdnM^*mH49#Q4u-)UoQ9*Oon$4bv=GQS9FNxvq?l^dY%!ac{B)25HI87nz_q zYgR-nSgfaJdG~ev<}PRhkh~=}@~YuS((cB@1#MtC@TM+k1AvZ4=Rx|B9MXs6kzS+^ z;Z0rVZDiMh^dS0I{3bo!{#j=oQr<#aEYM#$5jcG6(c^mFMr!I6>#=umnhFITA)Fzx?RosS9BWKWUKf_ z3tYLdh|xWHo4)@kg7wBca+1_(`H>RUN@zT>ddSF)9 zyZ!mowU`(XIX%?Mm>GI{22X!8!K9|uU0SzF1Ek-|Q`!HniY}QzKc3~$in+Neu-lFV z6J}}5k%8(q)=YxYppO+FIxvgId0Y~FR{$~n>D8gf^_eK;^D0;Rb_Tvt`AM<%CQRWv zQDL^S7E?Pk|4H$lt_=39AKsa7%!KdmyP#pQIio3T8(u%|9BDtJzDJlB7xd?T!3?as zFmv{LTsjqL1ZKXZ6&6e=B4;NI_d7GaJ)@9fAFOax6|7p)Y1*~ptwHYk)0P(_TQL^j zU!5q}qQ{J=HJHCYy_8Je8d?{t-G<5KAKTw5%!Ii%K~>OZwl3p&qg#9ztUYroKiY0u zrzdpI9tF(ds0HIRq1IcXT0>UoCVzT%O%J4owlPdwCSayz=6x6LGGWg9#1>1sm`6(K8)%1z6;ieCQFy@PKe*4tU?U?HwCLdh=%!Kj&x@4G- ze_OEQDZXk&k1eE5D}n%oIb6X@po&&QV+Fe`d8cSwe4um>~NlRu_4r)so{7XeZ0Mnw#rq@J(=Gg+*$T0 zHe-Rifa^(Lf_!?CQF50@GapEBN*A4Gx-n+_|UV&y1>=Qs6UsAgCMDRa{Qk zFtL-XmrVC^2KR=)F3K(L%j|Y3x}+FB7M#io$nJR8mzitsxq`XY9W3oXdDWb+(-{2D z3Ju&p_tzGKbMq zR*wTh@b2_D@9`kfC1>RR`AUFXrMtp2V+62#cfNMO6_RFld zo0Rkwx-jK2ZAVm$83BA2CJ}y@hXd@}=&2@^lfgq3zxkUEdV}2&w>aUXAu}ZSdEg`7 z7?8a5QI1~skxb!IUA3~fX<%9Ef}zh$T;%KjSxHx&|1=aV9bWGQX8L{IxyCCFBzcLk z&(<%%+7Qn>SM!$sZ+~F5T;G441*GHuT!lpn0zQ2EpWRX-l46FD^i|acT#u zp!7TH4o~pM*O2Gcd#2Mo@UqpY%8yfP$g{)3b}m$F{nI{@LwvN4j?3tY4X=yQ1Z;}r z*pz)7(u*jYkMy$njTVD%1NMI1mgB7gen;7IzuQMRqD}hR0<_Pjf0zHSn=i%wqAmOX zwHEjhk-D9opf6nJ70S%zaTr9h{xX!Chdc{nrwq?{rm$OAGwpdKS#9j(Tgf3{zoaQ zwD>z}SG4ND;aGt$oi5*J5{*FC zKHh;8bwviGF>1bd<>>9O*&w zNDn$Mx*l|1qz7FOx=wW79(74OGOYB#5vu3mCCnGHD_>R5r}rDu+bwk9t5$r#a}#yC zZSs{gKW%$2!rcsv+c0GL>B!gQ-EzscuQ@s(Gr*>IjhQyM73MQz+yE```pgRMiXt6w zu*&hQ-f4ZAJi?J4B#-o<^P=lP=S6zZ^`PrS=WUa)SSwdS9YmgV57#-=2E=~((&oYu zEinH?W&Hgv2TAW69J`qP1i0fqtH!~~5WL-=uJU=4Do|^GJSs9)0GRB@88=^iAitQc zU%c%@JK)}7$>WdZ&q(bRZQO&>Ol9&2M|zMv(u2;6t_PhL=|R_nt`nVCuzt11$l>~+ z@lOB6+r26GLl3<9J%5*}eG(_KQ%h&@FKpKWwOF9V0?iiq5!#IU_8#k6ziXYZ`4j3! 
z8u<#Fnd;#gOl}^ixoLDw0?ESZW}I%uX_h=2r%%}CzVrN{2g(1W$8Q8?KNOyqC6BHL z$;+;XB~OpC8$94l>s7SwHB7O7?*@9no4hPCWT_0Vs|wxyVebaA87KQmV%ME+y^3VX z%W#%F(u3q>dYaEmHp|P-+pHg550a-5&XOnN`qq5v-qW8<*yP=oc(sI{Z@baj;ofo@ z;XZqq6Zoqoq|3ElhF$M1NAfh1SFQcNy!Rh)B!79y=2*)gdXPLh^Y~?v{m=4rvwqom zoAr~;`ek^te)<7FxRVvsBNzHfQxX0$_R}M`M2s_ATeTffD{uFGlD!@5H0$BlaDIZc z{m)krDX1?{)xb`|5C0?EN~ff_<_NU+0t;ygq~N4rg+%zdF#?4|x}g$CPEIg)TOcgb z9b_5g=kMSI+x`xIw!Q(rzHr@`t!2}?jw$wujS(oMjS;8@RPc9Rf9wGGJD*SF+-*4L ztq9h-Z(aV-{{y*y)p0G$No_&iqOy;NqF#^_DtdQui)jUJe#@HpZn&`w4|TNLuQuj8 zY21Afd5lj0r02=)w9Lfb1lY1~;-KzdO=a?JB@?&glC8kQnL49`C9P!g>-HGz4Ic85 zEF$Xrcx7~y$&0S84l^iL1etxWf7h$5A(8&rMElOcK58IswBn8PS1e?DmbOyTI-1)W z^xZg4SGnpViLSrflsBh0ET|y6L^uRqNa`w+pI|rtmSvUmWFqh8+A68$$y+0dP6uP zkDRGgmuRq-kbSQng9}d0gq`D``(xX`?l+amYoraHN$$$3gtCKlT z6HIoq)#x^|vkbrFtl>0ESqpSpmHHw}g)h@{Vb>&2FY_UPDGcJ=aJUrwUXLTaE^R`9C zpu3R>S7^rRW?ZJ{^Guy5?o1+C6{9|^ogfLxCp6=9GcJ?AH0zeo@9Yx#5mU>_T$zsK z6`FCf8JEdlk0aviEVhwF)UcigDtlz|lMjC^gE-xc%j6&Vp7#vuJd->L6rA(ZGGusY z+wKQube>7~RBT(lFD*l+XYArh2EkrQl22GTen*r~KoqTYYfksm zo>{WuKvfx!}7cyI4-?-1zc>jz6wST?P* z4}5i@F=~ReF>31ZC;ztjCz&Ck89#~{2I%Gv9(Cm-Y3jQA*y@Dm+W-b3VDH6dVHMJ}zH#H0>)uazS#z$wiz9%K(!8r%B9h0L)0i!Dq?@9)@X#Q>A0Y{HN>T2ngE@yFNu zlEBG3{2R#Eo0I~yzLnF>`-qZnns7wvx6S*_l#aS&d(Qg}hhfPfd2)8al!oqE-~Niy z&2nstj-&r|12jMG5~n$V1)6E|?OzUdcvrcT^saok^gX0bGm6QE%AGWr(e7#u+}B*V zcK3a&#c zcegcVlmF>YEM0!hCku^7t&N%%2zu+pRC*q20B%i1U|}kSxqp7=q5gNPKs#7d3Kpda za`5+sC1`y8VG?0j9|x8Ju!R+1oBqdsW8XL4fsi)ddD3PcN~0j9zuQW_So(`%xZvDC zz1pQlpGZQmMy2>W4w~?{WD{P~jDII>=bcL(d{7m%w!3OIBaH`^2w%Hi``#MVr-n7+ z=sZn&zLWjWRYLtpj|xCIl0y`o_g{SH^&IbDl(SZ9c3m?eAj;k2pmPV41$Ilyb0kK#9;0@g&6GT`S(b0R?Vx-r1#4Kb z;HUdeI`+eNK01Q`ibvf812arl`b0kfQ$57<=vCjF+W+qrG!CgbbAiJ9=F9y5H%W4( zIDf-q2EYZqtcw z-@RhdkNgn{_c^66S9~Ddl%G4D#cRm$jEAdoXL13;*?OAr|8V{YowB`A*JHmoYfWHl zmwSf6LL5KcNR9a;dKBHfa<*47nCiFJ=+uU7tL9P)e?4}Hyr#XCe}E-?SOr;G`}x6I zazU_I7A&X~1XJ)^Tf^>~MoWgi$VOE5?2_K5=+zy5{;zyJK3-*EI1%iPh z;!B>+ZUagc4ytv%t__g>%6GjIBfNCLS?rYVlyx0t@{U<<+9Q3;Kw@%Dd8?P5Wq58; zY5tI-Dqw8%{^s9B&TBvG4TH=gZ_P8VWosF6)E)N1W(e!#d04yY15d zq^+n8sOYV`^{)T;-6OtQmX6Eo2v*D({h)A7E15he)9-M4-g|OddALP}lWDX5)`0zg zLi?*n_-yYUVE$Xal}h_fU-&IOKFD4zw8a7~7Wjn)ezaiAz3ASD_s~=4kcTJ5bF%*-6A=^2pnIc+wuB0~x2kLIQ(yYP!+6_qUKO;D znZuyv9ZKnL0oWP+NkO#Y!_lwS^<+q;%vG9mzB6=faZKOd4Snf5;}+KLdQw8a9-?cs z`FsV5pZT)>z=<*Bll2c$4wzgb6?E3DxYJNUn;0ri;#E7+(_SP$*uK7mH0|jA&AXw3 zOneYv>zN%&t}C@LsSRW3H>Vf5C}tFqGgfSfuVg$)>qkrXGlSvvBm`U^OqP`G7iP&>EECHc!qhxe{d+N;aR&hxE$X7?RVzl8%j z&vVO3%N{=3^VNiOjPra@g zPmk6=-FLi|2W@fkO5x+kWn_E5sy*&LWwg`XvPH{7Ey+dp#fOI#is@-j^!p39l*#uz zHPu=BDd>~79`wlDH!Tio^veLtr>3sa888)OM#E`G_Ai7HN%Q}_)r!XKtYEntCp$;W zKnHurK$wQj$c?;7 z?wK_o3ye~m=h|fA6=}AC* zxwb}QEx%vhczoxP>jXiS2ZO;d1;**khgc9bJ5PJ=gF#6HlH&iQ1vk7o1E7^Z?47f+ z3xt^_g93xBd|^>iM?V{)_U_hQmez#kefd7m zpC18nx&e78m9VQ^?_=M>S5Kbsw-6ERHvzWs`gdwrrJhTQRR)w z9hGw`1uB^;OH^W10#!z+^ik=oqNBpWYO$NxX>1?130sIoV7}Ne%n38a2<5NJ&y=qz z7b)*nUZ^PK^uXh;v2;~isuvy6f+fqu5zdN0Gy?<=^C= z=I`Tg;xFV!@O}Bi_)dINKEeCSd&axQJHgB2t>=k&b9fVZLwNQ)6P^b56Ss<=Mv`#XB%fFG`a#IbmALx{*Z!?%XK1zj%>`45IV?X zJgx&Fw3o-YTzf)j$Hwdkp)DJ;C4@He7>{d12(9HYF4vk6TFGNPt`#A)l*hPSOG4N~ z9^-L)5W?>A7?<0f5O$Ntc-(G;&_W*LaxDm9S9y%b?Meu{$YWe?7eWZv@yXk-Ga>B6 z#ySx~b2es92+ia%9@mTznzAudLfBCr<8nI^!VYY#10gh#$9P;5LTJp!j0vF;8#5w= z?b%p+LTJdw3<;qD8#B-p>hq1UKi)(7gs`2w7?<0Q5Vn=ac-*#xunik)LkRWQm>wZ) zEst@ztqGwn8`C9(I`SBgt3wF2*_bvVY{kY}5kf6CrbP$|HbxLaO?iyV)g**C8^Z~q z1{>2Lgz9Weoe-+YV?3@JAyk#exLj32sKUln2qDJCFhZ!z#*_)6l03%aDiJ~f8xs&h zMR|8@D3XoQ1m(=eBm~t*9^-P36I5?Dc8s8Uv9Y5B)l(khagGoaEBxbe 
z4igkB{Nr&735pf|aXE(wiWUCxI0p&Jj(v^-f?|h%oC5^K4*xj&35p&5aq=}O82#E@vA-v0^_iXDdOmVm}@yo1oaSA18~T*s&jH3qi4C zKTak=v0^_iCxf6^u^*SSnV?v)AD5F(Q0&-`vx%T!>?arhayAkaJNV;lAShPw$K|Xi zC|2;t<*XwpR`AE=q!AP=_~UWb5)?c5ASib5$5~EL?BI{HjG)-TA7?2+v4cO(5`tm{e_YODf?@}MoJ9o14*obP z1jP>iI134i9sF??5ELu;<8s6V#R~qo91%gWgFj9(L9v5Bj!=_=!JpjUjUp&^?8iyc zq+skPCzeQ1?9h*sAZ`Ei6;>*!uTXR4slgBbh$S4Hw}W%r0-HvY=URHa_1Ceo6E?wRd;+IUEPW!}Kp*g0y&7 z{~nhibxYI}kJq^_@o3u{_KbWdAHInkTn4H5o|Z*9q=6^Cm`%NSPO>OBwc#qjT*pwxG28#MqAYus*EDP(B8 zdr4R2@hjFJeT(L#Dy;0kbNYcT<@Ca{Mwch+JtxC@FUu0pIEWb5ReUi`8AMymj#>Ch z3DhTjef?sy0L(fHD$|U8iZRk1$(=g9>xk=*Z1# zfFHlPRq6l)~0S3Tsl7^ zLq*KlBPkjH;X?zkq&{s=kc2qyNMaQiAU&B6+7Iujs0r*JRDRLZzeCzoQrSxHKa-;m zRuUhBKamk9HZ3vKQU$qFO&>T|YlEcA`pSle7`Q&DUewX@IeB@@y;HN`>Yr0LtcOg> zQwBk9^A@E&QvzihukQk!^W@d6y-UK~K9g%7w{kZ0{Ybi`*)72 zkEL3QJn(p_u`q8K2b@&c$lDlopWgl1W6XfK8}yQEHGGklF0jmA^x3A@2U?jXEVe$> z1Y0_V=v)qaNBYH2EpBsI4>bMmKay}Z{qFDRMGspY326VECOa$5uI<(iti8a$&}FkS zbK>-qGKuyya`22Pb*oN~B|8LpXS(&;r<5AAmwNtSMYw}jJsAW0q3_^@SIC|OsKV6%1qP%Sa-C5V>O$nBezmD%tT^T2a`Lq4bP>{HJHig6Ok3<5g?tO{IN`-!2Kh9z(aD{km80 zp36y{WxJw3g{`Hvj@FlTyAeYM`DKQw*e<7UU%FJjZ^l~kt58=YDj7`P@$c{|gS&~G z(Cr~d=s+6@}2I>`yNY1(CXNXt;%D3;5u7XOwT8T?qU&m{j_KVJ!8_TDG397=sIDyWog4g z=yOwCAFmgUAUlx5>h}2hkS}y6XI+~ZLRQZ??w025O};FuuHw&LW<1k!cCx4=ahMXNP0lv$@S;AgwipuLzsSjQ|aW(PA_6zBgw3H zy?8e>L&>}E9|7-vspP}u!x^7dv&m;S?tXgvDTO>3{Pfla<>jPH>cu`mX|ri9?Zk(9 zpHpb7;SyD2<>jmy><&NA{*+Dx&?PrHD{Exz7JxYp?~2_BYq01+kG7j7(Mba--k*%e zj%Xf_h11g=Y<(TAtl)H7YkT`ZCtC+6Yu`X?yFfS_6b4O=u~KAK2BM8I6DoN2tN&lu z+d1bi`EUT@?KyiReUxT0K$!haN9|M)Mp zT72cuNmK7;Z3%2DKg#`c70t#0S+fBRGnR~~0K>)>jXszBS~!E^{(18U2B3{aTZ61D zVQtnxN0^SsFDNL;#>NIFL-VzF3WCL4tzmYwMk9tF3rDokn9`uq{SU9^Oa6(J%!oc# z6at$u%@hgz-F+n2L~($QXVbsS|3^O%`y~Gp`#)}yp2JHAn2&&?y>eSZ^B$WbA%y?c zK9Z9k{I_xVdHBC^9RP42Ro82dq%WWzEFOGhqJ3lgpF0Dt{}(Lc>%k8#TZ;w$dn`b8 zrm!X-p&txWJXlyL=m$5ZcW9bzglbXS+B(@e1VVQZ;Aih-#uM07-UnGa{kl(t3+!SNeIv&!f(iRdUfO=~70#lR|J)6YN43Lktt{;vg8YMm zECZeF103MuG#f`p`ye|Tds|1?CD-^n&EMdKsLT!hv99v(zExWG>zpCZUr0oe+~3_t za!vF{{|M59=-=i4xBbrOLjJ4ikNc;0yf1d!$mB0hknkHm8~^RT-2G-{Y2{#N?eyzz zI51FQWWi9291t9J@Xb2g*Gpn3rRMH8HIXz8VUk)BOhX7iH$EjGk#1;0sZ*enjdh^E zpQW9zwY`HQT!(819~S=pzA!7SRghiNV@o4jWsv{mv-EZ~XfxX+(oO(= zM;ov3Pvj6E(ZAAz6w^0e- zt0nPaeQs8DzUS<;Ih?b9#(l{|T=CFd{xivKQ&OL2e_t{uQRmH5)BDa}Q|d@9?`r4G zjwjadQ@rnNK6js@+l)?jTiDV$^3g33*PfH$x+`26QdwLb4;@L4k+)1jm=V2%p?z-W0C`* z9t%EIFc0EqN(_#+f_g^Ota1Xkr_Ku0qI{J*cbw(jo};~!-7lS< zCb>IW{odnz0}vRfk?v(R4lL&1NiD7XA`GVle;#k4_5Xf;fmYT*0ls!NmNxzYj#mEu z_I7YFZ9t%djb9+lfz}vj#VVB{DvP(!h^LhPZgcIFM@$rbBN0V%e|I0rHPPR%r1-o1 zf8B%n#~-uLm6cx&+}1W*FyS{;fxUtPSsbmDK$La7>^S)w&&u)F_Z{EaePZF(l2Nr_ z=oyirP<$Ii&J}m%stu90|9N~(Y5P9~l9sK-0xcH!KWqUw`$p~0{Scac1KV5nj()aQ zR@Rmdjt+iSb^&nD(bvik_S;zc1~&Pjl!t1Keh5;TANpgXtJSkh#pQ1_!N8o{EGZov zL(Y}bN4Xa7w0GA#y9qaZVb%&j*!0PEMUdVovsEaDG#JL|1=a?FbWr#2yyCSV z$RQ6uJUj!beh==g(~yqmcQ!q-wB9+qB2Ynk{E+dSE>hY*uhv6KU9tj{F8kLzE9-aZ z*Qh^h>P0CnF!fz6r5!D*Pj7Mi==?ACL(V&U`BOfvH`Hy)H9q>ndEa1>UHz;V&J((J z3nXUWckb#_Q_zafBQtF^%6AuY$*&vk%(LCaC$H<+oVZOX&_nJU%@||Np?hb%<{p{G zrK1+~Ii@hNP9iGV^diUajl{3*gF&C?K9iq);|Q1C+5DVxtFZ@~)O=iB_x4RW7?}Kh zN2@L+V8(DVwsytTC37j&zi$4cyD!kj-`~>F*3TDKAAnhJf}8@J{OsZIhrcB(PT=6z zw4QXK=Hte@57K|!{QqO`6#C4&J~{6Fg?e(mYJ1`Xq}A8Ollq>mC!H&7@po`v91)ly z-VAe!dV4v2gR@b<^-q%*!2M&@xf)V=vo+`El2BfJ0c$VC<6B!kmL7L&!N|a`Q2zYP zxe3WvmBEm$7*|S{Et$6Ck@^6U^!SvJzU3bw z4UY@Xk?Jw*b8nPX?unoNTIu!d|8&?(dfshEb-jd8Z`2mGK2rT*#CRi(i^`zgt2G}^ zITlI6k9dAfzN!Z*uN-=!4)Irs!G=;jn0xh0sh&Iyk5f|p8ogp>NRN-iqn8>zJtS$R z{MlaPq8?bWFA@v7eNbZ1J!prNAFQN(NUHzX@Sus$dYqCJ?mfRyii5&nSE>9C+l3>? 
z>t#!3AHJ~Jqu+MtdCtejO7R-o8Y>Z$n{{r}`uoqaC1r)jm8E+296q*D1h0?Wc`>!( z-U-Q}NzUF@6OK#JewlfOnUvoYGi{ZuI%gVq{H{c@eZZO?l~LJ}?n@FxQvLtBfAjxg zht%@be_??it&BZhj>BUD-pqSndSdAbGo~_s$-=vfl$k9DcSHw1>%m-{(;@Ous14Yg z*Y)9r34MXls#j+~ZwoN}MSA?81)Ukw*AtB{-LYm|OZQcU&(dK8SMFkq^bD9W`|5}J zj_Ajje5~I+L!}Et=3iAJ_x57$-#a{R!zmXgq4m%U=1=Xw(C(?CnAkzg0iQ{AC#{UZ zthbW%nWJ5qz%d2iCtNdQ=qIK4sS8LRRWf+q>AmFP4+|}xzVKky^KuV!_=CYC z&gO3p^YuZ|t?!$FADpK+Wc|^ZeY!9z9W`fO9x&1o9A9((2qlK{ z<2H>;p4W@vTb9m)<%5`RljGxutn9^Pwz^lD`Rp5+;;A%k{FEp1vnOCCK1bVM=V8+X zo~pUMa_2t)Gp|p`KHAF^ES|ELP-!(Ew*OWBJnlm6{{Y88Cphj8<1BwaM>|VPd)UPu z1oL-V`a1gh`3D9ydORvG9nqgWpLF;iZ6gYvM3}$(LjI1j<$kx1a73H*{eHaV@ACh3 zPwF54%--BFMh%8fkVTql0-~&TJ!;Q8BYrbQxP*-(zI^-N-u5?xEwc8%>jC|aUDUpT z$lRyFi(J-7+y5LsToWop3R`H41zIf7VuAnfEU+e;Qg3eg1*f=4>(9ASZVHL;bK}4- zB+^ah|C}6bYy<6W;7G3}%(`q1i-1TAe^@#BTiL;4AWicP7N2x$^nZ{x`ah{3`OmyG z;PLGkdoj@q^}9TPojU>B%ju+@%cSGACvot2;EKxeZf(JpwlmI# z>v90Xj}Ej}cgs=)ImVYa7AI)QaPEX|`C$=tw10-R&CQVi8z=%uj+z{J{OGPPR)BHR0Yt? z;kVTSC7o>}N=@{&fW0J0Pt@U&Z2b!4H*R5yAqQS<_>p@Y;f59yU)Jx`0g-j1m`7W= zVC$&|rbovpf!i;s`=?bkz#NM%`JdA?!LrR&x6V9!Kq9<$P#f2F#yX&Ll-=@PNh&h= zo;}Nk`<{JIE)3pKX>nB*xQ15T*C1=j#oU^q{X-jQwf3(D?*5`7!$bQWyx*EEA)jti z9sl`-o(!K>=lFR=S1x0f?{ctPnH~^uN2MHH#Rry2yR)Nx%IQhrFRtug_?bkw>!7su zeGlA9 z%J9P#7R6py)xe3-wS9|SwE>bp`fgB_;>7j<*$vSq9A|FMo@#H~rQJ__BzJD4dnfKO z-IkU#GDr(;u|SIj{{0sC(SDmTGIO7f{h)OzCU@JsiK7Q(o0#ToPoT|8K8(u!lt@1B zP^~+rI+;}NIqurZrGB({g|=%s#Fu=%IjmkXgC4b5|IpPT$>g`!+Ao74J~o$EvXz-Z zZ=2A6!LuRBH1D3zm+cT=7=QNQ6sZ69+_f7!ZciYQo`%+U9cMuO6HTgix-Ru2jqmLq z5CY}*^Q;3@G{Z?M_u;0z197yqkLk{U`lU3HkaO;g-z?I^%yXTUVFG!QceV9ah>u^a z-X43HNShEYpuR_cx*`0TK_tXC*{pYYbw81=jZ?QfYtf%XdRmoOT-VVGCzXzRPajth zC)1-iCEU_7P^O=*5P6&lhkDRDR6xE@ik;9mSg@2oD}` zZE29_Vdt*XvO9Ne7b^K!$ZtEeauj|0z1Ogmy9S@y;9lU2@K{ZsooQRQkhUW` zdzzV!r0*7PjN7>;l^p#+w7lDn6|_=ysJW4kRwr@Z#&GHnsng6!=O1{{L>vH)ydyiv|8|7FaWv()jDvKN|Oi z5=>f6~(b zm8@aXqD>=5)RG;p&d=|=>kWC&(tG2>&`0Eu<+&q1eNhAo^u?522CabA_9E9#)2zW4 z!R3I@5O;I;st)O)35>t#n`V|61F!AxJ=~$5+Enk}j)7mv;g7ne9`Dr=)Koq1=?d|! 
z;cZ#M)vszapn6Br4Fz* z=eXM%Bl0r!fQg#d%YIOgviP1sKR+dKGOTF1xUe%|u6LU%)qmon$>U?9C*(cq=DLg% zdf>q)h4xbY%STs_djcy`B(|RrtdY|hteS0RE!7`0JHj%!Mhz(RxvmtuLRJ2VU!rc#1y)+44t)>o0s5TnHefWw)yHrxC5~J$+KKo1X{jPO@r`-f0QK-RuS<^J zPyiB(aa3I+wqsFKmh^Sp#KOE|LF+11bGx>yEV`x6@ zS#&3me{bk0sXYzjE32m!z9X$h-W&HIg8+&Bxof5Nt+mm$uHd3ZY`OHj!mGB$ z16}!`w2NJbVdm{Y`3Nr=W}DlbR{_dydFF)?X5h*rt=3;4E;#9Nk5jD!dfz2$ z1}ykY@@=ccL!llcWjb-6>MgQ-j>5MtZdTxU*R5+8l=Ihv>pQls{+o~>;S+77>8Al0*IgZZgGYOO(a`X|PwqcKqRNmZ4e*Y5j( zD;L|-!1-9iT_W4x38^P3=Tu3LivbEXFh9Ok>R8zOlNRPdyNhHYlqX-szTXb@G^`w7(&pM#=k^6F`2FoRkg0uE>l_Wp zB%5(b)kV)JPrD6t$kq6Q0|A*xUNU^GQM@;l_wqedpZf!Mj`(tCLNt`GG>s`<4E01T zev`!CSnB-x@W#u4Tl5}Tn>E3aD$u@x(ER3^q=w^9IeC>(@g(LgY zeuc*VeShH26(vUNrg`-xV|%^J;zB(|n<6Dlr!?o`Aqmd*AztK-{y9VX(JK)yx$(wX ztNlzb`mx1}X<~XM-7GKZJxgKX)L<`i{*#X{X8pjOyWcurT^8&`JG}9AybSfowf^mF zEghV!e!1>MpWN9cKlqMb4-k{_Mdgf|0Ic`;!}$Lr1^%Od|G&588?;!U#R4rBU|C>I z8wzjkU~93AZ7L<2j&09XUm z!9F0s(Yk3}`SMXzV+RAIjU5c#;VJ(sUp7$R?_L%AE*l-M;e!4gJY)1T0nmNBW63&w z1(5mL=6%<`ILNu^JH)$I3uqHI+jG-Zz^wS8FK&F}1O1X)J@k^|$eCSxQ(6JM z9i(~rXlIQx1V=t>ZrI4z1U(WauIz584NL-!_MakPN<^Z5ywFS!L<<8#qhTsUaY{FR zYDsHw_p?s>od=Xat|aB8zvVG=ETvhz=3IY$JA^Z$gr9+ez6Vgq`8)qoN`PN+_4X8KionaocDH{wD$HnGGf=- zi)%i=AQyS<&cE?V4fq<^ZV?9(pp{2@-UY>vglE&}kh^vabOu#U~aE3KW3xnopyQ@IAD-)}BcruMB~qMgzanZZKJ~X>Qe> zS9P@Fiw_$Tr+%b|yUdIaoooo)w;ax`gt)GxA3o%2e{x~ez}T#AgUNIv)_J*S`<6}w z^aHoh77Mgk;D4tDesm1fA8E5y(?8eO2LjhBNqPV=>Iafw)pXZ&4 z;l7OWXv({S4q!HP8*Jn8)SGb#896Lr%v9#KvbV>_6`sI&y+_^n@t$D*Bmarn?Z+~` zbe%8vahVJ%dfboCx-x?~q|p0lkAw&ivoLLsq48v9?s)E~xyoIc(Ko!l-i{c~#BON+ zS#yg&v(lr(i3SB%Q1Gc{hk|AR!+Ct~#5JesOaZU>Y|xd-V0Ol`q4Oo)OqQY<+3S=y z=%!%ZWd!kG*nIE=_UE>*d)e~@S#xHz+Z~~)l+NtnRR6o#hY|!|q zeZI^`m*Q!Ulx&%}nXXAak9dQStbi&b%{d_B*^w?iTA4EGMb^=K-C@D)-~)E21II8k zpDlCjGuaR9IlAAoM8%hBy*(^vmBkn)?E0HWH%$B)>y(|()n1JQ6=qWw=~Yi)zIN%q zcYftKrlQ6?>~8Nt%;bwUNtazb!T2`x!TA&X!K|7b{9L91@Si%9Bk4YdiM+dYtYPhF z;7bL6ST#eyv^qL!$?~_MV0z9jj_GR;Ao|j_YD`u~MsxYf3w`>x0OXo83uC4o)4at-J3bE?dx~-`6IyRj#pPimH9Gq z9RN0PHOQ|MceVow7N6qdzlMX@qMe-HrwqXM!J%2#RY>XhKbNniAmG8bmaWACEf)Ac zZh_4)6w%aNPI6l&O~aO~maL{A{oL4GhD5rd=CVH=hSaa3M z$;J-mrL}KtCa*VHi)dqW`B!7+|CN|@!N7m!wR_g9*q(z_!7bdxskQ1W@}(!gop-1{ z=;&RxC+*5ZGSf8fnSP`iP`O?l)_-#cKz3K?mFWbVvd!|XxLEJ%JWbH zb3#k}9+LZd;QX;sCSc@t<=C#VTrfmEcYKcw9AsugL*=cCp){NON8A=le{f2Tf_eAg z=f)o#66uD1a4TykM?Xg^xb7al1v%JRSvgwU1X{z-u$Z%hW7ER>l_Lupe{e_}e{iXn z`LmZhX61axaW^}GMi9Q$E?C-dVKV{RIPyxXmTSsz_fgAt zFA3HFixk#)$252(&ukBkFly8NC;1gFd84Nds3*r5 z+|}ED>AS2g-*jFhz{cUFHEsxx(W-Irur4EewQ}?>Jf$m>zdA#JoqVYcRtHTFn@yX_ z@MEbDY+e*9gPbm3r%Zu)36UPhQuaNH3yI*Yo?lgH86aZ0XBD25X@$7HF}+&ldR6 zk1+Y;$yZbJw3|f|ZXG*%FMsp`djH%SMW6FqoLM;8l*%L>O~FbyDvpgW_z+44WrVl& zP@6!bQ_Zn88?^dEC|$GfdDUmN3IC71KY@#}>;J&fe`Y33UDo%?>C=l_4czwht$Jm0xq zO=qt6^`6i9oX=T5*E!d@@E$1ofMx!rV~D@KWRCGjLnTf~a4(6BLHv`SOUEz%gB*N$ zzFePRTpt3C%O|QsaXfll9clgRK_Q^u$dd=!CqyXYJyuu~bZ<}yR5xSrA2 zv3a*Vsb1KJlo6vlEY~af-%h@Mo=fur2M260gx%IF`8UD7g^ws+Sp1iB=OWC1;RAO! 
z5mG2#!1{D)znSL0EAUb3Pl(UleSCWKI!V)0MCMrf(1&gLTAQ%!qRzgr}EKH1x#0XG>y$ zZ?Z$u@+C?<+HrH{-8V~Ou#}}|_CFI0`tt$+3X{Y9>;1j0!1`YGukUR``~F>1`!T;M z03I_H#)JoT&iXI^PLbNd73<$RMP}(K2aWL4)4h&70`$6m&G*3U7XTkLUKS5HXD2aG z;sqxj#|>}k32{Cyr`hq7?hUkv66fQCPn?erO7X!e&fkAfu8$RYWbiHYYOS_l*JF7b zt>NtC>KBf+)-WSqs8=$z3e44ynwNg9JG{euxOom*)Trk(NjlZXLdidHsox5_L1yrz zdiH?FIz))`-%h-L%;aDrz;V_>rtR!0nh@vXa-^Yl9Nnw!0O#ZJ;(S~mTu+>j>!bhq z!rSA9m^tNy*3Y@9Wo1pDICnId_rYedBhg^y<5KNh9F$(|Qb^y*|uL$;agg$Y*8< z7dj~ApRB0;Sj#klF$;AE-OAB|KDUBg!pnQY&t_>3Z}Pr^exaeQuEsw=9{th7FWL?; zzw!y z3%@Il^Km&iA5+Nh$_EPhI3L#s=Xb?%KK5fZ+#gBqi>1dlGe-6g!VFf~rUoot25@}% z3Hu3C`v-yO;_Jp!q`6gkOAI4$cP(`FOlQ!N(Nx zfkHm64^YTg;y8a`V|>ya6z|yG2VQI-tpR--Zw~1lu@-CP+>z`$#p{ZL$;L+xB%I=b zZOiX&+C*A|;e6Aql<)(;7T1AJ?< z(NFenzvXG7IA6}~f@AXYqMN&BWs#y_xtY*t21OPzrJRENp}DhsGApt8XKDGO{1i#7e{ z)BpJX3v3aYET&TV0yde?=b)7yEDBY?A(PoW2D-jtIxP;^?~n&XEkR&MM8OgWXJ&{C*qn7QbKK z4CJ^PwCSa1?lZy)>U~+T=3a^+?25aW?`(eEr~z-QUEMv{JTK@cu-Cuck z+?ciBOKs3Gp>-agV|dae>5q+JNKu5u%oEH1^gZUcXDvNYrMD| z()~FB!HtGcF8?){_-gUgC(g#OL7-1Od+#&A@g+GE>Yr14!ZJ18FS2wcSF=6+^tIvN z;eRIw$GgUh%R%Y=mk!*X1j2(o3m+FrcFn3Cm;X}>ehsv0s$42t#_?x7?QMIC(4Z$g z{Oa4#dZd5bTwBAupDkehgW|PsNB;y}aotfSvBtA{!(fn%^3e>*`}6T5xbZWxgH&Ia zv)~%|RZjlQA`s+y_EFs`?rIp-xyk}63;eRcuUl>TM_p61_cxp-erxJC-2qk~xckHR z$0yJn_a?Yl+Zje5KY8u&_Up^pg%7od=d2v4!oMPrf<5S|64hc)GXH-KDl)588&Ev4(p}uSk}~zrx~{ z_g`>)ijh>JMf1DEw1hPW=KGJIX#`_dN!|=n?+HT+qK8-9LHq6os~>ALGKHCu6PFn5 zr9k%-L7w_87wMgV(V=}U-Ql~V>q`e;?hl7}jJA8+Tm-gwKH2_oV}EJD-8i^d*A^af zq4P`nHeiKXXSavsx=G(!SwOE4ZRsPnhyCP4d+8{aNDD?tH!VA0dtj9V+;=G(ydB4c3A91{IZ;F4rs%<|D>ga9J|33}t@W8wn?-|9 z))0nCuYAy5Ls{+)zkXWNn40GVE0l@k91#2H%(I4Me`qX}DQhAuY zG}AJ!f_$hRi`32il<{B~?1(ZRTXLNVtL#UG8>tPH_Ha&KF!;UEpO^nL=u{@iu+#%Yh;hV#2Cb&$6wI*&c|O+AUp8;UrZql4YHUFj*!X}Q20!`Sjb@W1Y$Cc zLFY5XqR#tXY3o|>u=D=cAK!TTuYUjIy)uV+?hmfBETKoor=8bWT5!hT9?!2b(bgY7 zwniCJTL5v~prIqPjA84pQ0moV#<0|@?XxF3=bk`&d!p;RpIUno$Sq*`t#fNfAUq^K zB~2UQ-M$?m$oR`P+Ao#iUa|aJvM~3PskRJfN8hTsj^dlSV9=;i6nZ`n(keyqjv(jn z&_cgI=0?wy#T(EtcfKZ)A3SK}TN1*3h6N0g$sfmi(NiWTC1dmpBXnM_S$jwyg~c_` zt7Q7jdKozE29n!0eO_p{L1(xvr92B+x*kk5&?`ad&9@qMT^9dNhxqyXH&<}Wc6mdY zeoZs_kCVl#R~)upCPyd3@vV$k_ol^B_B-jBhTyBnOTq46(2(hU_eo}Q2@0#MCM3%6 zI+o;kE+~{dS-tS5j9);kkC(-Jy0Sbi;nhV6nWvcWp3JSU{jp0Huj~yp zszYUg|5ywBx_>eA{na<^+?@XY(vY!&r_S1+ z4$f6NIPc8g26D|k?Pu>z2W>T}ei8f9u^(%D%+$TO4Lh)>O5FqT^3QRykEW+%MRk?+ z2a30W^1{aDqYtKonRCRq&t{~9v}KvC-z3|>&jU4mM#VRCA?+e2A$o87 z+|x_6LVU*G_-ZEyo~ zMCH3su4?}OKejTe6jxb5WdW51{!I&@`G50&eg2;&7E?tW2A@Tz@X%|Hd15}FOyki- zY!OY&6SIY#eoy+YA$a(g`#s2ZT>M9$znbI!N7(i7ZP~dyd%*IQXP(@7YYvIq2K!>^ zeIXbHPRj5RBlu5cIL-6TA}rk+5_74`b9c4_oPYMx>X1$BUhua19^+yPK92X4;JAaw z9g4b<6SV%=cu4np52*Fd*IkO<9&I>Jx7FXo1g?E=;XJsP4y=rHI@I&13*493+h7!@ zC)Cp>y2$XXOQx%3@`uK?wp>N>d;K6y-oM30iBEd@bokwiHcI*K)V!@BL^8}gXPH+X zc z*}mRPACBsCV!aISu(+OH-Run2->goS;afKq>@N4|1Aj*Ncj7p|Eoi{ZwbeAZ^1{ib z;U?Xc@^OCOA2-;;*XzL^0u}VrFJyWeXpP37PHn{AQm=PC>Q zH(TJB{~Q>(XX(S_#Ieuz>hyki3v3y4&PUYuC1$hY#Mkf7PGQUPnY#HEcK~h4JR;q^ z7USi|_q|Ln2ebOzxMxer08M-N7;lx&u zy5E+h8&Hl-U-)#w1`i$S-Yiax_wF0Oe8hmThlBS4QcTj6{Db$fc$#h>J6?W2jO}=*)0`jGr^Cx-~g8}4z<_+AZ;OpRB5XMopv5Nf>$eT%HVi}A4Y{>2z1K=$dnM{-I{#PIY+exYwGb;_Dwuf5u#sju6o4@fuX}~w& zw7tpcj)4h0$`$xNI`0gxKi{=+LV63xoT}rrs&nmWN+PM@bO$u%I-VDlF} zV`G@+3e=8qP2s5vaZ`)Os=@q*ZEaEHp0IwzyFo9^O`t_u{rHK_FMvgz-wYk$H?XM8 z|7eRj37$9VyQ(e(y~9Ya*ta^<0Ny@jII~Y*M|h(AXlD4U4&c3eOi2DpT`1n4yInYi z2p9I8T|HXR1X7;aMcG`mfICl5J=DD?T6J4eH2u`bcR<_iv8d+O3$WrL_o%t2Eqolh zU>A2~9r)5_`#34n8kUjVW3r!Ez$c8GoQa=wAp7MPPmk6TFwr#P1o5gf9Pp-fq3FV8 zFzdV74xdO%Xleh_*ERSPpxqvj_du-~ye~Pm)qT1>eATVMhmqk3d91LH0TcqX9MSi+ 
zA!Z5BM~?m4_EH!6^*gmUzfA|CqSASQKB;@SRgW-VXY)*}{46Yc*Ebkd+!IDyAIk1q z_7MBdD>fL^*B+}1y}fR8+kU_(jnbNz+mf>owIDYN)_Ay()Y3FyL)9Ir@R^zCQ$&9UlzM?t)lv*gb^C6(e_ibarXR{er zHq4#Ct@w6#d$b*%B=dF41q|q)Na^MXu(@*Crt$}q!TchvMyu!r*yL3kDs_&< zfsIT*J1_PS@Fr{2gVvKXu&1V*d5^Em2eYZ(%WZD@V(!aMdhwbTe1}6HZg0VBb)eans#J2xv3%8yT z0Afs=V&AZ1!6K?5EA9RcaOJGo?vR7N7~M8)i0#WQ*yQA8_x5GQV2L#Eq_crRSlp~# z%eNP1fbsLcv2XO z9PIhUJ+@oE?g2ZK=l{loL2uy}tipocO7DG38VcsRUd%o6co4=O zIfv7C-jXiMHrJ`+{)`gbBy$kqbD?c5R`Ty$b+X%!s z^iOrDEb#xm1@?u;c9Re2|Ir5YlTFmL3EJ8^JQkV9V4`)HOp%Dq6bR6Z+ZYUyP)HVt zL}-4J(m9Yn=~RS=odbGVssG>nbY1k>1I7Q3K3y}RzK&*cboWp2y7`@c{77Ahhxhbz zW=uF>0lV_=B;1QwliXLy!}+g%9{gll;0SYqf6nr;>jsUNqBeN_Js%sZ`E@%-r}nKJ^nkKkN+Nz2gfJ2 zmIgHKHiWpm!CR5$r;|ftEfpSIV4}Bd1A6aRZ^CNyz0-q3f$YFDT*RWVM0AFb$rSU+ zY&7>wX0k*y*)!-OI*m+|&p@AW)9cJ|6#l=G;r)?9|LD`H@xSmd^N_x|)5*>a>2Kc{ zJe<3l;#bfjw1sk<09%HZ`1U)g0p++pTp4PTc2>(8zIi@zU%(xu%lvRdsL&W#P5%V>E!z&lP8m2b34TP>T=~=s?n^I1` z-e!PstSfFg<8-G!8szC6I!{dCaLNHn+Sg~GYrHt#mH&78;QX%ooPXf-H7(5)c1UpJ*aH7vJIA-`iR#|B0)o4Z@{eaShGV zPJ6}=riu5gu~6z6cgqXmJ!R?s(_yH#?*P(Y`nIgZ12gFX|8(&WCvyFJsyxj<*hKYJ zWdW51{=+Ts%j3S@EB;pSCiD6Y9*Nnbi?G#1?c*BfY^C`H6pb^ldP~(0j1<^cn!rgf zBVN{>?+wS6^obwfjJ6{$-bfw$tOW}!3WIKYjigCGukUCnv5-d7^ULq2G-0OA?1U3p zjhJ~)J+ZgiYfz@I@gVeY6XxBxdzWF$Yp^WSuUA;0nbe!lxj%o62`q@{e!6*?jx=qo z-D=Gx-#}c^rtSOdPXcxlse``E7{;5N3Sn&Oz*26wZ|}8MPkQw9@m+h34B@lL>jTr( zT%|ipADj5Rd4m~z9};pP&mHph!dLaVWGv0pLUYiU&872hHHNyJF^2r&H*WBViIif% z61rLGNUNW6=jFsSW9jEF&Ff9Fl={5$3F@D76NDN*ENZ&=4m?^pdBkBe3u(pBSwD53 z_m=j>&PC2nEC#Jt3AN8>5TvX%aaNI78Q^jW{nw=Qf_@vfY?-!-B+WCNHadCwb$}I( zXLMU?3ImrcqZXArOTFW{E34j^!1qf9-FAIdgXIH5{hw#_gq-g_G4HgTp`Uh7tJ5tG z@YL9H|8En_VR+Dp6e7v$=s0uF|ciLMn-tj?ja0Lv6rP7WA(!F*{*Rk`B5_e|PYVy0rMm_;+nl zj?%2PE7Rv(qf3{BKC?dA><5|ewQU_Q^^-cU3sAch;3Tc>zk8;RfC^okLMT@;GkB|Y zsjZzI75WK8i}qf#hJShiAce)DIJ)(8Tr@`%C|W35v}~#4z@9SyzdM{21Ip;$9JuPn zO4jK7Nk>4(E5p{fdU1^>%TFUFG07{(C%L9LLY`-{Cm!S$-90w-FWgM2ksxrjx(+uLkE|v%U)aJBQAlqBUMVbSie}2D8}@ zCdQ@yP`-BnKgWNE>tT0F<7#|U3pin8`9OV}p;A6B7ng_gaUQOhJRf@~^~2@ixZLNp zhWPKhhX3rkTuy)05Vx!0ROc!Ss4SqefXV{@9TxcI^1PaP{9yGd|J#F)7M(o%3^V?i zY95=+fz#I5a}Fi?!12ch*?Ci`(!Oc$at!BLOCOt893H&1mz3lX!H+m?0`t=6-|?%b z!f=B}RitWl>Ej9EI}Yg#hW5?b!Q;}cr8~aQb>X=6h0~nocFg|bB3*v?#g38}9?-sf zhI9#O7`)ZD6_Y;WNIm-smbrL)LV;F!--X?VK+>s3kMJWyqywohhri`|!P8r(L=4mO zhJh>k(3ZwpNz2I-!+lNM;m1wJqt2Wj1_uu1WzAXEj^&ImE273fs)Ac-MT zf2d~zhaVJrguSLgmPb}c*(QQC?(F{U@R9=K_|LxngA5h?F-_RgeBaYuv0JX#4xy?Q`{rL zs0{4cp;g_n&JzwCcUavhsV^Mz>DlO}az|;mj_9imUI4xgeHvKk3L$ZDSN^osn7h3Y)&Odx+&qqtMC5f zA@AylHVP9b2{z|r9;P^3(d}2Ds9)YoP0EGiFC8>&@0Es2TC>I9h(iy z!eG<)n484F;c$Q+Y01IX1=1GZpmEm=mrHlqaF;I%nk$W)mv0tuqQBHF?}Ywvi$FN# zYQyYIr{&UCotohz#{@`&T+X<>d+#jO)k&RsKi5i1F&SZ3He({RygG05vH z4I~RknqB?+O&MR*`O7Y`DI~b?=ZV5&eD~k@k4-&obBEa~ah$)6zieXZ^Ij0wbJnBg z%@To@lJ^_lMgI%l@6HBjB0ak3@4mJ*L&!(z>7svIxyZ3kg$bO0NHeTF0i{oI#PuC~ ze!5o^@MYawypB9T89&a?jM=d|=y`V~j`L5rdhdH<@=N~@%Uvc-6;}cYFtM$8jn*kB z@}+~HYdd|21vE*WX#HWNn-a(Q4&F(X14-SKIL`lYwY(+%yOR>%G2%>V?;kqQedjy@ zDNaj?J1wG3opw_L($@^Ww(mthC64pw_2j#!0!t;1^KU+WHR>+US%DiV)2A?tGLYxX z-@i;&Zhx}sX0g&@sxK-Fs4VcC1%BOZo9*Q`RFWK}U_|3DL8N$^zyC;0!(YM|-EZE( zzWr?dCAX#OA-&h`mEio{*ID~1MhRfi;7>oNK2BBg$sK_y{f!c^I@hdT&Tc76{-dA) zJlmu&FyQDp|J~l3@RmP1(DKQxnMq+-3$JiOKkrQ#E`JO>9R6)%7~mf`{@nWy_-f-- z8lw@P{&4kzyJM7c2p^VQ>sA!Jg{#mP*M}fa*A8y`cHi4B6>F6GWUXjQ`gVQ`Hpf~t zvN>WWjuUo7jGL~1VG9^_@?uKU{S;hI^j$WuW(8P{mD#P%a4uMk>q!V)l>8(ZtOiXB z9)%w+SgcIf?6CCnvk~9_YJa1EUpRr}uy?Tt@u%5&L>iv^C1=g_kq^hzhk~+if}hpC zTX6Z&Gev5Pmehx0W+gRQ7L&Fp<$Ere&5lF-1BYgrCiwos{e8V6jw8NB^~h6se~@$M z{hVN}&kR7%r>bYxMBs7==ZcysPd_s-jXMVtHrGVp@e)kLy*K1EF~D2 
zTD#+$7}(~~360xpBmVpZ0F_Fm{dTiQHvex0w$8iFvT)u8L#En?&$9dok4rq(vBq5M z^#6(R1oWpmR2EQKKxKjdeG4Rq#9IIJ2LOB(PkiucOb(CFqzTAW0iP}6)9Gv`mCh8A zDMA*FP8D@Jo`s%tJp7j&Pu6xh60y=0sWJyhecY$8O-*HE@lf{kgR>trzuH@((z8Guvi<2jx1sYERIk z%Q$~R*lV+)Ck&zbmNDe;Pwq-u-Rpx_Zzs$zK1=dJ`o-is80uINBmEtlP1vHYILK zelFqc{fcP@P;;TpM4(}=JVPhZK;Pc!J9 zx4{t3{=f~PCjPR2McX0QxzcvvaOlg8c->$2|9PHxdL_FAXzLuD9e$#_QlGO&jLJ_2 zTEI7jbthPC7o~iz%}3Uc5^Y$vv|sz@%W6vg^OzvDw;4nj8T{3~`zEpi|8>KoYo*^* zb~1G-+OoVWj-P*hk(wQLjjD2Zaf7Hrl?7B5_|Lb%F9+S=pbNb8B> zpS=Jz@L9~6*K=_PF4}uWzjLIHURZtV8qZy|b8!bQx=-ncC})G-fc)`Ec)=fV%Q&;K zA-cV>RP*(5Gi!dy8ROD$Q)6W?_*!PufOtt&yTYn1VdjqVz| zA%69x4B`Ag;E$#($#q^CjIHB0Y3=$YhY%m<(YDwp9h_P2+^a{`E?hn#w_)PK)jsJM zbbJuyTeV9m|5C&EEkeY%-*+l2{SSDdxVKIy;(vA6e68jWayG1g+GB6cYRocsKjWd2zzYmuaJuzsvMU2KaFl+#+c}c@QWxNkLr$yd~f3%3UczXRWe00ak z$Qg(~cC5W&!yn}64Oq2t$g{cFo~fp8<72lg<)q&2_dW92T!7Xr_3IJ4T^aAC$9<;u zK>Xx+B|R+ufIlC|aTFlFO(FLBQjAg#A@@-m)_8pfwk39<{n+udm2w6eCvMj$*#U~4 z!>tFrX5;Y^*3EkSxPQqG?7==>;kJ=VJo+9@Cv_m=&kS4fNd#tNe|i9bPX6oX|GR^O zQPVr~Ap^RL{$ zO(^6EL^K*p#G})MbOxU+q{&{s#TJuA93Dsh^gkv-Gl7$VAqqR4|9`aA!vDY5{?W~X zP3A3L9rO-x_sg3f>!R}{J4rbp=(QpA$Q)qbhHzT(4c&Jryj=J#?#5^~WT$;(%`AAz z9axuQ_l-`1qgc`P^|#5e{!Fl!4xv#D@kP1buYB!}bJE!1g8jpXr_K|Alx zXTnYPPE~3Mzwqi}`%lELC?lDtK5YjF-I;ea5MG$`=C*suHOcdPm#eI5`@<`HW(99a zDd%#c`JuA-PqWTo3sJfZ0*;UqPnC1au9S@UjPSG(m4`eRI75%4mtudQ^Dpt;OIwg$ z_npS8$@Ix+7rwHzyT(lyc?Enz*ZX)+O_b^LxKy;|C*p}r?Cfd>NM3L%KAduKg@;-5 z^;_4KaBaqTX?#R{Cv)%843~lM)!-YaWWSGcXg=A5@Aj_}X$KeNC5r10i8uyb(&$Al`iH3_WbHf{> z2g~eqWqBT-_@*7$XKrmDcesL^Q`2LM=*uO^fx!{Yw-;K%kd*PKWOkO%vCNmOh&62f z;t;2JUzsPj-=$0Zc65Co`*O-DO{pZ)Crr}#Ny^36o!PsC{YK(4OGG<@(yQ9<{y(j; zRq3m;fXV`YYk^w~=K#yevozuBSI%zU%m& zTb$L0@p_Oh2w(L0Udp`%V8c z748^XEje1bI=Jm-G0O_ja#^dZ>f>>3G9*+*z7M+`wQcna^?}ys*f`4+W zn^;bx4^hFAk+-%ioA6HZZNb!yrA>MmqtD9L;zOIUn@Zy);Mz*p{EVb$>+$7I9pc zXTd+gjP~moeTkX!lF>y>s_V6vz(z z{+CD~4v_!yJvE7` z3w`t4v=e$%gB`Wmhp$9_1e~n9n32&i)qK)tP}XPS#pY~vn3*$S)_k%nBEs+}M&+*2dx zO%14f!?(US(F+VDtD^{T?3R5+S4?!3pweBd~x{_5@* zp8&_>_?Fo^@4)=k>2}&lI&k*X(1`y0J0Pg)(4^3juF$f0fkRyX+hCN3{_Cm=J-9!t z)mrb0CR`OTe9zEWedu;CW&O3&ufgn^0hE0MC{Sqns0m@T&eh1z= zK+`;NWd8{Co`;n~cA49c14C5huC(^ zsz+t|Au!ti9(#Il6gG6yb?W#`y6XOrtmdi?l?DE{Eb#08g^#ZLiNd*r=a+Iv{BW6o zbsrt}^I}^xb}7?XdfGD!JAVF2itpHYSoW|-3tls3Vplg!jJF&fhBf}w7~PK$1)@rg z$9(qVVBsOlA0&Ma#cJoCJpXF)QV`C#NG299z%E`-pkq~0*tmua>tJ0C5MuTIY{kr} z;NDiffg6k_fiZmkl+->Gfa{RO4i#+y`4;N*RG zZ`WaAz-qywLA?l3Sds8bd-E&~SXbIF!m&9N^uMV2Nps3ltfI##?Q>TbfEK+YSG?~; z0sjH3#yA*gV28h>Js@UI#h&MXtE@4egf$M%W^OkR!1~AE-MUa1f(_iAQ+w^reoPwZ zz57+$BH%fH%ErwjJ;3ab@7Ab)%mC%zo)-_fJ{MDuu?jT1xB%0>I4bmn*B-2H-r?9$ z*U=>Y=jVOwE-nCV+>obdy!L>aSfQiC{%P3E?GJly+u)5Y@H_a$Z%jP);Bcj8R{Atx z@!fgRmi6ACVtnG~D_-%ya`J^A+)$Lv@j{o>?r zCu2q6{PYQh<{8_;F)gk9I0qK?zP)*{ew+w%CuF}!%Gi#vqsE4RFb@DW>Qig?3PV7< zi>J5d+x>s`{~2s5ll|BG|CxiVJ-mTwMI~_QOntM+mt1&o2Ar`t>3OIBuc1Lef2u=e zf&YsZ*u65gyL0=7`hUZy=ZI%Vr4>MH_FD<|gNpQi?c@7o9x``SXQ)RgK; zWk0||*4hnM>}>uP_Z}Fq^OUVR>?#MxDFtKROl#JIMuul|#xA#ma@-W+_zm9Fxt#02 z-V@PRw2#nB`V71b6aIK(sRvQ<>O62fZFH7h>>Tn>I+u0MKQ{%?-Iz7`5bmzApvpR3}S-D0I=XZ@4=i}kMgLB5rx=B*T zgX4H8m*eoa*X8mZRSpeqFjc6sfXV_Y3;b_e;Fp6NVA*nA($aHG&7S0U?Zx2a$=2%p z_yTZm$~sK~Cbx%cml}*3S1elLOw1BG;Yy4E7Nn&vIsJg zk3DPacYE9YYuK1jhy7E;H!;SCyEYqiF9ICjRq@?ns`w`89jY6&UiTuFVPpRT@qyom zcisrcLVXf1t@{JMW`_F;BnRMf+7{-Xm+=+y!IFxXA7pYA@aPbkl?s{ zn}BGp{C6919LTo|h?nGrHEPZ(&EOXATIVX^MY!{lkcmvhm&YTL$ivB!uG<#i zOXKi#bt&l4iu35U=+lL-u2d>l^W*qtre4pwE0y?2MaiFBH8K+Mf5RtDYWG6?-{dIt zRBnTS%g6Qo)9QaJjYFmW<@5hN!JSIBWf(^X27krwnuNr`McUcsVb!B#{=bH1e}aKX z*A)e-LuCP#1ymMLS>UfMuy$pveILSDZS8K}v9cRQs|{8g5YX4o`$Q;^9Wxd8iG*Y_ zg(;*^$aETCz((&UL|-{P7Dx8Df{4kKZC6i- 
zeb%^nQEobLm*>3pFLfrc>p9NDujA)9??HyYLs^Fzlo{%;j(_#KYT@}`S?_-k`!&uU zdU%YPkn!TDVBr0OyORJOM)f&jmi+4+Q{dRFQ?y~ZzvKAtaGvt~$@2ZH4(v|C)|b%R z*Nof>@bE^~f<3E#o#WTB*|SUaHX(d3R6DwMvK3DbWG@j#9&4zP5sM28y@Z#5uvu8yZ5Z+MCy~V(pweEC1jaH&pop0}1F$ zXVpi6?7*u&pCRIiIRYV>%BE4pA|6%D6f#6CGG9#P^T=ZP8jYK)xt&!Xg`HJjHV~72 zx_9|BVpfmyxddrJ-h2d*+57nvz5`pd;zD$w=iMan|4S!B3uJWLKQDW33!LzRs7u>;00!%qEUa7*ytZ3x4|&xY9^;3qPDBI&eUVok zzs~6Js)OcG#2gx1NE6U0e4&uV6mwWK9$!ph(1lbUUB0(ZX_|bN1%;hehk`_q(C3dm zCH|cy9iOw^i~#Xak+|Q2{@%WqZSxEuegWshW2Wb&25P_I246>uF1VThCKu=9zsKd_ zeEhs?DChtEek}j&x?KP6syozO4g0U0|Ehw+C>J(7Tkg^2N73@T5y5yB!7nQ|Z4~|H zo$EhHG#r(%?KdxfmA%W!p$XgYDj1Cm>cYoiLKnUgmv6SXTJl@fq_N1qRZZE;9<;%> zgi(fx5A>m5Yy7&e8r5*iN-t@wX^@_7D8WxnouE5W*G*ST=Y`IBo$We{bVlhos;O(Y zY2VY%(N5GBYIC)%w3^hXY2DPy)QZ-cp|(tmu5O@3(yUi|t#?E%Mf0*|s^$vK@tUq` zKAJ@0Gh%^y6mcstkT`4ss3lMLMrbqrn_Tr@~FSZv^B&|AMl|ABt4{sw)q zKGe6-Yt}1c(kV1PPfTM`Q7eHS1ZE2vY}`!hRDv`9{qI93jj`$!)?dltP?$Upg@)cJ z-* z@Mj883r3-n`;^F8Y`&~@6Z6oXjTDhUK%p`yBB6jqVen~milEOxISUOQc<4p;G_=^0 z!(fWV6gEX5=F_Qk7MacyyL^=E#iVj5EH+y}A(K&zVm3`k7N9nh#o-9~e2&d6xhxJ3 z-5^3uE0sa#k@<8nQh<-7b66Asht6=xRp=!W&;%?Iih(6yv*}Cnk~n&OrnUSwLk|*?g*qPNmQ|d@4;KAPZ3L=}lCym`s!&8q$_e zkqy#BC}lJjh0b8mI6Q`p8qQ*n1!NAH$)__>9`VFvGKWQHbA)8130?QQf<>WF(3|La zOe#abXED%b=u8@&#-s@OG#;B`v0TnVg;c;4pk)FgA(=^KiG(Z`ozFv=M&a;iG~-zc z7MUZUqZ~sJvKB+cW}w_c#Y@bQDQO!dXHf-AwC_49VL}FlOrx_Yd=8n%M)9Jjf0!cc zLkbpyj%+EUAq%ikY6MB$5&nk*56A`tKd z93inl!6Kuik&*6V2AfS`2q+Y^3p|x2rjSuqa*P!*k_AX(7Ah11zKG9bktt#^U&P|0 zwDLqu7BN6B3oVxrAY1W}>SCG@#Yh+ODI6Lq0%9?X;bf;^A@fkhLNbLapz$dTo{)x~ zZlduJ52co8v%izY64TKePeo(_8fK&7%n&je$lDh2cr-F9(R@>11&hpQAlI8pC3C1W zmYBguRhYs=g@VIGmN2KtS)$G{217`u3D^P-3*{DBEMRarVln!{GxV3Uglr~R#Afrv zY^p%a6j7*D4qu3>FCE#KMz@tHSah}k6)2&Yf*K_;4KbN~A)CYCF+?c&#ykZJm2(=6 z%om8n0xHTEDyk%WA%!NZE-b#*E;)-Y2bqK>+Y|vMQY;ejsT2+!wHrMf1>8Th%y}Y zQE1%|>KvF9)F`oqRI$J+1830~WV(pPpis$T)Hv}u6poON_P7*NMJ$fyYz2!>WuZz> zp$b_-4h^-eB072~fI~s|nNa=g{Z-CFcbSAtiY!Se6{y9cqnM}+8Y(!bfbod(wh;Av zsEqPOOr992#ulT^H3V$B811Bq`ZuwFR4$i=nt4>P$ZU>?O{1VDg^Ki+RZBK%@Of;5 zDg}!pM9qQ#{g@}#k%g#%WT764FBZ!3j3q)% zErpn^V9|wa7GEHg^)TpR2L`e}MI=I{3-whjK@VIO4HXnIn@m9)fbs;W|6wpG6dqMT zry;wEy7f}XLY6|k7uqY7O-G#-8x>sCBOp^yQAfa1QB0^mfs^0=3gT)fE1*C2Y7J4@ZYL3twG3abD1!W$C zClU+js1K#mL{znTq+@D&e?KPqzjUS3Ua0C**r?*8Izc51#Y`55jmj6YJPY+Ce2SaA zhhcHh_yCnEl#fDmH$^CrH7#V+tfHJkBL+7`?~b|=S>+bc$*5wYMw5d2$j&YZWgU~< zbDTUzF={{27=lVcJtu1FP|M3>(K*QV#8P=EI=+C6 zYB@#BGR?wSEIR6_1Z*-=3iW|>KATBna8TK0GuiBJo(dKVm0tl3J+C7a3OQ&Up{&tI z-^p|`pG8-nA!iB2s0-zxF^7;QVxp?XLG82{$tBas41t&=FNJKOkWE2#hlQl^&Bfj~q-O*$HVq0ukubx>9?9B(UFLMlq4Kq#P~R+5iKv@AAR#79l4Y&^s4r?5S0 z7sM3Q(4+AWl7~7FREtGu&?iDI9ffS_s*r^`1*8v6$Y-I_!{DPKH3v25Xh4ZZj7*0+ z3Kolr#^XX38MUr7s;s$ZqBa#hHH9ohqgpCPX=n&R6QVL88xOHj^MncvhmMBisMDv3 zIZj>*S;#!7#}T8pPu5s4Q6s?QG1v?ai^pVe)Jbv{>LFy^Fbj1+WFAF;#*7rCDQaVo zVQDPe{x}OY4J=f4XmkM+4M2q~l-o!ekBQnox_zaBMP;HS(^12WdUGnD!a`l7h{;EH zN;oto-+7&!g^Chiz!IQdR>()YBZ<&0NHnCNvS?(~Hj1rN6f8ayJtofNqk=RL7P03*eAMq#cxc>TM(i)sBvqc+j|f2v)S$B4!So!X+N8V$4Q98}4KC=Cn_8t~DW zeJ?7sM?*@UkWNOUSTt-${uAm?(P#o$T2=&{Fa-;hNjmE0&`^ntIz&2?BS!v&kiw#H z7<{p_vx0@3Rn)PdaXFL8Vlq$`qERpE2}QDDCb^FyMiy$i&^wlxs3j6{M07T)E2!wA zkuMv$NTObCa#^T$ijbegqBBGk9x5ENn$+pViuhzP+d|>7GEuf5btx=?h{Z$Q8jpom zE>mbsy^w ztMdO<{=drqSNZ=c|DVGCU-kddgUPN$KZ0h9=3C8|nh!L~G>bIPYvyQXq8a_InhBaw zn#(ljYx-+W(e&0Fs>#-L*X*rnqiL+EMQkHB5$n-he-*KmC?%dD9w%lHcM+3_al|m< zVxpMnXVhZ!*65|t1EVsdBBS$0IYyaAsYY9k5{#mZmKn`A@;91dPE$YT%+_uHg~GG{fzNiH6aJD+~h-`G(UBeGG>i4m2biIvaL3G({^4 zIt-c(8Vz0;+%vdoaM_^1Alu-eL5jg^+)Ir)~D+C(YMn#*Vof0 z=(Xs*)qAP;K(9=%NbkH}j$Wo-s@_(;1idJ|WqR}V{Pm{jdFu_;W9zx=_13e|GuG46 