content
stringlengths
1
1.04M
input_ids
listlengths
1
774k
ratio_char_token
float64
0.38
22.9
token_count
int64
1
774k
import os from src.bin2png import bin2png for root, dirs, files in os.walk('./bin'): for file in files: if not file.endswith('.bin'): continue bin2png(os.path.join(root, file), './png') print('DONE')
[ 11748, 28686, 198, 6738, 12351, 13, 8800, 17, 11134, 1330, 9874, 17, 11134, 628, 198, 1640, 6808, 11, 288, 17062, 11, 3696, 287, 28686, 13, 11152, 7, 4458, 14, 8800, 6, 2599, 198, 220, 220, 220, 329, 2393, 287, 3696, 25, 198, 220, ...
2.122807
114
#!/usr/bin/python3 # -*- coding: utf-8 -*- import re from datetime import datetime import matplotlib.pyplot as plt logfile_live = "./android-live.logcat" logfile_app = "./android-app.logcat" TIMEFORMAT = "\d{2}-\d{2} (?P<datetime>\d{2}:\d{2}:\d{2}\.\d{3})" re_keyevent = re.compile(TIMEFORMAT + ".*action=0x0, flags=0x8, keyCode=21.*") re_l_pause = re.compile(TIMEFORMAT + ".*am_pause_activity.*Launcher\]$") re_s_new_intent = re.compile(TIMEFORMAT + ".*am_new_intent.*com\.stv\.signalsourcemanager.*\]$") re_l_on_pause = re.compile(TIMEFORMAT + ".*am_on_paused_called.*Launcher\]$") re_s_resume = re.compile(TIMEFORMAT + ".*am_resume_activity.*com\.stv\.signalsourcemanager/\.MainActivity]$") re_s_on_resume = re.compile(TIMEFORMAT + ".*am_on_resume_called.*com\.stv\.signalsourcemanager\.MainActivity]$") re_s_draw_ok = re.compile(TIMEFORMAT + ".*ActivityManager: Draw ok$") x_axes = [0, 1, 2, 3, 4, 5, 6] x_descr = ['Input', 'L.pause', 'S.new_inent', 'L.on_pause', 'S.resume', 'S.on_resume', 'S.Draw'] colors = ['r', 'b', 'y', 'g', 'k', 'c', 'm'] plt.figure() plt.xlim(0.0, 7) plt.ylim(0.0, 1500) for i, time_sample in enumerate(parser(logfile_live)): ticks = cal_time_ticks(time_sample) if i >= len(colors): break s = "sample{}".format(i) plt.plot(x_axes, ticks, color=colors[i], linewidth=4.5, linestyle="-", label=s) plt.xticks(x_axes, x_descr, rotation=30) plt.xlabel("EventStatus") plt.ylabel("Time(ms)") plt.title("Start Activity Time") plt.grid(True) plt.legend(loc='upper left') plt.show()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 302, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, ...
2.197425
699
import time import uasyncio ad asyncio from ds18x20 import DS18X20 from homie.node import HomieNode from homie.device import HomieDevice, await_ready_state from homie.property import HomieProperty from homie.constants import FLOAT from machine import Pin from onewire import OneWire if __name__ == "__main__": main()
[ 11748, 640, 198, 11748, 334, 292, 13361, 952, 512, 30351, 952, 198, 198, 6738, 288, 82, 1507, 87, 1238, 1330, 17400, 1507, 55, 1238, 198, 6738, 3488, 494, 13, 17440, 1330, 8074, 494, 19667, 198, 6738, 3488, 494, 13, 25202, 1330, 8074,...
3.114286
105
import torch import torch.nn as nn
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77 ]
3.4
10
import pyxel from pongpy.interfaces.team import Team from pongpy.models.game_info import GameInfo from pongpy.models.state import State class ManualTeam(Team): """ デバッグ用の手動操作チーム。 Pyxel を直接読んでいるのでデバッグ用と以外では利用しない。 """ @property
[ 11748, 12972, 87, 417, 198, 198, 6738, 279, 506, 9078, 13, 3849, 32186, 13, 15097, 1330, 4816, 198, 6738, 279, 506, 9078, 13, 27530, 13, 6057, 62, 10951, 1330, 3776, 12360, 198, 6738, 279, 506, 9078, 13, 27530, 13, 5219, 1330, 1812, ...
1.858209
134
import random from tqdm import tqdm import glob import numpy as np import torch from sparse_ct.reconstructor_2d.n2self import ( N2SelfReconstructor) from sparse_ct.reconstructor_2d.dataset import ( DeepLesionDataset, EllipsesDataset) if __name__ == "__main__": params= {'batch_size': 8, 'shuffle': True, 'num_workers': 8} N_PROJ = 64 pwd_train = '/external/CT_30_000/train' pwd_test = '/external/CT_30_000/test' file_list_train = glob.glob(pwd_train+'/*/*/*/*.png') file_list_test = glob.glob(pwd_test+'/*/*/*/*.png') print("file_list_train", len(file_list_train)) print("file_list_test", len(file_list_test)) # train_loader = torch.utils.data.DataLoader( # DeepLesionDataset( # file_list_train, # return_gt=False, # n_proj=N_PROJ, # img_size=512), # **params # ) # test_loader = torch.utils.data.DataLoader( # DeepLesionDataset( # random.choices(file_list_test, k=1000), # return_gt=True, # n_proj=N_PROJ, # img_size=512), # **params # ) train_loader = torch.utils.data.DataLoader( EllipsesDataset( ellipses_type='train', return_gt=False, n_proj=N_PROJ, img_size=512), **params ) test_loader = torch.utils.data.DataLoader( EllipsesDataset( ellipses_type='validation', return_gt=True, n_proj=N_PROJ, img_size=512), **params ) theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False) recon_n2self = N2SelfReconstructor( 'N2SelfTrained', net='unet', lr=0.0001, n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth', #'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth', learnable_filter=False ) recon_n2self.init_train(theta) recon_n2self._eval(test_loader) for i in range(50): print('--------------- ',i) recon_n2self._train_one_epoch(train_loader, test_loader) recon_n2self._eval(test_loader) recon_n2self._save('epoch_{}.pth'.format(i)) recon_n2self._save('end.pth')
[ 198, 11748, 4738, 198, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 11748, 15095, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 29877, 62, 310, 13, 260, 41571, 273, 62, 17, 67, 13, 77, 17, 944, 1330, 357...
1.9375
1,200
""" Team: d3.js Authors: David Anderson, Duncan Gans, Dustin Hines Course: Nature-Inspired Computation Assignment: Evolutionary Algorithms for MAXSAT: A Comparison of Genetic Algorithms and Population Based Incremental Learning Date: 1 October 2018 Description: This file implements a PBIL approach to finding optima in MAXSAT problems. It is imported and executed by code in genetic_alg.py; this code just contains stuff directly relevant to PBIL. Specifically, it includes three classes: - PBILParameters - PopulationVector - BestSoFar and N functions: - score_pop - fitness - prettify - print_PBIL_solution - test_pbil - pbil The algorithm first generates a "population vector" containing likelihood values for each literal. This vector is used for generating individuals in test populations; for example, if the likelihood for literal 1 is 0.7, the algorithm should generate individuals where lit1 is true 70% of the time. Initially the population vector is entirely 0.5 values - a "flat" distribution. Each time the algorithm iterates, it generates a population of individuals using the population vector and then selects the best N individuals from the population. These individuals are used to update the Population vector with a CSL algorithm. The population is then erased, the population vector is mutated, and a new population is generated. The program keeps track of the best individual found and returns it at the end as the optimum solution found. A full description of the program and philosophy behind it can be found in our report. """ # Required libraries: import random # For testing: import time import parse_input as parse # Path is hardcoded to the problems folder. This folder # should be located adjacent to genetic_alg.py FILE = "problems/" MAXSAT_PROBLEM = [] class PBILParameters: """ Class used to store PBIL cmd line args conveniently. 
""" # A class to look after our population vector: class PopulationVector: ''' ''' ''' A method that allows you to generate a population from a given popVector. Inputs: pop_size, an int representation of how large a population to generate Returns: a population in the form of an array of arrays, where each sub- array is an array of bools corresponding to literal values. ''' ''' A method to update the probability vector based on the N best individuals from a population. Inputs: scored_pop - a sorted array containing tuple elements which contain individuals and their fitness scores ind_to_incl - The N best individuals to consider when updating pop_vector alpha - learning rate for CSL algorithm Returns: none, this operates in place ''' '''Mutate pop vector in place. inputs: Mu := P(mutation). Shift := degree of mutation returns: none ''' class BestSoFar: """ Purpose: keep track of the best solution so far and to provide a method to compare the best so far to an individual. """ def compare_to_best(self, individual, ind_fit, iteration): """ Purpose: Update the best solution so far if the given individual has a better solution. Input: individual to check against best so far, iteration this individual is from, fitness of this individual Return: Boolean indicating whether the given individual was better than the best solution so far. """ if ind_fit > self.fitness: self.individual = individual.copy() self.fitness = ind_fit self.iteration_found = iteration print("Found new best with score {} in generation {}".format( self.fitness, self.iteration_found)) return True return False def score_pop(population, problem): ''' Score the individuals in a population and sort them in descending order by fitness :param population: an array of arrays, where subarrays contain boolean representations of individuals. :param problem: A dictionary representation of a given MAXSAT problem as returned by parse_input.py :return: array of tuples. 
Each tuple contains two elements; the first is an array containing boolean values for each literal, and corresponds to an individual. The second value is an int representation of that individuals fitness; higher is better. ''' scored_generation = [] for individual in population: score = fitness(individual, problem) scored_generation.append((individual, score)) # From https://stackoverflow.com/questions/3121979/: return sorted(scored_generation, key=lambda tup: tup[1], reverse=True) def fitness(individual, problem): """ Score the fitness of an indivdual based on a MAXSAT problem. :param individual: An "individual" represented as an array :param problem: MAXSAT problem to compute fitness in ref to, usually stored as global MAXSAT_PROBLEM :return: An int representation of individuals fitness - higher is better """ fit_score = 0 for clause in problem["clauses"]: check = False for literal in clause: if literal > 0: check = check or individual[literal - 1] else: check = check or not individual[abs(literal) - 1] if check: fit_score += 1 return fit_score def prettify(individual): """ Formats an array representation of an individual s.t. it can be printed easily. :param individual: an array representation of an individual :return: a string representation of that same individual """ pretty = "" ith_literal = 1 ten_per_line = 0 for literal in individual: pretty = pretty + "L" + str(ith_literal) + ": " + str(literal) + " " ith_literal = ith_literal + 1 ten_per_line = ten_per_line + 1 if ten_per_line > 10: ten_per_line = 0 pretty = pretty + "\n" return pretty def print_PBIL_solution(curr_best, parameters, problem): """ Purpose: Print output in our nice lil standardized way; see writeup :param curr_best: List representing the best solution :param parameters: Problem parameters we got earlier :return: None, this is a printing function. 
""" print("File: {}".format(parameters.file_name)) num_literals = problem["num_literals"] num_clauses = problem["num_clauses"] print("Literals count: {}\nClauses count: {}".format(num_literals, num_clauses)) fitness_div_clauses = curr_best.fitness / problem["num_clauses"] percentage_correct = round(fitness_div_clauses * 100, 1) print("Best individual scored {} ({}%)".format(curr_best.fitness, percentage_correct)) print("Difference: {}".format(problem["num_clauses"] - curr_best.fitness)) print("Solution:\n{}".format(prettify(curr_best.individual))) print("Found in iteration {}".format(curr_best.iteration_found)) ''' def test_pbil(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm): """ Used to test in conjuntion with the test module. Not important in final code """ global MAXSAT_PROBLEM MAXSAT_PROBLEM = parse.return_problem("testy/" + file_name) parameters = PBILParameters(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm) start = time.time() solution = pbil(MAXSAT_PROBLEM, parameters) finished = time.time() run_time = finished - start #time taken, how good the solution is, generation best solution return (solution, run_time) ''' def pbil(problem, parameters): """ Purpose: This is a function implementing PBIL optimization of MAXSAT problems :param problem: the MAXSAT problem to optimize, as parsed in parse_input.py :param parameters: Problem parameters. 
Acquired in main of genetic_alg.py :return: Returns the best individual found """ pop_vector = PopulationVector(problem["num_literals"]) curr_best = BestSoFar([], 0) # The following is the actual PBIL algorithm: iteration = 0 while iteration < parameters.num_generations: print("Generation: {}".format(iteration)) nth_pop = pop_vector.generate_population(parameters.pop_size) nth_pop = score_pop(nth_pop, problem) # Initialize curr_best: if iteration == 0: curr_best = BestSoFar(nth_pop, iteration) # Pull out the best individual and update best_so_far if it's better curr_best.compare_to_best(nth_pop[0][0], nth_pop[0][1], iteration) # Update pop vector using CSL approach described in paper: pop_vector.update_vector(nth_pop, parameters.ind_to_incl, parameters.alpha) pop_vector.mutate_vector(parameters.mutation_prob, parameters.mu_shift) iteration += 1 # Final population vector might approximate correct solution. # So, we round it out and see if it's better than individuals we've already # checked. final_pop = [round(x) for x in pop_vector.vector] curr_best.compare_to_best(final_pop, fitness(final_pop, problem), parameters.num_generations) # Print and return the best individual found: print_PBIL_solution(curr_best, parameters, problem) return curr_best
[ 37811, 198, 15592, 25, 288, 18, 13, 8457, 198, 30515, 669, 25, 3271, 9918, 11, 18625, 402, 504, 11, 37616, 367, 1127, 198, 49046, 25, 10362, 12, 41502, 1202, 22476, 341, 198, 8021, 16747, 25, 15815, 560, 978, 7727, 907, 329, 25882, ...
2.888453
3,317
from abc import ABC, abstractmethod
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 628, 628, 628, 198 ]
3.5
12
from __future__ import division, print_function import sys from dark.btop import countGaps def _debugPrint(hsp, queryLen, localDict, msg=''): """ Print debugging information showing the local variables used during a call to normalizeHSP and the hsp and then raise an C{AssertionError}. @param hsp: The HSP C{dict} passed to normalizeHSP. @param queryLen: the length of the query sequence. @param localDict: A C{dict} of local variables (as produced by locals()). @param msg: A C{str} message to raise C{AssertionError} with. @raise AssertionError: unconditionally. """ print('normalizeHSP error:', file=sys.stderr) print(' queryLen: %d' % queryLen, file=sys.stderr) print(' Original HSP:', file=sys.stderr) for attr in ['bits', 'btop', 'expect', 'frame', 'query_end', 'query_start', 'sbjct', 'query', 'sbjct_end', 'sbjct_start']: print(' %s: %r' % (attr, hsp[attr]), file=sys.stderr) print(' Local variables:', file=sys.stderr) for var in sorted(localDict): if var != 'hsp': print(' %s: %s' % (var, localDict[var]), file=sys.stderr) raise AssertionError(msg) def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd, queryStartInSubject, queryEndInSubject, hsp, queryLen, subjectGaps, queryGaps, localDict): """ Perform some sanity checks on an HSP. Call _debugPrint on any error. @param subjectStart: The 0-based C{int} start offset of the match in the subject. @param subjectEnd: The 0-based C{int} end offset of the match in the subject. @param queryStart: The 0-based C{int} start offset of the match in the query. @param queryEnd: The 0-based C{int} end offset of the match in the query. @param queryStartInSubject: The 0-based C{int} offset of where the query starts in the subject. @param queryEndInSubject: The 0-based C{int} offset of where the query ends in the subject. @param hsp: The HSP C{dict} passed to normalizeHSP. @param queryLen: the C{int} length of the query sequence. @param subjectGaps: the C{int} number of gaps in the subject. 
@param queryGaps: the C{int} number of gaps in the query. @param localDict: A C{dict} of local variables from our caller (as produced by locals()). """ # Subject indices must always be ascending. if subjectStart >= subjectEnd: _debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd') subjectMatchLength = subjectEnd - subjectStart queryMatchLength = queryEnd - queryStart # Sanity check that the length of the matches in the subject and query # are identical, taking into account gaps in both. subjectMatchLengthWithGaps = subjectMatchLength + subjectGaps queryMatchLengthWithGaps = queryMatchLength + queryGaps if subjectMatchLengthWithGaps != queryMatchLengthWithGaps: _debugPrint(hsp, queryLen, localDict, 'Including gaps, subject match length (%d) != Query match ' 'length (%d)' % (subjectMatchLengthWithGaps, queryMatchLengthWithGaps)) if queryStartInSubject > subjectStart: _debugPrint(hsp, queryLen, localDict, 'queryStartInSubject (%d) > subjectStart (%d)' % (queryStartInSubject, subjectStart)) if queryEndInSubject < subjectEnd: _debugPrint(hsp, queryLen, localDict, 'queryEndInSubject (%d) < subjectEnd (%d)' % (queryEndInSubject, subjectEnd)) def normalizeHSP(hsp, queryLen, diamondTask): """ Examine an HSP and return information about where the query and subject match begins and ends. Return a dict with keys that allow the query to be displayed against the subject. The returned readStartInSubject and readEndInSubject indices are offsets into the subject. I.e., they indicate where in the subject the query falls. In the returned object, all indices are suitable for Python string slicing etc. We must be careful to convert from the 1-based offsets found in DIAMOND output properly. hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates negative or positive sense (i.e., the direction of reading through the query to get the alignment). 
The frame value is the nucleotide match offset modulo 3, plus one (i.e., it tells us which of the 3 possible query reading frames was used in the match). NOTE: the returned readStartInSubject value may be negative. We consider the subject sequence to start at offset 0. So if the query string has sufficient additional nucleotides before the start of the alignment match, it may protrude to the left of the subject. Similarly, the returned readEndInSubject can be greater than the subjectEnd. @param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record. All passed offsets are 1-based. @param queryLen: the length of the query sequence. @param diamondTask: The C{str} command-line matching algorithm that was run (either 'blastx' or 'blastp'). @return: A C{dict} with C{str} keys and C{int} offset values. Keys are readStart readEnd readStartInSubject readEndInSubject subjectStart subjectEnd The returned offset values are all zero-based. """ queryGaps, subjectGaps = countGaps(hsp['btop']) # Make some variables using Python's standard string indexing (start # offset included, end offset not). No calculations in this function # are done with the original 1-based HSP variables. queryStart = hsp['query_start'] - 1 queryEnd = hsp['query_end'] subjectStart = hsp['sbjct_start'] - 1 subjectEnd = hsp['sbjct_end'] queryReversed = hsp['frame'] < 0 # Query offsets must be ascending, unless we're looking at blastx output # and the query was reversed for the match. if queryStart >= queryEnd: if diamondTask == 'blastx' and queryReversed: # Compute new query start and end indices, based on their # distance from the end of the string. # # Above we took one off the start index, so we need to undo # that (because the start is actually the end). We didn't take # one off the end index, and need to do that now (because the # end is actually the start). 
queryStart = queryLen - (queryStart + 1) queryEnd = queryLen - (queryEnd - 1) else: _debugPrint(hsp, queryLen, locals(), 'queryStart >= queryEnd') if diamondTask == 'blastx': # In DIAMOND blastx output, subject offsets are based on protein # sequence length but queries (and the reported offsets) are # nucleotide. Convert the query offsets to protein because we will # plot against the subject (protein). # # Convert queryLen and the query nucleotide start and end offsets # to be valid for the query after translation to AAs. When # translating, DIAMOND may ignore some nucleotides at the start # and/or the end of the original DNA query. At the start this is # due to the frame in use, and at the end it is due to always using # three nucleotides at a time to form codons. # # So, for example, a query of 6 nucleotides that is translated in # frame 2 (i.e., the translation starts from the second nucleotide) # will have length 1 as an AA sequence. The first nucleotide is # ignored due to the frame and the last two due to there not being # enough final nucleotides to make another codon. # # In the following, the subtraction accounts for the first form of # loss and the integer division for the second. initiallyIgnored = abs(hsp['frame']) - 1 queryLen = (queryLen - initiallyIgnored) // 3 queryStart = (queryStart - initiallyIgnored) // 3 queryEnd = (queryEnd - initiallyIgnored) // 3 # unmatchedQueryLeft is the number of query bases that will extend # to the left of the start of the subject in our plots. unmatchedQueryLeft = queryStart # Set the query offsets into the subject. 
queryStartInSubject = subjectStart - unmatchedQueryLeft queryEndInSubject = queryStartInSubject + queryLen + queryGaps _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd, queryStartInSubject, queryEndInSubject, hsp, queryLen, subjectGaps, queryGaps, locals()) return { 'readStart': queryStart, 'readEnd': queryEnd, 'readStartInSubject': queryStartInSubject, 'readEndInSubject': queryEndInSubject, 'subjectStart': subjectStart, 'subjectEnd': subjectEnd, }
[ 6738, 11593, 37443, 834, 1330, 7297, 11, 3601, 62, 8818, 198, 11748, 25064, 198, 198, 6738, 3223, 13, 65, 4852, 1330, 954, 38, 1686, 628, 198, 4299, 4808, 24442, 18557, 7, 71, 2777, 11, 12405, 30659, 11, 1957, 35, 713, 11, 31456, 28...
2.753837
3,258
# -*- coding: utf-8 -*- """Various utilities.""" import json import logging import os import random import string import time from collections import defaultdict from datetime import date, datetime, timedelta from itertools import filterfalse, groupby from urllib.parse import quote, urlencode, urlparse import emails import flask import requests import yaml from flask import request, url_for from flask_login import current_user from html2text import html2text from jinja2 import Template from peewee import JOIN from yaml.dumper import Dumper from yaml.representer import SafeRepresenter from orcid_api_v3.rest import ApiException from . import app, db, orcid_client, rq from .models import ( AFFILIATION_TYPES, Affiliation, AffiliationRecord, Delegate, FundingInvitee, FundingRecord, Invitee, Log, MailLog, MessageRecord, NestedDict, OrcidApiCall, OrcidToken, Organisation, OrgInvitation, OtherIdRecord, PartialDate, PeerReviewExternalId, PeerReviewInvitee, PeerReviewRecord, PropertyRecord, RecordInvitee, ResourceRecord, Role, Task, TaskType, User, UserInvitation, UserOrg, WorkInvitee, WorkRecord, get_val, readup_file, ) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) EDU_CODES = {"student", "edu", "education"} EMP_CODES = {"faculty", "staff", "emp", "employment"} DST_CODES = {"distinction", "dist", "dst"} INV_POS_CODES = {"invited-position", "position"} QUA_CODES = {"qualification", "qua"} MEM_CODES = {"membership", "mem"} SER_CODES = {"service", "ser"} ENV = app.config.get("ENV") EXTERNAL_SP = app.config.get("EXTERNAL_SP") def get_next_url(endpoint=None): """Retrieve and sanitize next/return URL.""" _next = ( request.args.get("next") or request.args.get("_next") or request.args.get("url") or request.referrer ) if not _next and endpoint: _next = url_for(endpoint) if _next: if _next.startswith("/"): return _next try: csrf = urlparse(_next).netloc if ( csrf == urlparse(app.config.get("APP_URL")).netloc or 
csrf.startswith("127.0.") or csrf in app.config.get("CSRF_DOMAINS") ): return _next except: pass try: if Delegate.select().where(Delegate.hostname ** f"%{urlparse(_next).netloc}%").exists(): return _next except: pass return None def is_valid_url(url): """Validate URL (expexted to have a path).""" try: result = urlparse(url) return result.scheme and result.netloc and (result.path or result.path == "") except: return False def read_uploaded_file(form): """Read up the whole content and deconde it and return the whole content.""" if "file_" not in request.files: return content = readup_file(request.files[form.file_.name]) if content: return content raise ValueError("Unable to decode encoding.") def send_email( template, recipient, cc_email=None, sender=(app.config.get("APP_NAME"), app.config.get("MAIL_DEFAULT_SENDER")), reply_to=app.config.get("MAIL_SUPPORT_ADDRESS"), subject=None, base=None, logo=None, org=None, **kwargs, ): """Send an email, acquiring its payload by rendering a jinja2 template. :type template: :class:`str` :param subject: the subject of the email :param base: the base template of the email messagess :param template: name of the template file in ``templates/emails`` to use :type recipient: :class:`tuple` (:class:`str`, :class:`str`) :param recipient: 'To' (name, email) or just an email address :type sender: :class:`tuple` (:class:`str`, :class:`str`) :param sender: 'From' (name, email) :param org: organisation on which behalf the email is sent * `recipient` and `sender` are made available to the template as variables * In any email tuple, name may be ``None`` * The subject is retrieved from a sufficiently-global template variable; typically set by placing something like ``{% set subject = "My Subject" %}`` at the top of the template used (it may be inside some blocks (if, elif, ...) but not others (rewrap, block, ...). If it's not present, it defaults to "My Subject". 
* With regards to line lengths: :class:`email.mime.text.MIMEText` will (at least, in 2.7) encode the body of the text in base64 before sending it, text-wrapping the base64 data. You will therefore not have any problems with SMTP line length restrictions, and any concern to line lengths is purely aesthetic or to be nice to the MUA. :class:`RewrapExtension` may be used to wrap blocks of text nicely. Note that ``{{ variables }}`` in manually wrapped text can cause problems! """ if not org and current_user and not current_user.is_anonymous: org = current_user.organisation app = flask.current_app jinja_env = flask.current_app.jinja_env if logo is None: if org and org.logo: logo = url_for("logo_image", token=org.logo.token, _external=True) else: logo = url_for("static", filename="images/banner-small.png", _external=True) if not base and org: if org.email_template_enabled and org.email_template: base = org.email_template if not base: base = app.config.get("DEFAULT_EMAIL_TEMPLATE") jinja_env = jinja_env.overlay(autoescape=False) if "\n" not in template and template.endswith(".html"): template = jinja_env.get_template(template) else: template = Template(template) kwargs["sender"] = _jinja2_email(*sender) if isinstance(recipient, str): recipient = (recipient, recipient) kwargs["recipient"] = _jinja2_email(*recipient) if subject is not None: kwargs["subject"] = subject rendered = template.make_module(vars=kwargs) if subject is None: subject = getattr(rendered, "subject", "Welcome to the NZ ORCID Hub") html_msg = base.format( EMAIL=kwargs["recipient"]["email"], SUBJECT=subject, MESSAGE=str(rendered), LOGO=logo, BASE_URL=url_for("index", _external=True)[:-1], INCLUDED_URL=kwargs.get("invitation_url", "") or kwargs.get("include_url", ""), ) plain_msg = html2text(html_msg) msg = emails.html( subject=subject, mail_from=(app.config.get("APP_NAME", "ORCID Hub"), app.config.get("MAIL_DEFAULT_SENDER")), html=html_msg, text=plain_msg, ) dkim_key_path = app.config.get("DKIM_KEY_PATH") 
dkim_domain = app.config.get("MAIL_DKIM_DOMAIN") dkim_selector = app.config.get("MAIL_DKIM_SELECTOR") if dkim_key_path and os.path.exists(dkim_key_path): with open(dkim_key_path) as key_file: msg.dkim(key=key_file, domain=dkim_domain, selector=dkim_selector) elif dkim_key_path: raise Exception(f"Cannot find DKIM key file: {dkim_key_path}!") if cc_email: msg.cc.append(cc_email) msg.mail_to.append(recipient) # Unsubscribe link: token = new_invitation_token(length=10) unsubscribe_url = url_for("unsubscribe", token=token, _external=True) headers = { "x-auto-response-suppress": "DR, RN, NRN, OOF", "auto-submitted": "auto-generated", "List-Unsubscribe": f"<{unsubscribe_url}>", } if reply_to: headers["reply-to"] = reply_to msg.set_headers(headers) smtp = dict(host=app.config["MAIL_SERVER"], port=app.config["MAIL_PORT"]) if "MAIL_PORT" in app.config: smtp["port"] = app.config["MAIL_PORT"] if "MAIL_USE_TLS" in app.config: smtp["tls"] = app.config["MAIL_USE_TLS"] if "MAIL_USERNAME" in app.config: smtp["user"] = app.config["MAIL_USERNAME"] if "MAIL_PASSWORD" in app.config: smtp["password"] = app.config["MAIL_PASSWORD"] resp = msg.send(smtp=smtp) MailLog.create( org=org, recipient=recipient[1], sender=sender[1], subject=subject, was_sent_successfully=resp.success, error=resp.error, token=token, ) if not resp.success: raise Exception( f"Failed to email the message: {resp.error}. Please contact a Hub administrator!" 
) def new_invitation_token(length=5): """Generate a unique invitation token.""" while True: token = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) if not ( UserInvitation.select().where(UserInvitation.token == token).exists() or OrgInvitation.select().where(OrgInvitation.token == token).exists() or MailLog.select().where(MailLog.token == token).exists() ): break return token def append_qs(url, **qs): """Append new query strings to an arbitraty URL.""" return url + ("&" if urlparse(url).query else "?") + urlencode(qs, doseq=True) def track_event(category, action, label=None, value=0): """Track application events with Google Analytics.""" ga_tracking_id = app.config.get("GA_TRACKING_ID") if not ga_tracking_id: return data = { "v": "1", # API Version. "tid": ga_tracking_id, # Tracking ID / Property ID. # Anonymous Client Identifier. Ideally, this should be a UUID that # is associated with particular user, device, or browser instance. "cid": current_user.uuid, "t": "event", # Event hit type. "ec": category, # Event category. "ea": action, # Event action. "el": label, # Event label. "ev": value, # Event value, must be an integer } response = requests.post("http://www.google-analytics.com/collect", data=data) # If the request fails, this will raise a RequestException. Depending # on your application's needs, this may be a non-error and can be caught # by the caller. 
response.raise_for_status() # Returning response only for test, but can be used in application for some other reasons return response def set_server_name(): """Set the server name for batch processes.""" if not app.config.get("SERVER_NAME"): if EXTERNAL_SP: app.config["SERVER_NAME"] = "127.0.0.1:5000" else: app.config["SERVER_NAME"] = ( "orcidhub.org.nz" if ENV == "prod" else ENV + ".orcidhub.org.nz" ) def is_org_rec(org, rec): """Test if the record was authoritized by the organisation.""" client_id = org.orcid_client_id source_client_id = rec.get("source").get("source-client-id") return source_client_id and source_client_id.get("path") == client_id def create_or_update_work(user, org_id, records, *args, **kwargs): """Create or update work record of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(id=org_id) api = orcid_client.MemberAPIV3(org, user) profile_record = api.get_record() if profile_record: activities = profile_record.get("activities-summary") works = [] for r in activities.get("works").get("group"): ws = r.get("work-summary")[0] if is_org_rec(org, ws): works.append(ws) taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code} def match_put_code(records, record, invitee): """Match and assign put-code to a single work record and the existing ORCID records.""" if invitee.put_code: return for r in records: put_code = r.get("put-code") if put_code in taken_put_codes: continue if ( record.title and record.type and (r.get("title", "title", "value", default="") or "").lower() == record.title.lower() and (r.get("type", default="") or "").lower() == record.type.lower() ): invitee.put_code = put_code invitee.visibility = r.get("visibility") invitee.save() taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the work record " f"(ID: {record.id}, Task ID: {record.task_id})" ) break for task_by_user in records: wr = task_by_user.record wi = 
task_by_user.record.invitee match_put_code(works, wr, wi) for task_by_user in records: wi = task_by_user.record.invitee try: put_code, orcid, created, visibility = api.create_or_update_work(task_by_user) if created: wi.add_status_line("Work record was created.") else: wi.add_status_line("Work record was updated.") wi.orcid = orcid wi.put_code = put_code if wi.visibility != visibility: wi.visibility = visibility except Exception as ex: logger.exception(f"For {user} encountered exception") exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex) wi.add_status_line(f"Exception occured processing the record: {exception_msg}.") wr.add_status_line( f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}." ) finally: wi.processed_at = datetime.utcnow() wr.save() wi.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return def create_or_update_peer_review(user, org_id, records, *args, **kwargs): """Create or update peer review record of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(id=org_id) api = orcid_client.MemberAPIV3(org, user) profile_record = api.get_record() if profile_record: peer_reviews = [ s for ag in profile_record.get("activities-summary", "peer-reviews", "group", default=[]) for pg in ag.get("peer-review-group", default=[]) for s in pg.get("peer-review-summary", default=[]) if is_org_rec(org, s) ] taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code} def match_put_code(records, record, invitee, taken_external_id_values): """Match and assign put-code to a single peer review record and the existing ORCID records.""" if invitee.put_code: return for r in records: put_code = r.get("put-code") external_id_value = ( r.get("external-ids").get("external-id")[0].get("external-id-value") if 
r.get("external-ids") and r.get("external-ids").get("external-id") and r.get("external-ids").get("external-id")[0].get("external-id-value") else None ) if put_code in taken_put_codes: continue if ( record.review_group_id and external_id_value in taken_external_id_values and (r.get("review-group-id", default="") or "").lower() == record.review_group_id.lower() ): # noqa: E127 invitee.put_code = put_code invitee.save() taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the peer review record " f"(ID: {record.id}, Task ID: {record.task_id})" ) break for task_by_user in records: pr = task_by_user.record pi = pr.invitee external_ids = PeerReviewExternalId.select().where( PeerReviewExternalId.record_id == pr.id ) taken_external_id_values = {ei.value for ei in external_ids if ei.value} match_put_code(peer_reviews, pr, pi, taken_external_id_values) for task_by_user in records: pr = task_by_user.record pi = pr.invitee try: put_code, orcid, created, visibility = api.create_or_update_peer_review( task_by_user ) if created: pi.add_status_line("Peer review record was created.") else: pi.add_status_line("Peer review record was updated.") pi.orcid = orcid pi.put_code = put_code if pi.visibility != visibility: pi.visibility = visibility except Exception as ex: logger.exception(f"For {user} encountered exception") exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex) pi.add_status_line(f"Exception occured processing the record: {exception_msg}.") pr.add_status_line( f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}." 
) finally: pi.processed_at = datetime.utcnow() pr.save() pi.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return def create_or_update_funding(user, org_id, records, *args, **kwargs): """Create or update funding record of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(org_id) api = orcid_client.MemberAPIV3(org, user) profile_record = api.get_record() if profile_record: activities = profile_record.get("activities-summary") fundings = [] for r in activities.get("fundings").get("group"): fs = r.get("funding-summary")[0] if is_org_rec(org, fs): fundings.append(fs) taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code} def match_put_code(records, record, invitee): """Match and asign put-code to a single funding record and the existing ORCID records.""" if invitee.put_code: return for r in records: put_code = r.get("put-code") if put_code in taken_put_codes: continue if ( record.title and record.type and record.org_name and (r.get("title", "title", "value", default="") or "").lower() == record.title.lower() and (r.get("type", default="") or "").lower() == record.type.lower() and (r.get("organization", "name", default="") or "").lower() == record.org_name.lower() ): invitee.put_code = put_code invitee.visibility = r.get("visibility") invitee.save() taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the funding record " f"(ID: {record.id}, Task ID: {record.task_id})" ) break for task_by_user in records: fr = task_by_user.record fi = task_by_user.record.invitee match_put_code(fundings, fr, fi) for task_by_user in records: fi = task_by_user.record.invitee try: put_code, orcid, created, visibility = api.create_or_update_funding(task_by_user) if created: fi.add_status_line("Funding record was created.") else: 
fi.add_status_line("Funding record was updated.") fi.orcid = orcid fi.put_code = put_code if fi.visibility != visibility: fi.visibility = visibility except Exception as ex: logger.exception(f"For {user} encountered exception") if ex and hasattr(ex, "body"): exception_msg = json.loads(ex.body) else: exception_msg = str(ex) fi.add_status_line(f"Exception occured processing the record: {exception_msg}.") fr.add_status_line( f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}." ) finally: fi.processed_at = datetime.utcnow() fr.save() fi.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return def create_or_update_resources(user, org_id, records, *args, **kwargs): """Create or update research resource record of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(org_id) token = ( OrcidToken.select(OrcidToken.access_token) .where( OrcidToken.user_id == user.id, OrcidToken.org_id == org.id, OrcidToken.scopes.contains("/activities/update"), ) .first() ) api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token) resources = api.get_resources() if resources: resources = resources.get("group") resources = [ r for r in resources if any( rr.get("source", "source-client-id", "path") == org.orcid_client_id for rr in r.get("research-resource-summary") ) ] taken_put_codes = {r.record.put_code for r in records if r.record.put_code} def match_record(records, record): """Match and assign put-code to the existing ORCID records.""" if record.put_code: return record.put_code for r in records: if all( eid.get("external-id-value") != record.proposal_external_id_value for eid in r.get("external-ids", "external-id") ): continue for rr in r.get("research-resource-summary"): put_code = rr.get("put-code") # if all(eid.get("external-id-value") != 
record.external_id_value # for eid in rr.get("proposal", "external-ids", "external-id")): # continue if put_code in taken_put_codes: continue record.put_code = put_code if not record.visibility: record.visibility = r.get("visibility") if not record.display_index: record.display_index = r.get("display-index") taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the other id record " f"(ID: {record.id}, Task ID: {record.task_id})" ) return put_code for t in records: try: rr = t.record put_code = match_record(resources, rr) if put_code: resp = api.put(f"research-resource/{put_code}", rr.orcid_research_resource) else: resp = api.post("research-resource", rr.orcid_research_resource) if resp.status == 201: orcid, put_code = resp.headers["Location"].split("/")[-3::2] rr.add_status_line("ORCID record was created.") else: orcid = user.orcid rr.add_status_line("ORCID record was updated.") if not rr.put_code and put_code: rr.put_code = int(put_code) if not rr.orcid and orcid: rr.orcid = orcid visibility = ( json.loads(resp.data).get("visibility") if hasattr(resp, "data") and resp.data else None ) if rr.visibility != visibility: rr.visibility = visibility except ApiException as ex: if ex.status == 404: rr.put_code = None elif ex.status == 401: token.delete_instance() logger.exception(f"Exception occured {ex}") rr.add_status_line(f"ApiException: {ex}") except Exception as ex: logger.exception(f"For {user} encountered exception") rr.add_status_line(f"Exception occured processing the record: {ex}.") finally: rr.processed_at = datetime.utcnow() rr.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return def create_or_update_record_from_messages(records, *args, **kwargs): """Create or update ORCID record of a user form the given message. :param records: iterator with records for a single profile. 
""" records = list(unique_everseen(records, key=lambda t: t.record.id)) if not records: return # add a few shortcuts: for r in records: r.record.msg = json.loads(r.record.message, object_pairs_hook=NestedDict) r.record.invitee = r.record.ri.invitee rec0 = records[0] org = rec0.org user = rec0.record.invitee.user token = user.token api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token) resources = api.get_resources() if resources: resources = resources.get("group") resources = [ r for r in resources if any( rr.get("source", "source-client-id", "path") == org.orcid_client_id for rr in r.get("research-resource-summary") ) ] taken_put_codes = { r.record.ri.invitee.put_code for r in records if r.record.ri.invitee.put_code } def match_record(resource, record): """Match and assign put-code to the existing ORCID records.""" put_code = record.invitee.put_code if put_code: for rr in (rr for r in resources for rr in r.get("research-resource-summary")): if rr.get("put-code") == put_code: record.invitee.visibility = rr.get("visibility") break return put_code external_ids = record.msg.get("proposal", "external-ids", "external-id", default=[]) for r in resource: res_external_ids = r.get("external-ids", "external-id") if all( eid.get("external-id-value") != rec_eid.get("external-id-value") for eid in res_external_ids for rec_eid in external_ids ): continue for rr in r.get("research-resource-summary"): proposal_external_ids = record.msg.get( "proposal", "external-ids", "external-id" ) if all( eid.get("external-id-value") != rec_eid.get("external-id-value") for rec_eid in proposal_external_ids for eid in rr.get("proposal", "external-ids", "external-id") ): continue put_code = rr.get("put-code") if put_code in taken_put_codes: continue record.invitee.put_code = put_code record.ri.invitee.visibility = rr.get("visibility") taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the record " f"(ID: {record.id}, Task ID: {record.task_id})" 
) return put_code for t in records: try: rr = t.record if not rr.invitee.orcid: rr.invitee.orcid = user.orcid put_code = match_record(resources, rr) if "visibility" in rr.msg: del rr.msg["visibility"] if put_code: rr.msg["put-code"] = put_code resp = api.put(f"research-resource/{put_code}", rr.msg) else: resp = api.post("research-resource", rr.msg) if resp.status == 201: rr.invitee.add_status_line("ORCID record was created.") else: rr.invitee.add_status_line("ORCID record was updated.") if not put_code: location = resp.headers["Location"] rr.invitee.put_code = location.split("/")[-1] rec = api.get(location) rr.invitee.visibility = rec.json.get("visibility") except ApiException as ex: if ex.status == 404: rr.invitee.put_code = None elif ex.status == 401: token.delete_instance() logger.exception(f"Exception occured {ex}") rr.invitee.add_status_line(f"ApiException: {ex}") except Exception as ex: logger.exception(f"For {user} encountered exception") rr.invitee.add_status_line(f"Exception occured processing the record: {ex}.") finally: rr.invitee.processed_at = datetime.utcnow() rr.invitee.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return @rq.job(timeout=300) def send_user_invitation( inviter, org, email=None, first_name=None, last_name=None, user=None, task_id=None, task_type=None, affiliation_types=None, orcid=None, department=None, organisation=None, city=None, region=None, country=None, course_or_role=None, start_date=None, end_date=None, affiliations=Affiliation.NONE, disambiguated_id=None, disambiguation_source=None, cc_email=None, invitation_template=None, **kwargs, ): """Send an invitation to join ORCID Hub logging in via ORCID.""" try: if not email: if user and user.email: email = user.email else: raise Exception( "Failed to find the email address for the record. Cannot send an invitation." 
) else: email = email.lower() if isinstance(inviter, int): inviter = User.get(id=inviter) if isinstance(org, int): org = Organisation.get(id=org) if isinstance(start_date, list): start_date = PartialDate(*start_date) if isinstance(end_date, list): end_date = PartialDate(*end_date) set_server_name() task_type = task_type or (Task.get(task_id).task_type if task_id else TaskType.AFFILIATION) if not invitation_template: if task_type != TaskType.AFFILIATION: invitation_template = f"email/{task_type.name.lower()}_invitation.html" else: invitation_template = "email/researcher_invitation.html" if task_type == TaskType.AFFILIATION: logger.info( f"*** Sending an invitation to '{first_name} {last_name} <{email}>' " f"submitted by {inviter} of {org} for affiliations: {affiliation_types}" ) else: logger.info( f"*** Sending an invitation to '{first_name} <{email}>' " f"submitted by {inviter} of {org}" ) email = email.lower() if not user or not user.id: user, user_created = User.get_or_create(email=email) if user_created: user.organisation = org user.created_by = inviter.id user.first_name = first_name or "N/A" user.last_name = last_name or "N/A" else: user.updated_by = inviter.id if first_name and not user.first_name: user.first_name = first_name if last_name and not user.last_name: user.last_name = last_name if not first_name: first_name = user.first_name if not last_name: last_name = user.last_name user.roles |= Role.RESEARCHER token = new_invitation_token() with app.app_context(): invitation_url = flask.url_for( "orcid_login", invitation_token=token, _external=True, _scheme="http" if app.debug else "https", ) send_email( invitation_template, recipient=(org.name if org else user.organisation.name, user.email), reply_to=f"{inviter.name} <{inviter.email}>", cc_email=cc_email, invitation_url=invitation_url, org_name=org.name if org else user.organisation.name, org=org, user=user, ) user.save() user_org, user_org_created = UserOrg.get_or_create(user=user, org=org) if 
user_org_created: user_org.created_by = inviter.id if not affiliations and affiliation_types: if affiliation_types & EMP_CODES: affiliations |= Affiliation.EMP if affiliation_types & EDU_CODES: affiliations |= Affiliation.EDU user_org.affiliations = affiliations else: user_org.updated_by = inviter.id user_org.save() ui = UserInvitation.create( task_id=task_id, invitee_id=user.id, inviter_id=inviter.id, org=org, email=email, first_name=first_name, last_name=last_name, orcid=orcid, department=department, organisation=organisation, city=city, region=region, country=country, course_or_role=course_or_role, start_date=start_date, end_date=end_date, affiliations=affiliations, disambiguated_id=disambiguated_id, disambiguation_source=disambiguation_source, token=token, ) status = "The invitation sent at " + datetime.utcnow().isoformat(timespec="seconds") if task_type == TaskType.AFFILIATION: ( AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status) .where( AffiliationRecord.status.is_null(False), AffiliationRecord.task_id == task_id, AffiliationRecord.email == email, ) .execute() ) ( AffiliationRecord.update(status=status) .where( AffiliationRecord.task_id == task_id, AffiliationRecord.status.is_null(), AffiliationRecord.email == email, ) .execute() ) elif task_type in [TaskType.FUNDING, TaskType.WORK, TaskType.PEER_REVIEW]: task = Task.get(task_id) for record in task.records.where(task.record_model.is_active): invitee_class = record.invitees.model invitee_class.update(status=status).where( invitee_class.record == record.id, invitee_class.email == email ).execute() return ui except Exception as ex: logger.exception(f"Exception occured while sending mail {ex}") raise def unique_everseen(iterable, key=None): """List unique elements, preserving order. Remember all elements ever seen. 
The snippet is taken form https://docs.python.org/3.6/library/itertools.html#itertools-recipes >>> unique_everseen('AAAABBBCCDAABBB') A B C D >>> unique_everseen('ABBCcAD', str.lower) A B C D """ seen = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element def create_or_update_properties(user, org_id, records, *args, **kwargs): """Create or update researcher property records of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(org_id) profile_record = None token = ( OrcidToken.select(OrcidToken.access_token) .where( OrcidToken.user_id == user.id, OrcidToken.org_id == org.id, OrcidToken.scopes.contains("/person/update"), ) .first() ) if token: api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token) profile_record = api.get_record() if profile_record: activities = profile_record.get("person") researcher_urls = [ r for r in (activities.get("researcher-urls", "researcher-url", default=[])) if is_org_rec(org, r) ] other_names = [ r for r in (activities.get("other-names", "other-name", default=[])) if is_org_rec(org, r) ] keywords = [ r for r in (activities.get("keywords", "keyword", default=[])) if is_org_rec(org, r) ] countries = [ r for r in (activities.get("addresses", "address", default=[])) if is_org_rec(org, r) ] taken_put_codes = {r.record.put_code for r in records if r.record.put_code} def match_put_code(record): """Match and assign put-code to the existing ORCID records.""" for r in ( researcher_urls if record.type == "URL" else other_names if record.type == "NAME" else countries if record.type == "COUNTRY" else keywords ): try: orcid, put_code = r.get("path").split("/")[-3::2] except Exception: app.logger.exception("Failed to get ORCID iD/put-code from the response.") raise Exception("Failed to get ORCID iD/put-code from the 
response.") if record.put_code: return if put_code in taken_put_codes: continue if ( record.value and ( record.name and record.type == "URL" and (r.get("url-name", default="") or "").lower() == record.name.lower() and (r.get("url", "value", default="") or "").lower() == record.value.lower() ) or ( record.type in ["NAME", "KEYWORD"] and (r.get("content", default="") or "").lower() == record.value.lower() ) or ( record.type == "COUNTRY" and (r.get("country", "value", default="") or "").lower() == record.value.lower() ) ): # noqa: E129 record.put_code = put_code record.orcid = orcid record.visibility = r.get("visibility") if not record.display_index: record.display_index = r.get("display-index") taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the record (ID: {record.id}, Task ID: {record.task_id})" ) break for rr in (t.record for t in records): try: match_put_code(rr) if rr.type == "URL": put_code, orcid, created, visibility = api.create_or_update_researcher_url( **rr.__data__ ) elif rr.type == "NAME": put_code, orcid, created, visibility = api.create_or_update_other_name( **rr.__data__ ) elif rr.type == "COUNTRY": put_code, orcid, created, visibility = api.create_or_update_address( **rr.__data__ ) else: put_code, orcid, created, visibility = api.create_or_update_keyword( **rr.__data__ ) if created: rr.add_status_line(f"Researcher {rr.type} record was created.") else: rr.add_status_line(f"Researcher {rr.type} record was updated.") rr.orcid = orcid rr.put_code = put_code if rr.visibility != visibility: rr.visibility = visibility except ApiException as ex: if ex.status == 404: rr.put_code = None elif ex.status == 401: token.delete_instance() logger.exception(f"Exception occured {ex}") rr.add_status_line(f"ApiException: {ex}") except Exception as ex: logger.exception(f"For {user} encountered exception") rr.add_status_line(f"Exception occured processing the record: {ex}.") finally: rr.processed_at = datetime.utcnow() rr.save() else: # 
TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return # TODO: delete def create_or_update_other_id(user, org_id, records, *args, **kwargs): """Create or update Other Id record of a user.""" records = list(unique_everseen(records, key=lambda t: t.record.id)) org = Organisation.get(id=org_id) profile_record = None token = ( OrcidToken.select(OrcidToken.access_token) .where( OrcidToken.user_id == user.id, OrcidToken.org_id == org.id, OrcidToken.scopes.contains("/person/update"), ) .first() ) if token: api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token) profile_record = api.get_record() if profile_record: activities = profile_record.get("person") other_id_records = [ r for r in (activities.get("external-identifiers").get("external-identifier")) if is_org_rec(org, r) ] taken_put_codes = {r.record.put_code for r in records if r.record.put_code} def match_put_code(records, record): """Match and assign put-code to the existing ORCID records.""" for r in records: try: orcid, put_code = r.get("path").split("/")[-3::2] except Exception: app.logger.exception("Failed to get ORCID iD/put-code from the response.") raise Exception("Failed to get ORCID iD/put-code from the response.") if record.put_code: return if put_code in taken_put_codes: continue if ( record.type and record.value and (r.get("external-id-type", default="") or "").lower() == record.type.lower() and (r.get("external-id-value", default="") or "").lower() == record.value.lower() ): record.put_code = put_code record.orcid = orcid record.visibility = r.get("visibility") if not record.display_index: record.display_index = r.get("display-index") taken_put_codes.add(put_code) app.logger.debug( f"put-code {put_code} was asigned to the other id record " f"(ID: {record.id}, Task ID: {record.task_id})" ) break for task_by_user in records: try: rr = task_by_user.record 
match_put_code(other_id_records, rr) put_code, orcid, created, visibility = api.create_or_update_person_external_id( **rr.__data__ ) if created: rr.add_status_line("Other ID record was created.") else: rr.add_status_line("Other ID record was updated.") rr.orcid = orcid rr.put_code = put_code if rr.visibility != visibility: rr.visibility = visibility except ApiException as ex: if ex.status == 404: rr.put_code = None elif ex.status == 401: token.delete_instance() logger.exception(f"Exception occured {ex}") rr.add_status_line(f"ApiException: {ex}") except Exception as ex: logger.exception(f"For {user} encountered exception") rr.add_status_line(f"Exception occured processing the record: {ex}.") finally: rr.processed_at = datetime.utcnow() rr.save() else: # TODO: Invitation resend in case user revokes organisation permissions app.logger.debug("Should resend an invite to the researcher asking for permissions") return def create_or_update_affiliations(user, org_id, records, *args, **kwargs): """Create or update affiliation record of a user. 1. Retries user edurcation and employment summamy from ORCID; 2. Match the recodrs with the summary; 3. If there is match update the record; 4. If no match create a new one. 
def create_or_update_affiliations(user, org_id, records, *args, **kwargs):
    """Create or update affiliation records of a user.

    1. Retrieves user education and employment summary from ORCID;
    2. Matches the records with the summary;
    3. If there is a match, updates the record;
    4. If there is no match, creates a new one.

    If the user's profile is not accessible, re-invites the affected
    researchers instead.
    """
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    orcid_affiliation_types = [
        "employment",
        "education",
        "distinction",
        "membership",
        "service",
        "qualification",
        "invited-position",
    ]
    if profile_record:
        # Affiliation summaries already present in the ORCID profile that were
        # authored by this organisation, keyed by affiliation type.
        affiliations = {
            at: [
                s.get(f"{at}-summary")
                for ag in profile_record.get(
                    "activities-summary", f"{at}s", "affiliation-group", default=[]
                )
                for s in ag.get("summaries", default=[])
                if is_org_rec(org, s.get(f"{at}-summary"))
            ]
            for at in orcid_affiliation_types
        }
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        put_codes = {
            at: [e["put-code"] for e in records] for at, records in affiliations.items()
        }

        def match_put_code(records, record):
            """Match and assign put-code to a single affiliation record and the existing ORCID records.

            Returns True when an existing ORCID record matches the task record
            exactly, i.e. no ORCID API call is needed.
            """
            for r in records:
                try:
                    orcid, rec_type, put_code = r.get("path").split("/")[-3:]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                start_date = record.start_date.as_orcid_dict() if record.start_date else None
                end_date = record.end_date.as_orcid_dict() if record.end_date else None
                if (
                    r.get("start-date") == start_date
                    and r.get("end-date") == end_date
                    and (rec_type == "education" or r.get("department-name") == record.department)
                    and r.get("role-title") == record.role
                    and get_val(r, "organization", "name") == record.organisation
                    and get_val(r, "organization", "address", "city") == record.city
                    and get_val(r, "organization", "address", "region") == record.region
                    and get_val(r, "organization", "address", "country") == record.country
                    and get_val(
                        r,
                        "organization",
                        "disambiguated-organization",
                        "disambiguated-organization-identifier",
                    )
                    == record.disambiguated_id
                    and get_val(
                        r, "organization", "disambiguated-organization", "disambiguation-source"
                    )
                    == record.disambiguation_source
                ):
                    # Exact match: the profile already holds this affiliation.
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    return True

                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue

                if (
                    # pure vanilla:
                    (
                        r.get("start-date") is None
                        and r.get("end-date") is None
                        and r.get("department-name") is None
                        and r.get("role-title") is None
                    )
                    or
                    # partial match
                    (
                        (
                            # for 'edu' records department and start-date can be missing:
                            rec_type == "education"
                            or (
                                record.department
                                and r.get("start-date") == start_date
                                and r.get("department-name", default="").lower()
                                == record.department.lower()
                            )
                        )
                        and record.role
                        # BUG FIX: the comparison used to sit inside the
                        # default= argument -- `r.get("role-title",
                        # default="".lower() == record.role.lower())` -- so any
                        # non-empty role-title matched regardless of the role.
                        and (r.get("role-title", default="") or "").lower()
                        == record.role.lower()
                    )
                ):
                    record.visibility = r.get("visibility")
                    record.put_code = put_code
                    record.orcid = orcid
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the affiliation record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for task_by_user in records:
            try:
                ar = task_by_user.record
                at = ar.affiliation_type.lower() if ar.affiliation_type else None
                no_orcid_call = False

                if ar.delete_record and profile_record:
                    # Deletion: find the affiliation type holding this
                    # put-code and remove the record from the ORCID profile.
                    try:
                        for at in orcid_affiliation_types:
                            if ar.put_code in put_codes[at]:
                                getattr(api, f"delete_{at}v3")(user.orcid, ar.put_code)
                                app.logger.info(
                                    f"ORCID record of {user} with put-code {ar.put_code} was deleted."
                                )
                                break
                        else:
                            ar.add_status_line(
                                f"There is no record with the given put-code {ar.put_code} in the user {user} profile."
                            )
                    except Exception as ex:
                        ar.add_status_line(f"Exception occured processing the record: {ex}.")
                    ar.processed_at = datetime.utcnow()
                    ar.save()
                    continue

                if at in EMP_CODES:
                    affiliation = Affiliation.EMP
                elif at in DST_CODES:
                    affiliation = Affiliation.DST
                elif at in MEM_CODES:
                    affiliation = Affiliation.MEM
                elif at in SER_CODES:
                    affiliation = Affiliation.SER
                elif at in QUA_CODES:
                    affiliation = Affiliation.QUA
                elif at in INV_POS_CODES:
                    affiliation = Affiliation.POS
                elif at in EDU_CODES:
                    affiliation = Affiliation.EDU
                else:
                    logger.info(f"For {user} not able to determine affiliaton type with {org}")
                    # BUG FIX: adjacent string literals used to make the
                    # message text the join separator; concatenate explicitly.
                    ar.add_status_line(
                        f"Unsupported affiliation type '{at}' allowed values are: "
                        + ", ".join(t for t in AFFILIATION_TYPES)
                    )
                    ar.save()
                    continue

                no_orcid_call = match_put_code(affiliations.get(str(affiliation).lower()), ar)
                if no_orcid_call:
                    ar.add_status_line(f"{str(affiliation)} record unchanged.")
                else:
                    put_code, orcid, created, visibility = api.create_or_update_affiliation(
                        affiliation=affiliation, **ar.__data__
                    )
                    if created:
                        ar.add_status_line(f"{str(affiliation)} record was created.")
                    else:
                        ar.add_status_line(f"{str(affiliation)} record was updated.")
                    ar.orcid = orcid
                    ar.put_code = put_code
                    if ar.visibility != visibility:
                        ar.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                ar.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                ar.processed_at = datetime.utcnow()
                ar.save()
    else:
        # No access to the profile: re-invite every affected researcher so the
        # Hub can obtain (fresh) permissions.
        for task_by_user in records:
            user = User.get(email=task_by_user.record.email, organisation=task_by_user.org)
            user_org = UserOrg.get(user=user, org=task_by_user.org)
            token = new_invitation_token()
            with app.app_context():
                invitation_url = flask.url_for(
                    "orcid_login",
                    invitation_token=token,
                    _external=True,
                    _scheme="http" if app.debug else "https",
                )
                send_email(
                    "email/researcher_reinvitation.html",
                    recipient=(user.organisation.name, user.email),
                    reply_to=f"{task_by_user.created_by.name} <{task_by_user.created_by.email}>",
                    invitation_url=invitation_url,
                    org_name=user.organisation.name,
                    org=org,
                    user=user,
                )
            UserInvitation.create(
                invitee_id=user.id,
                inviter_id=task_by_user.created_by.id,
                org=org,
                email=user.email,
                first_name=user.first_name,
                last_name=user.last_name,
                orcid=user.orcid,
                organisation=org.name,
                city=org.city,
                region=org.region,
                country=org.country,
                start_date=task_by_user.record.start_date,
                end_date=task_by_user.record.end_date,
                affiliations=user_org.affiliations,
                disambiguated_id=org.disambiguated_id,
                disambiguation_source=org.disambiguation_source,
                token=token,
            )
            status = "Exception occured while accessing user's profile. Hence, The invitation resent at "
            status += datetime.utcnow().isoformat(timespec="seconds")
            AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status).where(
                AffiliationRecord.status.is_null(False), AffiliationRecord.email == user.email
            ).execute()
            AffiliationRecord.update(status=status).where(
                AffiliationRecord.status.is_null(), AffiliationRecord.email == user.email
            ).execute()
    return
on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=((UserInvitation.email == WorkInvitee.email) & (UserInvitation.task_id == Task.id)), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/activities/update")) ), ) ) if record_id: tasks = tasks.where(WorkRecord.id == record_id) tasks = tasks.order_by(Task.id, Task.org_id, WorkRecord.id, User.id).limit(max_rows) tasks = list(tasks) for (task_id, org_id, record_id, user), tasks_by_user in groupby( tasks, lambda t: ( t.id, t.org_id, t.record.id, (lambda r: r.user if r.user.id else None)(t.record.invitee), ), ): # If we have the token associated to the user then update the work record, # otherwise send him an invite if ( user is None or user.orcid is None or not OrcidToken.select() .where( (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id) & (OrcidToken.scopes.contains("/activities/update")) ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.invitee.email, t.record.invitee.first_name, t.record.invitee.last_name, ), ): # noqa: E501 email = k[2] try: send_user_invitation(*k, task_id=task_id) except Exception as ex: ( WorkInvitee.update( processed_at=datetime.utcnow(), status=f"Failed to send an invitation: {ex}.", ).where( WorkInvitee.email == email, WorkInvitee.record_id == record_id, WorkInvitee.processed_at.is_null(), ) ).execute() else: create_or_update_work(user, org_id, tasks_by_user) task_ids.add(task_id) work_ids.add(record_id) for record in WorkRecord.select().where(WorkRecord.id << work_ids): # The Work record is processed for all invitees if not ( WorkInvitee.select() .where(WorkInvitee.record_id == record.id, WorkInvitee.processed_at.is_null()) .exists() ): record.processed_at = datetime.utcnow() if not 
record.status or "error" not in record.status: record.add_status_line("Work record is processed.") record.save() for task in Task.select().where(Task.id << task_ids): # The task is completed (Once all records are processed): if not ( WorkRecord.select() .where(WorkRecord.task_id == task.id, WorkRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( WorkRecord.select() .where(WorkRecord.task_id == task.id, WorkRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "workrecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) send_email( "email/work_task_completed.html", subject="Work Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, task_name="Work", filename=task.filename, ) @rq.job(timeout=300) def process_peer_review_records(max_rows=20, record_id=None): """Process uploaded peer_review records.""" set_server_name() task_ids = set() peer_review_ids = set() """This query is to retrieve Tasks associated with peer review records, which are not processed but are active""" tasks = ( Task.select( Task, PeerReviewRecord, PeerReviewInvitee, User, UserInvitation.id.alias("invitation_id"), OrcidToken, ) .where( PeerReviewRecord.processed_at.is_null(), PeerReviewInvitee.processed_at.is_null(), PeerReviewRecord.is_active, ( OrcidToken.id.is_null(False) | ( (PeerReviewInvitee.status.is_null()) | (PeerReviewInvitee.status.contains("sent").__invert__()) ) ), ) .join(PeerReviewRecord, on=(Task.id == PeerReviewRecord.task_id), attr="record") .join( PeerReviewInvitee, on=(PeerReviewRecord.id == PeerReviewInvitee.record_id), attr="invitee", ) .join( User, JOIN.LEFT_OUTER, on=( (User.email == PeerReviewInvitee.email) | ((User.orcid == PeerReviewInvitee.orcid) & (User.organisation_id == Task.org_id)) ), ) 
.join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( (UserInvitation.email == PeerReviewInvitee.email) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/activities/update")) ), ) ) if record_id: tasks = tasks.where(PeerReviewRecord.id == record_id) tasks = tasks.order_by(Task.id, Task.org_id, PeerReviewRecord.id, User.id).limit(max_rows) tasks = list(tasks) for (task_id, org_id, record_id, user), tasks_by_user in groupby( tasks, lambda t: ( t.id, t.org_id, t.record.id, (lambda r: r.user if r.user.id else None)(t.record.invitee), ), ): """If we have the token associated to the user then update the peer record, otherwise send him an invite""" if ( user is None or user.orcid is None or not OrcidToken.select() .where( (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id) & (OrcidToken.scopes.contains("/activities/update")) ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.invitee.email, t.record.invitee.first_name, t.record.invitee.last_name, ), ): # noqa: E501 email = k[2] try: send_user_invitation(*k, task_id=task_id) except Exception as ex: ( PeerReviewInvitee.update( processed_at=datetime.utcnow(), status=f"Failed to send an invitation: {ex}.", ).where( PeerReviewInvitee.email == email, PeerReviewInvitee.record_id == record_id, PeerReviewInvitee.processed_at.is_null(), ) ).execute() else: create_or_update_peer_review(user, org_id, tasks_by_user) task_ids.add(task_id) peer_review_ids.add(record_id) for record in PeerReviewRecord.select().where(PeerReviewRecord.id << peer_review_ids): # The Peer Review record is processed for all invitees if not ( PeerReviewInvitee.select() .where( 
PeerReviewInvitee.record_id == record.id, PeerReviewInvitee.processed_at.is_null() ) .exists() ): record.processed_at = datetime.utcnow() if not record.status or "error" not in record.status: record.add_status_line("Peer Review record is processed.") record.save() for task in Task.select().where(Task.id << task_ids): # The task is completed (Once all records are processed): if not ( PeerReviewRecord.select() .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( PeerReviewRecord.select() .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "peerreviewrecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) send_email( "email/work_task_completed.html", subject="Peer Review Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, task_name="Peer Review", filename=task.filename, ) @rq.job(timeout=300) def process_funding_records(max_rows=20, record_id=None): """Process uploaded affiliation records.""" set_server_name() task_ids = set() funding_ids = set() """This query is to retrieve Tasks associated with funding records, which are not processed but are active""" tasks = ( Task.select( Task, FundingRecord, FundingInvitee, User, UserInvitation.id.alias("invitation_id"), OrcidToken, ) .where( FundingRecord.processed_at.is_null(), FundingInvitee.processed_at.is_null(), FundingRecord.is_active, ( OrcidToken.id.is_null(False) | ( (FundingInvitee.status.is_null()) | (FundingInvitee.status.contains("sent").__invert__()) ) ), ) .join(FundingRecord, on=(Task.id == FundingRecord.task_id), attr="record") .join(FundingInvitee, on=(FundingRecord.id == FundingInvitee.record_id), attr="invitee") .join( User, 
JOIN.LEFT_OUTER, on=( (User.email == FundingInvitee.email) | ((User.orcid == FundingInvitee.orcid) & (User.organisation_id == Task.org_id)) ), ) .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( (UserInvitation.email == FundingInvitee.email) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/activities/update")) ), ) .limit(max_rows) ) if record_id: tasks = tasks.where(FundingRecord.id == record_id) for (task_id, org_id, record_id, user), tasks_by_user in groupby( tasks, lambda t: ( t.id, t.org_id, t.record.id, (lambda r: r.user if r.user.id else None)(t.record.invitee), ), ): """If we have the token associated to the user then update the funding record, otherwise send him an invite""" if ( user is None or user.orcid is None or not OrcidToken.select() .where( (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id) & (OrcidToken.scopes.contains("/activities/update")) ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.invitee.email, t.record.invitee.first_name, t.record.invitee.last_name, ), ): # noqa: E501 email = k[2] try: send_user_invitation(*k, task_id=task_id) except Exception as ex: ( FundingInvitee.update( processed_at=datetime.utcnow(), status=f"Failed to send an invitation: {ex}.", ).where( FundingInvitee.email == email, FundingInvitee.record_id == record_id, FundingInvitee.processed_at.is_null(), ) ).execute() else: create_or_update_funding(user, org_id, tasks_by_user) task_ids.add(task_id) funding_ids.add(record_id) for record in FundingRecord.select().where(FundingRecord.id << funding_ids): # The funding record is processed for all invitees if not ( FundingInvitee.select() 
.where(FundingInvitee.record_id == record.id, FundingInvitee.processed_at.is_null()) .exists() ): record.processed_at = datetime.utcnow() if not record.status or "error" not in record.status: record.add_status_line("Funding record is processed.") record.save() for task in Task.select().where(Task.id << task_ids): # The task is completed (Once all records are processed): if not ( FundingRecord.select() .where(FundingRecord.task_id == task.id, FundingRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( FundingRecord.select() .where(FundingRecord.task_id == task.id, FundingRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "fundingrecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) send_email( "email/funding_task_completed.html", subject="Funding Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, filename=task.filename, ) @rq.job(timeout=300) def process_affiliation_records(max_rows=20, record_id=None): """Process uploaded affiliation records.""" set_server_name() # TODO: optimize removing redundant fields # TODO: perhaps it should be broken into 2 queries task_ids = set() tasks = ( Task.select( Task, AffiliationRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken ) .where( AffiliationRecord.processed_at.is_null(), AffiliationRecord.is_active, ( (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False)) | ( (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null()) & UserInvitation.id.is_null() & ( AffiliationRecord.status.is_null() | AffiliationRecord.status.contains("sent").__invert__() ) ) ), ) .join(AffiliationRecord, on=(Task.id == AffiliationRecord.task_id), attr="record") .join( User, JOIN.LEFT_OUTER, on=( (User.email == 
AffiliationRecord.email) | ((User.orcid == AffiliationRecord.orcid) & (User.organisation_id == Task.org_id)) ), ) .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( (UserInvitation.email == AffiliationRecord.email) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/activities/update")) ), ) .limit(max_rows) ) if record_id: if isinstance(record_id, list): tasks = tasks.where(AffiliationRecord.id.in_(record_id)) else: tasks = tasks.where(AffiliationRecord.id == record_id) for (task_id, org_id, user), tasks_by_user in groupby( tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record)) ): if ( user is None or user.orcid is None or not OrcidToken.select() .where( (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id) & (OrcidToken.scopes.contains("/activities/update")) ) .exists() ): # noqa: E127, E129 # maps invitation attributes to affiliation type set: # - the user who uploaded the task; # - the user organisation; # - the invitee email; # - the invitee first_name; # - the invitee last_name invitation_dict = { k: set(t.record.affiliation_type.lower() for t in tasks) for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.email, t.record.first_name, t.record.last_name, ), # noqa: E501 ) # noqa: E501 } for invitation, affiliations in invitation_dict.items(): email = invitation[2] try: send_user_invitation( *invitation, affiliation_types=affiliations, task_id=task_id ) except Exception as ex: ( AffiliationRecord.update( processed_at=datetime.utcnow(), status=f"Failed to send an invitation: {ex}.", ).where( AffiliationRecord.task_id == task_id, AffiliationRecord.email == email, 
AffiliationRecord.processed_at.is_null(), ) ).execute() else: # user exits and we have tokens create_or_update_affiliations(user, org_id, tasks_by_user) task_ids.add(task_id) for task in Task.select().where(Task.id << task_ids): # The task is completed (all recores are processed): if not ( AffiliationRecord.select() .where(AffiliationRecord.task_id == task.id, AffiliationRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( AffiliationRecord.select() .where(AffiliationRecord.task_id == task.id, AffiliationRecord.status ** "%error%") .count() ) row_count = task.record_count orcid_rec_count = ( task.affiliation_records.select(AffiliationRecord.orcid).distinct().count() ) if task.filename and "INTEGRATION" not in task.filename: with app.app_context(): export_url = flask.url_for( "affiliationrecord.export", export_type="csv", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) try: send_email( "email/task_completed.html", subject="Affiliation Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, orcid_rec_count=orcid_rec_count, export_url=export_url, filename=task.filename, ) except Exception: logger.exception( "Failed to send batch process comletion notification message." 
) @rq.job(timeout=300) def process_property_records(max_rows=20, record_id=None): """Process uploaded property records.""" set_server_name() # TODO: optimize removing redundant fields # TODO: perhaps it should be broken into 2 queries task_ids = set() tasks = ( Task.select( Task, PropertyRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken ) .where( PropertyRecord.processed_at.is_null(), PropertyRecord.is_active, ( (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False)) | ( (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null()) & UserInvitation.id.is_null() & ( PropertyRecord.status.is_null() | PropertyRecord.status.contains("sent").__invert__() ) ) ), ) .join(PropertyRecord, on=(Task.id == PropertyRecord.task_id), attr="record") .join( User, JOIN.LEFT_OUTER, on=( (User.email == PropertyRecord.email) | ((User.orcid == PropertyRecord.orcid) & (User.organisation_id == Task.org_id)) ), ) .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( ( (UserInvitation.email == PropertyRecord.email) | (UserInvitation.email == User.email) ) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/person/update")) ), ) ) if max_rows: tasks = tasks.limit(max_rows) if record_id: if isinstance(record_id, list): tasks = tasks.where(PropertyRecord.id.in_(record_id)) else: tasks = tasks.where(PropertyRecord.id == record_id) for (task_id, org_id, user), tasks_by_user in groupby( tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record)) ): if ( not user or not user.orcid or not OrcidToken.select() .where( OrcidToken.user_id == user.id, OrcidToken.org_id == org_id, 
OrcidToken.scopes.contains("/person/update"), ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.email, t.record.first_name, t.record.last_name, user, ), ): # noqa: E501 try: send_user_invitation(*k, task_id=task_id) status = "The invitation sent at " + datetime.utcnow().isoformat( timespec="seconds" ) for r in tasks: r.record.add_status_line(status) r.record.save() except Exception as ex: for r in tasks: r.record.add_status_line(f"Failed to send an invitation: {ex}.") r.record.save() else: create_or_update_properties(user, org_id, tasks_by_user) task_ids.add(task_id) for task in Task.select().where(Task.id << task_ids): # The task is completed (all recores are processed): if not ( PropertyRecord.select() .where(PropertyRecord.task_id == task.id, PropertyRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( PropertyRecord.select() .where(PropertyRecord.task_id == task.id, PropertyRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "propertyrecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) try: send_email( "email/task_completed.html", subject="Researcher Property Record Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, task_name="Researcher Property", filename=task.filename, ) except Exception: logger.exception( "Failed to send batch process completion notification message." 
) @rq.job(timeout=300) def process_other_id_records(max_rows=20, record_id=None): """Process uploaded Other ID records.""" set_server_name() # TODO: optimize task_ids = set() tasks = ( Task.select( Task, OtherIdRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken ) .where( OtherIdRecord.processed_at.is_null(), OtherIdRecord.is_active, ( (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False)) | ( (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null()) & UserInvitation.id.is_null() & ( OtherIdRecord.status.is_null() | OtherIdRecord.status.contains("sent").__invert__() ) ) ), ) .join(OtherIdRecord, on=(Task.id == OtherIdRecord.task_id), attr="record") .join( User, JOIN.LEFT_OUTER, on=( (User.email == OtherIdRecord.email) | ((User.orcid == OtherIdRecord.orcid) & (User.organisation_id == Task.org_id)) ), ) .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( (UserInvitation.email == OtherIdRecord.email) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/person/update")) ), ) .limit(max_rows) ) if record_id: tasks = tasks.where(OtherIdRecord.id == record_id) for (task_id, org_id, user), tasks_by_user in groupby( tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record)) ): if ( user is None or user.orcid is None or not OrcidToken.select() .where( (OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id) & (OrcidToken.scopes.contains("/person/update")) ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.email, t.record.first_name, t.record.last_name, ), ): # noqa: E501 try: email = k[2] send_user_invitation( *k, 
task_id=task_id, invitation_template="email/property_invitation.html" ) status = "The invitation sent at " + datetime.utcnow().isoformat( timespec="seconds" ) ( OtherIdRecord.update(status=OtherIdRecord.status + "\n" + status) .where( OtherIdRecord.status.is_null(False), OtherIdRecord.task_id == task_id, OtherIdRecord.email == email, ) .execute() ) ( OtherIdRecord.update(status=status) .where( OtherIdRecord.status.is_null(), OtherIdRecord.task_id == task_id, OtherIdRecord.email == email, ) .execute() ) except Exception as ex: ( OtherIdRecord.update( processed_at=datetime.utcnow(), status=f"Failed to send an invitation: {ex}.", ).where( OtherIdRecord.task_id == task_id, OtherIdRecord.email == email, OtherIdRecord.processed_at.is_null(), ) ).execute() else: create_or_update_other_id(user, org_id, tasks_by_user) task_ids.add(task_id) for task in Task.select().where(Task.id << task_ids): # The task is completed (all recores are processed): if not ( OtherIdRecord.select() .where(OtherIdRecord.task_id == task.id, OtherIdRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( OtherIdRecord.select() .where(OtherIdRecord.task_id == task.id, OtherIdRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "otheridrecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) try: send_email( "email/task_completed.html", subject="Other ID Record Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, task_name="Other ID", filename=task.filename, ) except Exception: logger.exception( "Failed to send batch process completion notification message." 
) @rq.job(timeout=300) def process_resource_records(max_rows=20, record_id=None): """Process uploaded resoucre records.""" set_server_name() # TODO: optimize removing redundant fields # TODO: perhaps it should be broken into 2 queries task_ids = set() tasks = ( Task.select( Task, ResourceRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken ) .where( ResourceRecord.processed_at.is_null(), ResourceRecord.is_active, ( (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False)) | ( (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null()) & UserInvitation.id.is_null() & ( ResourceRecord.status.is_null() | ResourceRecord.status.contains("sent").__invert__() ) ) ), ) .join(ResourceRecord, on=(Task.id == ResourceRecord.task_id), attr="record") .join( User, JOIN.LEFT_OUTER, on=( (User.email == ResourceRecord.email) | ((User.orcid == ResourceRecord.orcid) & (User.organisation_id == Task.org_id)) ), ) .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id)) .join( UserOrg, JOIN.LEFT_OUTER, on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)), ) .join( UserInvitation, JOIN.LEFT_OUTER, on=( ( (UserInvitation.email == ResourceRecord.email) | (UserInvitation.email == User.email) ) & (UserInvitation.task_id == Task.id) ), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Organisation.id) & (OrcidToken.scopes.contains("/activities/update")) ), ) ) if max_rows: tasks = tasks.limit(max_rows) if record_id: if isinstance(record_id, list): tasks = tasks.where(ResourceRecord.id.in_(record_id)) else: tasks = tasks.where(ResourceRecord.id == record_id) for (task_id, org_id, user), tasks_by_user in groupby( tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record)) ): if ( not user or not user.orcid or not OrcidToken.select() .where( OrcidToken.user_id == user.id, OrcidToken.org_id == org_id, 
OrcidToken.scopes.contains("/activities/update"), ) .exists() ): # noqa: E127, E129 for k, tasks in groupby( tasks_by_user, lambda t: ( t.created_by, t.org, t.record.email, t.record.first_name, t.record.last_name, user, ), ): # noqa: E501 try: send_user_invitation(*k, task_id=task_id) status = "The invitation sent at " + datetime.utcnow().isoformat( timespec="seconds" ) for r in tasks: r.record.add_status_line(status) r.record.save() except Exception as ex: for r in tasks: r.record.add_status_line(f"Failed to send an invitation: {ex}.") r.record.save() else: create_or_update_resources(user, org_id, tasks_by_user) task_ids.add(task_id) for task in Task.select().where(Task.id << task_ids): # The task is completed (all recores are processed): rm = task.record_model if not (rm.select().where(rm.task_id == task.id, rm.processed_at.is_null()).exists()): task.completed_at = datetime.utcnow() task.save() error_count = rm.select().where(rm.task_id == task.id, rm.status ** "%error%").count() row_count = task.record_count with app.app_context(): export_url = flask.url_for( "resourcerecord.export", export_type="json" if task.is_raw else "csv", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) try: send_email( "email/task_completed.html", subject="Research Rresource Record Process Update", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, task_name="Research Resource", filename=task.filename, ) except Exception: logger.exception( "Failed to send batch process completion notification message." 
) @rq.job(timeout=300) def process_message_records(max_rows=20, record_id=None): """Process uploaded ORCID message records.""" RecordInvitee = MessageRecord.invitees.get_through_model() # noqa: N806 set_server_name() record_ids = set() task_ids = set() tasks = ( Task.select(Task, MessageRecord, RecordInvitee, Invitee, User, OrcidToken) .where(Task.is_raw, Invitee.processed_at.is_null(), MessageRecord.is_active) .join(MessageRecord, attr="record") .join(RecordInvitee, attr="ri") .join(Invitee) .join( User, JOIN.LEFT_OUTER, on=((User.email == Invitee.email) | ((User.orcid == Invitee.orcid))), ) .join( OrcidToken, JOIN.LEFT_OUTER, on=( (OrcidToken.user_id == User.id) & (OrcidToken.org_id == Task.org_id) & (OrcidToken.scopes.contains("/activities/update")) ), attr="token", ) ) if record_id: if isinstance(record_id, (list, tuple)): tasks = tasks.where(MessageRecord.id << record_id) else: tasks = tasks.where(MessageRecord.id == record_id) if max_rows: tasks = tasks.limit(max_rows) # Send an invitation for task_id, records in groupby( tasks.where(OrcidToken.id.is_null()).order_by( Task.id, Invitee.email, Invitee.first_name, Invitee.last_name ), lambda t: t.id, ): task_ids.add(task_id) invitees = set( ( t.created_by, t.org, t.record.ri.invitee.email or t.record.ri.invitee.user.email, t.record.ri.invitee.first_name or t.record.ri.invitee.user.first_name, t.record.ri.invitee.last_name or t.record.ri.invitee.user.last_name, ) for t in records ) for invitee in invitees: # noqa: E501 send_user_invitation(*invitee, task_id=task_id) # Create or update the resource record for (task_id, task_type, org_id, user), records in groupby( tasks.where(OrcidToken.id.is_null(False)).order_by(Task.id, User.id, MessageRecord.id), lambda t: (t.id, t.task_type, t.org_id, t.record.ri.invitee.user), ): # TODO: in the future - implememnt for other types: task_ids.add(task_id) records = list(records) create_or_update_record_from_messages(records) record_ids.update(r.record.id for r in records) for 
record in MessageRecord.select().where(MessageRecord.id << record_ids): # The Work record is processed for all invitees if not ( RecordInvitee.select() .join(Invitee) .where(RecordInvitee.messagerecord_id == record.id, Invitee.processed_at.is_null()) .exists() ): record.processed_at = datetime.utcnow() if not record.status or "error" not in record.status: record.add_status_line("record is processed.") record.save() for task in Task.select().where(Task.id << task_ids): # The task is completed (Once all records are processed): if not ( MessageRecord.select() .where(MessageRecord.task_id == task.id, MessageRecord.processed_at.is_null()) .exists() ): task.completed_at = datetime.utcnow() task.save() error_count = ( MessageRecord.select() .where(MessageRecord.task_id == task.id, MessageRecord.status ** "%error%") .count() ) row_count = task.record_count with app.app_context(): export_url = flask.url_for( "messagerecord.export", export_type="json", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) send_email( "email/task_completed.html", subject=f"Batch Process Update: {task.filename}", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, row_count=row_count, export_url=export_url, filename=task.filename, ) @rq.job(timeout=300) def process_tasks(max_rows=20): """Handle batch task expiration. Send a information messages about upcoming removal of the processed/uploaded tasks based on date whichever is greater either created_at + month or updated_at + 2 weeks and removal of expired tasks based on the expiry date. Args: max_rows (int): The maximum number of rows that will get processed in one go. Returns: int. The number of processed task records. 
""" Task.delete().where((Task.expires_at < datetime.utcnow())).execute() tasks = Task.select().where(Task.expires_at.is_null()) if max_rows and max_rows > 0: tasks = tasks.limit(max_rows) for task in tasks: max_created_at_expiry = task.created_at + timedelta(weeks=4) max_updated_at_expiry = task.updated_at + timedelta(weeks=2) max_expiry_date = max_created_at_expiry if max_created_at_expiry < max_updated_at_expiry: max_expiry_date = max_updated_at_expiry task.expires_at = max_expiry_date task.save() tasks = Task.select().where( Task.expires_at.is_null(False), Task.expiry_email_sent_at.is_null(), Task.expires_at < (datetime.now() + timedelta(weeks=1)), ) if max_rows and max_rows > 0: tasks = tasks.limit(max_rows) for task in tasks: if not task.task_type or task.records is None: app.logger.error(f'Unknown task "{task}" (ID: {task.id}) task type.') continue if task.filename and "INTEGRATION" in task.filename: continue export_model = task.record_model._meta.name + ".export" error_count = task.error_count set_server_name() with app.app_context(): export_url = flask.url_for( export_model, export_type="csv", _scheme="http" if EXTERNAL_SP else "https", task_id=task.id, _external=True, ) send_email( "email/task_expiration.html", task=task, subject="Batch process task is about to expire", recipient=(task.created_by.name, task.created_by.email), error_count=error_count, export_url=export_url, ) task.expiry_email_sent_at = datetime.utcnow() task.save() def get_client_credentials_token(org, scopes="/webhook"): """Request a cient credetials grant type access token and store it. The any previously requesed with the give scope tokens will be deleted. 
""" resp = requests.post( app.config["TOKEN_URL"], headers={"Accept": "application/json"}, data=dict( client_id=org.orcid_client_id, client_secret=org.orcid_secret, scope=scopes, grant_type="client_credentials", ), ) OrcidToken.delete().where(OrcidToken.org == org, OrcidToken.scopes == scopes).execute() data = resp.json() token = OrcidToken.create( org=org, access_token=data["access_token"], refresh_token=data["refresh_token"], scopes=data.get("scope") or scopes, expires_in=data["expires_in"], ) return token @rq.job(timeout=300) def register_orcid_webhook(user, callback_url=None, delete=False): """Register or delete an ORCID webhook for the given user profile update events. If URL is given, it will be used for as call-back URL. """ local_handler = callback_url is None # Don't delete the webhook if there is anyther organisation with enabled webhook: if ( local_handler and delete and user.organisations.where(Organisation.webhook_enabled).count() > 1 ): return # Any 'webhook' access token can be used: token = ( OrcidToken.select() .where(OrcidToken.org == user.organisation, OrcidToken.scopes == "/webhook") .order_by(OrcidToken.id.desc()) .first() ) if not token: token = get_client_credentials_token(org=user.organisation, scopes="/webhook") if local_handler: set_server_name() with app.app_context(): callback_url = quote( url_for("update_webhook", user_id=user.id, _external=True, _scheme="https"), safe="", ) elif "/" in callback_url or ":" in callback_url: callback_url = quote(callback_url, safe="") url = f"{app.config['ORCID_API_HOST_URL']}{user.orcid}/webhook/{callback_url}" headers = { "Accept": "application/json", "Authorization": f"Bearer {token.access_token}", "Content-Length": "0", } call = OrcidApiCall(method="DELETE" if delete else "PUT", url=url, query_params=headers) resp = requests.delete(url, headers=headers) if delete else requests.put(url, headers=headers) call.response = resp.text call.status = resp.status_code call.set_response_time() call.save() if 
local_handler: user.webhook_enabled = (resp.status_code in [201, 204]) and not delete user.save() if resp.status_code not in [201, 204]: raise ApiException(f"Failed to register or delete webhook {callback_url}: {resp.text}") return resp def notify_about_update(user, event_type="UPDATED"): """Notify all organisation about changes of the user.""" for org in user.organisations.where( Organisation.webhook_enabled | Organisation.email_notifications_enabled ): if org.webhook_enabled and org.webhook_url: invoke_webhook_handler.queue( org.id, user.orcid, user.created_at or user.updated_at, user.updated_at or user.created_at, event_type=event_type, ) if org.email_notifications_enabled: url = app.config["ORCID_BASE_URL"] + user.orcid send_email( f"""<p>User {user.name} (<a href="{url}" target="_blank">{user.orcid}</a>) {"profile was updated" if event_type == "UPDATED" else "has linked their account"} at {(user.updated_at or user.created_at).isoformat(timespec="minutes", sep=' ')}.</p>""", recipient=org.notification_email or (org.tech_contact.name, org.tech_contact.email), cc_email=(org.tech_contact.name, org.tech_contact.email) if org.notification_email else None, subject=f"ORCID Profile Update ({user.orcid})", org=org, ) @rq.job(timeout=300) def invoke_webhook_handler( org_id=None, orcid=None, created_at=None, updated_at=None, message=None, event_type="UPDATED", attempts=5, *args, **kwargs, ): """Propagate 'updated' event to the organisation event handler URL.""" if not message: url = app.config["ORCID_BASE_URL"] + orcid message = {"orcid": orcid, "url": url, "type": event_type} if event_type == "CREATED" and created_at: message["created-at"] = created_at.isoformat(timespec="seconds") if updated_at: message["updated-at"] = updated_at.isoformat(timespec="seconds") if orcid: user = ( User.select().where(User.orcid == orcid).order_by(User.id.desc()).limit(1).first() ) if user: message["email"] = user.email if user.eppn: message["eppn"] = user.eppn if org_id: org = 
Organisation.get(id=org_id) else: org = User.select().where(User.orcid == orcid).first().organisation org_id = org.id url = org.webhook_url if org.webhook_append_orcid: if not url.endswith("/"): url += "/" url += orcid try: app.logger.info(f"Invoking webhook: {url} with payload: {message}") if org.webhook_apikey: resp = requests.post(url, json=message, headers=dict(apikey=org.webhook_apikey)) else: resp = requests.post(url, json=message) except: if attempts == 1: raise if not resp or resp.status_code // 200 != 1: if attempts > 1: invoke_webhook_handler.schedule( timedelta(minutes=5 * (6 - attempts) if attempts < 6 else 5), org_id=org_id, message=message, attempts=attempts - 1, ) else: raise Exception(f"Failed to propaged the event. Status code: {resp.status_code}") return resp @rq.job(timeout=300) def enable_org_webhook(org): """Enable Organisation Webhook.""" org.webhook_enabled = True org.save() for u in org.users.where( User.webhook_enabled.NOT(), User.orcid.is_null(False) | (User.orcid != "") ): if u.orcid.strip(): register_orcid_webhook.queue(u) @rq.job(timeout=300) def disable_org_webhook(org): """Disable Organisation Webhook.""" org.webhook_enabled = False org.save() for u in org.users.where(User.webhook_enabled, User.orcid.is_null(False) | (User.orcid != "")): if u.orcid.strip(): register_orcid_webhook.queue(u, delete=True) def process_records(n): """Process first n records and run other batch tasks.""" process_affiliation_records(n) process_funding_records(n) process_work_records(n) process_peer_review_records(n) process_property_records(n) process_other_id_records(n) # process_tasks(n) @rq.job(timeout=300) def send_orcid_update_summary(org_id=None): """Send organisation researcher ORCID profile update summary report.""" first = date.today().replace(day=1) previous_last = first - timedelta(days=1) previous_first = previous_last.replace(day=1) if org_id is None: for o in ( Organisation.select(Organisation.id) .distinct() .join(UserOrg, on=UserOrg.org_id == 
Organisation.id) .join(User, on=User.id == UserOrg.user_id) .where(Organisation.webhook_enabled, Organisation.email_notifications_enabled) .where(User.orcid_updated_at >= previous_first, User.orcid_updated_at < first) ): send_orcid_update_summary.queue(o.id) return org = Organisation.select().where(Organisation.id == org_id).first() if org and org.webhook_enabled and org.email_notifications_enabled: updated_users = org.users.where( User.orcid_updated_at >= previous_first, User.orcid_updated_at < first ) recipient = org.notification_email or (org.tech_contact.name, org.tech_contact.email) if updated_users.exists(): message_template = """<p>The flollowing user profiles were updated from {{date_from}} until {{date_to}}:</p> <ul> {% for u in updated_users %} <li>{{u.name}} ({{u.email}}, <a href="{{orcid_base_url}}{{u.orcid}}" target="_blank">{{u.orcid}}</a>, updated at {{u.orcid_updated_at.isoformat(sep=" ", timespec="seconds")}}); </li> {% endfor %} </ul> """ set_server_name() with app.app_context(): send_email( message_template, org=org, recipient=recipient, subject="Updated ORCID Profiles", date_from=previous_first, date_to=previous_last, updated_users=updated_users, orcid_base_url=app.config["ORCID_BASE_URL"], ) @rq.job(timeout=300) def sync_profile(task_id, delay=0.1): """Verify and sync the user profile.""" if not task_id: return try: task = Task.get(task_id) except Task.DoesNotExist: return org = task.org if not org.disambiguated_id: return api = orcid_client.MemberAPIV3(org=org) count = 0 for u in ( task.org.users.select(User, OrcidToken.access_token.alias("access_token")) .where(User.orcid.is_null(False)) .join( OrcidToken, on=( (OrcidToken.user_id == User.id) & OrcidToken.scopes.contains("/activities/update") ), ) .objects() ): Log.create(task=task_id, message=f"Processing user {u} / {u.orcid} profile.") api.sync_profile(user=u, access_token=u.access_token, task=task) count += 1 time.sleep(delay) Log.create(task=task_id, message=f"In total, {count} user 
profiles were synchronized.") class SafeRepresenterWithISODate(SafeRepresenter): """Customized representer for datetaime rendering in ISO format.""" def represent_datetime(self, data): """Customize datetime rendering in ISO format.""" value = data.isoformat(timespec="seconds") return self.represent_scalar("tag:yaml.org,2002:timestamp", value) def dump_yaml(data): """Dump the objects into YAML representation.""" yaml.add_representer(datetime, SafeRepresenterWithISODate.represent_datetime, Dumper=Dumper) yaml.add_representer(defaultdict, SafeRepresenter.represent_dict) return yaml.dump(data, allow_unicode=True) def enqueue_user_records(user): """Enqueue all active and not yet processed record related to the user.""" for task in list( Task.select().where(Task.completed_at.is_null(), Task.task_type != TaskType.SYNC) ): func = globals().get(f"process_{task.task_type.name.lower()}_records") records = task.records.where( task.record_model.is_active, task.record_model.processed_at.is_null() ) if task.task_type == TaskType.FUNDING: records = records.join(FundingInvitee).where( (FundingInvitee.email.is_null() | (FundingInvitee.email == user.email)), (FundingInvitee.orcid.is_null() | (FundingInvitee.orcid == user.orcid)), ) elif task.task_type == TaskType.PEER_REVIEW: records = records.join(PeerReviewInvitee).where( (PeerReviewInvitee.email.is_null() | (PeerReviewInvitee.email == user.email)), (PeerReviewInvitee.orcid.is_null() | (PeerReviewInvitee.orcid == user.orcid)), ) elif task.task_type == TaskType.WORK: records = records.join(WorkInvitee).where( (WorkInvitee.email.is_null() | (WorkInvitee.email == user.email)), (WorkInvitee.orcid.is_null() | (WorkInvitee.orcid == user.orcid)), ) elif task.task_type == TaskType.RESOURCE and task.is_raw: invitee_model = task.record_model.invitees.rel_model records = ( records.join(RecordInvitee) .join(Invitee) .where( (invitee_model.email.is_null() | (invitee_model.email == user.email)), (invitee_model.orcid.is_null() | 
(invitee_model.orcid == user.orcid)), ) ) else: records = records.where( (task.record_model.email.is_null() | (task.record_model.email == user.email)), (task.record_model.orcid.is_null() | (task.record_model.orcid == user.orcid)), ) record_ids = [r.id for r in records] if record_ids: if task.task_type == TaskType.AFFILIATION: func.queue(record_id=record_ids) else: for record_id in record_ids: func.queue(record_id=record_id) def enqueue_task_records(task): """Enqueue all active and not yet processed record.""" records = task.records.where( task.record_model.is_active, task.record_model.processed_at.is_null() ) if task.is_raw: return process_message_records.queue(record_id=[r.id for r in records]) func = globals().get(f"process_{task.task_type.name.lower()}_records") if task.task_type in [TaskType.AFFILIATION, TaskType.PROPERTY]: records = records.order_by(task.record_model.email, task.record_model.orcid) for _, chunk in groupby(records, lambda r: (r.email, r.orcid)): func.queue(record_id=[r.id for r in chunk]) else: for r in records: func.queue(record_id=r.id) def activate_all_records(task): """Activate all submitted task records and enqueue it for processing.""" with db.atomic(): try: status = "The record was activated at " + datetime.now().isoformat(timespec="seconds") count = ( task.record_model.update(is_active=True, status=status) .where( task.record_model.task == task, ( task.record_model.is_active.is_null() | (task.record_model.is_active == False) # noqa: E712 ), ) .execute() ) # noqa: E712 task.status = "ACTIVE" task.save() enqueue_task_records(task) except: db.rollback() app.logger.exception("Failed to activate the selected records") raise return count def reset_all_records(task): """Batch reset of batch records.""" count = 0 with db.atomic(): try: status = "The record was reset at " + datetime.now().isoformat(timespec="seconds") tt = task.task_type if tt in [TaskType.AFFILIATION, TaskType.PROPERTY, TaskType.OTHER_ID]: count = ( 
task.record_model.update(processed_at=None, status=status) .where( task.record_model.task_id == task.id, task.record_model.is_active == True, # noqa: E712 ) .execute() ) # noqa: E712 else: for record in task.records.where( task.record_model.is_active == True # noqa: E712 ): # noqa: E712 record.processed_at = None record.status = status if hasattr(record, "invitees"): invitee_class = record.invitees.model invitee_class.update(processed_at=None, status=status).where( (invitee_class.id << [i.id for i in record.invitees]) if task.is_raw else (invitee_class.record == record.id) ).execute() record.save() count = count + 1 UserInvitation.delete().where(UserInvitation.task == task).execute() enqueue_task_records(task) except: db.rollback() app.logger.exception("Failed to reset the selected records") raise else: task.expires_at = None task.expiry_email_sent_at = None task.completed_at = None task.status = "RESET" task.save() return count def plural(word): """Convert a reguralr noun to its regular plural form.""" if word.endswith("fe"): # wolf -> wolves return word[:-2] + "ves" elif word.endswith("f"): # knife -> knives return word[:-1] + "ves" elif word.endswith("o"): # potato -> potatoes return word + "es" elif word.endswith("us"): # cactus -> cacti return word[:-2] + "i" elif word.endswith("ion"): # criterion -> criteria return word + "s" elif word.endswith("on"): # criterion -> criteria return word[:-2] + "a" elif word.endswith("y"): # community -> communities return word[:-1] + "ies" elif word[-1] in "sx" or word[-2:] in ["sh", "ch"]: return word + "es" elif word.endswith("an"): return word[:-2] + "en" else: return word + "s"
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 40009, 20081, 526, 15931, 198, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 4731, 198, 11748, 640, 198, 6738, 17268, 1330, ...
1.889842
66,550
# importa a biblioteca factorial existente em math from math import factorial # cabeçalho do programa print('-' * 30) print(f'{"Calcula Fatorial":^30}') print('-' * 30) # solicita o número ao usuário num = int(input('Digite um número: ')) # calcula o fatorial do número inserido fatorial = factorial(num) # imprime o fatorial print(f'O fatorial de {num} é {fatorial}.')
[ 2, 1330, 64, 257, 275, 29142, 313, 31047, 1109, 5132, 2152, 21872, 795, 10688, 198, 6738, 10688, 1330, 1109, 5132, 198, 198, 2, 269, 11231, 16175, 282, 8873, 466, 1430, 64, 198, 4798, 10786, 19355, 1635, 1542, 8, 198, 4798, 7, 69, 6...
2.527027
148
#!/usr/bin/env python3 import rospy from sensor_msgs.msg import CompressedImage import processor from constants import PATH_USBCAM IS_DEBUG_MODE = True CURRENT_STATE = 'traffic_light' rospy.Subscriber(PATH_USBCAM, CompressedImage, processor.process_front_image, queue_size=1)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 686, 2777, 88, 198, 6738, 12694, 62, 907, 14542, 13, 19662, 1330, 3082, 2790, 5159, 198, 11748, 12649, 198, 6738, 38491, 1330, 46490, 62, 2937, 2749, 2390, 198, 198, 17...
2.936842
95
""" Copyright 2017 Globo.com Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import hashlib import logging from time import time from requests import Session from globomap_driver_sample.util import timed_logging from globomap_driver_sample.settings import SSL_VERIFY logger = logging.getLogger(__name__)
[ 37811, 198, 220, 220, 15069, 2177, 2671, 20391, 13, 785, 628, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 1...
3.632743
226
import pywps
[ 11748, 12972, 86, 862, 198 ]
2.6
5
from django.db import models from jsonfield import JSONField
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 33918, 3245, 1330, 19449, 15878, 628 ]
4.133333
15
""" CREATE DATASETS """ # pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915 import torch.utils.data as data import torch from random import shuffle from torchvision.datasets import DatasetFolder from pathlib import Path from PIL import Image import numpy as np import os import os.path import random import imageio import numpy as np # pylint: disable=E1101 IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF' ] class ImageFolder(data.Dataset): """A generic data loader where the images are arranged in this way: :: root/dog/xxx.png root/dog/xxy.png root/dog/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/asd932_.png Args: root (string): Root directory path. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. loader (callable, optional): A function to load an image given its path. Attributes: classes (list): List of the class names. class_to_idx (dict): Dict with items (class_name, class_index). imgs (list): List of (image path, class_index) tuples """ def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is class_index of the target class. """ path, target = self.imgs[index] img = self.loader(path) if self.transform is not None: img = self.transform(img) latentz = self.noise[index] # TODO: Return these variables in a dict. # return img, latentz, index, target return {'image': img, 'latentz': latentz, 'index': index, 'frame_gt': target} # TODO: refactor cifar-mnist anomaly dataset functions into one generic function. ## def get_cifar_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0): """[summary] Arguments: train_ds {Dataset - CIFAR10} -- Training dataset valid_ds {Dataset - CIFAR10} -- Validation dataset. 
Keyword Arguments: abn_cls_idx {int} -- Anomalous class index (default: {0}) Returns: [np.array] -- New training-test images and labels. """ # Get images and labels. trn_img, trn_lbl = train_ds.data, np.array(train_ds.targets) tst_img, tst_lbl = valid_ds.data, np.array(valid_ds.targets) # -- # Find idx, img, lbl for abnormal and normal on org dataset. nrm_trn_idx = np.where(trn_lbl != abn_cls_idx)[0] abn_trn_idx = np.where(trn_lbl == abn_cls_idx)[0] nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels. nrm_tst_idx = np.where(tst_lbl != abn_cls_idx)[0] abn_tst_idx = np.where(tst_lbl == abn_cls_idx)[0] nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images. nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels. # -- # Assign labels to normal (0) and abnormals (1) nrm_trn_lbl[:] = 0 nrm_tst_lbl[:] = 0 abn_trn_lbl[:] = 1 abn_tst_lbl[:] = 1 # Create new anomaly dataset based on the following data structure: # - anomaly dataset # . -> train # . -> normal # . -> test # . -> normal # . -> abnormal train_ds.data = np.copy(nrm_trn_img) valid_ds.data = np.concatenate((nrm_tst_img, abn_trn_img, abn_tst_img), axis=0) train_ds.targets = np.copy(nrm_trn_lbl) valid_ds.targets = np.concatenate((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), axis=0) return train_ds, valid_ds ## def get_mnist_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0): """[summary] Arguments: train_ds {Dataset - MNIST} -- Training dataset valid_ds {Dataset - MNIST} -- Validation dataset. Keyword Arguments: abn_cls_idx {int} -- Anomalous class index (default: {0}) Returns: [np.array] -- New training-test images and labels. """ # Get images and labels. 
trn_img, trn_lbl = train_ds.data, train_ds.targets tst_img, tst_lbl = valid_ds.data, valid_ds.targets # -- # Find normal abnormal indexes. # TODO: PyTorch v0.4 has torch.where function nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0]) abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0]) nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0]) abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0]) # -- # Find normal and abnormal images nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images. nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images. # -- # Find normal and abnormal labels. nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels. nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels. # -- # Assign labels to normal (0) and abnormals (1) nrm_trn_lbl[:] = 0 nrm_tst_lbl[:] = 0 abn_trn_lbl[:] = 1 abn_tst_lbl[:] = 1 # Create new anomaly dataset based on the following data structure: train_ds.data = nrm_trn_img.clone() valid_ds.data = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0) train_ds.targets = nrm_trn_lbl.clone() valid_ds.targets = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0) return train_ds, valid_ds ## def make_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0): """[summary] Arguments: train_ds {Dataset - MNIST} -- Training dataset valid_ds {Dataset - MNIST} -- Validation dataset. Keyword Arguments: abn_cls_idx {int} -- Anomalous class index (default: {0}) Returns: [np.array] -- New training-test images and labels. """ # Check the input type. 
if isinstance(train_ds.data, np.ndarray): train_ds.data = torch.from_numpy(train_ds.data) valid_ds.data = torch.from_numpy(valid_ds.data) train_ds.targets = torch.Tensor(train_ds.targets) valid_ds.targets = torch.Tensor(valid_ds.targets) # Get images and labels. trn_img, trn_lbl = train_ds.data, train_ds.targets tst_img, tst_lbl = valid_ds.data, valid_ds.targets # -- # Find normal abnormal indexes. # TODO: PyTorch v0.4 has torch.where function nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0]) abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0]) nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0]) abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0]) # -- # Find normal and abnormal images nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images. nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images. # -- # Find normal and abnormal labels. nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels. nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels. # -- # Assign labels to normal (0) and abnormals (1) nrm_trn_lbl[:] = 0 nrm_tst_lbl[:] = 0 abn_trn_lbl[:] = 1 abn_tst_lbl[:] = 1 # Create new anomaly dataset based on the following data structure: train_ds.data = nrm_trn_img.clone() valid_ds.data = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0) train_ds.targets = nrm_trn_lbl.clone() valid_ds.targets = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0) return train_ds, valid_ds
[ 37811, 198, 43387, 6158, 360, 1404, 1921, 32716, 198, 37811, 198, 198, 2, 279, 2645, 600, 25, 15560, 28, 34, 3070, 486, 11, 36, 1157, 486, 11, 54, 3312, 1828, 11, 34, 486, 3070, 11, 49, 2931, 2999, 11, 49, 2931, 1314, 198, 198, ...
2.126084
4,037
import torch import torch.nn as nn from model import DeepPSC import dataset from torch.utils.data import DataLoader import pathlib import os import time import logging import argparse from test import test import numpy as np parser = argparse.ArgumentParser() parser.add_argument('--loc_dim', type=int, default=512) parser.add_argument('--glo_dim', type=int, default=1024) parser.add_argument('--tgt_dim', type=int, default=4) parser.add_argument('--set', type=str, default=None) parser.add_argument('--note', type=str, default=None) parser.add_argument('--epoch', type=int, default=30) args = parser.parse_args() train_name = '-'.join([arg for arg in [args.set, args.note] if arg]) save_dir = '../output/%s' % train_name model_dir = '../output/%s/model' % train_name dirs = [save_dir, model_dir] for folder in dirs: pathlib.Path(folder).mkdir(parents=True, exist_ok=True) logging.basicConfig(level=logging.INFO, filename='../logs/log_%s.txt' % train_name, filemode='w', format='%(message)s') logger = logging.getLogger(__name__) dataset = dataset.Image2Tor inp_dim = 5 train_dataset = dataset( dataset='nr40', file_list='train_%s' % args.set if args.set else 'train') val_dataset = dataset( dataset='nr40', file_list='val_%s' % args.set if args.set else 'val') test_dataset = dataset('test', with_target=False) train_loader = DataLoader(dataset=train_dataset, shuffle=True, num_workers=32, pin_memory=True) val_loader = DataLoader(dataset=val_dataset, num_workers=32, pin_memory=True) test_loader = DataLoader(dataset=test_dataset, num_workers=32, pin_memory=True) model = DeepPSC(dims=[inp_dim, args.loc_dim, args.glo_dim, args.tgt_dim]) logger.info('-----Model-----') logger.info(model) logger.info('-----Model-----\n\n') loss_function = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=10e-6) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=len(train_dataset)) warmup_epochs = 3 if __name__ == '__main__': total_iters = 0 for epoch in 
range(args.epoch): epoch_start_time = time.time() epoch_iter = 0 losses = 0 logger.info('epoch %s training' % epoch) for inp, tgt, filename, lengths in train_loader: optimizer.zero_grad() inp = inp[0].cuda(non_blocking=True) tgt = tgt[0].cuda(non_blocking=True) out = model(inp, lengths).squeeze(1).transpose(0, 1) loss = loss_function(out, tgt) losses += loss.item() total_iters += 1 epoch_iter += 1 loss.backward() optimizer.step() if total_iters % 20 == 0: logger.info('iters %s train_loss=%s' % (total_iters, losses / 20)) losses = 0 if total_iters % 1000 == 0: model.save_model(os.path.join(model_dir, 'last_model')) if epoch >= warmup_epochs: scheduler.step() logger.info('epoch %s validating...' % epoch) model.save_model(os.path.join(model_dir, '%s_model' % epoch)) model.eval() with torch.no_grad(): losses = 0 for inp, tgt, filename, lengths in val_loader: inp = inp[0].cuda(non_blocking=True) tgt = tgt[0].cuda(non_blocking=True) out = model(inp, lengths).squeeze(1).transpose(0, 1) loss = loss_function(out, tgt) losses += loss.item() logger.info('epoch %d, mean_val_loss= %f' % (epoch, losses / len(val_dataset))) model.train() logger.info('End of epoch %d \t Time Taken: %d sec' % (epoch, time.time() - epoch_start_time)) tester = test(model, train_name=train_name, test_loader=test_loader) tester.test_model('29') rmsd = np.round(np.mean(np.array(tester.rmsds), axis=0), 3) gdt = np.round( np.mean(np.mean(np.array(tester.gdts), axis=-1), axis=0)*100, 3) rama = np.round(np.mean(np.array(tester.ramas), axis=0)*100, 3) logger.info('\n\n--Results--') logger.info('-RMSD-') logger.info(rmsd) logger.info('-GDT-') logger.info(gdt) logger.info('-RAMA-') logger.info(rama)
[ 11748, 28034, 201, 198, 11748, 28034, 13, 20471, 355, 299, 77, 201, 198, 6738, 2746, 1330, 10766, 3705, 34, 201, 198, 11748, 27039, 201, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 201, 198, 11748, 3108, 8019, 201, 198, ...
2.07037
2,160
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import unittest from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer) from msrestazure.azure_exceptions import CloudError
[ 2, 16529, 1783, 10541, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 198, 2, 16529, 1783, 10541, 198,...
5.229167
96
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # Georg Brandl <georg.brandl@frm2.tum.de> # # ***************************************************************************** """Lubrication device for lifting counter.""" from nicos.core import status from nicos.devices.tango import DigitalOutput class LubeSwitch(DigitalOutput): """Special SPS digital output whose readback is a status value."""
[ 2, 220, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 41906, 17174, 4557, 35625, 198, 2, 45593, 2640, 11, 262, 7311, 276, 42410, 6779, 4482, 286, 262, 10373, 57, 198, 2, 15069, 357, 66, 8, 3717, 12, 1238, 2481, ...
3.930931
333
# MIT License # # Copyright (c) 2019 Morning Project Samurai (MPS) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __author__ = 'Junya Kaneko <junya@mpsamurai.org>' import os from neochi.core.utils import environ from . import data_loaders from . import models
[ 2, 17168, 13789, 198, 2, 198, 2, 15069, 357, 66, 8, 13130, 14410, 4935, 23882, 357, 44, 3705, 8, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 29...
3.684971
346
# templatetags/include_as_js_str.py from django import template from django.template.loader_tags import do_include from django.utils.encoding import force_text from django.utils.safestring import mark_safe register = template.Library() @register.tag('include_as_js_str')
[ 2, 2169, 489, 265, 316, 3775, 14, 17256, 62, 292, 62, 8457, 62, 2536, 13, 9078, 198, 6738, 42625, 14208, 1330, 11055, 198, 6738, 42625, 14208, 13, 28243, 13, 29356, 62, 31499, 1330, 466, 62, 17256, 198, 6738, 42625, 14208, 13, 26791, ...
3.136364
88
X XXXX XXXXXXXXX XXXX XXXXXXXXXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXX XXX X X XXX X XX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X X XXX X XX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XX XXX X XXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX 
X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX X XXXX XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
[ 55, 27713, 55, 1395, 24376, 24376, 27713, 55, 1395, 24376, 24376, 24376, 8051, 27713, 55, 1395, 24376, 24376, 24376, 24376, 24376, 8051, 198, 198, 43145, 1395, 1395, 198, 43145, 1395, 21044, 198, 8051, 1395, 1395, 24376, 24376, 24376, 24376...
3.698181
26,009
import numpy as np import cv2 as cv from matplotlib import pyplot as plt imgL = cv.imread('images/example_l.png', 0) imgR = cv.imread('images/example_r.png', 0) window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely stereo = cv.StereoSGBM_create( minDisparity=-1, numDisparities=5*16, # max_disp has to be dividable by 16 f. E. HH 192, 256 blockSize=window_size, P1=8 * 3 * window_size, # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely P2=32 * 3 * window_size, disp12MaxDiff=12, uniquenessRatio=10, speckleWindowSize=50, speckleRange=32, preFilterCap=63, mode=cv.STEREO_SGBM_MODE_SGBM_3WAY ) disparity = stereo.compute(imgL,imgR) plt.imshow(disparity,'gray') plt.show()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 355, 269, 85, 198, 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 198, 198, 9600, 43, 796, 269, 85, 13, 320, 961, 10786, 17566, 14, 20688, 62, 75, 13, 11134, 3256, ...
2.389503
362
import re from model.contact import Contact """def test_data_on_home_page(app): contact_from_home_page = app.contact.get_contact_list()[0] contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0) assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page( contact_from_edit_page) assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page( contact_from_edit_page) assert contact_from_home_page.firstname == contact_from_edit_page.firstname assert contact_from_home_page.lastname == contact_from_edit_page.lastname assert contact_from_home_page.address == contact_from_edit_page.address"""
[ 11748, 302, 198, 6738, 2746, 13, 32057, 1330, 14039, 628, 198, 37811, 4299, 1332, 62, 7890, 62, 261, 62, 11195, 62, 7700, 7, 1324, 2599, 198, 220, 220, 220, 2800, 62, 6738, 62, 11195, 62, 7700, 796, 598, 13, 32057, 13, 1136, 62, 3...
2.695489
266
import os import re import sys import random import unittest test_str_re = re.compile(r'([^ ]+) \((.*)\)') if __name__ == "__main__": tests = unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='*.py') if len(sys.argv) == 2 and sys.argv[1] == '--discover': map_tests(print_test, tests) else: random.seed() if len(sys.argv) == 0: unittest.main() else: lookup = {} map_tests( collect_tests(lookup), tests) suite = unittest.TestSuite() success = True for t in sys.argv[1:]: tc = lookup[tuple(t.split('.'))] suite.addTest(tc) result = unittest.TextTestRunner(verbosity=2).run(suite) if len(result.errors) > 0 or len(result.failures) > 0: success = False if success: sys.exit(0) else: sys.exit(1)
[ 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, 11748, 4738, 198, 11748, 555, 715, 395, 198, 198, 9288, 62, 2536, 62, 260, 796, 302, 13, 5589, 576, 7, 81, 6, 26933, 61, 2361, 28988, 16792, 7, 15885, 19415, 8, 11537, 628, 198,...
2.002217
451
import gfapy try: from functools import partialmethod except ImportError: #for compatibility with old python versions for shall_version in ["gfa1", "gfa2"]: setattr(VersionConversion, "to_"+shall_version+"_s", partialmethod(VersionConversion.to_version_s, version = shall_version)) setattr(VersionConversion, "_to_"+shall_version+"_a", partialmethod(VersionConversion._to_version_a, version = shall_version)) setattr(VersionConversion, "to_"+shall_version, partialmethod(VersionConversion.to_version, version = shall_version))
[ 11748, 308, 69, 12826, 198, 28311, 25, 198, 220, 422, 1257, 310, 10141, 1330, 13027, 24396, 198, 16341, 17267, 12331, 25, 198, 220, 1303, 1640, 17764, 351, 1468, 21015, 6300, 198, 198, 1640, 2236, 62, 9641, 287, 14631, 70, 13331, 16, ...
2.612069
232
import argparse from cliva_fl.utils import Logger, Plotter from pathlib import Path from datetime import datetime parser = argparse.ArgumentParser(description='Argument parser for log processing and plot creation') parser.add_argument('-n', '--num', type=int, required=False, help='Index of previously executed experiment to select for processing. Last experiment equals --num 1.') parser.add_argument('-d', '--dir', type=str, required=False, help='Name of experiment directory to select for processing.') parser.add_argument('-l', '--log_dir', type=str, required=False, default='logs', help='Log directory to select for processing.') parser.add_argument('-g', '--generate', action='store_true', required=False, help='Flag to indicate weather metrics should be (re)generated from logs.') parser.add_argument('-c', '--clean', action='store_true', required=False, help='Flag to indicate a full clean-up of all generated files.') parser.add_argument('-m', '--metrics', nargs='+', required=True, help='List of metrics to be computed. Available metrics are {}'.format(Plotter.METRICS)) parser.add_argument('--xmin', type=float, default=None, required=False, help='Minimum value of x-axes in plot.') parser.add_argument('--xmax', type=float, default=None, required=False, help='Maximum value of x-axes in plot.') parser.add_argument('--ymin', type=float, default=None, required=False, help='Minimum value of y-axes in plot.') parser.add_argument('--ymax', type=float, default=None, required=False, help='Maximum value of y-axes in plot.') args = parser.parse_args() assert args.num or args.dir, 'You are required to specify num or dir parameter.' 
assert not (args.num and args.dir), 'You can not use num and dir parameter simultaneously to select a log directory' if args.num: exp_dirs = sorted(list(Path(args.log_dir).glob('experiment_*'))) assert args.num <= len(exp_dirs), 'Num can not be larger than the number of existing experiment directories' p = exp_dirs[-args.num].name elif args.dir: p = Path(args.dir).name _, YEAR, MONTH, DAY, _, TIME = p.split('_') HOUR, MINUTE = TIME.split(':') print(f'Experiment: {p}\nDate: {DAY}.{MONTH}.{YEAR}\tTime: {HOUR}:{MINUTE}') timestamp = datetime(year=int(YEAR), month=int(MONTH), day=int(DAY), hour=int(HOUR), minute=int(MINUTE)) logger = Logger(base_path=args.log_dir, timestamp=timestamp) plotter = Plotter(logger) if args.clean: print('Cleaning up all generated files.') plotter.clear_metrics() plotter.clear_plots() if args.generate: print('Generating metrics from logs.') plotter.clear_metrics() plotter.generate() for metric in args.metrics: assert metric in Plotter.METRICS, f'Metric {metric} is not a valid metric.' plotter.plot_metric(metric, args.ymin, args.ymax, args.xmin, args.xmax)
[ 11748, 1822, 29572, 198, 6738, 537, 12151, 62, 2704, 13, 26791, 1330, 5972, 1362, 11, 28114, 353, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7,...
3.057987
914
from typing import List, Optional from flask import escape, Response from rabbitai.models.dashboard import Dashboard from tests.dashboards.base_case import DashboardTestCase
[ 6738, 19720, 1330, 7343, 11, 32233, 198, 198, 6738, 42903, 1330, 6654, 11, 18261, 198, 198, 6738, 22746, 1872, 13, 27530, 13, 42460, 3526, 1330, 16189, 3526, 198, 6738, 5254, 13, 42460, 12821, 13, 8692, 62, 7442, 1330, 16189, 3526, 1440...
4.116279
43
"""A tmslack-specific wrapper for the slack client.""" from pathlib import Path from typing import Sequence, NoReturn from slackclient import SlackClient from tmslack.cache import Cache from tmslack.config import Config class ClientException(Exception): """Class for exceptions in the slack client.""" class Client: """"A slack client that can be used to send messages. Attributes: info: Generic information about the client """ @property def info(self): """Returns general information about the client. The returned map will have the url of the team, the team name, the bot name, the bot user identifier, and the team identifier. """ return self._info def lookup_user_id(self, username) -> str: """Looks up a user identifier in the team by the user's name or real name.""" return self._user_cache.get_through(username, do_lookup_id) def lookup_conversation_id(self, user_ids: Sequence[str]) -> str: """Given a sequence of user names, get the identifier of the conversation between all those users.""" result = self._slack.api_call('conversations.open', users=list(user_ids)) if not result['ok']: raise ClientException(f'Failed to retrieve get conversation: {result["error"]}') return result['channel']['id'] def send_message(self, channel_id: str, message: str) -> NoReturn: """Sends the given message to the given channel.""" result = self._slack.api_call('chat.postMessage', channel=channel_id, as_user=True, text=message) if not result['ok']: raise ClientException(f'Failed to post message: {result["error"]}')
[ 37811, 32, 256, 907, 75, 441, 12, 11423, 29908, 329, 262, 30740, 5456, 526, 15931, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 45835, 11, 1400, 13615, 198, 198, 6738, 30740, 16366, 1330, 36256, 11792, 198, 198, 6738, 256...
2.604552
703
# Copyright (c) 2020, NVIDIA CORPORATION. from cudf.api import types
[ 2, 15069, 357, 66, 8, 12131, 11, 15127, 23929, 44680, 6234, 13, 198, 198, 6738, 269, 463, 69, 13, 15042, 1330, 3858, 198 ]
3.043478
23
from __future__ import print_function import sys import os from collections import OrderedDict from itertools import chain import numpy from six import iteritems, itervalues, reraise from openmdao.core.component import Component from openmdao.util.dict_util import _jac_to_flat_dict from openmdao.core.mpi_wrap import MPI def _reraise(pathname, exc): """ Rather than adding the sub-Problem pathname to every system and variable in the sub-Problem (and causing more complication w.r.t. promoted names), just put a try block around all of the calls to the sub-Problem and preface any exception messages with "In subproblem 'x' ..." """ new_err = exc[0]("In subproblem '%s': %s" % (pathname, str(exc[1]))) reraise(exc[0], new_err, exc[2]) class SubProblem(Component): """A Component that wraps a sub-Problem. Args ---- problem : Problem The Problem to be wrapped by this component. params : iter of str Names of variables that are to be visible as parameters to this component. Note that these are allowed to be unknowns in the sub-problem. unknowns : iter of str Names of variables that are to be visible as unknowns in this component. """ def check_setup(self, out_stream=sys.stdout): """Write a report to the given stream indicating any potential problems found with the current configuration of this ``System``. Args ---- out_stream : a file-like object, optional Stream where report will be written. """ try: self._problem.check_setup(out_stream) except: _reraise(self.pathname, sys.exc_info()) def cleanup(self): """ Clean up resources prior to exit. """ try: self._problem.cleanup() except: _reraise(self.pathname, sys.exc_info()) def get_req_procs(self): """ Returns ------- tuple A tuple of the form (min_procs, max_procs), indicating the min and max processors usable by this `System`. """ # because this is called before self._problem.setup, we need to go # ahead and set the problem's driver's root explicitly here. 
self._problem.driver.root = self._problem.root try: return self._problem.get_req_procs() except: _reraise(self.pathname, sys.exc_info()) def _get_relname_map(self, parent_proms): """ Args ---- parent_proms : `dict` A dict mapping absolute names to promoted names in the parent system. Returns ------- dict Maps promoted name in parent (owner of unknowns) to the corresponding promoted name in the child. """ # use an ordered dict here so we can use this smaller dict when looping # during get_view. # (the order of this one matches the order in the parent) umap = OrderedDict() for key in self._prob_unknowns: pkey = '.'.join((self.name, key)) if pkey in parent_proms: umap[parent_proms[pkey]] = key return umap def _setup_communicators(self, comm, parent_dir): """ Assign communicator to this `System` and run full setup on its subproblem. Args ---- comm : an MPI communicator (real or fake) The communicator being offered by the parent system. parent_dir : str The absolute directory of the parent, or '' if unspecified. Used to determine the absolute directory of all subsystems. """ self._problem.comm = comm # do full setup on our subproblem now that we have what we need # check_setup will be called later if specified from the top level # Problem so always set check=False here. try: self._problem.setup(check=False) except: _reraise(self.pathname, sys.exc_info()) super(SubProblem, self)._setup_communicators(comm, parent_dir) self._problem.pathname = self.pathname self._problem._parent_dir = self._sysdata.absdir for p in self._prob_params: if not (p in self._problem._dangling or p in self._problem.root.unknowns): raise RuntimeError("Param '%s' cannot be set. Either it will " "be overwritten by a connected output or it " "doesn't exist." % p) def _setup_variables(self): """ Returns copies of our params and unknowns dictionaries, re-keyed to use absolute variable names. 
""" to_prom_name = self._sysdata.to_prom_name = {} to_abs_uname = self._sysdata.to_abs_uname = {} to_abs_pnames = self._sysdata.to_abs_pnames = OrderedDict() to_prom_uname = self._sysdata.to_prom_uname = OrderedDict() to_prom_pname = self._sysdata.to_prom_pname = OrderedDict() # Our subproblem has been completely set up. We now just pull # variable metadata from our subproblem subparams = self._problem.root.params subunknowns = self._problem.root.unknowns # keep track of params that are actually unknowns in the subproblem self._unknowns_as_params = [] self._params_dict = self._init_params_dict = OrderedDict() for name in self._prob_params: pathname = self._get_var_pathname(name) if name in subparams: meta = subparams._dat[name].meta elif name in self._problem._dangling: meta = self._rec_get_param_meta(name) else: meta = subunknowns._dat[name].meta if not meta.get('_canset_'): raise TypeError("SubProblem param '%s' is mapped to the output of an internal component." " This is illegal because a value set into the param will be overwritten" " by the internal component." % name) self._unknowns_as_params.append(name) meta = meta.copy() # don't mess with subproblem's metadata! 
self._params_dict[pathname] = meta meta['pathname'] = pathname del meta['top_promoted_name'] to_prom_pname[pathname] = name to_abs_pnames[name] = (pathname,) self._unknowns_dict = self._init_unknowns_dict = OrderedDict() # if we have params that are really unknowns in the subproblem, we # also add them as unknowns so we can take derivatives for name in self._prob_unknowns: pathname = self._get_var_pathname(name) meta = subunknowns._dat[name].meta.copy() self._unknowns_dict[pathname] = meta meta['pathname'] = pathname del meta['top_promoted_name'] to_prom_uname[pathname] = name to_abs_uname[name] = pathname to_prom_name.update(to_prom_uname) to_prom_name.update(to_prom_pname) self._post_setup_vars = True self._sysdata._params_dict = self._params_dict self._sysdata._unknowns_dict = self._unknowns_dict return self._params_dict, self._unknowns_dict def solve_nonlinear(self, params, unknowns, resids): """Sets params into the sub-problem, runs the sub-problem, and updates our unknowns with values from the sub-problem. Args ---- params : `VecWrapper` `VecWrapper` containing parameters. (p) unknowns : `VecWrapper` `VecWrapper` containing outputs and states. (u) resids : `VecWrapper` `VecWrapper` containing residuals. (r) """ if not self.is_active(): return try: # set params into the subproblem prob = self._problem for name in self._prob_params: prob[name] = params[name] self._problem.run() # update our unknowns from subproblem for name in self._sysdata.to_abs_uname: unknowns[name] = prob.root.unknowns[name] resids[name] = prob.root.resids[name] # if params are really unknowns, they may have changed, so update for name in self._unknowns_as_params: params[name] = prob.root.unknowns[name] except: _reraise(self.pathname, sys.exc_info()) def linearize(self, params, unknowns, resids): """ Returns Jacobian. J is a dictionary whose keys are tuples of the form ('unknown', 'param') and whose values are ndarrays. Args ---- params : `VecWrapper` `VecWrapper` containing parameters. 
(p) unknowns : `VecWrapper` `VecWrapper` containing outputs and states. (u) resids : `VecWrapper` `VecWrapper` containing residuals. (r) Returns ------- dict Dictionary whose keys are tuples of the form ('unknown', 'param') and whose values are ndarrays. """ try: prob = self._problem # set params into the subproblem for name in self._prob_params: prob[name] = params[name] # have to convert jacobian returned from calc_gradient from a # nested dict to a flat dict with tuple keys. return _jac_to_flat_dict(prob.calc_gradient(self.params.keys(), self.unknowns.keys(), return_format='dict')) except: _reraise(self.pathname, sys.exc_info())
[ 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 25064, 198, 11748, 28686, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 6738, 340, 861, 10141, 1330, 6333, 198, 198, 11748, 299, 32152, 198, 198, 6738, 2237, 1330...
2.244228
4,418
"""Joint abstraction. It can be any DynamixelMotor or an OrbitaMotor. """ from abc import ABC from logging import Logger from typing import Callable, Dict, Optional, Tuple from .register import Register class Joint(ABC): """Joint abstraction. Should define the following registers: 'torque_enable' 'goal_position' 'moving_speed' 'torque_limit' 'present_position' 'temperature' """ def __init__(self, register_config: Dict[str, Tuple[ Callable[[bytes], float], Callable[[float], bytes] ]] ) -> None: """Set up internal registers.""" self.registers = { reg: Register(cvt_as_usi, cvt_as_raw) for reg, (cvt_as_usi, cvt_as_raw) in register_config.items() } self.logger: Optional[Logger] = None def is_value_set(self, register: str) -> bool: """Check if the register has been set since last reset.""" return self.registers[register].is_set() def clear_value(self, register: str): """Clear the specified value, meaning its value should be make obsolete.""" self.registers[register].reset() def get_value(self, register: str) -> bytes: """Get the up-to-date specified value.""" return self.registers[register].get() def get_value_as_usi(self, register: str) -> float: """Get the up-to-date specified value.""" return self.registers[register].get_as_usi() def update_value(self, register: str, val: bytes): """Update the specified register with the raw value received from a gate.""" self.registers[register].update(val) def update_value_using_usi(self, register: str, val: float): """Update the specified register with its USI value received from a gate.""" self.registers[register].update_using_usi(val)
[ 37811, 41, 1563, 34651, 13, 198, 198, 1026, 460, 307, 597, 14970, 7168, 34919, 393, 281, 38161, 64, 34919, 13, 198, 37811, 198, 198, 6738, 450, 66, 1330, 9738, 198, 6738, 18931, 1330, 5972, 1362, 198, 6738, 19720, 1330, 4889, 540, 11,...
2.251917
913
import can import asyncio import time can0 = can.ThreadSafeBus(channel = 'can0', bustype = 'socketcan_ctypes') loop = asyncio.get_event_loop() ready = False przelaczone = False counter = 0; can.Notifier(can0, [ on_message ], loop=loop) loop.call_soon(send_status) loop.run_forever()
[ 11748, 460, 198, 11748, 30351, 952, 198, 11748, 640, 628, 198, 5171, 15, 796, 460, 13, 16818, 31511, 16286, 7, 17620, 796, 705, 5171, 15, 3256, 13076, 2981, 796, 705, 44971, 5171, 62, 310, 9497, 11537, 198, 198, 26268, 796, 30351, 952...
2.633929
112
import sys from PyQt6 import QtCore, QtGui, QtWidgets if __name__ == "__main__": main()
[ 11748, 25064, 198, 198, 6738, 9485, 48, 83, 21, 1330, 33734, 14055, 11, 33734, 8205, 72, 11, 33734, 54, 312, 11407, 628, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.365854
41
from .Point import Point
[ 6738, 764, 12727, 1330, 6252, 628 ]
4.333333
6
# -*- coding: utf-8 -*- # # Copyright 2012-2020 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Resources management functions """ from __future__ import absolute_import import sys import time try: import simplejson as json except ImportError: import json import bigml.api from bigml.util import bigml_locale from bigml.multivote import THRESHOLD_CODE from bigmler.utils import (dated, get_url, log_message, plural, check_resource, check_resource_error, log_created_resources, decode2, transform_fields_keys, is_shared, FILE_ENCODING, PYTHON3) from bigmler.labels import label_model_args, get_all_labels from bigmler.reports import report EVALUATE_SAMPLE_RATE = 0.8 SEED = "BigML, Machine Learning made easy" LOCALE_DEFAULT = "en_US" FIELDS_QS = 'only_model=true' ALL_FIELDS_QS = "limit=-1" ADD_PREFIX = '+' REMOVE_PREFIX = '-' ADD_REMOVE_PREFIX = [ADD_PREFIX, REMOVE_PREFIX] BRIEF_FORMAT = 'brief' NORMAL_FORMAT = 'normal' FULL_FORMAT = 'full' VALID_FIELD_ATTRIBUTES = { "source": ["name", "label", "description", "optype", "term_analysis"], "dataset": ["name", "label", "description", "preferred", "term_analysis"]} BOOSTING_OPTIONS = ["iterations", "early_holdout", "learning_rate", \ "early_out_of_bag", "step_out_of_bag"] DS_NAMES = "ABCDEFGHIJKLMNOPQRSTUVXYZ" def get_basic_seed(order): """ Builds a standard seed from a text adding the order """ return "%s - %s" % (SEED, order) def shared_changed(shared, resource): """Returns True if the shared status of the resource differs from the 
user given value """ return is_shared(resource) != shared def configure_input_fields(fields, user_given_fields, by_name=False): """ Returns the input fields used in the new resource creation as given The user can choose to write all the fields that will be used in the new resource or modify the set of fields retrieved from the resource that will be used to create the new one. """ def modify_input_fields(prefix, field, input_fields): """Adds or removes according to the prefix in the given field this field from the list of input fields. """ if prefix == ADD_PREFIX: if not field in input_fields: input_fields.append(field) elif field in input_fields: input_fields.remove(field) # case of adding and removing fields to the dataset preferred field set if all([name[0] in ADD_REMOVE_PREFIX for name in user_given_fields]): preferred_fields = fields.preferred_fields() input_fields = preferred_fields.keys() if by_name: input_fields = [fields.field_name(field_id) for field_id in input_fields] for name in user_given_fields: prefix = name[0] field_name = name[1:] if by_name: modify_input_fields(prefix, field_name, input_fields) else: try: field_id = fields.field_id(field_name) except ValueError, exc: sys.exit(exc) modify_input_fields(prefix, field_id, input_fields) # case of user given entire list of fields else: if by_name: return user_given_fields else: input_fields = [] for name in user_given_fields: try: input_fields.append(fields.field_id(name)) except ValueError, exc: sys.exit(exc) return input_fields def utf8(text): """Encodes using the global FILE_ENCODING """ return text.encode(FILE_ENCODING) def update_attributes(updatable_attributes, new_attributes, by_column=False, fields=None): """Correctly merging the "fields" attribute substructure in updates. 
updatable_attributes: previous attributes to be updated new_attributes: updates to be added by_column: set to True is keys are the column position of the field fields: Fields object info """ if new_attributes: fields_substructure = updatable_attributes.get("fields", {}) field_attributes = new_attributes.get("fields", {}) if field_attributes and (not by_column or fields): for field_key, value in field_attributes.items(): field_id = (field_key if not by_column else fields.field_id(field_key)) if not field_id in fields_substructure.keys(): fields_substructure.update({field_id: {}}) fields_substructure[field_id].update(value) updatable_attributes.update({"fields": fields_substructure}) else: updatable_attributes.update(new_attributes) def update_json_args(resource_attributes, json_attributes, fields=None): """Updating the resource attributes with the contents of a JSON file """ if fields is not None: # transforms the fields structure changes if columns are used as keys json_attributes = transform_fields_keys(json_attributes, fields) update_attributes(resource_attributes, json_attributes) def relative_input_fields(fields, user_given_fields): """Returns the user given input fields using relative syntax """ input_fields = [] if all([(name[0] in ADD_REMOVE_PREFIX) for name in user_given_fields]): return user_given_fields preferred_fields = fields.preferred_fields() for field_id in preferred_fields.keys(): name = fields.fields[field_id]['name'] if not name in user_given_fields: input_fields.append("%s%s" % (REMOVE_PREFIX, name)) for name in user_given_fields: try: field_id = fields.field_id(name) except ValueError, exc: sys.exit(exc) input_fields.append("%s%s" % (ADD_PREFIX, name)) return input_fields def wait_for_available_tasks(inprogress, max_parallel, api, resource_type, wait_step=2): """According to the max_parallel number of parallel resources to be created, when the number of in progress resources reaches the limit, it checks the ones in inprogress to see if there's 
a FINISHED or FAULTY resource. If found, it is removed from the inprogress list and returns to allow another one to be created. """ check_kwargs = {"retries": 0, "query_string": "full=false", "api": api} while len(inprogress) == max_parallel: for j in range(0, len(inprogress)): try: ready = check_resource(inprogress[j], **check_kwargs) status = bigml.api.get_status(ready) if status['code'] == bigml.api.FINISHED: del inprogress[j] return elif status['code'] == bigml.api.FAULTY: raise ValueError(status['message']) except ValueError, exception: sys.exit("Failed to get a finished %s: %s" % (resource_type, str(exception))) time.sleep(max_parallel * wait_step) def check_fields_struct(update_args, resource_type): """In case the args to update have a `fields` attribute, it checks the structure in this attribute and removes the attributes for each field that will not be accepted by the API. """ if "fields" in update_args: fields_substr = update_args.get("fields") for _, field in fields_substr.items(): attributes = field.keys() for attribute in attributes: if not attribute in VALID_FIELD_ATTRIBUTES.get(resource_type): del field[attribute] def set_basic_args(args, name): """Sets the basic arguments, common to all resources """ return { "name": name, "description": args.description_, "category": args.category, "tags": args.tag} def set_basic_model_args(args, name): """Sets the additional args common to all models """ model_args = set_basic_args(args, name) if args.default_numeric_value is not None: model_args.update({ \ "default_numeric_value": args.default_numeric_value}) return model_args def set_basic_batch_args(args, name): """Sets the additional args common to all batch resources """ batch_args = set_basic_args(args, name) header = (hasattr(args, "prediction_header") and args.prediction_header) \ or (hasattr(args, "projection_header") and args.projection_header) batch_args.update({ \ "header": header, "output_dataset": args.to_dataset }) return batch_args def 
set_source_args(args, name=None, multi_label_data=None, data_set_header=None, fields=None): """Returns a source arguments dict """ if name is None: name = args.name source_args = set_basic_args(args, name) if args.project_id is not None: source_args.update({"project": args.project_id}) # if header is set, use it if data_set_header is not None: source_args.update({"source_parser": {"header": data_set_header}}) # If user has given an OS locale, try to add the locale used in bigml.com if args.user_locale is not None: source_locale = bigml_locale(args.user_locale) if source_locale is None: log_message("WARNING: %s locale equivalence not found." " Using %s instead.\n" % (args.user_locale, LOCALE_DEFAULT), log_file=None, console=True) source_locale = LOCALE_DEFAULT source_args.update({'source_parser': {}}) source_args["source_parser"].update({'locale': source_locale}) # If user has set a training separator, use it. if args.training_separator is not None: training_separator = decode2(args.training_separator, encoding="string_escape") source_args["source_parser"].update({'separator': training_separator}) # If uploading a multi-label file, add the user_metadata info needed to # manage the multi-label fields if (hasattr(args, 'multi_label') and args.multi_label and multi_label_data is not None): source_args.update({ "user_metadata": { "multi_label_data": multi_label_data}}) # to update fields attributes or types you must have a previous fields # structure (at update time) if fields: if args.field_attributes_: update_attributes(source_args, {"fields": args.field_attributes_}, by_column=True, fields=fields) if args.types_: update_attributes(source_args, {"fields": args.types_}, by_column=True, fields=fields) if args.import_fields: fields_struct = fields.new_fields_structure(args.import_fields) check_fields_struct(fields_struct, "source") update_attributes(source_args, fields_struct) if 'source' in args.json_args: update_json_args(source_args, args.json_args.get('source'), 
fields) return source_args def create_source(data_set, source_args, args, api=None, path=None, session_file=None, log=None, source_type=None): """Creates remote source """ if api is None: api = bigml.api.BigML() suffix = "" if source_type is None else "%s " % source_type message = dated("Creating %ssource.\n" % suffix) log_message(message, log_file=session_file, console=args.verbosity) check_fields_struct(source_args, "source") source = api.create_source(data_set, source_args, progress_bar=args.progress_bar) if path is not None: suffix = "_" + source_type if source_type else "" log_created_resources( "source%s" % suffix, path, source['resource'], mode='a', comment=("%s\n" % source['object']['name'])) source_id = check_resource_error(source, "Failed to create source: ") try: source = check_resource(source, api.get_source, query_string=ALL_FIELDS_QS) except ValueError, exception: sys.exit("Failed to get a finished source: %s" % str(exception)) message = dated("Source created: %s\n" % get_url(source)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % source_id, log_file=log) if args.reports: report(args.reports, path, source) return source def data_to_source(args): """Extracts the flags info to create a source object """ data_set = None data_set_header = None if (args.training_set and not args.source and not args.dataset and not args.has_models_): data_set = args.training_set data_set_header = args.train_header elif (hasattr(args, 'evaluate') and args.evaluate and args.test_set and not args.source): data_set = args.test_set data_set_header = args.test_header return data_set, data_set_header def get_source(source, api=None, verbosity=True, session_file=None): """Retrieves the source in its actual state and its field info """ if api is None: api = bigml.api.BigML() if (isinstance(source, basestring) or bigml.api.get_status(source)['code'] != bigml.api.FINISHED): message = dated("Retrieving source. 
%s\n" % get_url(source)) log_message(message, log_file=session_file, console=verbosity) try: source = check_resource(source, api.get_source, query_string=ALL_FIELDS_QS) except ValueError, exception: sys.exit("Failed to get a finished source: %s" % str(exception)) return source def update_source(source, source_args, args, api=None, session_file=None): """Updates source properties """ if api is None: api = bigml.api.BigML() message = dated("Updating source. %s\n" % get_url(source)) log_message(message, log_file=session_file, console=args.verbosity) source = api.update_source(source, source_args) check_resource_error(source, "Failed to update source: ") source = check_resource(source, api.get_source) return source def set_basic_dataset_args(args, name=None): """Return dataset basic arguments dict """ if name is None: name = args.name dataset_args = set_basic_args(args, name) if args.sample_rate != 1 and args.no_model: dataset_args.update({ "seed": SEED if args.seed is None else args.seed, "sample_rate": args.sample_rate }) if hasattr(args, "range") and args.range_: dataset_args.update({ "range": args_range }) return dataset_args def set_dataset_args(args, fields, multi_label_data=None): """Return dataset arguments dict """ dataset_args = set_basic_dataset_args(args) objective_field = (None if not hasattr(args, 'objective_field') else args.objective_field) if multi_label_data is not None and objective_field is None: objective_field = multi_label_data['objective_name'] if objective_field is not None and fields is not None: try: objective_id = fields.field_id(objective_field) except ValueError, exc: sys.exit(exc) dataset_args.update(objective_field={'id': objective_id}) if hasattr(args, 'juxtapose') and args.juxtapose: dataset_args.update({"juxtapose": args.juxtapose}) if hasattr(args, 'sql_query') and args.sql_query: dataset_args.update({"sql_query": args.sql_query}) if hasattr(args, 'sql_output_fields_') and args.sql_output_fields_: 
dataset_args.update({"sql_output_fields": args.sql_output_fields_}) if hasattr(args, 'json_query_') and args.json_query_: dataset_args.update({"json_query": args.json_query_}) if args.json_filter: dataset_args.update(json_filter=args.json_filter) elif args.lisp_filter: dataset_args.update(lisp_filter=args.lisp_filter) if args.dataset_fields_ and fields is not None: input_fields = configure_input_fields(fields, args.dataset_fields_) dataset_args.update(input_fields=input_fields) if (hasattr(args, 'multi_label') and args.multi_label and multi_label_data is not None): dataset_args.update( user_metadata={'multi_label_data': multi_label_data}) if fields and args.import_fields: fields_struct = fields.new_fields_structure(args.import_fields) check_fields_struct(fields_struct, "dataset") update_attributes(dataset_args, fields_struct) if 'dataset' in args.json_args: update_json_args(dataset_args, args.json_args.get('dataset'), fields) return dataset_args def set_dataset_split_args(name, description, args, sample_rate=1, out_of_bag=False, multi_label_data=None): """Return dataset arguments dict to split a dataset """ dataset_args = { "name": name, "description": description, "category": args.category, "tags": args.tag, "seed": SEED if args.seed is None else args.seed, "sample_rate": sample_rate, "out_of_bag": out_of_bag } if hasattr(args, "range") and args.range_: dataset_args.update({ "range": args_range }) if (hasattr(args, "multi_label") and args.multi_label and multi_label_data is not None): dataset_args.update( user_metadata={'multi_label_data': multi_label_data}) return dataset_args def create_dataset(origin_resource, dataset_args, args, api=None, path=None, session_file=None, log=None, dataset_type=None): """Creates remote dataset from source, dataset, cluster or datasets list """ if api is None: api = bigml.api.BigML() message = dated("Creating dataset.\n") log_message(message, log_file=session_file, console=args.verbosity) check_fields_struct(dataset_args, 
"dataset") # if --json-query or --sql-query are used and no names are set for # the datasets, we create default naming to A, B, C, etc. for the datasets # to be used as origin if ((hasattr(args, 'sql_query') and args.sql_query) or \ (hasattr(args, 'json_query') and args.sql_query)) and \ isinstance(origin_resource, list) and \ ((not isinstance(origin_resource[0], dict)) or \ origin_resource[0].get("name") is None): for index, element in enumerate(origin_resource): if index < len(DS_NAMES): if isinstance(element, dict): if element.get("resource") is not None: element = {"id": element["resource"]} element.update({"name": DS_NAMES[index]}) origin_resource[index] = element elif isinstance(element, basestring): origin_resource[index] = {"id": element, "name": DS_NAMES[index]} dataset = api.create_dataset(origin_resource, dataset_args, retries=None) suffix = "_" + dataset_type if dataset_type else "" log_created_resources("dataset%s" % suffix, path, bigml.api.get_dataset_id(dataset), mode='a') dataset_id = check_resource_error(dataset, "Failed to create dataset: ") try: dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) except ValueError, exception: sys.exit("Failed to get a finished dataset: %s" % str(exception)) message = dated("Dataset created: %s\n" % get_url(dataset)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % dataset_id, log_file=log) if args.reports: report(args.reports, path, dataset) return dataset def get_dataset(dataset, api=None, verbosity=True, session_file=None): """Retrieves the dataset in its actual state """ if api is None: api = bigml.api.BigML() if (isinstance(dataset, basestring) or bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED): message = dated("Retrieving dataset. 
%s\n" % get_url(dataset)) log_message(message, log_file=session_file, console=verbosity) dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) check_resource_error(dataset, "Failed to get dataset: ") return dataset def publish_dataset(dataset, args, api=None, session_file=None): """Publishes dataset and sets its price (if any) """ if api is None: api = bigml.api.BigML() public_dataset = {"private": False} if args.dataset_price: public_dataset.update(price=args.dataset_price) dataset = update_dataset(dataset, public_dataset, args, api=api, session_file=session_file) check_resource_error(dataset, "Failed to update dataset: ") dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) return dataset def update_dataset(dataset, dataset_args, args, api=None, path=None, session_file=None): """Updates dataset properties """ if api is None: api = bigml.api.BigML() message = dated("Updating dataset. %s\n" % get_url(dataset)) log_message(message, log_file=session_file, console=args.verbosity) dataset = api.update_dataset(dataset, dataset_args) if is_shared(dataset): message = dated("Shared dataset link. 
%s\n" % get_url(dataset, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, dataset) check_resource_error(dataset, "Failed to update dataset: ") dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) return dataset def set_model_args(args, name=None, objective_id=None, fields=None, model_fields=None, other_label=None): """Return model arguments dict """ if name is None: name = args.name if objective_id is None and args.max_categories is None: objective_id = args.objective_id_ if args.max_categories > 0: objective_id = args.objective_field if model_fields is None: model_fields = args.model_fields_ model_args = set_basic_model_args(args, name) model_args.update({"missing_splits": args.missing_splits}) if objective_id is not None and fields is not None: model_args.update({"objective_field": objective_id}) # If evaluate flag is on and no test_split flag is provided, # we choose a deterministic sampling with # args.sample_rate (80% by default) of the data to create the model # If cross_validation_rate = n/100, then we choose to run 2 * n evaluations # by holding out a n% of randomly sampled data. 
if ((args.evaluate and args.test_split == 0 and args.test_datasets is None) or args.cross_validation_rate > 0): model_args.update(seed=SEED) if args.cross_validation_rate > 0: args.sample_rate = 1 - args.cross_validation_rate args.replacement = False elif (args.sample_rate == 1 and args.test_datasets is None and not args.dataset_off): args.sample_rate = EVALUATE_SAMPLE_RATE if model_fields and fields is not None: input_fields = configure_input_fields( fields, model_fields, by_name=(args.max_categories > 0)) model_args.update(input_fields=input_fields) if args.pruning and args.pruning != 'smart': model_args.update(stat_pruning=(args.pruning == 'statistical')) if args.node_threshold > 0: model_args.update(node_threshold=args.node_threshold) if args.balance: model_args.update(balance_objective=True) if args.split_field: model_args.update(split_field=args.split_field) if args.focus_field: model_args.update(focus_field=args.focus_field) if args.weight_field: try: weight_field = fields.field_id(args.weight_field) except ValueError, exc: sys.exit(exc) model_args.update(weight_field=weight_field) if args.objective_weights: model_args.update(objective_weights=args.objective_weights_json) if args.max_categories > 0: model_args.update( user_metadata={'other_label': other_label, 'max_categories': args.max_categories}) model_args = update_sample_parameters_args(model_args, args) if 'model' in args.json_args: update_json_args(model_args, args.json_args.get('model'), fields) return model_args def set_label_model_args(args, fields, labels, multi_label_data): """Set of args needed to build a model per label """ objective_field = args.objective_field if not args.model_fields_: model_fields = [] else: model_fields = relative_input_fields(fields, args.model_fields_) if objective_field is None: objective_field = fields.objective_field try: objective_id = fields.field_id(objective_field) objective_field = fields.field_name(objective_id) except ValueError, exc: sys.exit(exc) all_labels = 
get_all_labels(multi_label_data) model_args_list = [] for index in range(args.number_of_models - 1, -1, -1): label = labels[index] (new_name, label_field, single_label_fields) = label_model_args( args.name, label, all_labels, model_fields, objective_field) model_args = set_model_args(args, name=new_name, objective_id=label_field, fields=fields, model_fields=single_label_fields) if multi_label_data is not None: model_args.update( user_metadata={'multi_label_data': multi_label_data}) model_args_list.append(model_args) return model_args_list def create_models(datasets, model_ids, model_args, args, api=None, path=None, session_file=None, log=None): """Create remote models """ if api is None: api = bigml.api.BigML() models = model_ids[:] existing_models = len(models) model_args_list = [] if args.dataset_off and args.evaluate: args.test_dataset_ids = datasets[:] if not args.multi_label: datasets = datasets[existing_models:] # if resuming and all models were created, there will be no datasets left if datasets: dataset = datasets[0] if isinstance(model_args, list): model_args_list = model_args if args.number_of_models > 0: message = dated("Creating %s.\n" % plural("model", args.number_of_models)) log_message(message, log_file=session_file, console=args.verbosity) single_model = args.number_of_models == 1 and existing_models == 0 # if there's more than one model the first one must contain # the entire field structure to be used as reference. 
        # When a single model is created we only need the summarized fields
        # (FIELDS_QS) unless the full field structure must be exported;
        # otherwise the first model acts as the field-structure reference.
        query_string = (FIELDS_QS if single_model and (args.test_header \
            and not args.export_fields) else ALL_FIELDS_QS)
        inprogress = []
        for i in range(0, args.number_of_models):
            # Throttle creation so no more than max_parallel_models are
            # in progress at once on the server side.
            wait_for_available_tasks(inprogress, args.max_parallel_models,
                                     api, "model")
            if model_args_list:
                model_args = model_args_list[i]
            if args.cross_validation_rate > 0:
                # Each cross-validation model gets a deterministic seed
                # derived from its index, so runs are reproducible.
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            # one model per dataset (--max-categories or single model)
            if (args.max_categories > 0 or
                    (args.test_datasets and args.evaluate)):
                dataset = datasets[i]
                model = api.create_model(dataset, model_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # dataset-off evaluation: train on every dataset except the
                # held-out one at this index.
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_models]
                model = api.create_model(multi_dataset, model_args,
                                         retries=None)
            else:
                model = api.create_model(datasets, model_args, retries=None)
            model_id = check_resource_error(model,
                                            "Failed to create model: ")
            log_message("%s\n" % model_id, log_file=log)
            model_ids.append(model_id)
            inprogress.append(model_id)
            models.append(model)
            # Record the id in the session files so the command can resume.
            log_created_resources("models", path, model_id, mode='a')
        if args.number_of_models < 2 and args.verbosity:
            # Single-model runs wait for the model to finish so the link
            # printed below points at a finished resource.
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, model)
    return models, model_ids


def create_model(cluster, model_args, args, api=None, path=None,
                 session_file=None, log=None, model_type=None):
    """Creates remote model from cluster and centroid

       Returns the finished model built from the given origin resource.
       `model_type`, when set, is appended to the name of the log file
       entry (see the `log_created_resources` call below).
       NOTE(review): indentation reconstructed from a whitespace-mangled
       source — confirm block nesting against upstream bigmler/resources.py.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating model.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    model = api.create_model(cluster, model_args, retries=None)
    # Distinguish resource log entries per model type, e.g. "models_cluster".
    suffix = "" if model_type is None else "_%s" % model_type
    log_created_resources("models%s" % suffix, path,
                          bigml.api.get_model_id(model), mode='a')
    model_id = check_resource_error(model, "Failed to create model: ")
    try:
        # Wait until the model is finished, retrieving all its fields.
        model = check_resource(model, api.get_model,
                               query_string=ALL_FIELDS_QS)
    except ValueError, exception:
        sys.exit("Failed to get a finished model: %s" % str(exception))
    message = dated("Model created: %s\n" % get_url(model))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % model_id, log_file=log)
    if args.reports:
        report(args.reports, path, model)
    return model


def update_model(model, model_args, args,
                 api=None, path=None, session_file=None):
    """Updates model properties

       Applies `model_args` to the remote model, waits for the update to
       finish and returns the refreshed model. If the update made the
       model shared, the shared link is echoed to the session log.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating model. %s\n" % get_url(model))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model, "Failed to update model: %s"
                         % model['resource'])
    model = check_resource(model, api.get_model,
                           query_string=ALL_FIELDS_QS)
    if is_shared(model):
        message = dated("Shared model link. %s\n" %
                        get_url(model, shared=True))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)
    return model


def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

       Returns the tuple (models, model_ids). When fewer ids than
       `args.max_batch_models` are given, every model is downloaded;
       otherwise only the first one is retrieved as a reference.
    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                # Only the reference model (first of a multi-model batch,
                # or any model in multi-label mode) needs all its fields.
                query_string = (
                    ALL_FIELDS_QS if (
                        (not single_model and (
                            len(models) == 0 or args.multi_label)) or
                        not args.test_header)
                    else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            except ValueError, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    else:
        # Too many models to download them all: retrieve only the first
        # one to be used as the field-structure reference.
        try:
            query_string = (ALL_FIELDS_QS if not single_model or
                            not args.test_header
                            else FIELDS_QS)
            model = check_resource(model_ids[0], api.get_model,
                                   query_string=query_string)
        except ValueError, exception:
            sys.exit("Failed to get a finished model: %s" % str(exception))
        models[0] = model
    return models, model_ids


def set_label_ensemble_args(args, labels, multi_label_data,
                            number_of_ensembles, fields):
    """Set of args needed to build an ensemble per label

       For multi-label tasks, builds one ensemble-args dict per label.
       Each dict renames the ensemble after its label and restricts the
       objective to the per-label field. The list is built in reverse
       index order (see the `range(..., -1, -1)` loop below).
    """
    if not args.model_fields_:
        args.model_fields_ = relative_input_fields(fields,
                                                   args.model_fields_)
    if args.objective_field is None:
        args.objective_field = fields.objective_field
    try:
        objective_id = fields.field_id(args.objective_field)
    except ValueError, exc:
        sys.exit(exc)
    objective_field = fields.fields[objective_id]['name']
    ensemble_args_list = []
    for index in range(number_of_ensembles - 1, -1, -1):
        label = labels[index]
        all_labels = get_all_labels(multi_label_data)
        (new_name, label_field,
         single_label_fields) = label_model_args(
             args.name, label, all_labels, args.model_fields_,
             objective_field)
        ensemble_args = set_ensemble_args(args, name=new_name,
                                          objective_id=label_field,
                                          model_fields=single_label_fields,
                                          fields=fields)
        if multi_label_data is not None:
            # Keep the multi-label bookkeeping attached to the resource.
            ensemble_args.update(
                user_metadata={'multi_label_data': multi_label_data})
        ensemble_args_list.append(ensemble_args)
    return ensemble_args_list


def set_ensemble_args(args, name=None,
                      objective_id=None, model_fields=None, fields=None):
    """Return ensemble arguments dict

       The explicit keyword arguments override their `args` counterparts
       (used by the per-label builder above).
    """
    if name is None:
        name = args.name
    if objective_id is None:
        objective_id = args.objective_id_
    if model_fields is None:
        model_fields = args.model_fields_
ensemble_args = set_basic_model_args(args, name) ensemble_args.update({ "missing_splits": args.missing_splits, "ensemble_sample": {"seed": SEED if args.ensemble_sample_seed is None \ else args.ensemble_sample_seed}, "seed": SEED if args.seed is None else args.seed }) if objective_id is not None and fields is not None: ensemble_args.update({"objective_field": objective_id}) if args.boosting: boosting_args = {} for option in BOOSTING_OPTIONS: if hasattr(args, option) and getattr(args, option) is not None: boosting_args.update({option: getattr(args, option)}) ensemble_args.update({"boosting": boosting_args}) else: ensemble_args.update({"number_of_models": args.number_of_models}) # If evaluate flag is on and no test_split flag is provided, # we choose a deterministic sampling with # args.sample_rate (80% by default) of the data to create the model if (args.evaluate and args.test_split == 0 and args.test_datasets is None and not args.dataset_off): ensemble_args.update({"seed": SEED}) if args.sample_rate == 1: args.sample_rate = EVALUATE_SAMPLE_RATE if model_fields and fields is not None: input_fields = configure_input_fields(fields, model_fields) ensemble_args.update(input_fields=input_fields) if args.pruning and args.pruning != 'smart': ensemble_args.update(stat_pruning=(args.pruning == 'statistical')) if args.node_threshold > 0: ensemble_args.update(node_threshold=args.node_threshold) if args.balance: ensemble_args.update(balance_objective=True) if args.weight_field: try: weight_field = fields.field_id(args.weight_field) except ValueError, exc: sys.exit(exc) ensemble_args.update(weight_field=weight_field) if args.objective_weights: ensemble_args.update(objective_weights=args.objective_weights_json) if args.random_candidates: ensemble_args.update(random_candidates=args.random_candidates) update_attributes(ensemble_args, args.json_args.get('model')) ensemble_args = update_sample_parameters_args(ensemble_args, args) ensemble_args["ensemble_sample"].update( \ {"rate": 
args.ensemble_sample_rate, "replacement": args.ensemble_sample_replacement}) if 'ensemble' in args.json_args: update_json_args(ensemble_args, args.json_args.get('ensemble'), fields) return ensemble_args def create_ensembles(datasets, ensemble_ids, ensemble_args, args, number_of_ensembles=1, api=None, path=None, session_file=None, log=None): """Create ensembles from input data """ if api is None: api = bigml.api.BigML() ensembles = ensemble_ids[:] existing_ensembles = len(ensembles) model_ids = [] ensemble_args_list = [] if isinstance(ensemble_args, list): ensemble_args_list = ensemble_args if args.dataset_off and args.evaluate: args.test_dataset_ids = datasets[:] if not args.multi_label: datasets = datasets[existing_ensembles:] if number_of_ensembles > 0: message = dated("Creating %s.\n" % plural("ensemble", number_of_ensembles)) log_message(message, log_file=session_file, console=args.verbosity) inprogress = [] for i in range(0, number_of_ensembles): wait_for_available_tasks(inprogress, args.max_parallel_ensembles, api, "ensemble", wait_step=args.number_of_models) if ensemble_args_list: ensemble_args = ensemble_args_list[i] if args.dataset_off and args.evaluate: multi_dataset = args.test_dataset_ids[:] del multi_dataset[i + existing_ensembles] ensemble = api.create_ensemble(multi_dataset, ensemble_args, retries=None) else: ensemble = api.create_ensemble(datasets, ensemble_args, retries=None) ensemble_id = check_resource_error(ensemble, "Failed to create ensemble: ") log_message("%s\n" % ensemble_id, log_file=log) ensemble_ids.append(ensemble_id) inprogress.append(ensemble_id) ensembles.append(ensemble) log_created_resources("ensembles", path, ensemble_id, mode='a') models, model_ids = retrieve_ensembles_models(ensembles, api, path) if number_of_ensembles < 2 and args.verbosity: message = dated("Ensemble created: %s\n" % get_url(ensemble)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, ensemble) 
return ensembles, ensemble_ids, models, model_ids def retrieve_ensembles_models(ensembles, api, path=None): """Retrieves the models associated to a list of ensembles """ models = [] model_ids = [] for index in range(0, len(ensembles)): ensemble = ensembles[index] if (isinstance(ensemble, basestring) or bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED): try: ensemble = check_resource(ensemble, api.get_ensemble) ensembles[index] = ensemble except ValueError, exception: sys.exit("Failed to get a finished ensemble: %s" % str(exception)) model_ids.extend(ensemble['object']['models']) if path is not None: for model_id in model_ids: log_created_resources("models", path, model_id, mode='a') models = model_ids[:] models[0] = check_resource(models[0], api.get_model, query_string=ALL_FIELDS_QS) return models, model_ids def get_ensemble(ensemble, api=None, verbosity=True, session_file=None): """Retrieves remote ensemble in its actual status """ if api is None: api = bigml.api.BigML() if (isinstance(ensemble, basestring) or bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED): message = dated("Retrieving ensemble. 
%s\n" % get_url(ensemble)) log_message(message, log_file=session_file, console=verbosity) ensemble = check_resource(ensemble, api.get_ensemble) check_resource_error(ensemble, "Failed to get ensemble: ") return ensemble def set_publish_model_args(args): """Set args to publish model """ public_model = {} if args.black_box: public_model = {"private": False} if args.white_box: public_model = {"private": False, "white_box": True} if args.model_price: public_model.update(price=args.model_price) if args.cpp: public_model.update(credits_per_prediction=args.cpp) return public_model def map_fields(fields_map, model_fields, dataset_fields): """Build a dict to map model to dataset fields """ update_map = {} for (model_column, dataset_column) in fields_map.iteritems(): try: update_map.update({ model_fields.field_id(model_column): dataset_fields.field_id(dataset_column)}) except ValueError, exc: sys.exit(exc) return update_map def set_evaluation_args(args, fields=None, dataset_fields=None, name=None): """Return evaluation args dict """ if name is None: name = args.name evaluation_args = set_basic_args(args, name) if hasattr(args, 'method') and (args.number_of_models > 1 or args.ensemble): evaluation_args.update(combiner=args.method) if hasattr(args, 'method') and args.method: evaluation_args.update({"combiner": args.method}) if args.method == THRESHOLD_CODE: threshold = {} if hasattr(args, 'threshold') and args.threshold is not None: threshold.update(k=args.threshold) if hasattr(args, 'threshold_class') \ and args.threshold_class is not None: threshold.update({"class": args.threshold_class}) evaluation_args.update(threshold=threshold) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields evaluation_args.update({"fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if hasattr(args, 'missing_strategy') and args.missing_strategy: evaluation_args.update(missing_strategy=args.missing_strategy) if 'evaluation' in args.json_args: 
update_json_args( evaluation_args, args.json_args.get('evaluation'), fields) # if evaluating time series we need to use ranges if args.subcommand == "time-series" and args.test_split == 0 and \ not args.has_test_datasets_: args.range_ = [int(args.max_rows * EVALUATE_SAMPLE_RATE) + 1, args.max_rows] evaluation_args.update({"range": args.range_}) return evaluation_args # Two cases to use out_of_bag and sample_rate: standard evaluations where # only the training set is provided, and cross_validation # [--dataset|--test] [--model|--models|--model-tag|--ensemble] --evaluate if (((hasattr(args, "dataset") and args.dataset) or args.test_set) and args.has_supervised_): return evaluation_args # [--train|--dataset] --test-split --evaluate if args.test_split > 0 and (args.training_set or args.dataset): return evaluation_args # --datasets --test-datasets or equivalents #if args.datasets and (args.test_datasets or args.dataset_off): if args.has_datasets_ and (args.has_test_datasets_ or args.dataset_off): return evaluation_args if args.sample_rate == 1: args.sample_rate = EVALUATE_SAMPLE_RATE evaluation_args.update(out_of_bag=True, seed=SEED, sample_rate=args.sample_rate) return evaluation_args def set_label_evaluation_args(args, labels, all_labels, number_of_evaluations, fields, dataset_fields, objective_field): """Set of args needed to build an evaluation per label """ if objective_field is None: try: objective_id = fields.field_id(fields.objective_field) except ValueError, exc: sys.exit(exc) objective_field = fields.fields[objective_id]['name'] evaluation_args_list = [] for index in range(number_of_evaluations - 1, -1, -1): label = labels[index] new_name = label_model_args( args.name, label, all_labels, [], objective_field)[0] evaluation_args = set_evaluation_args(args, fields=fields, dataset_fields=dataset_fields, name=new_name) evaluation_args_list.append(evaluation_args) return evaluation_args_list def create_evaluations(model_or_ensemble_ids, datasets, evaluation_args, 
args, api=None, path=None, session_file=None, log=None, existing_evaluations=0): """Create evaluations for a list of models ``model_or_ensemble_ids``: list of model or ensemble ids to create an evaluation of ``datasets``: dataset objects or ids to evaluate with ``evaluation_args``: arguments for the ``create_evaluation`` call ``args``: input values for bigmler flags ``api``: api to remote objects in BigML ``path``: directory to store the BigMLer generated files in ``session_file``: file to store the messages of that session ``log``: user provided log file ``existing_evaluations``: evaluations found when attempting resume """ evaluations = [] dataset = datasets[0] evaluation_args_list = [] if isinstance(evaluation_args, list): evaluation_args_list = evaluation_args if api is None: api = bigml.api.BigML() remaining_ids = model_or_ensemble_ids[existing_evaluations:] if args.test_dataset_ids or args.dataset_off: remaining_datasets = datasets[existing_evaluations:] number_of_evaluations = len(remaining_ids) message = dated("Creating evaluations.\n") log_message(message, log_file=session_file, console=args.verbosity) inprogress = [] for i in range(0, number_of_evaluations): model = remaining_ids[i] if args.test_dataset_ids or args.dataset_off: dataset = remaining_datasets[i] wait_for_available_tasks(inprogress, args.max_parallel_evaluations, api, "evaluation") if evaluation_args_list != []: evaluation_args = evaluation_args_list[i] if args.cross_validation_rate > 0: new_seed = get_basic_seed(i + existing_evaluations) evaluation_args.update(seed=new_seed) evaluation = api.create_evaluation(model, dataset, evaluation_args, retries=None) evaluation_id = check_resource_error(evaluation, "Failed to create evaluation: ") inprogress.append(evaluation_id) log_created_resources("evaluations", path, evaluation_id, mode='a') evaluations.append(evaluation) log_message("%s\n" % evaluation['resource'], log_file=log) if (args.number_of_evaluations < 2 and len(evaluations) == 1 and 
args.verbosity): evaluation = evaluations[0] if bigml.api.get_status(evaluation)['code'] != bigml.api.FINISHED: try: evaluation = check_resource(evaluation, api.get_evaluation) except ValueError, exception: sys.exit("Failed to get a finished evaluation: %s" % str(exception)) evaluations[0] = evaluation message = dated("Evaluation created: %s\n" % get_url(evaluation)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, evaluation) return evaluations def get_evaluation(evaluation, api=None, verbosity=True, session_file=None): """Retrieves evaluation in its actual state """ if api is None: api = bigml.api.BigML() message = dated("Retrieving evaluation. %s\n" % get_url(evaluation)) log_message(message, log_file=session_file, console=verbosity) try: evaluation = check_resource(evaluation, api.get_evaluation) except ValueError, exception: sys.exit("Failed to get a finished evaluation: %s" % str(exception)) return evaluation def update_evaluation(evaluation, evaluation_args, args, api=None, path=None, session_file=None): """Updates evaluation properties """ if api is None: api = bigml.api.BigML() message = dated("Updating evaluation. %s\n" % get_url(evaluation)) log_message(message, log_file=session_file, console=args.verbosity) evaluation = api.update_evaluation(evaluation, evaluation_args) check_resource_error(evaluation, "Failed to update evaluation: %s" % evaluation['resource']) evaluation = check_resource(evaluation, api.get_evaluation) if is_shared(evaluation): message = dated("Shared evaluation link. 
%s\n" % get_url(evaluation, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, evaluation) return evaluation def save_evaluation(evaluation, output, api=None): """Creates the evaluation .txt and .json files """ if api is None: api = bigml.api.BigML() evaluation = evaluation.get('object', evaluation).get('result', evaluation) save_txt_and_json(evaluation, output, api=api) def save_txt_and_json(object_dict, output, api=None): """Saves in txt and JSON format the contents of a dict object """ open_mode = 'wt' if PYTHON3 else 'wb' message = json.dumps(object_dict) if not PYTHON3: message = utf8(message) with open(output + '.json', open_mode) as dict_json: dict_json.write(message) with open(output + '.txt', open_mode) as dict_txt: api.pprint(object_dict, dict_txt) def set_batch_prediction_args(args, fields=None, dataset_fields=None): """Return batch prediction args dict """ batch_prediction_args = set_basic_batch_args(args, args.name) if hasattr(args, 'method') and args.method: batch_prediction_args.update({"combiner": args.method}) if args.method == THRESHOLD_CODE: threshold = {} if hasattr(args, 'threshold') and args.threshold is not None: threshold.update(k=args.threshold) if hasattr(args, 'threshold_class') \ and args.threshold_class is not None: threshold.update({"class": args.threshold_class}) batch_prediction_args.update(threshold=threshold) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_prediction_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if args.prediction_info in [NORMAL_FORMAT, FULL_FORMAT]: if (hasattr(args, 'boosting') and args.boosting) or \ (hasattr(args, 'probability') and args.probability): batch_prediction_args.update(probability=True) else: batch_prediction_args.update(confidence=True) if args.prediction_info == FULL_FORMAT: batch_prediction_args.update(all_fields=True) if 
hasattr(args, 'prediction_name') and args.prediction_name: batch_prediction_args.update(prediction_name=args.prediction_name) if args.prediction_fields: batch_prediction_args.update(all_fields=False) prediction_fields = [] for field in args.prediction_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError, exc: sys.exit(exc) prediction_fields.append(field) batch_prediction_args.update(output_fields=prediction_fields) if hasattr(args, 'missing_strategy') and args.missing_strategy: batch_prediction_args.update(missing_strategy=args.missing_strategy) if hasattr(args, "operating_point_") and args.operating_point_: batch_prediction_args.update(operating_point=args.operating_point_) if args.operating_point_.get("kind") == "probability": batch_prediction_args.update({"probability": True, "confidence": False}) if 'batch_prediction' in args.json_args: update_json_args( batch_prediction_args, args.json_args.get('batch_prediction'), fields) return batch_prediction_args def create_batch_prediction(model_or_ensemble, test_dataset, batch_prediction_args, args, api=None, session_file=None, path=None, log=None): """Creates remote batch_prediction """ if api is None: api = bigml.api.BigML() message = dated("Creating batch prediction.\n") log_message(message, log_file=session_file, console=args.verbosity) batch_prediction = api.create_batch_prediction(model_or_ensemble, test_dataset, batch_prediction_args, retries=None) log_created_resources("batch_prediction", path, bigml.api.get_batch_prediction_id(batch_prediction), mode='a') batch_prediction_id = check_resource_error( batch_prediction, "Failed to create batch prediction: ") try: batch_prediction = check_resource(batch_prediction, api.get_batch_prediction) except ValueError, exception: sys.exit("Failed to get a finished batch prediction: %s" % str(exception)) message = dated("Batch prediction created: %s\n" % 
get_url(batch_prediction)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % batch_prediction_id, log_file=log) if args.reports: report(args.reports, path, batch_prediction) return batch_prediction def set_cluster_args(args, name=None, fields=None, cluster_fields=None): """Return cluster arguments dict """ if name is None: name = args.name if cluster_fields is None: cluster_fields = args.cluster_fields_ cluster_args = set_basic_model_args(args, name) cluster_args.update({ "seed": SEED if args.seed is None else args.seed, "cluster_seed": (SEED if args.cluster_seed is None else args.cluster_seed) }) if args.cluster_models is not None: cluster_args.update({"model_clusters": True}) if args.cluster_k: cluster_args.update({"k": args.cluster_k}) if cluster_fields and fields is not None: input_fields = configure_input_fields(fields, cluster_fields) cluster_args.update(input_fields=input_fields) if args.summary_fields is not None: cluster_args.update({"summary_fields": args.summary_fields_}) cluster_args = update_sample_parameters_args(cluster_args, args) if 'cluster' in args.json_args: update_json_args(cluster_args, args.json_args.get('cluster'), fields) return cluster_args def create_clusters(datasets, cluster_ids, cluster_args, args, api=None, path=None, session_file=None, log=None): """Create remote clusters """ if api is None: api = bigml.api.BigML() clusters = cluster_ids[:] existing_clusters = len(clusters) cluster_args_list = [] datasets = datasets[existing_clusters:] # if resuming and all clusters were created, there will be no datasets left if datasets: if isinstance(cluster_args, list): cluster_args_list = cluster_args # Only one cluster per command, at present number_of_clusters = 1 message = dated("Creating %s.\n" % plural("cluster", number_of_clusters)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_clusters): 
wait_for_available_tasks(inprogress, args.max_parallel_clusters, api, "cluster") if cluster_args_list: cluster_args = cluster_args_list[i] cluster = api.create_cluster(datasets, cluster_args, retries=None) cluster_id = check_resource_error(cluster, "Failed to create cluster: ") log_message("%s\n" % cluster_id, log_file=log) cluster_ids.append(cluster_id) inprogress.append(cluster_id) clusters.append(cluster) log_created_resources("clusters", path, cluster_id, mode='a') if args.verbosity: if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED: try: cluster = check_resource(cluster, api.get_cluster, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished cluster: %s" % str(exception)) clusters[0] = cluster message = dated("Cluster created: %s\n" % get_url(cluster)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, cluster) return clusters, cluster_ids def get_clusters(cluster_ids, args, api=None, session_file=None): """Retrieves remote clusters in its actual status """ if api is None: api = bigml.api.BigML() cluster_id = "" clusters = cluster_ids cluster_id = cluster_ids[0] message = dated("Retrieving %s. 
%s\n" % (plural("cluster", len(cluster_ids)), get_url(cluster_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one cluster to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS cluster = check_resource(cluster_ids[0], api.get_cluster, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished cluster: %s" % str(exception)) clusters[0] = cluster return clusters, cluster_ids def set_batch_centroid_args(args, fields=None, dataset_fields=None): """Return batch centroid args dict """ batch_centroid_args = set_basic_batch_args(args, args.name) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_centroid_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if args.prediction_info == FULL_FORMAT: batch_centroid_args.update(all_fields=True) if args.prediction_fields: batch_centroid_args.update(all_fields=False) prediction_fields = [] for field in args.prediction_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError, exc: sys.exit(exc) prediction_fields.append(field) batch_centroid_args.update(output_fields=prediction_fields) if 'batch_centroid' in args.json_args: update_json_args( batch_centroid_args, args.json_args.get('batch_centroid'), fields) return batch_centroid_args def create_batch_centroid(cluster, test_dataset, batch_centroid_args, args, api=None, session_file=None, path=None, log=None): """Creates remote batch_centroid """ if api is None: api = bigml.api.BigML() message = dated("Creating batch centroid.\n") log_message(message, log_file=session_file, console=args.verbosity) batch_centroid = api.create_batch_centroid(cluster, test_dataset, batch_centroid_args, retries=None) log_created_resources("batch_centroid", path, 
bigml.api.get_batch_centroid_id(batch_centroid), mode='a') batch_centroid_id = check_resource_error( batch_centroid, "Failed to create batch prediction: ") try: batch_centroid = check_resource(batch_centroid, api.get_batch_centroid) except ValueError, exception: sys.exit("Failed to get a finished batch centroid: %s" % str(exception)) message = dated("Batch centroid created: %s\n" % get_url(batch_centroid)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % batch_centroid_id, log_file=log) if args.reports: report(args.reports, path, batch_centroid) return batch_centroid def set_publish_cluster_args(args): """Set args to publish cluster """ public_cluster = {} if args.public_cluster: public_cluster = {"private": False} if args.model_price: public_cluster.update(price=args.model_price) if args.cpp: public_cluster.update(credits_per_prediction=args.cpp) return public_cluster def update_cluster(cluster, cluster_args, args, api=None, path=None, session_file=None): """Updates cluster properties """ if api is None: api = bigml.api.BigML() message = dated("Updating cluster. %s\n" % get_url(cluster)) log_message(message, log_file=session_file, console=args.verbosity) cluster = api.update_cluster(cluster, cluster_args) check_resource_error(cluster, "Failed to update cluster: %s" % cluster['resource']) cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS) if is_shared(cluster): message = dated("Shared cluster link. 
%s\n" % get_url(cluster, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, cluster) return cluster def set_batch_anomaly_score_args(args, fields=None, dataset_fields=None): """Return batch anomaly score args dict """ batch_anomaly_score_args = set_basic_batch_args(args, args.name) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_anomaly_score_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if args.prediction_info == FULL_FORMAT: batch_anomaly_score_args.update(all_fields=True) if args.prediction_fields: batch_anomaly_score_args.update(all_fields=False) prediction_fields = [] for field in args.prediction_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError, exc: sys.exit(exc) prediction_fields.append(field) batch_anomaly_score_args.update(output_fields=prediction_fields) if 'batch_anomaly_score' in args.json_args: update_json_args( batch_anomaly_score_args, args.json_args.get('batch_anomaly_score'), fields) return batch_anomaly_score_args def set_anomaly_args(args, name=None, fields=None, anomaly_fields=None): """Return anomaly arguments dict """ if name is None: name = args.name if anomaly_fields is None: anomaly_fields = args.anomaly_fields_ anomaly_args = set_basic_model_args(args, name) anomaly_args.update({ "seed": SEED if args.seed is None else args.seed, "anomaly_seed": (SEED if args.anomaly_seed is None else args.anomaly_seed) }) if anomaly_fields and fields is not None: input_fields = configure_input_fields(fields, anomaly_fields) anomaly_args.update(input_fields=input_fields) if args.top_n > 0: anomaly_args.update(top_n=args.top_n) if args.forest_size > 0: anomaly_args.update(forest_size=args.forest_size) anomaly_args = update_sample_parameters_args(anomaly_args, args) if 'anomaly' in 
args.json_args: update_json_args(anomaly_args, args.json_args.get('anomaly'), fields) return anomaly_args def set_publish_anomaly_args(args): """Set args to publish anomaly """ public_anomaly = {} if args.public_anomaly: public_anomaly = {"private": False} if args.model_price: public_anomaly.update(price=args.model_price) if args.cpp: public_anomaly.update(credits_per_prediction=args.cpp) return public_anomaly def create_anomalies(datasets, anomaly_ids, anomaly_args, args, api=None, path=None, session_file=None, log=None): """Create remote anomalies """ if api is None: api = bigml.api.BigML() anomalies = anomaly_ids[:] existing_anomalies = len(anomalies) anomaly_args_list = [] datasets = datasets[existing_anomalies:] # if resuming and all anomalies were created, # there will be no datasets left if datasets: if isinstance(anomaly_args, list): anomaly_args_list = anomaly_args # Only one anomaly per command, at present number_of_anomalies = 1 message = dated("Creating %s.\n" % plural("anomaly detector", number_of_anomalies)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_anomalies): wait_for_available_tasks(inprogress, args.max_parallel_anomalies, api, "anomaly") if anomaly_args_list: anomaly_args = anomaly_args_list[i] anomaly = api.create_anomaly(datasets, anomaly_args, retries=None) anomaly_id = check_resource_error(anomaly, "Failed to create anomaly: ") log_message("%s\n" % anomaly_id, log_file=log) anomaly_ids.append(anomaly_id) inprogress.append(anomaly_id) anomalies.append(anomaly) log_created_resources("anomalies", path, anomaly_id, mode='a') if args.verbosity: if bigml.api.get_status(anomaly)['code'] != bigml.api.FINISHED: try: anomaly = api.check_resource(anomaly, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished anomaly: %s" % str(exception)) anomalies[0] = anomaly message = dated("Anomaly created: %s\n" % get_url(anomaly)) 
log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, anomaly) return anomalies, anomaly_ids def get_anomalies(anomaly_ids, args, api=None, session_file=None): """Retrieves remote anomalies in its actual status """ if api is None: api = bigml.api.BigML() anomaly_id = "" anomalies = anomaly_ids anomaly_id = anomaly_ids[0] message = dated("Retrieving %s. %s\n" % (plural("anomaly detector", len(anomaly_ids)), get_url(anomaly_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one anomaly to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS anomaly = api.check_resource(anomaly_ids[0], query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished anomaly: %s" % str(exception)) anomalies[0] = anomaly return anomalies, anomaly_ids def create_batch_anomaly_score(anomaly, test_dataset, batch_anomaly_score_args, args, api=None, session_file=None, path=None, log=None): """Creates remote batch anomaly score """ if api is None: api = bigml.api.BigML() message = dated("Creating batch anomaly score.\n") log_message(message, log_file=session_file, console=args.verbosity) batch_anomaly_score = api.create_batch_anomaly_score( anomaly, test_dataset, batch_anomaly_score_args, retries=None) log_created_resources( "batch_anomaly_score", path, bigml.api.get_batch_anomaly_score_id(batch_anomaly_score), mode='a') batch_anomaly_score_id = check_resource_error( batch_anomaly_score, "Failed to create batch prediction: ") try: batch_anomaly_score = api.check_resource(batch_anomaly_score) except ValueError, exception: sys.exit("Failed to get a finished batch anomaly score: %s" % str(exception)) message = dated("Batch anomaly score created: %s\n" % get_url(batch_anomaly_score)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % 
batch_anomaly_score_id, log_file=log) if args.reports: report(args.reports, path, batch_anomaly_score) return batch_anomaly_score def update_anomaly(anomaly, anomaly_args, args, api=None, path=None, session_file=None): """Updates anomaly properties """ if api is None: api = bigml.api.BigML() message = dated("Updating anomaly detector. %s\n" % get_url(anomaly)) log_message(message, log_file=session_file, console=args.verbosity) anomaly = api.update_anomaly(anomaly, anomaly_args) check_resource_error(anomaly, "Failed to update anomaly: %s" % anomaly['resource']) anomaly = api.check_resource(anomaly, query_string=FIELDS_QS) if is_shared(anomaly): message = dated("Shared anomaly link. %s\n" % get_url(anomaly, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, anomaly) return anomaly def set_project_args(args, name=None): """Return project arguments dict """ if name is None: name = args.name project_args = set_basic_args(args, name) if 'project' in args.json_args: update_json_args(project_args, args.json_args.get('project'), None) return project_args def create_project(project_args, args, api=None, session_file=None, path=None, log=None): """Creates remote project """ if api is None: api = bigml.api.BigML() message = dated("Creating project.\n") log_message(message, log_file=session_file, console=args.verbosity) project = api.create_project(project_args) log_created_resources("project", path, bigml.api.get_project_id(project), mode='a') project_id = check_resource_error(project, "Failed to create project: ") try: project = check_resource(project, api=api) except ValueError, exception: sys.exit("Failed to get a finished project: %s" % str(exception)) message = dated("Project \"%s\" has been created.\n" % project['object']['name']) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % project_id, log_file=log) try: if args.reports: report(args.reports, path, 
project) except AttributeError: pass return project def update_project(project_args, args, api=None, session_file=None, log=None): """Updates project properties """ if api is None: api = bigml.api.BigML() message = dated("Updating project attributes.\n") log_message(message, log_file=session_file, console=args.verbosity) project = api.update_project(args.project_id, project_args) check_resource_error(project, "Failed to update project: %s" % project['resource']) message = dated("Project \"%s\" has been updated.\n" % project['resource']) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % args.project_id, log_file=log) return project def get_project_by_name(project, api=None, verbosity=True, session_file=None): """Retrieves the project info by project name """ if api is None: api = bigml.api.BigML() project_id = None if (isinstance(project, basestring) or bigml.api.get_status(project)['code'] != bigml.api.FINISHED): message = dated("Retrieving project info.\n") log_message(message, log_file=session_file, console=verbosity) projects = api.list_projects(query_string="name=%s" % project) projects = projects.get('objects', []) if projects: project_id = projects[0]['resource'] return project_id def set_sample_args(args, name=None): """Return sample arguments dict """ if name is None: name = args.name sample_args = set_basic_args(args, name) if 'sample' in args.json_args: update_json_args(sample_args, args.json_args.get('sample')) return sample_args def create_samples(datasets, sample_ids, sample_args, args, api=None, path=None, session_file=None, log=None): """Create remote samples """ if api is None: api = bigml.api.BigML() samples = sample_ids[:] existing_samples = len(samples) sample_args_list = [] datasets = datasets[existing_samples:] # if resuming and all samples were created, there will be no datasets left if datasets: if isinstance(sample_args, list): sample_args_list = sample_args # Only one sample per command, at present 
number_of_samples = 1 max_parallel_samples = 1 message = dated("Creating %s.\n" % plural("sample", number_of_samples)) log_message(message, log_file=session_file, console=args.verbosity) inprogress = [] for i in range(0, number_of_samples): wait_for_available_tasks(inprogress, max_parallel_samples, api, "sample") if sample_args_list: sample_args = sample_args_list[i] sample = api.create_sample(datasets[i], sample_args, retries=None) sample_id = check_resource_error(sample, "Failed to create sample: ") log_message("%s\n" % sample_id, log_file=log) sample_ids.append(sample_id) inprogress.append(sample_id) samples.append(sample) log_created_resources("samples", path, sample_id, mode='a') if args.verbosity: if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED: try: sample = check_resource(sample, api.get_sample) except ValueError, exception: sys.exit("Failed to get a finished sample: %s" % str(exception)) samples[0] = sample message = dated("Sample created: %s\n" % get_url(sample)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, sample) return samples, sample_ids def update_sample(sample, sample_args, args, api=None, path=None, session_file=None): """Updates sample properties """ if api is None: api = bigml.api.BigML() message = dated("Updating sample. %s\n" % get_url(sample)) log_message(message, log_file=session_file, console=args.verbosity) sample = api.update_sample(sample, sample_args) check_resource_error(sample, "Failed to update sample: %s" % sample['resource']) sample = check_resource(sample, api.get_sample) if is_shared(sample): message = dated("Shared sample link. 
%s\n" % get_url(sample, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, sample) return sample def get_samples(sample_ids, args, api=None, session_file=None, query_string=''): """Retrieves remote samples in its actual status """ if api is None: api = bigml.api.BigML() sample_id = "" samples = sample_ids sample_id = sample_ids[0] message = dated("Retrieving %s. %s\n" % (plural("sample", len(sample_ids)), get_url(sample_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one sample to predict at present try: sample = api.get_sample(sample_ids[0], query_string=query_string) check_resource_error(sample, "Failed to create sample: %s" % sample['resource']) sample = check_resource(sample, api=api, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished sample: %s" % str(exception)) samples[0] = sample return samples, sample_ids def set_publish_sample_args(args): """Set args to publish sample """ public_sample = {} if args.public_sample: public_sample = {"private": False} return public_sample def set_association_args(args, name=None, fields=None, association_fields=None): """Return association arguments dict """ if name is None: name = args.name if association_fields is None: association_fields = args.association_fields_ association_args = set_basic_model_args(args, name) if association_fields and fields is not None: input_fields = configure_input_fields(fields, association_fields) association_args.update(input_fields=input_fields) if args.association_k: association_args.update({"max_k": args.association_k}) if args.search_strategy: association_args.update({"search_strategy": args.search_strategy}) association_args = update_sample_parameters_args(association_args, args) if 'association' in args.json_args: update_json_args(association_args, args.json_args.get('association'), fields) return association_args def 
create_associations(datasets, association_ids, association_args, args, api=None, path=None, session_file=None, log=None): """Create remote associations """ if api is None: api = bigml.api.BigML() associations = association_ids[:] existing_associations = len(associations) association_args_list = [] datasets = datasets[existing_associations:] # if resuming and all associations were created, # there will be no datasets left if datasets: if isinstance(association_args, list): association_args_list = association_args # Only one association per command, at present number_of_associations = 1 message = dated("Creating %s.\n" % plural("association", number_of_associations)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_associations): wait_for_available_tasks(inprogress, args.max_parallel_associations, api, "association") if association_args_list: association_args = association_args_list[i] association = api.create_association( datasets, association_args, retries=None) association_id = check_resource_error( \ association, "Failed to create association: ") log_message("%s\n" % association_id, log_file=log) association_ids.append(association_id) inprogress.append(association_id) associations.append(association) log_created_resources( \ "associations", path, association_id, mode='a') if args.verbosity: if bigml.api.get_status(association)['code'] != bigml.api.FINISHED: try: association = check_resource( \ association, api.get_association, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished association: %s" % str(exception)) associations[0] = association message = dated("Association created: %s\n" % get_url(association)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, association) return associations, association_ids def get_associations(association_ids, args, api=None, 
session_file=None): """Retrieves remote associations in its actual status """ if api is None: api = bigml.api.BigML() association_id = "" associations = association_ids association_id = association_ids[0] message = dated("Retrieving %s. %s\n" % (plural("association", len(association_ids)), get_url(association_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one association to predict at present try: query_string = FIELDS_QS association = check_resource(association_ids[0], api.get_association, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished association: %s" % str(exception)) associations[0] = association return associations, association_ids def set_publish_association_args(args): """Set args to publish association """ public_association = {} if args.public_association: public_association = {"private": False} if args.model_price: public_association.update(price=args.model_price) if args.cpp: public_association.update(credits_per_prediction=args.cpp) return public_association def update_association(association, association_args, args, api=None, path=None, session_file=None): """Updates association properties """ if api is None: api = bigml.api.BigML() message = dated("Updating association. %s\n" % get_url(association)) log_message(message, log_file=session_file, console=args.verbosity) association = api.update_association(association, association_args) check_resource_error(association, "Failed to update association: %s" % association['resource']) association = check_resource(association, api.get_association, query_string=FIELDS_QS) if is_shared(association): message = dated("Shared association link. 
%s\n" % get_url(association, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, association) return association def set_script_args(args, name=None): """Returns a script arguments dict """ if name is None: name = args.name script_args = set_basic_args(args, name) if args.project_id is not None: script_args.update({"project": args.project_id}) if args.imports is not None: script_args.update({"imports": args.imports_}) if args.parameters_ is not None: script_args.update({"inputs": args.parameters_}) if args.declare_outputs_: script_args.update({"outputs": args.declare_outputs_}) update_attributes(script_args, args.json_args.get('script')) return script_args def create_script(source_code, script_args, args, api=None, path=None, session_file=None, log=None): """Creates remote script """ if api is None: api = bigml.api.BigML() message = dated("Creating script \"%s\".\n" % script_args["name"]) log_message(message, log_file=session_file, console=args.verbosity) script = api.create_script(source_code, script_args) log_created_resources("scripts", path, bigml.api.get_script_id(script), mode='a') script_id = check_resource_error(script, "Failed to create script: ") try: script = check_resource(script, api.get_script) except ValueError, exception: sys.exit("Failed to get a compiled script: %s" % str(exception)) message = dated("Script created: %s\n" % get_url(script)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % script_id, log_file=log) return script def get_script(script, api=None, verbosity=True, session_file=None): """Retrieves the script in its actual state """ if api is None: api = bigml.api.BigML() if (isinstance(script, basestring) or bigml.api.get_status(script)['code'] != bigml.api.FINISHED): message = dated("Retrieving script. 
%s\n" % get_url(script)) log_message(message, log_file=session_file, console=verbosity) try: script = check_resource(script, api.get_script) except ValueError, exception: sys.exit("Failed to get a compiled script: %s" % str(exception)) return script def set_execution_args(args, name=None): """Returns an execution arguments dict """ if name is None: name = args.name execution_args = set_basic_args(args, name) if args.project_id is not None: execution_args.update({"project": args.project_id}) if args.arguments_: execution_args.update({"inputs": args.arguments_}) if args.creation_defaults is not None: execution_args.update({"creation_defaults": args.creation_defaults_}) if args.outputs_: execution_args.update({"outputs": args.outputs_}) if args.input_maps_: execution_args.update({"input_maps_": args.input_maps_}) update_attributes(execution_args, args.json_args.get('execution')) return execution_args def create_execution(execution_args, args, api=None, path=None, session_file=None, log=None): """Creates remote execution """ message = dated("Creating execution.\n") log_message(message, log_file=session_file, console=args.verbosity) scripts = args.script_ids if args.script_ids else args.script execution = api.create_execution(scripts, execution_args) log_created_resources("execution", path, bigml.api.get_execution_id(execution), mode='a') execution_id = check_resource_error(execution, "Failed to create execution: ") try: execution = check_resource(execution, api.get_execution) except ValueError, exception: sys.exit("Failed to get a finished execution: %s" % str(exception)) message = dated("Execution created: %s\n" % get_url(execution)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % execution_id, log_file=log) return execution def get_execution(execution, api=None, verbosity=True, session_file=None): """Retrieves the execution in its actual state """ if api is None: api = bigml.api.BigML() if (isinstance(execution, basestring) or 
bigml.api.get_status(execution)['code'] != bigml.api.FINISHED): message = dated("Retrieving execution. %s\n" % get_url(execution)) log_message(message, log_file=session_file, console=verbosity) try: execution = check_resource(execution, api.get_execution) except ValueError, exception: sys.exit("Failed to get a finished execution: %s" % str(exception)) return execution def set_logistic_regression_args(args, name=None, fields=None, objective_id=None, logistic_regression_fields=None): """Return logistic regression arguments dict """ if name is None: name = args.name if logistic_regression_fields is None: logistic_regression_fields = args.logistic_regression_fields_ if objective_id is None: objective_id = args.objective_id_ logistic_regression_args = set_basic_model_args(args, name) logistic_regression_args.update({ "seed": SEED if args.seed is None else args.seed }) if objective_id is not None and fields is not None: logistic_regression_args.update({"objective_field": objective_id}) if logistic_regression_fields and fields is not None: input_fields = configure_input_fields(fields, logistic_regression_fields) logistic_regression_args.update(input_fields=input_fields) if ((args.evaluate and args.test_split == 0 and args.test_datasets is None) or args.cross_validation_rate > 0): logistic_regression_args.update(seed=SEED) if args.cross_validation_rate > 0: args.sample_rate = 1 - args.cross_validation_rate args.replacement = False elif (args.sample_rate == 1 and args.test_datasets is None and not args.dataset_off): args.sample_rate = EVALUATE_SAMPLE_RATE logistic_regression_args.update({"sample_rate": args.sample_rate}) if args.lr_c: logistic_regression_args.update({"c": args.lr_c}) logistic_regression_args.update({"bias": args.bias}) logistic_regression_args.update( \ {"balance_fields": args.balance_fields}) if args.eps: logistic_regression_args.update({"eps": args.eps}) if args.normalize is not None: logistic_regression_args.update({"normalize": args.normalize}) if 
args.missing_numerics is not None: logistic_regression_args.update( \ {"missing_numerics": args.missing_numerics}) if args.field_codings is not None: logistic_regression_args.update(\ {"field_codings": args.field_codings_}) logistic_regression_args = update_sample_parameters_args( \ logistic_regression_args, args) if 'logistic_regression' in args.json_args: update_json_args(logistic_regression_args, args.json_args.get('logistic_regression'), fields) return logistic_regression_args def create_logistic_regressions(datasets, logistic_regression_ids, logistic_regression_args, args, api=None, path=None, session_file=None, log=None): """Create remote logistic regressions """ if api is None: api = bigml.api.BigML() logistic_regressions = logistic_regression_ids[:] existing_logistic_regressions = len(logistic_regressions) logistic_regression_args_list = [] datasets = datasets[existing_logistic_regressions:] # if resuming and all logistic regressions were created, # there will be no datasets left if datasets: if isinstance(logistic_regression_args, list): logistic_regression_args_list = logistic_regression_args # Only one logistic regression per command, at present number_of_logistic_regressions = 1 message = dated("Creating %s.\n" % plural("logistic regression", number_of_logistic_regressions)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_logistic_regressions): wait_for_available_tasks(inprogress, args.max_parallel_logistic_regressions, api, "logisticregression") if logistic_regression_args_list: logistic_regression_args = logistic_regression_args_list[i] if args.cross_validation_rate > 0: new_seed = get_basic_seed(i + existing_logistic_regressions) logistic_regression_args.update(seed=new_seed) if (args.test_datasets and args.evaluate): dataset = datasets[i] logistic_regression = api.create_logistic_regression( \ dataset, logistic_regression_args, retries=None) elif 
args.dataset_off and args.evaluate: multi_dataset = args.test_dataset_ids[:] del multi_dataset[i + existing_logistic_regressions] logistic_regression = api.create_logistic_regression( \ multi_dataset, logistic_regression_args, retries=None) else: logistic_regression = api.create_logistic_regression( \ datasets, logistic_regression_args, retries=None) logistic_regression_id = check_resource_error( \ logistic_regression, "Failed to create logistic regression: ") log_message("%s\n" % logistic_regression_id, log_file=log) logistic_regression_ids.append(logistic_regression_id) inprogress.append(logistic_regression_id) logistic_regressions.append(logistic_regression) log_created_resources("logistic_regressions", path, logistic_regression_id, mode='a') if args.verbosity: if bigml.api.get_status(logistic_regression)['code'] != \ bigml.api.FINISHED: try: logistic_regression = check_resource( \ logistic_regression, api.get_logistic_regression, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished logistic regression:" " %s" % str(exception)) logistic_regressions[0] = logistic_regression message = dated("Logistic regression created: %s\n" % get_url(logistic_regression)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, logistic_regression) return logistic_regressions, logistic_regression_ids def get_logistic_regressions(logistic_regression_ids, args, api=None, session_file=None): """Retrieves remote logistic regression in its actual status """ if api is None: api = bigml.api.BigML() logistic_regression_id = "" logistic_regressions = logistic_regression_ids logistic_regression_id = logistic_regression_ids[0] message = dated("Retrieving %s. 
%s\n" % (plural("logistic regression", len(logistic_regression_ids)), get_url(logistic_regression_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one logistic regression to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS logistic_regression = check_resource(logistic_regression_ids[0], api.get_logistic_regression, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished logistic regression: %s" % \ str(exception)) logistic_regressions[0] = logistic_regression return logistic_regressions, logistic_regression_ids def set_publish_logistic_regression_args(args): """Set args to publish logistic regression """ public_logistic_regression = {} if args.public_logistic_regression: public_logistic_regression = {"private": False} if args.model_price: public_logistic_regression.update(price=args.model_price) if args.cpp: public_logistic_regression.update(credits_per_prediction=args.cpp) return public_logistic_regression def update_logistic_regression(logistic_regression, logistic_regression_args, args, api=None, path=None, session_file=None): """Updates logistic regression properties """ if api is None: api = bigml.api.BigML() message = dated("Updating logistic regression. %s\n" % get_url(logistic_regression)) log_message(message, log_file=session_file, console=args.verbosity) logistic_regression = api.update_logistic_regression(logistic_regression, \ logistic_regression_args) check_resource_error(logistic_regression, "Failed to update logistic regression: %s" % logistic_regression['resource']) logistic_regression = check_resource(logistic_regression, api.get_logistic_regression, query_string=FIELDS_QS) if is_shared(logistic_regression): message = dated("Shared logistic regression link. 
%s\n" % get_url(logistic_regression, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, logistic_regression) return logistic_regression def set_linear_regression_args(args, name=None, fields=None, objective_id=None, linear_regression_fields=None): """Return linear regression arguments dict """ if name is None: name = args.name if linear_regression_fields is None: linear_regression_fields = args.linear_regression_fields_ if objective_id is None: objective_id = args.objective_id_ linear_regression_args = set_basic_model_args(args, name) linear_regression_args.update({ "seed": SEED if args.seed is None else args.seed }) if objective_id is not None and fields is not None: linear_regression_args.update({"objective_field": objective_id}) if linear_regression_fields and fields is not None: input_fields = configure_input_fields(fields, linear_regression_fields) linear_regression_args.update(input_fields=input_fields) if ((args.evaluate and args.test_split == 0 and args.test_datasets is None) or args.cross_validation_rate > 0): linear_regression_args.update(seed=SEED) if args.cross_validation_rate > 0: args.sample_rate = 1 - args.cross_validation_rate args.replacement = False elif (args.sample_rate == 1 and args.test_datasets is None and not args.dataset_off): args.sample_rate = EVALUATE_SAMPLE_RATE linear_regression_args.update({"sample_rate": args.sample_rate}) linear_regression_args.update({"bias": args.bias}) if args.field_codings is not None: linear_regression_args.update(\ {"field_codings": args.field_codings_}) linear_regression_args = update_sample_parameters_args( \ linear_regression_args, args) if 'linear_regression' in args.json_args: update_json_args(linear_regression_args, args.json_args.get('linear_regression'), fields) return linear_regression_args def create_linear_regressions(datasets, linear_regression_ids, linear_regression_args, args, api=None, path=None, session_file=None, 
log=None): """Create remote linear regressions """ if api is None: api = bigml.api.BigML() linear_regressions = linear_regression_ids[:] existing_linear_regressions = len(linear_regressions) linear_regression_args_list = [] datasets = datasets[existing_linear_regressions:] # if resuming and all linear regressions were created, # there will be no datasets left if datasets: if isinstance(linear_regression_args, list): linear_regression_args_list = linear_regression_args # Only one linear regression per command, at present number_of_linear_regressions = 1 message = dated("Creating %s.\n" % plural("linear regression", number_of_linear_regressions)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_linear_regressions): wait_for_available_tasks(inprogress, args.max_parallel_linear_regressions, api, "linearregression") if linear_regression_args_list: linear_regression_args = linear_regression_args_list[i] if args.cross_validation_rate > 0: new_seed = get_basic_seed(i + existing_linear_regressions) linear_regression_args.update(seed=new_seed) if (args.test_datasets and args.evaluate): dataset = datasets[i] linear_regression = api.create_linear_regression( \ dataset, linear_regression_args, retries=None) elif args.dataset_off and args.evaluate: multi_dataset = args.test_dataset_ids[:] del multi_dataset[i + existing_linear_regressions] linear_regression = api.create_linear_regression( \ multi_dataset, linear_regression_args, retries=None) else: linear_regression = api.create_linear_regression( \ datasets, linear_regression_args, retries=None) linear_regression_id = check_resource_error( \ linear_regression, "Failed to create linear regression: ") log_message("%s\n" % linear_regression_id, log_file=log) linear_regression_ids.append(linear_regression_id) inprogress.append(linear_regression_id) linear_regressions.append(linear_regression) log_created_resources("linear_regressions", path, 
linear_regression_id, mode='a') if args.verbosity: if bigml.api.get_status(linear_regression)['code'] != \ bigml.api.FINISHED: try: linear_regression = check_resource( \ linear_regression, api.get_linear_regression, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished linear regression:" " %s" % str(exception)) linear_regressions[0] = linear_regression message = dated("linear regression created: %s\n" % get_url(linear_regression)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, linear_regression) return linear_regressions, linear_regression_ids def get_linear_regressions(linear_regression_ids, args, api=None, session_file=None): """Retrieves remote linear regression in its actual status """ if api is None: api = bigml.api.BigML() linear_regression_id = "" linear_regressions = linear_regression_ids linear_regression_id = linear_regression_ids[0] message = dated("Retrieving %s. %s\n" % (plural("linear regression", len(linear_regression_ids)), get_url(linear_regression_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one linear regression to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS linear_regression = check_resource(linear_regression_ids[0], api.get_linear_regression, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished linear regression: %s" % \ str(exception)) linear_regressions[0] = linear_regression return linear_regressions, linear_regression_ids def set_publish_linear_regression_args(args): """Set args to publish linear regression """ public_linear_regression = {} if args.public_linear_regression: public_linear_regression = {"private": False} if args.model_price: public_linear_regression.update(price=args.model_price) if args.cpp: 
public_linear_regression.update(credits_per_prediction=args.cpp) return public_linear_regression def update_linear_regression(linear_regression, linear_regression_args, args, api=None, path=None, session_file=None): """Updates linear regression properties """ if api is None: api = bigml.api.BigML() message = dated("Updating linear regression. %s\n" % get_url(linear_regression)) log_message(message, log_file=session_file, console=args.verbosity) linear_regression = api.update_linear_regression(linear_regression, \ linear_regression_args) check_resource_error(linear_regression, "Failed to update linear regression: %s" % linear_regression['resource']) linear_regression = check_resource(linear_regression, api.get_linear_regression, query_string=FIELDS_QS) if is_shared(linear_regression): message = dated("Shared linear regression link. %s\n" % get_url(linear_regression, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, linear_regression) return linear_regression def set_time_series_args(args, name=None, fields=None, objective_id=None, time_series_fields=None): """Return time-series arguments dict """ if name is None: name = args.name if objective_id is None: objective_id = args.objective_id_ time_series_args = set_basic_model_args(args, name) time_series_args.update({ "all_numeric_objectives": args.all_numeric_objectives, "period": args.period }) # if we need to evaluate and there's no previous split, use a range if args.evaluate and args.test_split == 0 and not args.has_test_datasets_: args.range_ = [1, int(args.max_rows * EVALUATE_SAMPLE_RATE)] if objective_id is not None: time_series_args.update({"objective_field": objective_id}) if args.objectives: time_series_args.update({"objective_fields": args.objective_fields_}) if args.damped_trend is not None: time_series_args.update({"damped_trend": args.damped_trend}) if args.error is not None: time_series_args.update({"error": args.error}) if 
args.field_parameters: time_series_args.update({"field_parameters": args.field_parameters_}) if args.range_: time_series_args.update({"range": args.range_}) if args.seasonality is not None: time_series_args.update({"seasonality": args.seasonality}) if args.trend is not None: time_series_args.update({"trend": args.trend}) if args.time_start or args.time_end or args.time_interval or \ args.time_interval_unit: time_range = {} if args.time_start: time_range.update({"start": args.time_start}) if args.time_end: time_range.update({"end": args.time_end}) if args.time_interval: time_range.update({"interval": args.time_interval}) if args.time_interval_unit: time_range.update({"interval_unit": args.time_interval_unit}) time_series.update({"time_range": time_range}) if 'time_series' in args.json_args: update_json_args(time_series_args, args.json_args.get('time_series'), fields) return time_series_args def create_time_series(datasets, time_series_ids, time_series_args, args, api=None, path=None, session_file=None, log=None): """Create remote time-series """ if api is None: api = bigml.api.BigML() time_series_set = time_series_ids[:] existing_time_series = len(time_series_set) time_series_args_list = [] datasets = datasets[existing_time_series:] # if resuming and all time-series were created, # there will be no datasets left if datasets: if isinstance(time_series_args, list): time_series_args_list = time_series_args # Only one time-series per command, at present number_of_time_series = 1 message = dated("Creating %s time-series.\n" % number_of_time_series) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_time_series): wait_for_available_tasks(inprogress, args.max_parallel_time_series, api, "timeseries") if time_series_args_list: time_series_args = time_series_args_list[i] if (args.test_datasets and args.evaluate): dataset = datasets[i] time_series = api.create_time_series( \ dataset, 
time_series_args, retries=None) else: time_series = api.create_time_series( \ datasets, time_series_args, retries=None) time_series_id = check_resource_error( \ time_series, "Failed to create time-series: ") log_message("%s\n" % time_series_id, log_file=log) time_series_ids.append(time_series_id) inprogress.append(time_series_id) time_series_set.append(time_series) log_created_resources("time_series", path, time_series_id, mode='a') if args.verbosity: if bigml.api.get_status(time_series)['code'] != \ bigml.api.FINISHED: try: time_series = check_resource( \ time_series, api.get_time_series, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished time-series:" " %s" % str(exception)) time_series_set[0] = time_series message = dated("Time-series created: %s\n" % get_url(time_series)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, time_series) return time_series_set, time_series_ids def get_time_series(time_series_ids, args, api=None, session_file=None): """Retrieves remote time-series in its actual status """ if api is None: api = bigml.api.BigML() time_series_id = "" time_series_set = time_series_ids time_series_id = time_series_ids[0] message = dated("Retrieving %s. 
%s\n" % (plural("time-series", len(time_series_ids)), get_url(time_series_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one time-series to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS time_series = check_resource(time_series_ids[0], api.get_time_series, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished time-series: %s" % \ str(exception)) time_series_set[0] = time_series return time_series_set, time_series_ids def set_publish_time_series_args(args): """Set args to publish time-series """ public_time_series = {} if args.public_time_series: public_time_series = {"private": False} if args.model_price: public_time_series.update(price=args.model_price) if args.cpp: public_time_series.update(credits_per_prediction=args.cpp) return public_time_series def update_time_series(time_series, time_series_args, args, api=None, path=None, session_file=None): """Updates time-series properties """ if api is None: api = bigml.api.BigML() message = dated("Updating time-series. %s\n" % get_url(time_series)) log_message(message, log_file=session_file, console=args.verbosity) time_series = api.update_time_series(time_series, \ time_series_args) check_resource_error(time_series, "Failed to update time-series: %s" % time_series['resource']) time_series = check_resource(time_series, api.get_time_series, query_string=FIELDS_QS) if is_shared(time_series): message = dated("Shared time-series link. 
%s\n" % get_url(time_series, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, time_series) return time_series def set_library_args(args, name=None): """Returns a library arguments dict """ if name is None: name = args.name library_args = set_basic_args(args, name) if args.project_id is not None: library_args.update({"project": args.project_id}) if args.imports is not None: library_args.update({"imports": args.imports_}) update_attributes(library_args, args.json_args.get('library')) return library_args def create_library(source_code, library_args, args, api=None, path=None, session_file=None, log=None): """Creates remote library """ if api is None: api = bigml.api.BigML() message = dated("Creating library \"%s\".\n" % library_args["name"]) log_message(message, log_file=session_file, console=args.verbosity) library = api.create_library(source_code, library_args) log_created_resources("library", path, bigml.api.get_library_id(library), mode='a') library_id = check_resource_error(library, "Failed to create library: ") try: library = check_resource(library, api.get_library) except ValueError, exception: sys.exit("Failed to get a compiled library: %s" % str(exception)) message = dated("Library created: %s\n" % get_url(library)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % library_id, log_file=log) return library def update_sample_parameters_args(resource_args, args): """Updates the information related to the common sampling options """ if args.sample_rate != 1: resource_args.update({"sample_rate": args.sample_rate}) if hasattr(args, "out_of_bag") and args.out_of_bag: resource_args.update({"out_of_bag": True}) if hasattr(args, "replacement") and args.replacement: resource_args.update({"replacement": True}) if hasattr(args, "randomize") and args.randomize: resource_args.update({"randomize": True}) return resource_args def set_topic_model_args(args, 
name=None, fields=None, topic_model_fields=None): """Return topic_model arguments dict """ if name is None: name = args.name if topic_model_fields is None: topic_model_fields = args.topic_model_fields_ topic_model_args = set_basic_args(args, name) topic_model_args.update({ "seed": SEED if args.seed is None else args.seed, "topicmodel_seed": SEED if args.seed is None else args.seed }) if topic_model_fields and fields is not None: input_fields = configure_input_fields(fields, topic_model_fields) topic_model_args.update(input_fields=input_fields) topic_model_args.update({"sample_rate": args.sample_rate}) topic_model_args.update({"bigrams": args.bigrams}) topic_model_args.update({"case_sensitive": args.case_sensitive}) if args.number_of_topics is not None: topic_model_args.update({"number_of_topics": args.number_of_topics}) if args.term_limit is not None: topic_model_args.update({"term_limit": args.term_limit}) if args.top_n_terms is not None: topic_model_args.update({"top_n_terms": args.top_n_terms}) if args.minimum_name_terms is not None: topic_model_args.update({"minimum_name_terms": args.minimum_name_terms}) if args.excluded_terms: topic_model_args.update({"excluded_terms": args.excluded_terms_}) topic_model_args = update_sample_parameters_args( \ topic_model_args, args) if 'topic_model' in args.json_args: update_json_args(topic_model_args, args.json_args.get('topic_model'), fields) return topic_model_args def create_topic_models(datasets, topic_model_ids, topic_model_args, args, api=None, path=None, session_file=None, log=None): """Create remote topic models """ if api is None: api = bigml.api.BigML() topic_models = topic_model_ids[:] existing_topic_models = len(topic_models) topic_model_args_list = [] datasets = datasets[existing_topic_models:] # if resuming and all topic models were created, there will # be no datasets left if datasets: if isinstance(topic_model_args, list): topic_model_args_list = topic_model_args # Only one topic model per command, at present 
number_of_topic_models = 1 message = dated("Creating %s.\n" % plural("topic model", number_of_topic_models)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_topic_models): wait_for_available_tasks(inprogress, args.max_parallel_topic_models, api, "topicmodel") if topic_model_args_list: topic_model_args = topic_model_args_list[i] topic_model = api.create_topic_model(datasets, topic_model_args, retries=None) topic_model_id = check_resource_error( \ topic_model, "Failed to create topic model: ") log_message("%s\n" % topic_model_id, log_file=log) topic_model_ids.append(topic_model_id) inprogress.append(topic_model_id) topic_models.append(topic_model) log_created_resources("topic_models", path, topic_model_id, mode='a') if args.verbosity: if bigml.api.get_status(topic_model)['code'] != bigml.api.FINISHED: try: topic_model = check_resource( \ topic_model, api.get_topic_model, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished topic model: %s" % str(exception)) topic_models[0] = topic_model message = dated("Topic model created: %s\n" % get_url(topic_model)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, topic_model) return topic_models, topic_model_ids def get_topic_models(topic_model_ids, args, api=None, session_file=None): """Retrieves remote topic model in its actual status """ if api is None: api = bigml.api.BigML() topic_model_id = "" topic_models = topic_model_ids topic_model_id = topic_model_ids[0] message = dated("Retrieving %s. 
%s\n" % (plural("topic model", len(topic_model_ids)), get_url(topic_model_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one topic_model at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS topic_model = check_resource(topic_model_ids[0], api.get_topic_model, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished topic model: %s" % \ str(exception)) topic_models[0] = topic_model return topic_models, topic_model_ids def set_publish_topic_model_args(args): """Set args to publish topic model """ public_topic_model = {} if args.public_topic_model: public_topic_model = {"private": False} if args.model_price: public_topic_model.update(price=args.model_price) if args.cpp: public_topic_model.update(credits_per_prediction=args.cpp) return public_topic_model def update_topic_model(topic_model, topic_model_args, args, api=None, path=None, session_file=None): """Updates topic model properties """ if api is None: api = bigml.api.BigML() message = dated("Updating topic model. %s\n" % get_url(topic_model)) log_message(message, log_file=session_file, console=args.verbosity) topic_model = api.update_topic_model(topic_model, \ topic_model_args) check_resource_error(topic_model, "Failed to update topic model: %s" % topic_model['resource']) topic_model = check_resource(topic_model, api.get_topic_model, query_string=FIELDS_QS) if is_shared(topic_model): message = dated("Shared topic model link. 
%s\n" % get_url(topic_model, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, topic_model) return topic_model def set_batch_topic_distribution_args( \ args, fields=None, dataset_fields=None): """Return batch topic distribution args dict """ batch_topic_distribution_args = set_basic_batch_args(args, args.name) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_topic_distribution_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) if args.prediction_info == FULL_FORMAT: batch_topic_distribution_args.update(all_fields=True) if args.prediction_fields: batch_topic_distribution_args.update(all_fields=False) prediction_fields = [] for field in args.prediction_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError, exc: sys.exit(exc) prediction_fields.append(field) batch_topic_distribution_args.update(output_fields=prediction_fields) if 'batch_topic_distribution' in args.json_args: update_json_args( batch_topic_distribution_args, args.json_args.get( \ 'batch_topic_distribution'), fields) return batch_topic_distribution_args def create_batch_topic_distribution(topic_model, test_dataset, batch_topic_distribution_args, args, api=None, session_file=None, path=None, log=None): """Creates remote batch topic distribution """ if api is None: api = bigml.api.BigML() message = dated("Creating batch topic distribution.\n") log_message(message, log_file=session_file, console=args.verbosity) batch_topic_distribution = api.create_batch_topic_distribution( \ topic_model, test_dataset, batch_topic_distribution_args, retries=None) log_created_resources( \ "batch_topic_distribution", path, bigml.api.get_batch_topic_distribution_id(batch_topic_distribution), mode='a') batch_topic_distribution_id = check_resource_error( 
batch_topic_distribution, "Failed to create batch topic distribution: ") try: batch_topic_distribution = check_resource( \ batch_topic_distribution, api.get_batch_topic_distribution) except ValueError, exception: sys.exit("Failed to get a finished batch topic distribution: %s" % str(exception)) message = dated("Batch topic distribution created: %s\n" % get_url(batch_topic_distribution)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % batch_topic_distribution_id, log_file=log) if args.reports: report(args.reports, path, batch_topic_distribution) return batch_topic_distribution def set_forecast_args(args, fields=None): """Return forecast dict """ forecast_args = set_basic_args(args, args.name) forecast_args.update({ "intervals": args.intervals, }) if 'forecast' in args.json_args: update_json_args( forecast_args, args.json_args.get('forecast'), fields) return forecast_args def create_forecast(time_series, input_data, forecast_args, args, api=None, session_file=None, path=None, log=None): """Creates remote forecast """ if api is None: api = bigml.api.BigML() message = dated("Creating remote forecast.\n") log_message(message, log_file=session_file, console=args.verbosity) forecast = api.create_forecast(time_series, input_data, forecast_args, retries=None) log_created_resources("forecast", path, bigml.api.get_forecast_id(forecast), mode='a') forecast_id = check_resource_error( forecast, "Failed to create forecast: ") try: forecast = check_resource(forecast, api.get_forecast) except ValueError, exception: sys.exit("Failed to get a finished forecast: %s" % str(exception)) message = dated("Forecast created: %s\n" % get_url(forecast)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % forecast_id, log_file=log) if args.reports: report(args.reports, path, forecast) return forecast def set_deepnet_args(args, name=None, fields=None, objective_id=None, deepnet_fields=None): """Return deepnet arguments 
dict """ if name is None: name = args.name if deepnet_fields is None: deepnet_fields = args.deepnet_fields_ if objective_id is None: objective_id = args.objective_id_ deepnet_args = set_basic_model_args(args, name) deepnet_args.update({ "seed": SEED if args.seed is None else args.seed }) if objective_id is not None and fields is not None: deepnet_args.update({"objective_field": objective_id}) if deepnet_fields and fields is not None: input_fields = configure_input_fields(fields, deepnet_fields) deepnet_args.update(input_fields=input_fields) if ((args.evaluate and args.test_split == 0 and args.test_datasets is None) or args.cross_validation_rate > 0): deepnet_args.update(seed=SEED) if args.cross_validation_rate > 0: args.sample_rate = 1 - args.cross_validation_rate args.replacement = False elif (args.sample_rate == 1 and args.test_datasets is None and not args.dataset_off): args.sample_rate = EVALUATE_SAMPLE_RATE deepnet_args.update({"sample_rate": args.sample_rate}) if args.batch_normalization is not None: deepnet_args.update({"batch_normalization": args.batch_normalization}) if args.dropout_rate: deepnet_args.update({"dropout_rate": args.dropout_rate}) if args.hidden_layers is not None: deepnet_args.update({"hidden_layers": args.hidden_layers_}) if args.learn_residuals is not None: deepnet_args.update( \ {"learn_residuals": args.learn_residuals}) if args.max_iterations is not None: deepnet_args.update(\ {"learning_rate": args.learning_rate}) if args.max_training_time is not None: deepnet_args.update(\ {"max_training_time": args.max_training_time}) if args.number_of_hidden_layers is not None: deepnet_args.update(\ {"number_of_hidden_layers": args.number_of_hidden_layers}) if args.number_of_model_candidates is not None: deepnet_args.update(\ {"number_of_model_candidates": args.number_of_model_candidates}) if args.search is not None: deepnet_args.update(\ {"search": args.search}) if args.suggest_structure is not None: deepnet_args.update(\ {"suggest_structure": 
args.suggest_structure}) if not args.missing_numerics: deepnet_args.update(\ {"missing_numerics": args.missing_numerics}) if args.tree_embedding: deepnet_args.update(\ {"tree_embedding": args.tree_embedding}) deepnet_args = update_sample_parameters_args( \ deepnet_args, args) if 'deepnet' in args.json_args: update_json_args(deepnet_args, args.json_args.get('deepnet'), fields) return deepnet_args def create_deepnets(datasets, deepnet_ids, deepnet_args, args, api=None, path=None, session_file=None, log=None): """Create remote deepnets """ if api is None: api = bigml.api.BigML() deepnets = deepnet_ids[:] existing_deepnets = len(deepnets) deepnet_args_list = [] datasets = datasets[existing_deepnets:] # if resuming and all deepnets were created, # there will be no datasets left if datasets: if isinstance(deepnet_args, list): deepnet_args_list = deepnet_args # Only one deepnet per command, at present number_of_deepnets = 1 message = dated("Creating %s.\n" % plural("deepnet", number_of_deepnets)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_deepnets): wait_for_available_tasks(inprogress, args.max_parallel_deepnets, api, "deepnet") if deepnet_args_list: deepnet_args = deepnet_args_list[i] if args.cross_validation_rate > 0: new_seed = get_basic_seed(i + existing_deepnets) deepnet_args.update(seed=new_seed) if (args.test_datasets and args.evaluate): dataset = datasets[i] deepnet = api.create_deepnet( \ dataset, deepnet_args, retries=None) elif args.dataset_off and args.evaluate: multi_dataset = args.test_dataset_ids[:] del multi_dataset[i + existing_deepnets] deepnet = api.create_deepnet( \ multi_dataset, deepnet_args, retries=None) else: deepnet = api.create_deepnet( \ datasets, deepnet_args, retries=None) deepnet_id = check_resource_error( \ deepnet, "Failed to create deepnet: ") log_message("%s\n" % deepnet_id, log_file=log) deepnet_ids.append(deepnet_id) 
inprogress.append(deepnet_id) deepnets.append(deepnet) log_created_resources("deepnets", path, deepnet_id, mode='a') if args.verbosity: if bigml.api.get_status(deepnet)['code'] != \ bigml.api.FINISHED: try: deepnet = check_resource( \ deepnet, api.get_deepnet, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished deepnet:" " %s" % str(exception)) deepnets[0] = deepnet message = dated("Deepnet created: %s\n" % get_url(deepnet)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, deepnet) return deepnets, deepnet_ids def get_deepnets(deepnet_ids, args, api=None, session_file=None): """Retrieves remote deepnet in its actual status """ if api is None: api = bigml.api.BigML() deepnet_id = "" deepnets = deepnet_ids deepnet_id = deepnet_ids[0] message = dated("Retrieving %s. %s\n" % (plural("deepnet", len(deepnet_ids)), get_url(deepnet_id))) log_message(message, log_file=session_file, console=args.verbosity) # only one deepnet to predict at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS deepnet = check_resource(deepnet_ids[0], api.get_deepnet, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished deepnet: %s" % \ str(exception)) deepnets[0] = deepnet return deepnets, deepnet_ids def update_deepnets(deepnet, deepnet_args, args, api=None, path=None, session_file=None): """Updates deepnet properties """ if api is None: api = bigml.api.BigML() message = dated("Updating deepnet. %s\n" % get_url(deepnet)) log_message(message, log_file=session_file, console=args.verbosity) deepnet = api.update_deepnet(deepnet, deepnet_args) check_resource_error(deepnet, "Failed to update deepnet: %s" % deepnet['resource']) deepnet = check_resource(deepnet, api.get_deepnet, query_string=FIELDS_QS) if is_shared(deepnet): message = dated("Shared deepnet link. 
%s\n" % get_url(deepnet, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, deepnet) return deepnet def set_pca_args(args, name=None, fields=None, pca_fields=None): """Return pca arguments dict """ if name is None: name = args.name if pca_fields is None: pca_fields = args.pca_fields_ pca_args = set_basic_args(args, name) pca_args.update({ "seed": SEED if args.seed is None else args.seed, "pca_seed": SEED if args.seed is None else args.seed }) pca_args.update({"sample_rate": args.sample_rate}) pca_args = update_sample_parameters_args( \ pca_args, args) if fields is not None: input_fields = fields.fields.keys() if pca_fields and fields is not None: input_fields = configure_input_fields(fields, pca_fields) if args.exclude_objective: input_fields = [field for field in input_fields \ if field not in args.exclude_fields] pca_args.update(input_fields=input_fields) if 'pca' in args.json_args: update_json_args(pca_args, args.json_args.get('pca'), fields) return pca_args def create_pca(datasets, pca, pca_args, args, api=None, path=None, session_file=None, log=None): """Create remote pcas """ if api is None: api = bigml.api.BigML() pcas = [] pca_ids = [] if pca is not None: pcas = [pca] pca_ids = [pca] existing_pcas = len(pcas) pca_args_list = [] datasets = datasets[existing_pcas:] # if resuming and all pcas were created, there will # be no datasets left if datasets: if isinstance(pca_args, list): pca_args_list = pca_args # Only one pca per command, at present number_of_pcas = 1 message = dated("Creating %s.\n" % plural("pca", number_of_pcas)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_pcas): wait_for_available_tasks(inprogress, args.max_parallel_pcas, api, "pca") if pca_args_list: pca_args = pca_args_list[i] pca = api.create_pca(datasets, pca_args, retries=None) pca_id = check_resource_error( \ pca, "Failed 
to create pca: ") log_message("%s\n" % pca_id, log_file=log) pca_ids.append(pca_id) inprogress.append(pca_id) pcas.append(pca) log_created_resources("pcas", path, pca_id, mode='a') if args.verbosity: if bigml.api.get_status(pca)['code'] != bigml.api.FINISHED: try: pca = check_resource( \ pca, api.get_pca, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished pca: %s" % str(exception)) pcas[0] = pca message = dated("PCA created: %s\n" % get_url(pca)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, pca) return pca def get_pca(pca, args, api=None, session_file=None): """Retrieves remote pca in its actual status """ if api is None: api = bigml.api.BigML() message = dated("Retrieving PCA. %s\n" % get_url(pca)) log_message(message, log_file=session_file, console=args.verbosity) # only one PCA at present try: # we need the whole fields structure when exporting fields query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS pca = check_resource(pca, api.get_pca, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished pca: %s" % \ str(exception)) return pca def set_publish_pca_args(args): """Set args to publish pca """ public_pca = {} if args.public_pca: public_pca = {"private": False} if args.model_price: public_pca.update(price=args.model_price) if args.cpp: public_pca.update(credits_per_prediction=args.cpp) return public_pca def update_pca(pca, pca_args, args, api=None, path=None, session_file=None): """Updates pca properties """ if api is None: api = bigml.api.BigML() message = dated("Updating PCA. %s\n" % get_url(pca)) log_message(message, log_file=session_file, console=args.verbosity) pca = api.update_pca(pca, pca_args) check_resource_error(pca, "Failed to update PCA: %s" % pca['resource']) pca = check_resource(pca, api.get_pca, query_string=FIELDS_QS) if is_shared(pca): message = dated("Shared PCA link. 
%s\n" % get_url(pca, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, pca) return pca def set_batch_projection_args( \ args, fields=None, dataset_fields=None): """Return batch projection args dict """ batch_projection_args = set_basic_batch_args(args, args.name) if args.fields_map_ and fields is not None: if dataset_fields is None: dataset_fields = fields batch_projection_args.update({ "fields_map": map_fields(args.fields_map_, fields, dataset_fields)}) batch_projection_args.update(all_fields=False) if args.projection_fields: batch_projection_args.update(all_fields=True) projection_fields = [] if args.projection_fields != "all": batch_projection_args.update(all_fields=True) for field in args.projection_fields.split(args.args_separator): field = field.strip() if not field in dataset_fields.fields: try: field = dataset_fields.field_id(field) except ValueError, exc: sys.exit(exc) projection_fields.append(field) batch_projection_args.update(output_fields=projection_fields) if 'batch_projection' in args.json_args: update_json_args( batch_projection_args, args.json_args.get( \ 'batch_projection'), fields) return batch_projection_args def create_batch_projection(pca, test_dataset, batch_projection_args, args, api=None, session_file=None, path=None, log=None): """Creates remote batch projection """ if api is None: api = bigml.api.BigML() message = dated("Creating batch projection.\n") log_message(message, log_file=session_file, console=args.verbosity) batch_projection = api.create_batch_projection( \ pca, test_dataset, batch_projection_args, retries=None) log_created_resources( \ "batch_projection", path, bigml.api.get_batch_projection_id(batch_projection), mode='a') batch_projection_id = check_resource_error( batch_projection, "Failed to create batch projection: ") try: batch_projection = check_resource( \ batch_projection, api.get_batch_projection) except ValueError, exception: sys.exit("Failed 
to get a finished batch projection: %s" % str(exception)) message = dated("Batch projection created: %s\n" % get_url(batch_projection)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % batch_projection_id, log_file=log) if args.reports: report(args.reports, path, batch_projection) return batch_projection def set_fusion_args(args, name=None, fields=None): """Return fusion arguments dict """ if name is None: name = args.name fusion_args = set_basic_args(args, name) if 'fusion' in args.json_args: update_json_args(fusion_args, args.json_args.get('fusion'), fields) return fusion_args def create_fusion(models, fusion, fusion_args, args, api=None, path=None, session_file=None, log=None): """Create remote fusion """ if api is None: api = bigml.api.BigML() fusions = [] fusion_ids = [] if fusion is not None: fusions = [fusion] fusion_ids = [fusion] existing_fusions = len(fusions) # if resuming and all fusions were created if models: # Only one fusion per command, at present number_of_fusions = 1 message = dated("Creating %s.\n" % plural("fusion", number_of_fusions)) log_message(message, log_file=session_file, console=args.verbosity) query_string = FIELDS_QS inprogress = [] for i in range(0, number_of_fusions): wait_for_available_tasks(inprogress, args.max_parallel_fusions, api, "fusion") fusion = api.create_fusion(models, fusion_args, retries=None) fusion_id = check_resource_error( \ fusion, "Failed to create fusion: ") log_message("%s\n" % fusion_id, log_file=log) fusion_ids.append(fusion_id) inprogress.append(fusion_id) fusions.append(fusion) log_created_resources("fusions", path, fusion_id, mode='a') if args.verbosity: if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED: try: fusion = check_resource( \ fusion, api.get_fusion, query_string=query_string) except ValueError, exception: sys.exit("Failed to get a finished fusion: %s" % str(exception)) fusions[0] = fusion message = dated("Fusion created: %s\n" % get_url(fusion)) 
log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, fusion) return fusion def get_fusion(fusion, args, api=None, session_file=None): """Retrieves remote fusion in its actual status """ if api is None: api = bigml.api.BigML() message = dated("Retrieving Fusion. %s\n" % get_url(fusion)) log_message(message, log_file=session_file, console=args.verbosity) # only one fusion at present try: # we need the whole fields structure when exporting fields fusion = check_resource(fusion, api.get_fusion, query_string=ALL_FIELDS_QS) except ValueError, exception: sys.exit("Failed to get a finished fusion: %s" % \ str(exception)) return fusion def set_publish_fusion_args(args): """Set args to publish fusion """ public_fusion = {} if args.public_fusion: public_fusion = {"private": False} if args.model_price: public_fusion.update(price=args.model_price) if args.cpp: public_fusion.update(credits_per_prediction=args.cpp) return public_fusion def update_fusion(fusion, fusion_args, args, api=None, path=None, session_file=None): """Updates fusion properties """ if api is None: api = bigml.api.BigML() message = dated("Updating Fusion. %s\n" % get_url(fusion)) log_message(message, log_file=session_file, console=args.verbosity) fusion = api.update_fusion(fusion, fusion_args) check_resource_error(fusion, "Failed to update Fusion: %s" % fusion['resource']) fusion = check_resource(fusion, api.get_fusion, query_string=FIELDS_QS) if is_shared(fusion): message = dated("Shared Fusion link. %s\n" % get_url(fusion, shared=True)) log_message(message, log_file=session_file, console=args.verbosity) if args.reports: report(args.reports, path, fusion) return fusion
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 2321, 12, 42334, 4403, 5805, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2,...
2.158051
72,546
# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Contains a helper function for deploying and executing a packaged executable on a Target.""" from __future__ import print_function import common import hashlib import logging import multiprocessing import os import re import select import subprocess import sys import threading import uuid from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter FAR = common.GetHostToolPathFromPlatform('far') # Amount of time to wait for the termination of the system log output thread. _JOIN_TIMEOUT_SECS = 5 def _AttachKernelLogReader(target): """Attaches a kernel log reader as a long-running SSH task.""" logging.info('Attaching kernel logger.') return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) class SystemLogReader(object): """Collects and symbolizes Fuchsia system log to a file.""" def __exit__(self, exc_type, exc_val, exc_tb): """Stops the system logging processes and closes the output file.""" if self._symbolizer_proc: self._symbolizer_proc.kill() if self._listener_proc: self._listener_proc.kill() if self._system_log: self._system_log.close() def Start(self, target, package_paths, system_log_file): """Start a system log reader as a long-running SSH task.""" logging.debug('Writing fuchsia system log to %s' % system_log_file) self._listener_proc = target.RunCommandPiped(['log_listener'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self._system_log = open(system_log_file,'w', buffering=1) self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout, self._system_log, BuildIdsPaths(package_paths)) class MergedInputStream(object): """Merges a number of input streams into a UNIX pipe on a dedicated thread. 
Terminates when the file descriptor of the primary stream (the first in the sequence) is closed.""" def Start(self): """Returns a pipe to the merged output stream.""" read_pipe, write_pipe = os.pipe() # Disable buffering for the stream to make sure there is no delay in logs. self._output_stream = os.fdopen(write_pipe, 'w', 0) self._thread = threading.Thread(target=self._Run) self._thread.start(); return os.fdopen(read_pipe, 'r') class RunPackageArgs: """RunPackage() configuration arguments structure. symbolizer_config: A newline delimited list of source files contained in the package. Omitting this parameter will disable symbolization. system_logging: If set, connects a system log reader to the target. """ @staticmethod def _DrainStreamToStdout(stream, quit_event): """Outputs the contents of |stream| until |quit_event| is set.""" while not quit_event.is_set(): rlist, _, _ = select.select([ stream ], [], [], 0.1) if rlist: line = rlist[0].readline() if not line: return print(line.rstrip()) def RunPackage(output_dir, target, package_paths, package_name, package_args, args): """Installs the Fuchsia package at |package_path| on the target, executes it with |package_args|, and symbolizes its output. output_dir: The path containing the build output files. target: The deployment Target object that will run the package. package_paths: The paths to the .far packages to be installed. package_name: The name of the primary package to run. package_args: The arguments which will be passed to the Fuchsia process. args: Structure of arguments to configure how the package will be run. Returns the exit code of the remote package process.""" system_logger = ( _AttachKernelLogReader(target) if args.system_logging else None) try: if system_logger: # Spin up a thread to asynchronously dump the system log to stdout # for easier diagnoses of early, pre-execution failures. 
log_output_quit_event = multiprocessing.Event() log_output_thread = threading.Thread( target= lambda: _DrainStreamToStdout(system_logger.stdout, log_output_quit_event) ) log_output_thread.daemon = True log_output_thread.start() with target.GetAmberRepo(): target.InstallPackage(package_paths) if system_logger: log_output_quit_event.set() log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS) logging.info('Running application.') command = ['run', _GetComponentUri(package_name)] + package_args process = target.RunCommandPiped( command, stdin=open(os.devnull, 'r'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if system_logger: output_stream = MergedInputStream( [process.stdout, system_logger.stdout]).Start() else: output_stream = process.stdout # Run the log data through the symbolizer process. output_stream = SymbolizerFilter(output_stream, BuildIdsPaths(package_paths)) for next_line in output_stream: print(next_line.rstrip()) process.wait() if process.returncode == 0: logging.info('Process exited normally with status code 0.') else: # The test runner returns an error status code if *any* tests fail, # so we should proceed anyway. logging.warning( 'Process exited with status code %d.' % process.returncode) finally: if system_logger: logging.info('Terminating kernel log reader.') log_output_quit_event.set() log_output_thread.join() system_logger.kill() return process.returncode
[ 2, 15069, 2864, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, 37811...
2.645944
2,268
""" handle PEP 420 implicit namespace packages for labextensions """ # pylint: disable=protected-access import importlib import sys from pathlib import Path from jupyterlab import federated_labextensions from jupyterlab.labextensions import LabExtensionApp HERE = Path(__file__).parent ROOT = HERE.parent NODE_MODULES = ROOT / "node_modules" BUILDER = NODE_MODULES / "@jupyterlab" / "builder" / "lib" / "build-labextension.js" federated_labextensions._get_labextension_metadata = _get_labextension_metadata main = LabExtensionApp.launch_instance if __name__ == "__main__": sys.exit(main())
[ 37811, 5412, 350, 8905, 28262, 16992, 25745, 10392, 329, 2248, 2302, 5736, 198, 37811, 198, 2, 279, 2645, 600, 25, 15560, 28, 24326, 12, 15526, 198, 11748, 1330, 8019, 198, 11748, 25064, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 673...
2.960591
203
# For faster and efficient mathematical evaluations we use numpy import numpy as np # For image proccesing we use openCV import cv2 # For creating and manipulating files import os # the time package provides various time-related functions import time # Create a video capture object to capture a video using device(camera) or from an existing video cap = cv2.VideoCapture(0) # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc('M','P','E','G') out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480)) # Create a directory for storing the frames try: if not os.path.exists('data'): os.makedirs('data') except OSError: print ('Error: Creating directory of data') # Sometimes, cap(video capture object) may not have initialized the capture. In that case cap.read() raises an error. # So we first check if the capture has been initialized by using the isOpened() method and use the open() method to initilaise the capture(if necessary...) if not cap.isOpened() : cap.open() # log the start time in seconds using the time() function start = time.time() currentFrame = 0 while(time.time()-start < 2): ret, frame = cap.read() if ret==True: out.write(frame) cv2.imshow('frame',frame) if cv2.waitKey(1) & 0xFF == ord('q'): break # Saves image of the current frame in jpg file name = './data/frame'+str(currentFrame) +'.png' print ('Creating...' + name) cv2.imwrite(name, frame) # To stop duplicate images currentFrame += 1 else: break # Release everything if job is finished cap.release() out.release() cv2.destroyAllWindows()
[ 2, 1114, 5443, 290, 6942, 18069, 34109, 356, 779, 299, 32152, 198, 11748, 299, 32152, 355, 45941, 198, 2, 1114, 2939, 386, 535, 274, 278, 356, 779, 1280, 33538, 198, 11748, 269, 85, 17, 198, 2, 1114, 4441, 290, 29349, 3696, 198, 117...
2.893471
582
import sys, os, shutil from PIL import Image import matplotlib.pyplot as plt import matplotlib.colors as colors import numpy as np if __name__ == '__main__': main(sys.argv[1])
[ 11748, 25064, 11, 28686, 11, 4423, 346, 198, 6738, 350, 4146, 1330, 7412, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 29487, 8019, 13, 4033, 669, 355, 7577, 198, 11748, 299, 32152, 355, 45941, ...
2.666667
69
import FWCore.ParameterSet.Config as cms process = cms.Process("HLTCOMPARE") process.load("HLTriggerOffline.Common.HLTValidation_cff") process.load("HLTriggerOffline.Common.HLTValidationHarvest_cff") process.load("HLTriggerOffline.Common.HLTValidationQT_cff") process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff") process.load("Configuration.StandardSequences.L1TriggerDefaultMenu_cff") process.load("Configuration.StandardSequences.GeometryRecoDB_cff") process.load('Configuration/StandardSequences/Services_cff') process.load('FWCore/MessageService/MessageLogger_cfi') process.load("DQMServices.Components.EDMtoMEConverter_cff") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring("") ) #process.dqmSaver.convention = 'RelVal' process.dqmSaver.convention = 'Offline' process.dqmSaver.saveByRun = cms.untracked.int32(-1) process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True) process.dqmSaver.forceRunNumber = cms.untracked.int32(1) process.dqmSaver.workflow = "/CMSSW_3_1_0/RelVal/TrigVal" #process.dqmSaver.referenceHandling = cms.untracked.string('skip') process.dqmSaver.referenceHandling = cms.untracked.string('all') process.DQMStore.verbose=0 #"/build/nuno/test/CMSSW_3_1_X_2009-02-05-0000/src/HltReference.root" process.options = cms.untracked.PSet( fileMode = cms.untracked.string('FULLMERGE') ) process.MessageLogger.categories.append('DQMFileSaver') process.MessageLogger.cout.DQMFileSaver = cms.untracked.PSet( limit = cms.untracked.int32(1000000) ) process.MessageLogger.cerr.DQMFileSaver = cms.untracked.PSet( limit = cms.untracked.int32(1000000) ) process.source.fileNames = cms.untracked.vstring( '/store/relval/CMSSW_3_1_0_pre4/RelValTTbar/GEN-SIM-RECO/STARTUP_30X_v1/0001/A42D4BC9-8C16-DE11-8767-003048678B00.root' ) process.validation = cms.Path( process.hltvalidation # process.HLTMuonVal # process.muonTriggerRateTimeAnalyzer #+process.HLTTauVal 
#+process.egammaValidationSequence #+process.HLTSusyExoVal #+process.heavyFlavorValidationSequence #+process.HLTJetMETValSeq #+process.HLTAlCaVal ) process.post_validation = cms.Path( process.hltpostvalidation # process.HLTMuonPostVal #+process.HLTTauPostVal #+process.EgammaPostVal #+process.SusyExoPostVal #+process.heavyFlavorValidationHarvestingSequence #+process.JetMETPostVal #+process.HLTAlCaPostVal ) process.qt_validation = cms.Path( process.hltvalidationqt ) process.edmtome = cms.Path(process.EDMtoMEConverter) process.saver = cms.Path(process.dqmSaver) process.schedule = cms.Schedule( process.validation, process.edmtome, process.post_validation, process.qt_validation, process.saver ) for filter in (getattr(process,f) for f in process.filters_()): if hasattr(filter,"outputFile"): filter.outputFile=""
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 198, 14681, 796, 269, 907, 13, 18709, 7203, 6581, 4825, 2662, 47, 12203, 4943, 198, 198, 14681, 13, 2220, 7203, 6581, 48344, 28657, 13, 17227, 13, 6581, 6849, 10751, ...
2.409385
1,236
from django.test import TestCase from django.contrib.auth import get_user_model from django.urls import reverse # so we can generate our API URL. # rest framework test helper tools: from rest_framework.test import APIClient # test client that we can use to make requests to our API and # then check what the response is. from rest_framework import status # a module that contains some status codes that we can see in # basically human readable form so instead of just typing 200 it's # HTTP 200 ok it just makes the tests a little bit easier to # read and understand. # add a helper function or a # constant variable for our URL that we're going to be testing # so we're be testing the create user URL: CREATE_USER_URL = reverse('user:create') # create the user # create URL and assign it to this create user URL variable. TOKEN_URL = reverse('user:token') # this is going to be the URL that we're going to use to make # the HTTP POST request to generate our token. ME_URL = reverse('user:me') # account of the user who is authenticated def create_user(**params): # **: dynamic list of arguments. # we can basically add as many arguments as we want """Helper function to create new user that you're testing with""" return get_user_model().objects.create_user(**params) class PublicUserApiTests(TestCase): """Test the users API (public)(unauthenticated)""" # this just makes it a little easier # to call our client in our test so every single test # we run we don't need to manually create this API client # we just have one client for our test suite that we can # reuse for all of the tests. 
def test_create_valid_user_success(self): """Test creating using with a valid payload is successful""" # payload is the object that you pass to # the API when you make the request payload = { 'email': 'test@londonappdev.com', 'password': 'testpass', 'name': 'name', } res = self.client.post(CREATE_USER_URL, payload) # make request # do a HTTP POST request to our client # to our URL for creating users # test that the outcome is what we expect: http 201 is created self.assertEqual(res.status_code, status.HTTP_201_CREATED) # test that the object is actually created user = get_user_model().objects.get(**res.data) # here is we can unwind the response for this because when # we do a HTTP POST and create a user we expect to see the # created user object returned in the API along with this # HTTP_201_created So if we do **res.data then it will take # the dictionary response which should look very similar # to this but it should have an added ID field We take the # res.data and we just pass it in as the parameters for the # get then if this gets the user successfully then we know # that the user is actually being created properly. self.assertTrue( user.check_password(payload['password']) # test our password is correct ) self.assertNotIn('password', res.data) # we don't want the password # being returned in the request because it is a potential # security vulnerability. 
# password shouldn't be returned when we return our user def test_user_exists(self): """Test creating a user that already exists failure""" payload = {'email': 'test@londonappdev.com', 'password': 'testpass', 'name': 'Test'} create_user(**payload) res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) # make sure it's a bad request because the user already exists def test_password_too_short(self): """Test that password must be more than 5 characters""" payload = {'email': 'test@londonappdev.com', 'password': 'pw', 'name': 'Test'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter( email=payload['email'] ).exists() # if the user exists it will return true otherwise it will return false self.assertFalse(user_exists) # every single test that runs it refreshes the database # so these users that were created in this test are not going to # be accessible in this test so each test it basically starts anew def test_create_token_for_user(self): """Test that a token is created for the user""" payload = {'email': 'test@londonappdev.com', 'password': 'testpass'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) # response # make a request to post payload to our token_url self.assertIn('token', res.data) # checks that there is a key called token in the # response.data that we get back. 
self.assertEqual(res.status_code, status.HTTP_200_OK) def test_create_token_invalid_credentials(self): """Test that token is not created if invalid credentials are given""" create_user(email='test@londonappdev.com', password='testpass') payload = {'email': 'test@londonappdev.com', 'password': 'wrong'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) # because the password is wrong def test_create_token_no_user(self): """Test that token is not created if user doens't exist""" payload = {'email': 'test@londonappdev.com', 'password': 'testpass'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) def test_create_token_missing_field(self): """Test that email and password are required""" res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''}) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) # because each test it resets the database # from scratch we don't need to worry about the fact that we created # the user in this test because this test is going to run isolated # from this test and the user won't exist by the time we start this test. # test that authentication is required for the endpoint. # make sure that after any changes that you make those api's will # always be private def test_retrieve_user_unauthorized(self): """Test that authentication required for users""" res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) # retrieve profile successful. # private means that authentication is required before # you can use these endpoints. class PrivateUserApiTests(TestCase): """Test API requests that require authentication""" # we don't need to basically set the authentication every single test we're # just doing the setup and then that happens automatically before each test. 
# test that we can retrieve the profile of the logged in user. def test_retrieve_profile_success(self): """Test retrieving profile for logged in user""" res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, { 'name': self.user.name, 'email': self.user.email, }) # test that you cannot do a HTTP POST request on the profile. def test_post_me_not_allowed(self): """Test that POST is not allowed on the me URL""" res = self.client.post(ME_URL, {}) # we'll just post the empty object here to test it. self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) # our user profile update test. def test_update_user_profile(self): """Test updating the user profile for authenticated user""" payload = {'name': 'new name', 'password': 'newpassword123'} res = self.client.patch(ME_URL, payload) self.user.refresh_from_db() self.assertEqual(self.user.name, payload['name']) self.assertTrue(self.user.check_password(payload['password'])) self.assertEqual(res.status_code, status.HTTP_200_OK)
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 2, 523, 356, 460, 7716, 674, 7824, 10289, 13, ...
2.834455
2,972
# Copyright (C) 2017, Philsong <songbohr@gmail.com> import json import config from .market import Market from exchanges.okcoin.OkcoinSpotAPI import OKCoinSpot
[ 2, 15069, 357, 34, 8, 2177, 11, 1380, 4487, 506, 1279, 34050, 65, 1219, 81, 31, 14816, 13, 785, 29, 198, 198, 11748, 33918, 198, 11748, 4566, 198, 6738, 764, 10728, 1330, 5991, 198, 6738, 14525, 13, 482, 3630, 13, 18690, 3630, 32565...
3.3125
48
import io import pathlib from typing import Any, Dict, Union import numpy as np import pandas as pd from qcelemental.util.serialization import serialize from ..interface.collections import HDF5View
[ 11748, 33245, 198, 11748, 3108, 8019, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 4479, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 10662, 344, 1732, 282, 13, 22602, 13, 46911, 1634, 1...
3.465517
58
#! /usr/local/bin/python # A simple gopher client. # # Usage: gopher [ [selector] host [port] ] import string import sys import os import socket # Default selector, host and port DEF_SELECTOR = '' DEF_HOST = 'gopher.micro.umn.edu' DEF_PORT = 70 # Recognized file types T_TEXTFILE = '0' T_MENU = '1' T_CSO = '2' T_ERROR = '3' T_BINHEX = '4' T_DOS = '5' T_UUENCODE = '6' T_SEARCH = '7' T_TELNET = '8' T_BINARY = '9' T_REDUNDANT = '+' T_SOUND = 's' # Dictionary mapping types to strings typename = {'0': '<TEXT>', '1': '<DIR>', '2': '<CSO>', '3': '<ERROR>', \ '4': '<BINHEX>', '5': '<DOS>', '6': '<UUENCODE>', '7': '<SEARCH>', \ '8': '<TELNET>', '9': '<BINARY>', '+': '<REDUNDANT>', 's': '<SOUND>'} # Oft-used characters and strings CRLF = '\r\n' TAB = '\t' # Open a TCP connection to a given host and port # Send a selector to a given host and port, return a file with the reply # Get a menu in the form of a list of entries # Get a text file as a list of lines, with trailing CRLF stripped # Get a text file and pass each line to a function, with trailing CRLF stripped # Get a binary file as one solid data block # Get a binary file and pass each block to a function # A *very* simple interactive browser # Browser main command, has default arguments # Browse a menu # Browse a text file # Browse a search index # "Browse" telnet-based information, i.e. open a telnet session # "Browse" a binary file, i.e. save it to a file # "Browse" a sound file, i.e. play it or save it # Dictionary mapping types to browser functions typebrowser = {'0': browse_textfile, '1': browse_menu, \ '4': browse_binary, '5': browse_binary, '6': browse_textfile, \ '7': browse_search, \ '8': browse_telnet, '9': browse_binary, 's': browse_sound} # Class used to save lines, appending a newline to each line # Class used to save data while showing progress # Ask for and open a save file, or return None if not to save # Test program # Call the test program as a main program test()
[ 2, 0, 1220, 14629, 14, 12001, 14, 8800, 14, 29412, 198, 198, 2, 317, 2829, 308, 8803, 5456, 13, 198, 2, 198, 2, 29566, 25, 308, 8803, 685, 685, 19738, 273, 60, 2583, 685, 634, 60, 2361, 198, 198, 11748, 4731, 198, 11748, 25064, ...
2.590851
787
# https://leetcode.com/problems/symmetric-tree/solution/ # Definition for a binary tree node. # recursive, by comparing two copies of the tree from collections import deque # level order traversal # just need to handle Null nodes carefully
[ 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 1837, 3020, 19482, 12, 21048, 14, 82, 2122, 14, 198, 2, 30396, 329, 257, 13934, 5509, 10139, 13, 198, 220, 220, 220, 1303, 45115, 11, 416, 14176, 734, 9088, 286, 262, 55...
3.5
72
import contextlib import numpy as np import mytorch import mytorch.simple_core from mytorch.simple_core import Variable, square, add ex1() ex2() ex3()
[ 11748, 4732, 8019, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 616, 13165, 354, 198, 11748, 616, 13165, 354, 13, 36439, 62, 7295, 198, 6738, 616, 13165, 354, 13, 36439, 62, 7295, 1330, 35748, 11, 6616, 11, 751, 628, 628,...
2.821429
56
#! coding=utf-8 """ high-level toolbox for Document Extractor """ import re import typing import nlpir from nlpir import get_instance as __get_instance__ from nlpir import native # class and class instance __cls__ = native.doc_extractor.DocExtractor __instance__: typing.Optional[native.doc_extractor.DocExtractor] = None # Location of DLL __lib__ = None # Data directory __data__ = None # license_code __license_code__ = None # encode __nlpir_encode__ = native.UTF8_CODE class ExtractResult: """ A class for retrieve result from Document Extractor's handle """ #: Types map can be retrieved from DocExtractor retrieve_type_map: typing.Dict[str, int] = { "person": native.doc_extractor.DOC_EXTRACT_TYPE_PERSON, "location": native.doc_extractor.DOC_EXTRACT_TYPE_LOCATION, "organization": native.doc_extractor.DOC_EXTRACT_TYPE_ORGANIZATION, "keyword": native.doc_extractor.DOC_EXTRACT_TYPE_KEYWORD, "author": native.doc_extractor.DOC_EXTRACT_TYPE_AUTHOR, "media": native.doc_extractor.DOC_EXTRACT_TYPE_MEDIA, "country": native.doc_extractor.DOC_EXTRACT_TYPE_COUNTRY, "province": native.doc_extractor.DOC_EXTRACT_TYPE_PROVINCE, "abstract": native.doc_extractor.DOC_EXTRACT_TYPE_ABSTRACT, "positive": native.doc_extractor.DOC_EXTRACT_TYPE_POSITIVE, "negative": native.doc_extractor.DOC_EXTRACT_TYPE_NEGATIVE, "text": native.doc_extractor.DOC_EXTRACT_TYPE_TEXT, "time": native.doc_extractor.DOC_EXTRACT_TYPE_TIME, "user": native.doc_extractor.DOC_EXTRACT_TYPE_USER } def get_available_retrieve_types(self) -> typing.Dict[str, int]: """ Get a set of types_name and types available for current extraction result :return: """ return {**self.retrieve_type_map, **self.user_retrieve_type_map} def set_retrieve_types(self, retrieve_type_list: typing.List[int]) -> bool: """ Set what type of data want to get from :func:`get_result` , can be set multi-times :param retrieve_type_list: list of retrieve types :return: """ self.retrieve_types = retrieve_type_list return True @__get_instance__ def get_result( 
self, retrieve_types: typing.Optional[typing.List[int]] = None ) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Union[str, int, float]]]]: """ Get result from current result, can be retrieved multi-times. :param retrieve_types: option, a list of retrieve types want to get, default is all types can be retrieved or certain types set by :func:`set_retrieve_types` :return: a dict of result : ``{type_name: [result}]}`` , example :: { "person": [ { "word": "卢梭", "pos": "n", "weight": 1.5, "freq": 100 } ] } """ if retrieve_types is not None: self.set_retrieve_types(retrieve_types) result_dict = dict() for retrieve_type in self.retrieve_types: result = __instance__.get_result( handle=self.handle, doc_extract_type=retrieve_type ) re_, func = self.re_result_map.get(retrieve_type, self.re_sharp_split) result = re_.findall("" if result is None else result) result_list = list() for string_tuple in result: result_map = func(string_tuple) result_list.append({ "word": result_map.get(0, None), "pos": result_map.get(1, None), "weight": float(result_map.get(2, None)) if result_map.get(2, None) is not None else None, "freq": int(result_map.get(3, None)) if result_map.get(3, None) is not None else None }) result_dict[self.__retrieve_type_reverse_map[retrieve_type]] = result_list return result_dict @__get_instance__ def get_sentiment_result(self) -> int: """ Get sentiment point from current extraction result :return: """ return __instance__.get_sentiment_score(self.handle) @__get_instance__ @__get_instance__ def get_native_instance() -> native.doc_extractor.DocExtractor: """ 返回原生NLPIR接口,使用更多函数 :return: The singleton instance """ return __instance__ @__get_instance__ def extract(text: str, user_define_pos: typing.List[str]) -> ExtractResult: """ :param text: :param user_define_pos: :return: """ handle = __instance__.pares_doc_e(text, "#".join(user_define_pos)) return ExtractResult(handle=handle, user_retrieve_type=user_define_pos) @__get_instance__ def import_dict(word_list: 
list) -> list: """ See :func:`nlpir.import_dict` :param word_list: list of words want to add to NLPIR :return: the word fail to add to the NLPIR """ return nlpir.import_dict(word_list=word_list, instance=__instance__) @__get_instance__ def clean_user_dict() -> bool: """ See :func:`nlpir.clean_user_dict` :return: success or not """ return nlpir.clean_user_dict(instance=__instance__) @__get_instance__ def delete_user_word(word_list: list): """ See :func:`nlpir.delete_user_word` :param word_list: list of words want to delete """ return nlpir.delete_user_word(word_list=word_list, instance=__instance__) @__get_instance__ def save_user_dict() -> bool: """ See :func:`nlpir.save_user_dict` :return: Success or not """ return nlpir.save_user_dict(instance=__instance__) @__get_instance__ def clean_saved_user_dict(): """ See :func:`nlpir.clean_saved_user_dict` :return: Delete success or not """ return nlpir.clean_saved_user_dict() @__get_instance__ def import_blacklist(filename: str, pos_blacklist=typing.List[str]) -> bool: """ Import Blacklist to system, see :func:`nlpir.import_blacklist` """ return nlpir.import_blacklist(__instance__, filename, pos_blacklist) @__get_instance__ def clean_blacklist() -> bool: """ 清除黑名单词表, see :func:`nlpir.clean_blacklist` :return: clean success or not """ return nlpir.clean_blacklist() @__get_instance__ def recover_blacklist() -> bool: """ 恢复黑名单词表,仅在被重命名的词表存在时才起作用, see :func:`nlpir.recover_blacklist` :return: """ return nlpir.recover_blacklist()
[ 2, 0, 19617, 28, 40477, 12, 23, 198, 37811, 198, 8929, 12, 5715, 2891, 3524, 329, 16854, 29677, 273, 198, 37811, 198, 11748, 302, 198, 11748, 19720, 198, 11748, 299, 75, 4063, 198, 6738, 299, 75, 4063, 1330, 651, 62, 39098, 355, 115...
2.236073
2,944
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Ed Mountjoy (June 2018) Formats the output of bedtools into final format for loading ''' import argparse import pandas as pd def parse_args(): ''' Load command line args ''' parser = argparse.ArgumentParser() parser.add_argument('--inf', metavar='<file>', help=('Input file'), type=str, required=True) parser.add_argument('--outf', metavar='<file>', help=('Output file'), type=str, required=True) parser.add_argument('--cell_name', metavar='<str>', help=('Name of cell type'), type=str, required=True) args = parser.parse_args() return args if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 7407, 5628, 2633, 357, 15749, 2864, 8, 198, 198, 8479, 1381, 262, 5072, 286, 3996, 31391, 656, 2457, 57...
2.746888
241
import torch import numpy as np from torch.utils.data.sampler import Sampler class UniformSampler(Sampler): """Samples elements with roughly uniform distribution of samples with the same label Arguments: """ def __init__(self, data_source, batch_size, number_of_different_classes_in_batch, batches_number): """ :param data_source: dataset, should be an inheritor of Dataset :param batch_size: desired batch size, int :param number_of_different_classes_in_batch: desired number of different classes in batch,usually 2 or 3 :param batches_number: how many batches you want to create """ super().__init__(data_source) self.data_source = data_source self.labels = self.data_source.labels self.length = len(self.labels) # how many samples we have in our dataset self.number_of_samples_with_the_same_label_in_the_batch = batch_size // number_of_different_classes_in_batch self.number_of_different_classes_in_batch = number_of_different_classes_in_batch self.batches_number = batches_number
[ 11748, 28034, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 28034, 13, 26791, 13, 7890, 13, 37687, 20053, 1330, 3409, 20053, 628, 198, 4871, 35712, 16305, 20053, 7, 16305, 20053, 2599, 198, 220, 220, 220, 37227, 50, 12629, 4847, 3...
2.591518
448
""" Functions for reading ascidoc files (*.adoc, *.asc, *.asciidoc) and tagging each line """ import os import sys import glob import re import logging from shutil import copyfile from pugnlp import futil from nlpia.regexes import CRE_ACRONYM # from nlpia.data_utils import iter_lines # FIXME: reuse from nlpia.constants import BOOK_PATH from nlpia.regexes import RE_URL_SIMPLE, splitext from nlpia.loaders import get_url_title, get_url_filemeta from nlpia.transcoders import delimit_slug from nlpia.translators import HyperlinkStyleCorrector from nlpia.futil import rm_rf, rm_r # noqa (used in doctests to clean up) log = logging.getLogger(__name__) # FIXME: redundant definitions here from develop branch BLOCK_DELIMITERS = dict([('--', 'natural'), ('==', 'natural'), ('__', 'natural'), ('**', 'natural'), ('++', 'latex'), ('////', 'comment')]) BLOCK_DELIM_CHRS = dict([(k[0], v) for k, v in BLOCK_DELIMITERS.items()]) BLOCK_DELIM_REGEXES = dict([(r'^[' + s[0] + r']{' + str(len(s)) + r',160}$', tag) for (s, tag) in BLOCK_DELIMITERS.items()]) BLOCK_HEADERS = dict([('[tip]', 'natural'), ('[note]', 'natural'), ('[important]', 'natural'), ('[quote]', 'natural')]) CRE_BLOCK_DELIMITER = re.compile('|'.join([s for s, tag in BLOCK_DELIM_REGEXES.items()])) HEADER_TYPES = [('source', 'code'), ('latex', 'latex')] # Working definitions from master branch BLOCK_DELIMITERS = dict([('--', 'code'), ('==', 'natural_sidenote'), ('__', 'natural_quote'), ('**', 'natural_asside'), ('++', 'latexmath'), ('//', 'comment')]) BLOCK_DELIMITER_CHRS = ''.join([k[0] for k in BLOCK_DELIMITERS.keys()]) BLOCK_HEADERS = dict([('[tip]', 'natural_tip'), ('[note]', 'natural_note'), ('[important]', 'natural_important'), ('[quote]', 'natural_quote')]) BLOCK_HEADERS4 = dict([(k[:4], v) for k, v in BLOCK_HEADERS.items()]) CRE_BLOCK_DELIMITER = re.compile(r'^[' + BLOCK_DELIMITER_CHRS + r']{2,50}$') CRE_ANNOTATION = re.compile(r'^<([0-9]{1,2})>.*') HEADER_TYPES = [('source', 'code'), ('latex', 'latex'), ('latexmath', 
'latex'), ('template="glossary"', 'natural_glossary'), ("template='glossary'", 'natural_glossary')] VALID_TAGS = set(['anchor', 'attribute', 'blank_line', 'block_header', 'caption', 'code', 'code_end', 'code_start', ] + [b for b in BLOCK_DELIMITERS.values()] + [b + '_start' for b in BLOCK_DELIMITERS.values()] + [b + '_end' for b in BLOCK_DELIMITERS.values()] + ['natural_heading{}'.format(i) for i in range(1, 6)] + ['image_link', 'natural', 'natural_end', 'natural_start', 'code_header']) INCLUDE_TAGS = set(['natural', 'caption'] + ['natural_heading{}'.format(i) for i in range(1, 6)]) re_bad_footnotes = re.compile(r'footnote:\[' + RE_URL_SIMPLE + r'\]') def get_lines(file_path=BOOK_PATH): r""" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files Args: file_path (str): Path to directory containing manuscript asciidoc files i.e.: /Users/cole-home/repos/nlpinaction/manuscript/ or nlpia.constants.BOOK_PATH Returns: list of lists of str, one list for each Chapter or Appendix >>> lines = get_lines(BOOK_PATH) >>> next(lines) ('.../src/nlpia/data/book/Appendix F -- Glossary.adoc', ['= Glossary\n', '\n', "We've collected some ...]) """ if os.path.isdir(file_path): file_path = os.path.join(file_path, '*.adoc') files = glob.glob(file_path) elif os.path.isfile(file_path): files = [file_path] elif '*' in file_path: if os.path.sep not in file_path: file_path = os.path.join(os.path.abspath(os.path.curdir), file_path) files = glob.glob(file_path) else: raise FileNotFoundError("Unable to find the directory or files requested.") lines = [] for filepath in files: with open(filepath, 'r') as f: lines.append(f.readlines()) return zip(files, lines) def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')): """ Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """ acronyms = [] for f, lines in get_lines(manuscript): for line in lines: matches = CRE_ACRONYM.finditer(line) if matches: for m in matches: 
if m.group('a2'): acronyms.append((m.group('a2'), m.group('s2'))) elif m.group('a3'): acronyms.append((m.group('a3'), m.group('s3'))) elif m.group('a4'): acronyms.append((m.group('a4'), m.group('s4'))) elif m.group('a5'): acronyms.append((m.group('a5'), m.group('s5'))) return sorted(dict(acronyms).items()) def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None): """ Compose an asciidoc string with acronyms culled from the manuscript """ linesep = linesep or os.linesep lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]'] acronyms = get_acronyms(manuscript) for a in acronyms: lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:])) return linesep.join(lines) def tag_lines(lines, include_tags=None): r""" Naively tags lines from manuscript with: code, natural, heading, etc. Returns: list of tuples [(tag, line), ...] >>> ' '.join(sorted(VALID_TAGS)) 'anchor attribute blank_line block_header caption code code_end code_header code_start comment comment_end comment_start image_link latexmath latexmath_end latexmath_start natural natural_asside natural_asside_end natural_asside_start natural_end natural_heading1 natural_heading2 natural_heading3 natural_heading4 natural_heading5 natural_quote natural_quote_end natural_quote_start natural_sidenote natural_sidenote_end natural_sidenote_start natural_start' >>> list(tag_lines('|= Title| :chapter: 0|Hello|cruel world|==Heading Level 2| \t| [source,bash]|====|$ grep this|====|'\ ... 
.split('|'))) [('blank_line', ''), ('natural_heading1', '= Title'), ('attribute', ' :chapter: 0'), ('natural', 'Hello'), ('natural', 'cruel world'), ('natural_heading2', '==Heading Level 2'), ('blank_line', ' \t'), ('code_header', ' [source,bash]'), ('code_start', '===='), ('code', '$ grep this'), ('code_end', '===='), ('blank_line', '')] """ current_block_type = None block_terminator = None tag = '' tagged_lines = [] for idx, line in enumerate(lines): normalized_line = line.lower().strip().replace(" ", "") # [source,...] with or without any following "----" block delimiter # TODO: make this a regex that classifies among the different types (source, glossary, tip, etc) header_type = next((HEADER_TYPES[i] for i in range(len(HEADER_TYPES)) if normalized_line.startswith('[') and normalized_line[1:].startswith(HEADER_TYPES[i][0])), None) if header_type: current_block_type = header_type[1] tag = current_block_type + '_header' block_terminator = None elif normalized_line[:4] in BLOCK_HEADERS4: current_block_type = BLOCK_HEADERS4[normalized_line[:4]] tag = current_block_type + '_header' # BLOCK_HEADERS[normalized_line] block_terminator = None elif ( CRE_BLOCK_DELIMITER.match(normalized_line) and normalized_line[:2] in BLOCK_DELIMITERS): # or (tag in set('caption anchor'.split()))): if (not idx or not current_block_type or not block_terminator): current_block_type = (current_block_type or BLOCK_DELIMITERS[normalized_line[:2]]) tag = current_block_type + '_start' block_terminator = normalized_line elif block_terminator and line.rstrip() == block_terminator: tag = current_block_type + '_end' current_block_type = None block_terminator = None else: tag = current_block_type elif current_block_type and (line.rstrip() == block_terminator or (not block_terminator and not normalized_line)): tag = current_block_type + '_end' current_block_type = None block_terminator = None elif current_block_type: tag = current_block_type elif not normalized_line: tag = 'blank_line' elif 
normalized_line.startswith(r'//'): tag = 'comment' elif normalized_line.startswith(r':'): tag = 'attribute' elif normalized_line.startswith('='): tag = 'natural_heading' tag += str(len([c for c in normalized_line[:6].split()[0] if c == '='])) elif normalized_line.startswith('.'): tag = 'caption' elif normalized_line.startswith('image:'): tag = 'image_link' elif normalized_line.startswith('[['): tag = 'anchor' else: tag = 'natural' current_block_type = None tagged_lines.append((tag, line)) return filter_tagged_lines(tagged_lines, include_tags=include_tags) def get_tagged_sections(book_dir=BOOK_PATH, include_tags=None): """ Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str)) >>> get_tagged_sections() [('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)] """ return [(filepath, tag_lines(lines, include_tags=include_tags)) for filepath, lines in get_lines(book_dir)] def find_bad_footnote_urls(tagged_lines, include_tags=None): """ Find lines in the list of 2-tuples of adoc-tagged lines that contain bad footnotes (only urls) >>> sections = get_tagged_sections(BOOK_PATH) >>> tagged_lines = list(sections[0][1]) >>> find_bad_footnote_urls(tagged_lines) [[30, 'https://spacy.io/usage/linguistic-features#rule-based-morphology']] """ section_baddies = [] log.debug(tagged_lines[:2]) for lineno, (tag, line) in enumerate(tagged_lines): line_baddies = None if tag is None or include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags)): line_baddies = get_line_bad_footnotes(line=line, tag=tag) if line_baddies and len(line_baddies) > 1: section_baddies.append([lineno] + line_baddies[1:]) else: pass # section_baddies.append(line) return section_baddies # def find_all_bad_footnote_urls(book_dir=BOOK_PATH, include_tags=['natural']): # """ Find lines in the manuscript that contain bad footnotes (only urls) """ # sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags) # 
bad_url_lines = {} # for fileid, (filepath, tagged_lines) in enumerate(sections): # section_baddies = find_bad_footnote_urls(tagged_lines, include_tags=include_tags) # if section_baddies: # bad_url_lines[filepath] = section_baddies # return bad_url_lines def infer_url_title(url): """ Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of' """ meta = get_url_filemeta(url) title = '' if meta: if meta.get('hostname', url) == 'drive.google.com': title = get_url_title(url) else: title = meta.get('filename', meta['hostname']) or meta['hostname'] title, fileext = splitext(title) else: logging.error('Unable to retrieve URL: {}'.format(url)) return None return delimit_slug(title, ' ') def get_line_bad_footnotes(line, tag=None, include_tags=None): """ Return [original_line, url_footnote1, url_footnote2, ... url_footnoteN] for N bad footnotes in the line """ if tag is None or include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags)): found_baddies = re_bad_footnotes.findall(line) return [line] + [baddie[0] for baddie in found_baddies] return [line] def translate_line_footnotes(line, tag=None, default_title='<NOT_FOUND>'): r""" Find all bare-url footnotes, like "footnote:[moz.org]" and add a title like "footnote:[Moz (moz.org)]" >>> translate_line_footnotes('*Morphemes*:: Parts of tokens or words that contain meaning in and of themselves.'\ ... 
'footnote:[https://spacy.io/usage/linguistic-features#rule-based-morphology]') '*Morphemes*:: Parts of tokens or words that contain meaning in and of themselves.footnote:[See the web page titled "Linguistic Features : spaCy Usage Documentation" (https://spacy.io/usage/linguistic-features#rule-based-morphology).]' """ line_urls = get_line_bad_footnotes(line, tag=tag) urls = line_urls[1:] if line_urls else [] for url in urls: footnote = 'footnote:[{url}]'.format(url=url) new_footnote = footnote # TODO: use these to extract name from hyperlinks title = get_url_title(url) title = title or infer_url_title(url) title = (title or '').strip(' \t\n\r\f-_:|="\'/\\') title = title if ' ' in (title or 'X') else None if title: brief_title = title.split('\n')[0].strip().split('|')[0].strip().split('Â')[0].strip().split('·')[0].strip() logging.info('URL: {}'.format(url)) logging.info('TITLE: {}'.format(title)) title = brief_title if len(brief_title) > 3 and len(title) > 55 else title title = title.replace('Â', '').replace('·', ':').replace('|', ':').replace('\n', '--') logging.info('FINAL: {}'.format(title)) title = title or default_title if title: new_footnote = 'footnote:[See the web page titled "{title}" ({url}).]'.format(title=(title or default_title), url=url) elif title is None: logging.error('Unable to find a title for url: {}'.format(url)) else: new_footnote = 'footnote:[See the web page ({url}).]'.format(url=url) line = line.replace( footnote, new_footnote) return line def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes), book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ if callable(translators) or not hasattr(translators, '__len__'): translators = (translators,) sections = 
get_tagged_sections(book_dir=book_dir, include_tags=include_tags) file_line_maps = [] for fileid, (filepath, tagged_lines) in enumerate(sections): log.info('filepath={}'.format(filepath)) destpath = filepath if not dest: copyfile(filepath, filepath + '.' + ext.lstrip('.')) elif os.path.sep in dest: destpath = os.path.join(dest, os.path.basename(filepath)) else: destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath)) ensure_dir_exists(os.path.dirname(destpath)) with open(destpath, 'w') as fout: log.info('destpath={}'.format(destpath)) for lineno, (tag, line) in enumerate(tagged_lines): if (include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags))): for translate in translators: new_line = translate(line) # TODO: be smarter about writing to files in-place if line != new_line: file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line)) line = new_line fout.write(line) return file_line_maps def correct_hyperlinks(book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ DEPRECATED (see translate_line_footnotes) Find bad footnotes (only urls), visit the page, add the title to the footnote >>> len(correct_hyperlinks(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 2 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ # bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir) # file_line_maps = [] return translate_book(translators=HyperlinkStyleCorrector().translate, book_dir=book_dir, dest=dest, include_tags=include_tags, ext=ext, skip_untitled=skip_untitled) def correct_bad_footnote_urls(book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ DEPRECATED (see translate_line_footnotes) Find bad footnotes (only urls), visit the page, add the title to the footnote >>> len(correct_bad_footnote_urls(book_dir=BOOK_PATH, dest='cleaned_footnotes')) 1 >>> rm_r(os.path.join(BOOK_PATH, 'cleaned_footnotes')) """ # 
bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir) # file_line_maps = [] return translate_book(translators=translate_line_footnotes, book_dir=book_dir, dest=dest, include_tags=include_tags, ext=ext, skip_untitled=skip_untitled) def filter_lines(input_file, output_file, translate=lambda line: line): """ Translate all the lines of a single file """ filepath, lines = get_lines([input_file])[0] return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines] def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None): r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')] """ include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags for tagged_line in tagged_lines: if (include_tags is None or tagged_line[0] in include_tags or any((tagged_line[0].startswith(t) for t in include_tags))): if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)): yield tagged_line else: log.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format( tagged_line[0], exclude_tags)) else: log.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags)) def main(book_dir=BOOK_PATH, include_tags=None, verbosity=1): r""" Parse all the asciidoc files in book_dir, returning a list of 2-tuples of lists of 2-tuples (tagged lines) >>> main(BOOK_PATH, verbosity=0) [('.../src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)] >>> main(BOOK_PATH, include_tags='natural', verbosity=1) = Glossary 
We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[Bill Wilson... at the university of New South Wales in Australia has a more complete one here:... https://www.cse.unsw.edu.au/~billw/nlpdict.html]... You can find some of the tools we used to generate this list in the `nlpia` python package at... ... >>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=0)) >>> len(tagged_lines[0]) 2 >>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=1)) = Glossary <BLANKLINE> We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[... >>> tagged_lines = list(main(BOOK_PATH, include_tags='natural', verbosity=1)) = Glossary We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[... TODO: `def filter_tagged_lines(tagged_lines)` that returns an iterable. """ if verbosity: log.info('book_dir: {}'.format(book_dir)) log.info('include_tags: {}'.format(include_tags)) log.info('verbosity: {}'.format(verbosity)) include_tags = [include_tags] if isinstance(include_tags, str) else include_tags include_tags = None if not include_tags else set([t.lower().strip() for t in include_tags]) sections = get_tagged_sections(book_dir=book_dir) if verbosity >= 1: for filepath, tagged_lines in sections: tagged_lines = filter_tagged_lines(tagged_lines, include_tags=include_tags) if verbosity > 1: print('=' * 75) print(filepath) print('-' * 75) if verbosity == 1: for tag, line in tagged_lines: print(line) else: for tagged_line in tagged_lines: print(tagged_line) if verbosity > 1: print('=' * 79) print() else: log.debug('vebosity={} so nothing output to stdout with print()'.format(verbosity)) return sections if __name__ == '__main__': args = sys.argv[1:] book_dir = os.path.curdir verbosity = 1 include_tags = tuple(INCLUDE_TAGS) if args: book_dir = args[0] args = args[1:] if args: try: verbosity = int(args[0]) 
include_tags = args[1:] or include_tags except ValueError: verbosity = 1 include_tags = args if include_tags and include_tags[0].strip().lower()[: 3] in ('all', 'none', 'true'): include_tags = None # print('Parsing Chapters and Appendices in: ' + book_dir) # print('***PRINTING LINES WITH TAGS***: ' + str(include_tags)) main(book_dir=book_dir, include_tags=include_tags, verbosity=verbosity)
[ 37811, 40480, 329, 3555, 10570, 312, 420, 3696, 20789, 13, 324, 420, 11, 46866, 3372, 11, 46866, 292, 979, 312, 420, 8, 290, 49620, 1123, 1627, 37227, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 15095, 198, 11748, 302, 198, 11748,...
2.324217
9,799
import unittest # Given a non-empty string s and a dictionary wordDict containing a list of non-empty words, # determine if s can be segmented into a space-separated sequence of one or more dictionary words. s = "leetcode" wordDict = ["leet", "code"] output_value = True if __name__ == '__main__': unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra conditions for jupyter notebook
[ 11748, 555, 715, 395, 198, 198, 2, 11259, 257, 1729, 12, 28920, 4731, 264, 290, 257, 22155, 1573, 35, 713, 7268, 257, 1351, 286, 1729, 12, 28920, 2456, 11, 198, 2, 5004, 611, 264, 460, 307, 10618, 276, 656, 257, 2272, 12, 25512, 5...
3.099237
131
from django.core.cache import cache from enum import Enum Cache = CacheWrapper(cache)
[ 6738, 42625, 14208, 13, 7295, 13, 23870, 1330, 12940, 198, 6738, 33829, 1330, 2039, 388, 628, 198, 198, 30562, 796, 34088, 36918, 2848, 7, 23870, 8, 198 ]
3.296296
27
''' 「検査陽性者の状況」画像から数値データを抽出する処理 パターン1 ''' import re import pytesseract import cv2
[ 7061, 6, 198, 13697, 162, 97, 250, 162, 253, 119, 165, 50159, 45250, 100, 38519, 17683, 232, 35050, 111, 223, 13700, 18796, 119, 161, 225, 237, 27370, 36853, 46763, 108, 161, 222, 97, 21959, 6312, 23376, 31758, 162, 232, 121, 49035, 1...
1.092105
76
#!/usr/bin/env python # # original code from # https://github.com/giampaolo/psutil/blob/master/examples/ # # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Print system memory information. """ import psutil from cStringIO import StringIO from scouter.lang.pack import * from scouter.lang.value import * if __name__ == '__main__': # main() pack = process(None) print pack
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 198, 2, 2656, 2438, 422, 220, 198, 2, 220, 220, 220, 3740, 1378, 12567, 13, 785, 14, 12397, 13299, 14057, 14, 862, 22602, 14, 2436, 672, 14, 9866, 14, 1069, 12629, 14, 198,...
2.948276
174
""" Deep Interest Network. """ from .nn import Attention from .interest_net import InterestNet class DIN(InterestNet): """Deep Interest Network. Parameters ---------- features : Features attention_groups : list of AttentionGroup num_classes : int Number of classes. embedding_size : int Size of embedding. hidden_layers : list Size of hidden layers. Example: [96, 32] dnn_activation : str Activation function of deep layers. Example: relu final_activation : str Activation function of output. dropout : float Dropout rate. """
[ 37811, 198, 29744, 12033, 7311, 13, 198, 37811, 198, 198, 6738, 764, 20471, 1330, 47406, 198, 6738, 764, 9446, 62, 3262, 1330, 12033, 7934, 628, 198, 4871, 360, 1268, 7, 19302, 7934, 2599, 198, 220, 220, 220, 37227, 29744, 12033, 7311, ...
2.708333
240
# coding=utf-8 import os from tqdm import tqdm import networkx as nx from networkx.algorithms import bipartite import argparse import torch """ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt """ # maximal Bipartite matching. # python program to find if __name__ == "__main__": main()
[ 2, 19617, 28, 40477, 12, 23, 198, 198, 11748, 28686, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 11748, 3127, 87, 355, 299, 87, 198, 6738, 3127, 87, 13, 282, 7727, 907, 1330, 14141, 433, 578, 198, 11748, 1822, 29572, 198, ...
2.900901
111
# SPDX-License-Identifier: MIT # # The MIT License (MIT) # # Copyright (c) <2021> Hottinger Brüel & Kjaer GmbH, Im Tiefen See 45, 64293 Darmstadt, Germany # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # High Level Analyzer # For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting import IOLinkFrame import DirectparameterPage type1_frames = { # (pd, od) 'Type_1_1': (2, 0), 'Type_1_2': (0, 2), 'Type_1_V (8 OD)': (0, 8), 'Type_1_V (32 OD)': (0, 32), } type2_frames = { # (pdout, od, pdin) 'Type_2_1': (0, 1, 1), 'Type_2_2': (0, 1, 2), 'Type_2_3': (1, 1, 0), 'Type_2_4': (2, 1, 0), 'Type_2_5': (1, 1, 1), 'Type_2_6': (2, 1, 2), 'Type_2_V': (0, 0, 0), } # High level analyzers must subclass the HighLevelAnalyzer class.
[ 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 198, 2, 198, 2, 383, 17168, 13789, 357, 36393, 8, 198, 2, 198, 2, 15069, 357, 66, 8, 1279, 1238, 2481, 29, 6964, 889, 263, 1709, 9116, 417, 1222, 509, 6592, 263, 402, 2022, 39...
2.882096
687
#!/usr/bin/env python3 import pathlib from typing import Set import frontmatter import datetime import re import urllib.request import os if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 3108, 8019, 198, 6738, 19720, 1330, 5345, 198, 11748, 2166, 47635, 198, 11748, 4818, 8079, 198, 11748, 302, 198, 11748, 2956, 297, 571, 13, 25927, 198, 11748, 28686, 628, 628,...
3.050847
59
import meta_ultra.config as config from meta_ultra.utils import * from meta_ultra.modules import * from meta_ultra.data_type import * modules.append(Humann2Module)
[ 11748, 13634, 62, 586, 430, 13, 11250, 355, 4566, 198, 6738, 13634, 62, 586, 430, 13, 26791, 1330, 1635, 198, 6738, 13634, 62, 586, 430, 13, 18170, 1330, 1635, 198, 6738, 13634, 62, 586, 430, 13, 7890, 62, 4906, 1330, 1635, 628, 197...
3.092593
54
import sys import traceback from bproxypool.scheduler import run from bproxypool.server import create_app from bproxypool.utils.notify import ding app = create_app() if __name__ == '__main__': # app.run(debug=True) if len(sys.argv) == 2: if sys.argv[1] == 'scheduler': try: run() except Exception as e: tp, msg, tb = sys.exc_info() e_msg = '>'.join(traceback.format_exception(tp, msg, tb)) ding(f'> ProxyPoolError: \n{e_msg}', 'ProxyPoolError') raise e
[ 11748, 25064, 198, 11748, 12854, 1891, 198, 198, 6738, 275, 1676, 87, 4464, 970, 13, 1416, 704, 18173, 1330, 1057, 198, 6738, 275, 1676, 87, 4464, 970, 13, 15388, 1330, 2251, 62, 1324, 198, 6738, 275, 1676, 87, 4464, 970, 13, 26791, ...
1.976027
292
from __future__ import division if (__name__ == "__main__"): import sys run(args=sys.argv[1:])
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 198, 361, 357, 834, 3672, 834, 6624, 366, 834, 12417, 834, 1, 2599, 198, 220, 1330, 25064, 198, 220, 1057, 7, 22046, 28, 17597, 13, 853, 85, 58, 16, 25, 12962, 198 ]
2.564103
39
import random import sys from copy import deepcopy from .happines_base import Bot as HappinessBaseBot """ ........... .....3..... ....323.... ...32123... ..321H123.. ...32X23... ....323.... .....3..... ........... ........... ........... """ bot = Bot()
[ 11748, 4738, 198, 11748, 25064, 198, 6738, 4866, 1330, 2769, 30073, 198, 198, 6738, 764, 71, 1324, 1127, 62, 8692, 1330, 18579, 355, 38456, 14881, 20630, 628, 198, 37811, 198, 2109, 986, 198, 12359, 18, 12359, 198, 1106, 32637, 1106, 19...
3.107143
84
import os import sys import time import logging import logging.config from k8sPaiLibrary.maintainlib import add as k8s_add from k8sPaiLibrary.maintainlib import remove as k8s_remove from k8sPaiLibrary.maintainlib import etcdfix as k8s_etcd_fix from k8sPaiLibrary.maintainlib import kubectl_conf_check from k8sPaiLibrary.maintainlib import kubectl_install from k8sPaiLibrary.maintainlib import update as k8s_update from k8sPaiLibrary.maintainlib import k8s_util from clusterObjectModel.cluster_object_model import cluster_object_model logger = logging.getLogger(__name__)
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 640, 198, 11748, 18931, 198, 11748, 18931, 13, 11250, 198, 198, 6738, 479, 23, 82, 47, 1872, 23377, 13, 76, 32725, 8019, 1330, 751, 355, 479, 23, 82, 62, 2860, 198, 6738, 479, 23, 82, 47,...
2.948718
195
import requests url = 'https://api.quotable.io/random' r = requests.get(url) quote = r.json() print(quote['content']) print(' -',quote['author'])
[ 11748, 7007, 201, 198, 201, 198, 6371, 796, 705, 5450, 1378, 15042, 13, 421, 313, 540, 13, 952, 14, 25120, 6, 201, 198, 201, 198, 81, 796, 7007, 13, 1136, 7, 6371, 8, 201, 198, 22708, 796, 374, 13, 17752, 3419, 201, 198, 4798, 7...
2.430769
65
from ptvp3 import P3terpreter import sys import json p3p = P3terpreter.from_param_dict(json.load(open(sys.argv[1]))) p3p.start()
[ 6738, 279, 14981, 79, 18, 1330, 350, 18, 353, 3866, 353, 198, 11748, 25064, 198, 11748, 33918, 628, 198, 79, 18, 79, 796, 350, 18, 353, 3866, 353, 13, 6738, 62, 17143, 62, 11600, 7, 17752, 13, 2220, 7, 9654, 7, 17597, 13, 853, 8...
2.298246
57
#!/usr/bin/env python # -*- coding: utf-8 -*- ######################################################################## # # Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved # ######################################################################## """ File: feature.py Author: zhenglinhai(zhenglinhai@baidu.com) Date: 2018/04/23 11:40:14 """ from scipy.spatial.distance import pdist import time import sys import base64 import json import numpy as np import time import os import random import cv2 if __name__ == '__main__': imgfea_list = [] with open(sys.argv[1]) as fp: for line in fp: imgfea_list.append(line.strip().split('\t')) max_len = len(imgfea_list) index = 0 while True: if index+1 >= max_len: break name1, fea2048_1, fea128_1 = imgfea_list[index][0], [float(x) for x in imgfea_list[index][1].split(' ')], [float(x) for x in imgfea_list[index][2].split(' ')] name2, fea2048_2, fea128_2 = imgfea_list[index+1][0], [float(x) for x in imgfea_list[index+1][1].split(' ')], [float(x) for x in imgfea_list[index+1][2].split(' ')] dist2048 = pdist(np.vstack([fea2048_1, fea2048_2]),'cosine') dist128 = pdist(np.vstack([fea128_1, fea128_2]),'cosine') print(name1, name2, dist2048, dist128) index = index + 2
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 29113, 29113, 7804, 198, 2, 220, 198, 2, 15069, 357, 66, 8, 2864, 347, 1698, 84, 13, 785, 11, 3457, 13, 1439, 6923,...
2.35689
566
# This is an input class. Do not edit. """ def traversal(node, array): if not array: return left, right = -1, -1 for i in range(len(array)): if left == -1 and array[i] < node.value: left = i if right == -1 and array[i] >= node.value: right = i if left != -1: node.left = BST(array[left]) bound = right if right != -1 else len(array) traversal(node.left, array[left + 1:bound]) if right != -1: node.right = BST(array[right]) traversal(node.right, array[right + 1:len(array)]) def reconstructBst(preOrderTraversalValues): n = preOrderTraversalValues.pop(0) root = BST(n) traversal(root, preOrderTraversalValues) return root """
[ 2, 770, 318, 281, 5128, 1398, 13, 2141, 407, 4370, 13, 628, 198, 37811, 198, 4299, 33038, 282, 7, 17440, 11, 7177, 2599, 198, 220, 220, 220, 611, 407, 7177, 25, 198, 220, 220, 220, 220, 220, 220, 220, 1441, 198, 220, 220, 220, 1...
2.253731
335
import pytest from onesignalclient.app_client import OneSignalAppClient from requests.exceptions import HTTPError from .base_test import BaseTest
[ 11748, 12972, 9288, 198, 6738, 3392, 570, 282, 16366, 13, 1324, 62, 16366, 1330, 1881, 11712, 282, 4677, 11792, 198, 6738, 7007, 13, 1069, 11755, 1330, 14626, 12331, 198, 6738, 764, 8692, 62, 9288, 1330, 7308, 14402, 628 ]
3.868421
38
__author__ = 'Stefan Krawczyk <stefank@cs.stanford.edu>'
[ 834, 9800, 834, 796, 705, 7447, 24408, 509, 1831, 66, 46355, 1279, 301, 891, 962, 31, 6359, 13, 14192, 3841, 13, 15532, 29, 6, 198 ]
2.28
25
# ****************************************************************************** # Copyright 2017-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # **************************************************************************** from neon.callbacks import Callbacks from neon.initializers import Gaussian from neon.layers import Affine, GeneralizedCost from neon.models import Model from neon.optimizers import GradientDescentMomentum from neon.transforms import SumSquared, Softmax, Rectlin from neon.transforms import Misclassification
[ 2, 41906, 17174, 46068, 1174, 198, 2, 15069, 2177, 12, 7908, 8180, 10501, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
4.404167
240
from dataclasses import dataclass, field from typing import TypeVar import websocket from io import IOBase import json WebSocket = TypeVar('WebSocket') @dataclass
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 11, 2214, 198, 6738, 19720, 1330, 5994, 19852, 198, 11748, 2639, 5459, 198, 6738, 33245, 1330, 314, 9864, 589, 198, 11748, 33918, 198, 198, 13908, 39105, 796, 5994, 19852, 10786, 13908, 3910...
3.38
50
from .investments import * manager = InvestmentsManager()
[ 6738, 764, 24859, 902, 1330, 1635, 198, 198, 37153, 796, 46568, 13511, 3419, 198 ]
4.214286
14
# # ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- # # BitBake Toaster Implementation # # Copyright (C) 2013 Intel Corporation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from django.db import models from django.db.models import F from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible
[ 2, 198, 2, 409, 25, 912, 28, 19, 25, 2032, 28, 19, 25, 6448, 28, 19, 25, 316, 198, 2, 532, 9, 12, 7400, 12, 10394, 25, 604, 26, 269, 12, 35487, 12, 28968, 25, 604, 26, 33793, 12, 8658, 82, 12, 14171, 25, 18038, 532, 9, 12,...
3.385417
288
from setuptools import setup, find_packages setup( name="robosuite", packages=[ package for package in find_packages() if package.startswith("robosuite") ], install_requires=[ "numpy>=1.13.3", "scipy>=1.3.1", "opencv-python>=3.4.3.18", # "mujoco-py<1.50.2,>=1.50.1", ], eager_resources=['*'], include_package_data=True, python_requires='>=3', description="Surreal Robotics Suite: Standardized and Accessible Robot Manipulation Benchmark in Physics Simulation", author="Yuke Zhu, Jiren Zhu, Ajay Mandlekar, Joan Creus-Costa, Anchit Gupta", url="https://github.com/StanfordVL/robosuite", author_email="yukez@cs.stanford.edu", version="0.1.0", )
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 22609, 418, 84, 578, 1600, 198, 220, 220, 220, 10392, 41888, 198, 220, 220, 220, 220, 220, 220, 220, 5301, 329, 5301, 287,...
2.332278
316
__all__ = ["jobs", "staffing_plan", "transfer_generator"]
[ 834, 439, 834, 796, 14631, 43863, 1600, 366, 28120, 278, 62, 11578, 1600, 366, 39437, 62, 8612, 1352, 8973, 198 ]
2.9
20
import argparse import re import os import ssh_config import paramiko import getpass from pathlib import Path from ssh_config.hosts import ping def open_ssh_key(path: str, password: str=None) -> (paramiko.pkey.PKey, str): """ Function will return the Pkey object for the path specified :param path: The path to the Pkey object :param password: (Optional) The password for the private key :return: Loaded Pkey """ with open(path, "r") as __f: for key_type in [paramiko.DSSKey, paramiko.RSAKey, paramiko.ECDSAKey]: try: key_type.from_private_key(__f) except paramiko.PasswordRequiredException: if password is None: password = getpass.getpass("SSH Key password: ") try: return key_type.from_private_key(__f, password), password except paramiko.SSHException: pass if __name__ == "__main__": main()
[ 11748, 1822, 29572, 198, 11748, 302, 198, 11748, 28686, 198, 11748, 26678, 62, 11250, 198, 11748, 5772, 12125, 198, 11748, 651, 6603, 198, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 26678, 62, 11250, 13, 4774, 82, 1330, 29400, 628, ...
2.36165
412
from django.db import models from treeckle.common.models import TimestampedModel # Create your models here.
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 5509, 694, 293, 13, 11321, 13, 27530, 1330, 5045, 395, 13322, 17633, 198, 198, 2, 13610, 534, 4981, 994, 13, 198 ]
3.633333
30
from dateutil.parser import parse from configs import ConfigFactory config = ConfigFactory.factory()
[ 6738, 3128, 22602, 13, 48610, 1330, 21136, 198, 6738, 4566, 82, 1330, 17056, 22810, 198, 198, 11250, 796, 17056, 22810, 13, 69, 9548, 3419, 628 ]
4.12
25
try: from sympy import * # this kind of import puts a lot of stuff in global namespace; # usually better to import what you need or import sympy except ImportError: st_local("importerror", "True") raise x = Symbol('x') f = Function('f') f = eval(st_local("anything").replace('^','**')) st_local("anything", st_local("anything").replace("**", "^")) st_local("fprime", repr(f.diff(x)).replace("**", "^")) st_local("fint", repr(f.integrate(x)).replace("**", "^"))
[ 28311, 25, 198, 220, 220, 220, 422, 10558, 88, 1330, 1635, 198, 220, 220, 220, 1303, 428, 1611, 286, 1330, 7584, 257, 1256, 286, 3404, 287, 3298, 25745, 26, 198, 220, 220, 220, 1303, 3221, 1365, 284, 1330, 644, 345, 761, 393, 1330, ...
2.797688
173
#!/usr/bin/python from sense_hat import SenseHat sense = SenseHat() sense.clear() temp = sense.get_temperature_from_pressure() temp = round(temp, 1) print("Temperature: %s C" % temp) humidity = sense.get_humidity() humidity = round(humidity, 2) print("Humidity: %s %%rH" % humidity)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 6738, 2565, 62, 5183, 1330, 24956, 40483, 198, 198, 33819, 796, 24956, 40483, 3419, 198, 33819, 13, 20063, 3419, 198, 198, 29510, 796, 2565, 13, 1136, 62, 11498, 21069, 62, 6738, 62, 361...
2.786408
103
''' Given a set of distinct integers, print the size of a maximal subset of where the sum of any numbers in is not evenly divisible by . For example, the array and . One of the arrays that can be created is . Another is . After testing all permutations, the maximum length solution array has elements. Function Description Complete the nonDivisibleSubset function in the editor below. It should return an integer representing the length of the longest subset of meeting the criteria. nonDivisibleSubset has the following parameter(s): S: an array of integers k: an integer Input Format The first line contains space-separated integers, and , the number of values in and the non factor. The second line contains space-separated integers describing , the unique values of the set. Constraints All of the given numbers are distinct. Output Format Print the size of the largest possible subset (). Sample Input 4 3 1 7 2 4 Sample Output 3 ''' #!/bin/python3 import math import os import random import re import sys if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') first_multiple_input = input().rstrip().split() n = int(first_multiple_input[0]) k = int(first_multiple_input[1]) s = list(map(int, input().rstrip().split())) result = nonDivisibleSubset(k, s) fptr.write(str(result) + '\n') fptr.close()
[ 7061, 6, 198, 15056, 257, 900, 286, 7310, 37014, 11, 3601, 262, 2546, 286, 257, 40708, 24637, 286, 220, 810, 262, 2160, 286, 597, 220, 3146, 287, 220, 318, 407, 21894, 2659, 12843, 416, 764, 198, 1890, 1672, 11, 262, 7177, 220, 290,...
3.404938
405
from yowsup.layers.protocol_media.protocolentities.message_media import MediaMessageProtocolEntity from yowsup.layers.protocol_messages.protocolentities.test_message import MessageProtocolEntityTest from yowsup.structs import ProtocolTreeNode
[ 6738, 331, 1666, 929, 13, 75, 6962, 13, 11235, 4668, 62, 11431, 13, 11235, 4668, 298, 871, 13, 20500, 62, 11431, 1330, 6343, 12837, 19703, 4668, 32398, 198, 6738, 331, 1666, 929, 13, 75, 6962, 13, 11235, 4668, 62, 37348, 1095, 13, 1...
3.521739
69
# Parte 1 # with open('message.txt','r') as file: # print(file.read()) # # Parte 2 # with open('alice.txt','r') as file: # (file.read()) # Parte 2 #with open('alice.txt','r') as file: # line_counter = 0 # for line in file: # print(line, end="") # # if (line_counter != 0) and (line_counter % 20 == 0) : # continuar = input("\n------\n\tPresione 'Enter' para continuar ->") # # line_counter += 1 # # Parte 3 words_to_change = { "Alice":"Bob", "She":"He", "Her":"His", "Herself":"Himself", "she":"he", "her":"his", "herself":"himself", "Lewis":"Renato", "Carroll":"Balcazar" } with open('alice.txt','r') as file: line_counter = 0 for line in file: for word in words_to_change: line = line.replace(word, words_to_change[word]) # Esta linea reemplaza las palabras del direccionario por sus valores en el mismo print(line, end="") if (line_counter != 0) and (line_counter % 20 == 0) : continuar = input("\n------\n\tPresione 'Enter' para continuar ->") line_counter += 1
[ 2, 2142, 68, 352, 198, 2, 351, 1280, 10786, 20500, 13, 14116, 41707, 81, 11537, 355, 2393, 25, 198, 2, 220, 3601, 7, 7753, 13, 961, 28955, 198, 2, 220, 220, 220, 220, 198, 220, 220, 198, 2, 2142, 68, 362, 198, 2, 351, 1280, 10...
2.346491
456
# -*- coding: utf-8 -*- """ pybitcoin ~~~~~ :copyright: (c) 2014 by Halfmoon Labs :license: MIT, see LICENSE for more details. """ import json, requests, traceback from ..hash import reverse_hash BLOCKCHAIN_API_BASE_URL = "https://blockchain.info" from .blockchain_client import BlockchainClient def get_unspents(address, blockchain_client=BlockchainInfoClient()): """ Get the spendable transaction outputs, also known as UTXOs or unspent transaction outputs. """ if not isinstance(blockchain_client, BlockchainInfoClient): raise Exception('A BlockchainInfoClient object is required') url = BLOCKCHAIN_API_BASE_URL + "/unspent?format=json&active=" + address auth = blockchain_client.auth if auth and len(auth) == 2 and isinstance(auth[0], str): url = url + "&api_code=" + auth[0] r = requests.get(url, auth=auth) try: unspents = r.json()["unspent_outputs"] except ValueError, e: raise Exception('Invalid response from blockchain.info.') return format_unspents(unspents) def broadcast_transaction(hex_tx, blockchain_client=BlockchainInfoClient()): """ Dispatch a raw transaction to the network. """ url = BLOCKCHAIN_API_BASE_URL + '/pushtx' payload = {'tx': hex_tx} r = requests.post(url, data=payload, auth=blockchain_client.auth) if 'submitted' in r.text.lower(): return {'success': True} else: raise Exception('Invalid response from blockchain.info.')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 220, 220, 220, 12972, 35395, 198, 220, 220, 220, 220, 8728, 93, 628, 220, 220, 220, 1058, 22163, 4766, 25, 357, 66, 8, 1946, 416, 13139, 22977, 23500, 198,...
2.72482
556
""" REST API Documentation for the NRS TFRS Credit Trading Application The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation. OpenAPI spec version: v1 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from rest_framework import permissions from api.models.Document import Document from api.models.DocumentComment import DocumentComment from api.models.DocumentStatus import DocumentStatus class DocumentCommentPermissions(permissions.BasePermission): """Used by Viewset to check permissions for API requests""" @staticmethod def user_can_comment(user, document, privileged): """ Check whether the user should have authority to add a comment. Government Users with abilities to review the documents should always have authority to add a comment, unless it's archived. Fuel Suppliers with abilities to add or submit can add a comment if the document is either in draft or submitted status. """ if user.is_government_user and \ user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') and \ document.status.status in ['Received', 'Submitted']: return True if not user.is_government_user and not privileged and \ document.status.status in ['Draft', 'Submitted']: return True return False @staticmethod def user_can_edit_comment(user, comment: DocumentComment): """ Check whether the user should be able to edit their own comment. 
Conditions for now is simple: Only the user that made the comment can update their comment. And the status of the document has to be in draft state. """ current_status = comment.document.status.id if user.id == comment.create_user_id and \ current_status == \ DocumentStatus.objects.get_by_natural_key('Draft').id: return True return False def has_permission(self, request, view): """Check permissions When an object does not yet exist (POST)""" # Fallback to has_object_permission unless it's a POST if request.method != 'POST': return True # Need this information to make a decision if 'privileged_access' not in request.data and \ 'document' in request.data: return False document = request.data['document'] privileged_access = request.data['privileged_access'] found = Document.objects.filter(id=document).first() if not found: return False if found.create_user.organization != request.user.organization and \ not request.user.is_government_user: return False return DocumentCommentPermissions.user_can_comment( request.user, found, privileged_access ) def has_object_permission(self, request, view, obj): """Check permissions When an object does exist (PUT, GET)""" # Users can always see and edit their own comments if obj.create_user == request.user: return True # And see but not edit those from their others in their own # organization if obj.create_user.organization == request.user.organization and \ request.method in permissions.SAFE_METHODS: return True # Government roles can always view comments # and can view or edit privileged comments with correct permission if request.user.is_government_user: # read if request.method in permissions.SAFE_METHODS: if obj.privileged_access: return request.user.has_perm('DOCUMENTS_VIEW') return True # write if request.method not in permissions.SAFE_METHODS: if obj.privileged_access: return request.user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') return True # not authorized return False
[ 37811, 198, 220, 220, 220, 30617, 7824, 43925, 329, 262, 40034, 24958, 6998, 10504, 25469, 15678, 628, 220, 220, 220, 383, 15198, 13333, 1424, 29595, 4482, 318, 852, 3562, 284, 4269, 1370, 198, 220, 220, 220, 11846, 6447, 329, 9358, 525...
2.714449
1,751
import numpy as np from itertools import product from prostagma.techniques.technique import SearchTechnique from prostagma.performances.cross_validation import CrossValidation from sklearn.grid_search import ParameterGrid class GridSearch(SearchTechnique): """ The class implement the simple Grid Search algorithm to find the best parameters using Cross Validation. """ def fit(self, X_train, y_train): """ The method computes a score for each combination of hyperparameters using the performance validator @args X_train : numpy array -> features y_train : numpy array -> labels @return all_scores : numpy array -> all the mean scores obtained using the performance validator """ all_scores = [] grid = ParameterGrid(self.parameters) for params in grid: print("Validating the model with: ", params) scores = self.performance_validator.fit(X_train, y_train, self.model, params) all_scores.append((scores.mean(), scores.std())) if scores.mean() > self.best_score[0]: self.best_score = (scores.mean(), scores.std()) self.best_param = params all_scores = np.asarray(scores) return all_scores
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 340, 861, 10141, 1330, 1720, 198, 198, 6738, 11825, 363, 2611, 13, 23873, 6368, 13, 23873, 2350, 1330, 11140, 25574, 2350, 198, 6738, 11825, 363, 2611, 13, 525, 687, 1817, 13, 19692, 62, ...
2.913366
404
import os import numpy as np from omero.gateway import BlitzGateway from am_utils.utils import imsave
[ 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 267, 647, 78, 13, 10494, 1014, 1330, 29299, 22628, 1014, 198, 198, 6738, 716, 62, 26791, 13, 26791, 1330, 545, 21928, 628, 628 ]
3.147059
34
from src.utils import utils from src.configurations import classifier_config as config from src.configurations import embeddings_config as embeddings_config from src.dataset.dataset import * from src.modules.contextual_embedder import ContextualEmbedder from src.evaluation.evaluators import ParaphraseEvaluator from src.models.modeling import SiameseSentenceEmbedder, MBERTClassifier from src.utils.metrics import SimilarityAccuracyMeter, SimilarityAveragePrecisionMeter, SimilarityF1Meter, AccuracyMeter import numpy as np import torch from torch import nn from torch.nn import functional as F MODEL_NAMES = { "mbert_sense": "training_mbert_paws_sense_avg_features_5epochs", "siamese_online_contrastive": "training_siamese_paws_sense_online_contrastive_combine_5epochs", "siamese_oc_cls": "training_siamese_paws_sense_online_contrastive_cls_pooling_5epochs", "no_sense": "training_siamese_paws_no_sense_online_contrastive_8epochs", "no_sense_softmax": "training_siamese_paws_no_sense_softmax_5epochs" } MODEL_NAME = MODEL_NAMES["mbert_sense"] PRETRAINED_PATH = f"../training/trained_models/{MODEL_NAME}" valid_data_loader = utils.load_file(f"../dataset/cached/pawsx_test_all_languages_16") #metrics = {"validation": [SimilarityAveragePrecisionMeter, SimilarityAccuracyMeter]} metrics = {"validation": [AccuracyMeter]} """ model = SiameseSentenceEmbedder( train_model = False, use_sense_embeddings=False ) """ model = MBERTClassifier( train_model = False, use_sense_embeddings=True, senses_as_features=True ) model.load_pretrained(PRETRAINED_PATH) #model_2.load_pretrained(PRETRAINED_PATH_2) evaluator = ParaphraseEvaluator( model = model, data_loader = valid_data_loader, device = config.DEVICE, metrics = metrics, fp16=True, verbose=True ) evaluator.evaluate()
[ 6738, 12351, 13, 26791, 1330, 3384, 4487, 198, 6738, 12351, 13, 11250, 20074, 1330, 1398, 7483, 62, 11250, 355, 4566, 198, 6738, 12351, 13, 11250, 20074, 1330, 11525, 67, 654, 62, 11250, 355, 11525, 67, 654, 62, 11250, 198, 6738, 12351,...
2.654232
697
#!/bin/python import os import sys import numpy as np ''' input: VW training file ''' ''' output: feature_to_id.txt, feature_appearence_pos.npy, feature_appearence_neg.npy ''' ''' note for data with sample weight feature_off = 3, otherwise feature_off = 2''' feature_off = 3 #calculate the appearche of each features in the training data, for postive and negative samples #in the end, appending the number of pos. & neg. samples if __name__ == '__main__': if len(sys.argv) != 2: print 'Usage: %s <VW training file>'%sys.argv[0] sys.exit(1) #VW format features = {} ct = 0 with open(sys.argv[1],'r') as f: for line in f: items = line.strip().split(' ') ct = ct + 1 if ct%10000 == 0: print ct for item in items[feature_off:]: key = item.split(':')[0] if key not in features: id = len(features) features[ key ] = id with open('feature_to_id.txt', 'w') as f: for key in features: id = features[key] print >>f, '%s\t%d'%(key, id) calc_per_feature_appearence(len(features), features, sys.argv[1])
[ 2, 48443, 8800, 14, 29412, 628, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 198, 7061, 6, 5128, 25, 220, 31388, 3047, 2393, 705, 7061, 198, 7061, 6, 5072, 25, 3895, 62, 1462, 62, 312, 13, 14116, 11...
2.117647
578
import pytest from ethereum import tester from utils import sign from eth_utils import encode_hex, is_same_address from tests.utils import balance_proof_hash, closing_message_hash from tests.fixtures import ( owner_index, owner, contract_params, create_contract, get_token_contract, get_accounts, create_accounts ) from tests.fixtures_uraiden import ( token_contract, token_instance, get_uraiden_contract, uraiden_contract, uraiden_instance, delegate_contract, delegate_instance, ) @pytest.fixture
[ 11748, 12972, 9288, 198, 6738, 304, 17733, 1330, 256, 7834, 198, 6738, 3384, 4487, 1330, 1051, 198, 6738, 4555, 62, 26791, 1330, 37773, 62, 33095, 11, 318, 62, 31642, 62, 21975, 198, 6738, 5254, 13, 26791, 1330, 5236, 62, 13288, 62, 1...
2.8
200
import socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(('localhost', 7777)) sock.send('Hello, server! :)'.encode('utf-8')) d = sock.recv(1024) print(d.decode('utf-8')) sock.close()
[ 11748, 17802, 198, 198, 82, 735, 796, 17802, 13, 44971, 7, 44971, 13, 8579, 62, 1268, 2767, 11, 17802, 13, 50, 11290, 62, 2257, 32235, 8, 198, 82, 735, 13, 8443, 7, 10786, 36750, 3256, 767, 29331, 4008, 198, 82, 735, 13, 21280, 10...
2.382022
89
import os import csv #maxi function identified to get the maximum number in a list and give its index in the list together as a list #min function identified to get the minimum number in a list and give its index in the list together as a list #initiating lists list_date=[] list_rev=[] list_change=[] csv_path=os.path.join("..","Resources","budget_data_1.csv") with open(csv_path,newline="") as csvfile: csv_reader=csv.reader(csvfile,delimiter=",") next(csv_reader) # skip title line csv_reader = list(csv_reader) # turn the reader into a list count = len(csv_reader) # since we converted reader to list, now we can use len method to get total months from length of csv observations # compute total by summing every second col of each row total = sum(int(mylist[1]) for mylist in csv_reader) #turn each column into its own separate list, store into date and revenue lists (list_date and list_rev) for i in csv_reader: list_date.append(str(i[0])) list_rev.append(float(i[1])) #create a difference list as list_change by differencing the revenues in list_rev for i in range(0,len(list_rev)-1): list_change.append(list_rev[i+1]-list_rev[i]) #compute average change from the computed differences in list_change for j in list_change: average_diff=sum(list_change)/len(list_change) print(" Financial Analysis\n-------------------------------------------") print("Total Months: " + str(count)) print("Total Revenue: " + str(total)) print("Average Revenue Change: "+str(average_diff)) #predefined maxi,mini functions return max and min values and the index of values in the same list #slice dates list (list_date) via the returned index #get max/min values via index 1 of both maxi/min functions print("Greatest Increase in Revenue: "+str(list_date[maxi(list_change)[0]])+" ("+ str(maxi(list_change)[1])+")") print("Greatest Decrease in Revenue: "+str(list_date[min(list_change)[0]])+" ("+ str(min(list_change)[1])+")")
[ 11748, 28686, 198, 11748, 269, 21370, 198, 2, 9806, 72, 2163, 5174, 284, 651, 262, 5415, 1271, 287, 257, 1351, 290, 1577, 663, 6376, 287, 262, 1351, 1978, 355, 257, 1351, 198, 198, 2, 1084, 2163, 5174, 284, 651, 262, 5288, 1271, 287...
2.902098
715
from typing import List, Dict import json import spacy import pickle import multiprocessing import argparse from scispacy.abbreviation import AbbreviationDetector from tqdm import tqdm from collections import defaultdict from forecite.consts import * from forecite import s2_utils def compute_noun_phrases_worker(input_text: str) -> List: """Returns the noun phrases in a string""" doc = s2_utils.nlp_md(input_text[:1000000]) return [span.text.lower() for span in doc.noun_chunks] def get_date_key_from_arxiv_id(arxiv_id: str): """ Return a date key of the form <year>_<month> from an arxiv id """ if "/" in arxiv_id: arxiv_id = arxiv_id.split("/")[1] short_year = arxiv_id[0:2] month = arxiv_id[2:4] if ( short_year.startswith("0") or short_year.startswith("1") or short_year.startswith("2") ): year = "20" + short_year else: year = "19" + short_year return year + "_" + month def generate_dataset_no_refs_arxiv_cs(num_processes: int = 1): """ Function to generate the full topic extraction dataset for arxiv cs with references clipped """ if not os.path.exists(NO_REFS_ARXIV_CS_DATA_ROOT): print("Creating directory at {}".format(NO_REFS_ARXIV_CS_DATA_ROOT)) os.mkdir(NO_REFS_ARXIV_CS_DATA_ROOT) if not os.path.exists(NO_REFS_ARXIV_CS_IDS_PATH): print("Querying for all arxiv cs ids...") arxiv_ids = s2_utils.get_all_arxiv_cs_ids() print("Writing arxiv cs ids to {}".format(NO_REFS_ARXIV_CS_IDS_PATH)) with open(NO_REFS_ARXIV_CS_IDS_PATH, "w") as _arxiv_ids_json_file: json.dump(list(arxiv_ids), _arxiv_ids_json_file) else: print("Loading arxiv cs ids from {}".format(NO_REFS_ARXIV_CS_IDS_PATH)) with open(NO_REFS_ARXIV_CS_IDS_PATH) as _arxiv_ids_json_file: arxiv_ids = json.load(_arxiv_ids_json_file) if not os.path.exists(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH): print("Generating arxiv to s2 mapping...") arxiv_to_s2_mapping = s2_utils.get_arxiv_to_s2_id_mapping(arxiv_ids, []) print( "Writing arxiv to s2 mapping to {}".format( NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH ) ) with open( 
NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH, "w" ) as _arxiv_to_s2_mapping_json_file: json.dump(arxiv_to_s2_mapping, _arxiv_to_s2_mapping_json_file) else: print( "Loading arxiv to s2 mapping from {}".format( NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH ) ) with open(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH) as _json_file: arxiv_to_s2_mapping = json.load(_json_file) s2_ids = [ arxiv_to_s2_mapping[arxiv_id] for arxiv_id in arxiv_ids if arxiv_to_s2_mapping[arxiv_id] != "" ] if not os.path.exists(NO_REFS_ARXIV_CS_TITLE_NPS_PATH): print("Getting data") ( title_inverted_index, abstract_inverted_index, body_inverted_index, normalization_dict, s2_id_to_citing_ids, s2_id_to_references, s2_id_to_canonical, ) = s2_utils.full_data_collection_parallel(s2_ids, num_processes) print("Dumping title nps to {}".format(NO_REFS_ARXIV_CS_TITLE_NPS_PATH)) with open(NO_REFS_ARXIV_CS_TITLE_NPS_PATH, "w") as _json_file: json.dump(title_inverted_index, _json_file) print("Dumping abstract nps to {}".format(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH)) with open(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH, "w") as _json_file: json.dump(abstract_inverted_index, _json_file) print("Dumping body nps to {}".format(NO_REFS_ARXIV_CS_BODY_NPS_PATH)) with open(NO_REFS_ARXIV_CS_BODY_NPS_PATH, "w") as _json_file: json.dump(body_inverted_index, _json_file) print("Dumping normalization to {}".format(NO_REFS_ARXIV_CS_NORMALIZATION_PATH)) with open(NO_REFS_ARXIV_CS_NORMALIZATION_PATH, "w") as _json_file: json.dump(normalization_dict, _json_file) print("Dumping citing ids to {}".format(NO_REFS_ARXIV_CS_CITING_IDS_PATH)) with open(NO_REFS_ARXIV_CS_CITING_IDS_PATH, "w") as _json_file: json.dump(s2_id_to_citing_ids, _json_file) print("Dumping references to {}".format(NO_REFS_ARXIV_CS_REFERENCES_PATH)) with open(NO_REFS_ARXIV_CS_REFERENCES_PATH, "w") as _json_file: json.dump(s2_id_to_references, _json_file) print( "Dumping canonicalization to {}".format( NO_REFS_ARXIV_CS_CANONICALIZATION_PATH ) ) with open(NO_REFS_ARXIV_CS_CANONICALIZATION_PATH, "w") 
as _json_file: json.dump(s2_id_to_canonical, _json_file) print("Done.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset", help="Which dataset to run on") parser.add_argument("--num_processes", type=int, help="How many processes to use") args = parser.parse_args() if args.dataset == "no_refs_arxiv": generate_dataset_no_refs_arxiv_cs(num_processes=args.num_processes) else: raise Exception(f"Dataset {args.dataset} not supported")
[ 6738, 19720, 1330, 7343, 11, 360, 713, 198, 198, 11748, 33918, 198, 11748, 599, 1590, 198, 11748, 2298, 293, 198, 11748, 18540, 305, 919, 278, 198, 11748, 1822, 29572, 198, 198, 6738, 629, 8802, 1590, 13, 397, 4679, 47625, 1330, 2275, ...
1.994755
2,669
from django.contrib import admin from mptt.admin import DraggableMPTTAdmin from .models import Customer, Product, Category, Order # Register your models here. admin.site.register(Customer, CustomerAdmin) admin.site.register(Product, Productdmin) admin.site.register(Category, CategoryAdmin) admin.site.register(Order, OrderAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 201, 198, 6738, 285, 457, 83, 13, 28482, 1330, 1583, 9460, 540, 7378, 15751, 46787, 201, 198, 6738, 764, 27530, 1330, 22092, 11, 8721, 11, 21743, 11, 8284, 201, 198, 2, 17296, 534, 4981,...
3.233645
107
#!/usr/bin/env python3 from utilities import ImagesLoader from utilities import VideoLoader from utilities import Visualizer from Line import ImageProcessor import argparse import sys if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 6738, 20081, 1330, 5382, 17401, 198, 6738, 20081, 1330, 7623, 17401, 198, 6738, 20081, 1330, 15612, 7509, 198, 6738, 6910, 1330, 7412, 18709, 273, 198, 11748, 1822, 29572, 198, ...
3.645161
62
if ping('52.113.194.132', verbose = True): print('ping occuring was succesful') else: print('website is vulnerable to be hacked')
[ 361, 29400, 10786, 4309, 13, 16616, 13, 22913, 13, 19924, 3256, 15942, 577, 796, 6407, 2599, 201, 198, 197, 4798, 10786, 13886, 1609, 870, 373, 17458, 274, 913, 11537, 201, 198, 17772, 25, 201, 198, 197, 4798, 10786, 732, 12485, 318, ...
2.833333
48
# -*- coding: utf-8 -*- """ Created on Wed Sep 16 15:08:51 2020 @author: Albo """ from qcodes import Instrument from qcodes.utils.validators import Numbers from functools import reduce class VirtualInstrument(Instrument): """ Implements a device as a virtual instrument for QCoDeS """ def __init__(self, name, parameter_map): """Create a virtual instrument to represent the device params Args: name (str): name of the virtual instrument parameter_map (dict): dictionary mapping the hallbars measurable parameters to their measurement instruments """ super().__init__(name=name) self._map_parameters(parameter_map) def _map_parameters(self, parameter_map): """Add all parameters that need to be mapped into a single virtual instrument""" for parameter_name in parameter_map: info = parameter_map[parameter_name].copy() external_instrument = Instrument.find_instrument(info.pop('instrument')) # external_parameter = external_instrument[info.pop('parameter')] param_seq = info.pop('parameter').split('.') external_parameter = reduce(getattr, param_seq, external_instrument) unit = info.pop('unit', external_parameter.unit) if external_parameter.gettable: get_cmd = external_parameter.get else: get_cmd = False if external_parameter.settable: set_cmd = external_parameter.set else: set_cmd = False self.add_parameter(name=parameter_name, get_cmd=get_cmd, set_cmd=set_cmd, #external_parameter.__call__, unit=unit, vals=Numbers(), **info) def ask_raw(self, cmd: str) -> None: """Dummy method to satisfy base class overrides""" raise NotImplementedError( f'Instrument {type(self).__name__} is virtual and requires no ask_raw method' ) def write_raw(self, cmd: str) -> None: """Dummy method to satisfy base class overrides""" raise NotImplementedError( f'Instrument {type(self).__name__} is virtual and requires no write_raw method' )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 8621, 1467, 1315, 25, 2919, 25, 4349, 12131, 198, 198, 31, 9800, 25, 978, 2127, 198, 37811, 198, 198, 6738, 10662, 40148, 1330, 42410, 198,...
2.238051
1,067
import copy import os import math import numpy as np import random '''inputs''' os.chdir('input/') questionInput = open('Prob-30A-50.txt', 'r') questionInput = questionInput.readlines() noOfVehicles = int(questionInput[0]) maxCapacity = int(questionInput[1]) Tmax = int(questionInput[2]) depot = questionInput[5].replace(',', '.').split() depot = Node(int(depot[0]), float(depot[1]), float(depot[2]), 0, 0, Tmax) pickupNodes = {} requests=0 for i in range(9, 999): # additional logic to detect end of pick up nodes if len(questionInput[i]) < 3: break else: node = questionInput[i].replace(',', '.').split() if node==[]: break pickupNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]), float(node[7])) # count number of requests requests += 1 deliveryNodes = {} for i in range(9+requests+3, 9+requests+3+requests): node = questionInput[i].replace(',', '.').split() deliveryNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]), float(node[7])) deliveryNodes[int(node[0])].profit = 80 #each node's profit upon delivery allNodes = {0: depot, **pickupNodes, **deliveryNodes} # build the pickup delivery matching dict pickupDeliveryPair = {} iter = 1 for item in deliveryNodes: pickupDeliveryPair[iter] = deliveryNodes[item].idx iter += 1 speedMat = [] # blockcount = 9+requests+3+requests+2 brings you to the first speed pattern in input file blockcount = 9+requests+3+requests+2 for i in range(5): speed = [] for j in range(i * 6 + blockcount, i * 6 + (blockcount+4)): time = questionInput[j].replace(',', '.').split() speed.append([float(time[0]), float(time[1]), float(time[3])]) speedMat.append(speed) speedChooseMat = [] # use blockcocunt to read the speed choose matrix for i in range(blockcount+31, blockcount+31+2*requests+1): speedChooseMat.append([int(i) for i in questionInput[i].replace(',', '.').split()]) ''' processing input ''' # calculate distance matrix # total lines required = 
2*requests + 2 (but we minus 1 because range starts from 0) distMat = [[0.0] * (2*requests + 2-1) for i in range(2*requests + 2-1)] for i in range((2*requests + 2-1)): for j in range(i + 1, (2*requests + 2-1)): dist = getDist(i, j) distMat[i][j] = dist distMat[j][i] = dist ''' generate BFS ''' currOptimalSolution = RandomBFS() ''' how to use checking function ''' # currOptimalSolution = [ # [0,1,11,3,13,0], # [0,10,20,0], # [0,5,15,6,16,0]] #just an example # #iterate based on this # print(currOptimalSolution) s0=[] for vehicle in currOptimalSolution: oneroute=[vehicle, checkFeasible(vehicle)] s0.append(oneroute) nn= len(pickupNodes) profit0 = Profit_Solution(s0) print('******* inital solution') print(profit0) print(*s0, sep = "\n") # print(*s0) print('******** inital solution') # print( 'score:' + str(checkFeasible(vehicle)) + ' route:' + str(vehicle) ) # s0 = [[0, 14, 29, 11, 26, 1, 16, 3, 18, 0], 138.45952332463185], [[0, 10, 25, 0], 18.873851271071658], [ # [0, 8, 23, 6, 21, 13, 28, 5, 20, 15, 30, 4, 19, 2, 17, 0], 366.359325517866], [[0, 7, 22, 0], 29.4378156146646], [ # [0, 0], False] # nn=15 # s1= NEIGHBOR_VRP(s0,nn) # print(s1) #GA Part pop= get_population(s0,nn) retain_rate=0.3 random_select_rate=0.5 mutation_rate=0.4 # # selected = selection(pop) # # selected=rank_route(selected) # grandpare = remove_profit(selected) # # crossed = crossover(grandpare) # children = profit_children(crossed) # children=mutation(children) # # pop= selected+children # # pop = get_population(newpop,nn) # selected = selection(pop) maxprofit=0 register = [] i = 0 itter_time=100 while i < itter_time: # 选择繁殖个体群 selected = selection(pop) # selected=rank_route(selected) grandpare = remove_profit(selected) # 交叉繁殖 crossed = crossover(grandpare) children= profit_children(crossed) # 变异操作 children = mutation(children) # 更新种群 pop = selected + children [bestroute, bestprofit] = find_max(pop) if maxprofit < bestprofit: maxprofit = bestprofit # print('New best profit is ' + str(maxprofit)) # 
print('New best route is ' + str(bestroute)) i=i+1 print('Current population is :') print(*pop,sep='\n') print('---------------------------------------------------------------------') # print('The best profit is ' + str(maxprofit)) # print('The best route is ' + str(bestroute)) print(maxprofit) # def decoding2(corssed): # decoded=[] # for routes in crossed: # start = routes.index(0) # if routes[start + 1] == 0: # start += 1 # # newroutes = routes[start:] + routes[:start] # routes = newroutes # routes.append(0) # # print(routes) # l = [] # for w in routes: # if start: start = l.append([]) # l.append() returns None, that is falsey... # if w != 0: # l[-1].append(w) # if w == 0: start = 1 # # res = [] # for m in l: # if m != []: # res.append([0] + m + [0]) # while len(res) < noOfVehicles: # res.append([0, 0]) # # # soluion0 = [res, checkFeasible(res)] # # soluion0.append(oneroute) # decoded.append(res) # return decoded # s2=[] # for n_ in crossed_res: # oneroute_=[n_, checkFeasible(n_)] # s2.append(oneroute_) # print(allNodes[25].load) #
[ 11748, 4866, 201, 198, 11748, 28686, 201, 198, 11748, 10688, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 4738, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 7061, 6, 15414, 82, 7061, 6, 201, 198...
2.137482
2,764
""" 3D Anchor Generator for Voxel """ import math import sys import numpy as np import torch import torch.nn.functional as F from opencood.data_utils.post_processor.voxel_postprocessor \ import VoxelPostprocessor from opencood.utils import box_utils
[ 37811, 198, 18, 35, 29253, 273, 35986, 329, 28035, 417, 198, 37811, 198, 11748, 10688, 198, 11748, 25064, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 198, 6738, 128...
3.185185
81