content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from olympics_engine.core import OlympicsBase
from olympics_engine.viewer import Viewer, debug
from olympics_engine.objects import Ball, Agent
from pathlib import Path
CURRENT_PATH = str(Path(__file__).resolve().parent.parent)
import numpy as np
import math
import pygame
import sys
import os
import random
import copy
# color 宏
COLORS = {
'red': [255, 0, 0],
'green': [0, 255, 0],
'blue': [0, 0, 255],
'yellow': [255, 255, 0],
'grey': [176,196,222],
'purple': [160, 32, 240],
'black': [0, 0, 0],
'white': [255, 255, 255],
'light green': [204, 255, 229],
'sky blue': [0,191,255]
}
COLOR_TO_IDX = {
'red': 7,
'green': 1,
'sky blue': 2,
'yellow': 3,
'grey': 4,
'purple': 5,
'black': 6,
'light green': 0,
'blue':8
}
IDX_TO_COLOR = {
0: 'light green',
1: 'green',
2: 'sky blue',
3: 'yellow',
4: 'grey',
5: 'purple',
6: 'black',
7: 'red',
8: 'blue'
}
grid_node_width = 2 #for view drawing
grid_node_height = 2
def closest_point(l1, l2, point):
"""
compute the coordinate of point on the line l1l2 closest to the given point, reference: https://en.wikipedia.org/wiki/Cramer%27s_rule
:param l1: start pos
:param l2: end pos
:param point:
:return:
"""
A1 = l2[1] - l1[1]
B1 = l1[0] - l2[0]
C1 = (l2[1] - l1[1])*l1[0] + (l1[0] - l2[0])*l1[1]
C2 = -B1 * point[0] + A1 * point[1]
det = A1*A1 + B1*B1
if det == 0:
cx, cy = point
else:
cx = (A1*C1 - B1*C2)/det
cy = (A1*C2 + B1*C1)/det
return [cx, cy]
| [
6738,
267,
6760,
873,
62,
18392,
13,
7295,
1330,
14935,
14881,
198,
6738,
267,
6760,
873,
62,
18392,
13,
1177,
263,
1330,
3582,
263,
11,
14257,
198,
6738,
267,
6760,
873,
62,
18392,
13,
48205,
1330,
6932,
11,
15906,
198,
6738,
3108,
... | 2.055128 | 780 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from struct import pack
import copy
from onnx import defs
from onnx import numpy_helper
from onnx.backend.base import Backend
from onnx.backend.base import Device
from onnx.backend.base import namedtupledict
from onnx.helper import make_opsetid
from onnx import numpy_helper
from msnhnet_onnx import util
from msnhnet_onnx.x2msnhnet.handler import BackendHandler
from msnhnet_onnx.x2msnhnet.handlers import *
from msnhnet_onnx.onnx_wrapper import Node as OnnxNode
from msnhnet_onnx.x2msnhnet.handler import msnhnet_params, msnhnet_weights, msnhnet_input_layer_shape
import io
import tempfile
import os
import shutil
import numpy as np
import onnx
try:
import torch
except ImportError:
print('If you want to convert pytorch model to msnhnet model, please install pytorch first')
try:
import paddle
except ImportError:
print('If you want to convert paddle model to msnhnet model, please install paddle first')
try:
import tensorflow as tf
import tf2onnx
except ImportError:
print('If you want to convert tensorflow2 model to msnhnet model, please install tensorflow and tf2onnx first')
import logging
import onnxoptimizer
try:
import onnxsim
has_onnxsim = True
except ImportError:
has_onnxsim = False
logger = logging.getLogger(__name__)
init_weight_dict = {}
def get_all_backend_handlers(opset_dict):
""" Get a dict of all backend handler classes.
e.g. {'domain': {'Abs': Abs handler class}, ...}, }.
:param opset_dict: A dict of opset. e.g. {'domain': version, ...}
:return: Dict.
"""
handlers = {}
for handler in BackendHandler.__subclasses__():
handler.check_cls()
domain = handler.DOMAIN
version = opset_dict[domain]
handler.VERSION = version
since_version = 1
if defs.has(handler.ONNX_OP, domain=handler.DOMAIN):
try:
since_version = defs.get_schema(
handler.ONNX_OP,
domain=handler.DOMAIN,
max_inclusive_version=version,
).since_version
except RuntimeError:
logger.info(
"Fail to get since_version of {} in domain `{}` "
"with max_inclusive_version={}. Set to 1.".format(
handler.ONNX_OP, handler.DOMAIN, version
)
)
else:
logger.info(
"Unknown op {} in domain `{}`.".format(
handler.ONNX_OP, handler.DOMAIN or "ai.onnx"
)
)
handler.SINCE_VERSION = since_version
handlers.setdefault(domain, {})[handler.ONNX_OP] = handler
return handlers
class MsnhnetBackend(Backend):
""" Msnhnet Backend for ONNX
"""
@classmethod
def prepare(
cls,
model,
device="CPU",
strict=True,
logging_level="INFO",
blob_dict=None,
**kwargs
):
"""Prepare an ONNX model for MsnhNet Backend.
:param model: The ONNX model to be converted.
:param device: The device to execute this model on.
:param strict: Whether to enforce semantic equivalence between the original model
and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
Changing to False is strongly discouraged.
Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops.
:param logging_level: The logging level, default is INFO. Change it to DEBUG
to see more conversion details or to WARNING to see less
:returns: The variable dict of the converted msnhnet model
"""
super(MsnhnetBackend, cls).prepare(model, device, **kwargs)
logger.setLevel(logging_level)
return cls.onnx_model_to_msnhnet(model, strict, blob_dict=blob_dict)
@classmethod
def onnx_model_to_msnhnet(cls, model, strict, blob_dict=None):
""" Convert ONNX model to MsnhNet.
:param model: ONNX ModelProto object.
:param strict: whether to enforce semantic equivalence between the original model
and the converted msnhnet model.
:return: The variable dict of the converted msnhnet model
"""
# Models with IR_VERSION less than 3 does not have opset_import set.
# We default to minimum opset, this behavior is consistent with
# onnx checker.
# c.f. https://github.com/onnx/onnx/blob/427ac0c1b792363d373e3d7e4eef97fa46458420/onnx/checker.cc#L478
if model.ir_version < 3:
opset_import = [make_opsetid(defs.ONNX_DOMAIN, 1)]
else:
opset_import = model.opset_import
return cls._onnx_graph_to_msnhnet(
model.graph, opset_import, strict, blob_dict=blob_dict
)
@classmethod
def _onnx_graph_to_msnhnet(cls, graph_def, opset, strict, blob_dict=None):
""" Convert ONNX graph to msnhnet.
:param graph_def: ONNX GraphProto object.
:param opset: ONNX OperatorSetIdProto list.
:param strict: whether to enforce semantic equivalence between the original model
and the converted msnhnet.
:param blob_dict: {name: msnhnet_blob}, the inputs of onnx graph will be populated with msnhnet_blob with the same name
:return: The variable dict of the converted msnhnet model
"""
if blob_dict is None:
blob_dict = {}
handlers = cls._get_handlers(opset)
# initializer: TensorProtos representing the values to initialize
# a given tensor.
# initialized: A list of names of the initialized tensors.
if graph_def.initializer:
input_dict_items = cls._onnx_initializer_to_input_dict_items(
graph_def.initializer
)
initialized = {
init.name: onnx.numpy_helper.to_array(init)
for init in graph_def.initializer
}
else:
input_dict_items = []
initialized = {}
for node in graph_def.node:
node = OnnxNode(node)
if node.op_type == "Constant":
initialized[node.output_tensor_names[0]] = numpy_helper.to_array(
node.attrs["value"]
)
# creating placeholders for currently unknown inputs
for value_info in graph_def.input:
if value_info.name in initialized:
continue
shape = list(
d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
for d in value_info.type.tensor_type.shape.dim
)
if value_info.name not in blob_dict:
raise NotImplementedError("no blob named {}".format(value_info.name))
input_dict_items.append((value_info.name, blob_dict[value_info.name]))
# tensor dict: this dictionary is a map from variable names
# to the latest produced msnhnet variables of the given name.
# This dictionary will get updated as we build the graph to
# record the names of newly produced tensors.
tensor_dict = dict(input_dict_items)
# Since tensor dict may be updated, we need to keep a copy
# of the original input dict where we track the earliest
# defined tensors so we can have access to the placeholders
# to feed in input tensors when we run the graph.
input_dict = dict(input_dict_items)
for node in graph_def.node:
onnx_node = OnnxNode(node)
output_ops = cls._onnx_node_to_msnhnet_op(
onnx_node,
tensor_dict,
initialized,
handlers,
opset=opset,
strict=strict,
)
curr_node_output_map = dict(zip(onnx_node.output_tensor_names, output_ops))
tensor_dict.update(curr_node_output_map)
return tensor_dict
@classmethod
def _onnx_initializer_to_input_dict_items(cls, initializer):
""" Convert ONNX graph initializer to input dict items.
:param initializer: ONNX graph initializer, list of TensorProto.
:return: List of input dict items.
"""
return [
(
init.name,
# flow.get_variable(
# name=init.name,
# shape=get_flow_shape(list(init.dims)),
# initializer=flow.zeros_initializer(),
# trainable=True,
# dtype=util.Onnx2FlowDtype(init.data_type),
# ),
init_weight_dict[init.name],
)
for init in initializer
]
@classmethod
def _onnx_node_to_msnhnet_op(
cls, node, tensor_dict, init_dict, handlers=None, opset=None, strict=True
):
"""
Convert onnx node to msnhnet op.
Args:
node: Onnx node object.
tensor_dict: Tensor dict of graph.
opset: Opset version of the operator set. Default 0 means using latest version.
strict: whether to enforce semantic equivalence between the original model
and the converted msnhnet model, defaults to True (yes, enforce semantic equivalence).
Changing to False is strongly discouraged.
Returns:
msnhnet op
"""
handlers = handlers or cls._get_handlers(opset)
handler = handlers[node.domain].get(node.op_type, None)
if handler:
output = handler.handle(
node, tensor_dict, init_dict=init_dict, strict=strict
)
if not isinstance(output, (list, tuple)):
output = [output]
return output
else:
raise ValueError("{} is not supported".format(node.op_type))
@classmethod
def _get_handlers(cls, opset):
""" Get all backend handlers with opset.
:param opset: ONNX OperatorSetIdProto list.
:return: All backend handlers.
"""
opset = opset or [make_opsetid(defs.ONNX_DOMAIN, defs.onnx_opset_version())]
opset_dict = dict([(o.domain, o.version) for o in opset])
return get_all_backend_handlers(opset_dict)
prepare = MsnhnetBackend.prepare
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
28311,
25,
198,
220... | 2.24715 | 4,649 |
import xml.etree.ElementTree as ET
# Point this to the output of exportpicasa
XML_FILE_PATH = '/home/user/3/index.xml'
tree = ET.parse(XML_FILE_PATH)
root = tree.getroot()
for folder in root:
folderName = folder.get('name')
for file in folder:
fileName = file.get('name')
for face in file:
personName = face.get('contact_name')
# Let digikam calculate these to train its AI
# rectLeft = float(face.get('rect_left'))
# rectRight = float(face.get('rect_right'))
# rectTop = float(face.get('rect_top'))
# rectBottom = float(face.get('rect_bottom'))
if personName:
print ('Image: ' + folderName + '/' + fileName + ', personName: ' + personName)
print (rectLeft)
| [
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
198,
2,
6252,
428,
284,
262,
5072,
286,
10784,
16564,
15462,
198,
55,
5805,
62,
25664,
62,
34219,
796,
31051,
11195,
14,
7220,
14,
18,
14,
9630,
13,
19875,
6,
198,
198,
... | 2.657692 | 260 |
import numpy as np
from torch.utils.data import Dataset
# Custom collate for dataset | [
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
198,
2,
8562,
2927,
378,
329,
27039
] | 3.541667 | 24 |
# lexer.py
import string
| [
198,
198,
2,
220,
31191,
263,
13,
9078,
220,
628,
628,
198,
11748,
4731,
220,
198,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
628,
628,
198
] | 1.512821 | 39 |
from typing import List
nums = [1,-2,-3,4]
res = Solution().getMaxLen(nums)
print(res) | [
6738,
19720,
1330,
7343,
628,
198,
77,
5700,
796,
685,
16,
12095,
17,
12095,
18,
11,
19,
60,
198,
411,
796,
28186,
22446,
1136,
11518,
30659,
7,
77,
5700,
8,
198,
4798,
7,
411,
8
] | 2.514286 | 35 |
from chroma_core.lib.cache import ObjectCache
from chroma_core.models import Nid
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.models import ManagedTarget, ManagedMgs, ManagedHost
from tests.unit.chroma_core.helpers import freshen
from tests.unit.chroma_core.helpers import MockAgentRpc
from tests.unit.chroma_core.helpers import create_simple_fs
from tests.unit.services.job_scheduler.job_test_case import JobTestCaseWithHost
| [
6738,
15358,
64,
62,
7295,
13,
8019,
13,
23870,
1330,
9515,
30562,
198,
6738,
15358,
64,
62,
7295,
13,
27530,
1330,
46798,
198,
6738,
15358,
64,
62,
7295,
13,
30416,
13,
21858,
62,
1416,
704,
18173,
13,
21858,
62,
1416,
704,
18173,
... | 3.210526 | 152 |
"""
Created on Feb 24, 2017
@author: Siyuan Huang
Process the skeleton, get the input for LSTM.
Input: Aligned human skeleton feature.
"""
import config
import json
import scipy.io
import os
import numpy as np
if __name__ == '__main__':
main()
| [
37811,
198,
41972,
319,
3158,
1987,
11,
2177,
198,
198,
31,
9800,
25,
311,
7745,
7258,
31663,
198,
198,
18709,
262,
18328,
11,
651,
262,
5128,
329,
406,
2257,
44,
13,
198,
198,
20560,
25,
978,
3916,
1692,
18328,
3895,
13,
198,
198,
... | 2.943182 | 88 |
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import collections
from paleomix.common.fileutils import swap_ext
from paleomix.nodes.commands import CoverageNode, MergeCoverageNode, DepthHistogramNode
from paleomix.pipelines.ngs.parts.summary import SummaryTableNode
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
2321,
17722,
7750,
3059,
84,
4835,
1279,
44,
1134,
7750,
14874,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
... | 3.667553 | 376 |
import pytest
from skvalidate.commands.execute import print_metrics
@pytest.mark.parametrize('metrics,command', [
(
{'sleep 2':
{
'cpu_time': {
'value': 23,
'unit': 's',
},
'max_rss': {
'value': 200,
'unit': 'MB',
}
}
},
'sleep 2'
),
])
| [
11748,
12972,
9288,
198,
6738,
1341,
12102,
378,
13,
9503,
1746,
13,
41049,
1330,
3601,
62,
4164,
10466,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
4164,
10466,
11,
21812,
3256,
685,
198,
220,
220,
220,
357,... | 1.570881 | 261 |
# -*- coding: utf-8 -*-
''' For mischief module, all the helper methods are
added in this file, for user to use in core. '''
from datetime import datetime
import tweepy
from .config import PARDON_LIST
def generate_summary_report(api):
""" Generate Summary Report of Authenticated User """
# Get the User object for twitter...
user = api.me()
print '------------------------'
print 'Hello ' + user.name + ' (' + user.screen_name + ') !!'
print '------------------------'
print datetime.now()
print 'Following: ' + str(user.friends_count)
print 'Followers: ' + str(user.followers_count)
print 'Total Tweets: ' + str(user.statuses_count)
print 'Location: ' + user.location
print 'Description: ' + user.description
def generate_follower_list(api):
""" Generate Complete follower list of Authenticated User """
print '------- Followers ---------'
for friend in tweepy.Cursor(api.followers).items():
print friend.screen_name
def generate_following_list(api):
""" Generate Complete following list of Authenticated User """
print '------- Following ---------'
for friend in tweepy.Cursor(api.followers).items():
print friend.screen_name
def get_arrogance_list(api, user_name):
""" Whom you follow and doesn't follow back """
following = api.friends_ids(user_name)
followers = api.followers_ids(user_name)
arrogance_list = []
for user_id in following:
if user_id not in followers and user_id not in PARDON_LIST:
arrogance_list.append(user_id)
return arrogance_list
def get_losers_list(api, user_name):
""" Who follows you and whom you don't follow back """
following = api.friends_ids(user_name)
followers = api.followers_ids(user_name)
losers_list = []
for user_id in followers:
if user_id not in following:
losers_list.append(user_id)
return losers_list
def clean_following_list(api):
""" Unfollow those who doesn't follow back """
user = api.me()
users_to_unfollow = get_arrogance_list(api=api, user_name=user.screen_name)
for user_id in users_to_unfollow:
unfollowed_user = api.destroy_friendship(user_id)
print 'Unfollowed: ' + unfollowed_user.screen_name
def generate_report(api):
""" Generates complete report for Authenticated User """
generate_summary_report(api=api)
generate_follower_list(api=api)
generate_following_list(api=api)
def get_user(api, user_name, min_details=False):
""" Get User Details """
print api.get_user(user_name)
if not min_details:
print 'Following: ' + str(api.friends_ids(user_name))
print 'Followed By: ' + str(api.followers_ids(user_name))
def find_people(api, query):
""" Find People """
for user in tweepy.Cursor(api.search_users, q=query).items():
print user.screen_name
def get_status(api, status_id):
""" Get Status Details """
status = api.get_status(status_id)
print status.text
print str(status)
def show_rate_limit(api):
""" Show Rate Limit """
print str(api.rate_limit_status())
def new_tweet(api):
""" New Tweet """
tweet = raw_input('Tweet here buddy: ')
#tweet = tweet + '\nvia #Mischief'
if len(tweet) <= 140:
api.update_status(status=tweet)
else:
print 'Please remove extra ' + len(tweet)-140 + ' characters.'
def show_diff_lists(api, user_name):
""" Show arrogance and losers lists of a user """
print ('Arrogance List: ' +
str(get_arrogance_list(api=api, user_name=user_name)))
print '\n-----------------------------------\n'
print 'Losers List: ' + str(get_losers_list(api=api, user_name=user_name))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
1114,
38625,
8265,
11,
477,
262,
31904,
5050,
389,
198,
220,
220,
220,
2087,
287,
428,
2393,
11,
329,
2836,
284,
779,
287,
4755,
13,
705,
7061,
198,
19... | 2.72674 | 1,365 |
from data_for_tests import Kermany_DataSet
import timm
import wandb
import os
from timm.models.swin_transformer import SwinTransformer
from utils import *
from res_models import *
from model_running import *
from convnext import convnext_base, convnext_large, convnext_xlarge
import numpy as np
import random
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
import torch
import matplotlib.pyplot as plt
from torchvision import transforms as transforms
import cv2 as cv
import cv2
import umap
wandb.init(project="featureViz")
seed = 25
torch.manual_seed(hash("by removing stochasticity") % seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(hash("so runs are repeatable") % seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def_args = dot_dict({
"train": ["../../../data/kermany/train"],
"val": ["../../../data/kermany/val"],
"test": ["../../../data/kermany/test"],
})
label_names = [
"NORMAL",
"CNV",
"DME",
"DRUSEN",
]
test_dataset = Kermany_DataSet(def_args.test[0])
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=1,
shuffle=True)
names = ["convnext_base"] # , "res50", "res101", "res152"]
models = [convnext_base(pretrained=False, num_classes=4)] # , Resnet50(4), Resnet101(4), Resnet152(4)]
with torch.no_grad():
for name, model in zip(names, models):
embds = []
colors = []
model.load_state_dict(torch.load(f'{name}.pt', map_location=torch.device(device)))
model = model.to(device)
correct = 0.0
correct_arr = [0.0] * 10
total = 0.0
total_arr = [0.0] * 10
predictions = None
ground_truth = None
# Iterate through test dataset
for i, (images, labels) in enumerate(test_loader):
if i % 10 == 0:
print(f'image : {i}\n\n\n')
images = Variable(images).to(device)
labels = labels.to(device)
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
correct += (predicted == labels).sum()
for label in range(4):
correct_arr[label] += (((predicted == labels) & (labels == label)).sum())
total_arr[label] += (labels == label).sum()
if i == 0:
predictions = predicted
ground_truth = labels
else:
predictions = torch.cat((predictions, predicted), 0)
ground_truth = torch.cat((ground_truth, labels), 0)
accuracy = correct / total
# pass the image through all the layers
# visualize 64 features from each layer
# (although there are more feature maps in the upper layers)
layer_viz = model.forward_features(images)
embds.append(layer_viz.data.flatten().cpu().detach().numpy())
colors.append(labels.item())
embds = np.array(embds)
colors = np.array(colors)
embedding = umap.UMAP(n_components=3).fit_transform(embds)
plt.scatter(embedding[:, 0], embedding[:, 1], c=colors)
plt.gca().legend(tuple(label_names))
plt.title(f'Feature Map of {name} Network 2_')
plt.show()
plt.savefig(f'Feature Map of {name} Network 2_')
plt.close()
point_cloud = np.hstack([embedding, colors.reshape(-1, 1)])
wandb.log({f"3D_UMAP_FeatureMap_{name}": wandb.Object3D(point_cloud)})
metrics = {f'Test Accuracy_{name}': accuracy}
for label in range(4):
metrics[f'Test Accuracy_{name}' + label_names[label]] = correct_arr[label] / total_arr[label]
wandb.log(metrics)
| [
6738,
1366,
62,
1640,
62,
41989,
1330,
509,
2224,
88,
62,
6601,
7248,
198,
11748,
4628,
76,
198,
11748,
11569,
65,
198,
11748,
28686,
198,
6738,
4628,
76,
13,
27530,
13,
2032,
259,
62,
7645,
16354,
1330,
2451,
259,
8291,
16354,
198,
... | 2.241361 | 1,910 |
# -*- coding: utf-8 -*-
'''
Escribe un programa troceador.py que pedirá un fichero de una imagen o una canción y la troceará en archivos más pequeños de 521 bytes.
El programa irá numerandolos archivos (trozo1, trozo2, etc) Un segundo programa tomará los archivos troceados y recompondrá el archivo original
'''
import os
cont = 1
fw = open('unido.jpg', 'wb')
namefile = 'trozo' + str(cont)
print namefile
while os.path.exists(namefile):
cont += 1
fr = abrir_trozo(namefile)
reconstruir_fichero(fr,fw)
fr.close()
namefile = 'trozo' + str(cont)
print namefile
fw.close() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
36,
12522,
555,
1430,
64,
4161,
344,
7079,
13,
9078,
8358,
7190,
343,
6557,
555,
277,
291,
11718,
390,
555,
64,
3590,
268,
267,
555,
64,
460,
979,... | 2.404 | 250 |
# -*- coding: utf-8 -*-
import pytest
from crawlib.cache import create_cache_here
from crawlib.cached_request import CachedRequest
from crawlib.tests.dummy_site.music.view import (
max_n_artist, max_n_genre,
)
from crawlib.tests.dummy_site_crawler.mongo_backend.s2_music import MusicPage
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache, log_cache_miss=True, expire=24 * 3600)
spider.use_requests()
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
27784,
8019,
13,
23870,
1330,
2251,
62,
23870,
62,
1456,
198,
6738,
27784,
8019,
13,
66,
2317,
62,
25927,
1330,
327,
2317,
18453... | 2.587156 | 218 |
#!/usr/bin/python3
__author__ = "yang.dd"
"""
example 087
python是按值传递参数
"""
if __name__ == "__main__":
a = student()
a.x = 3
a.c = 'a'
f(a)
print(a.x, a.c)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
834,
9800,
834,
796,
366,
17859,
13,
1860,
1,
198,
198,
37811,
198,
220,
220,
220,
1672,
657,
5774,
198,
220,
220,
220,
21015,
42468,
162,
234,
231,
161,
222,
120,
27670,
254,
34... | 1.652174 | 115 |
import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.model import QNetwork
from utils.replay_buffer import ReplayBuffer
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
GAMMA = 0.99
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 5 # UPDATE FREQUENCY: how often to update the local network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# get targets by doing a forward pass of the next states in the target network
self.qnetwork_target.eval()
with torch.no_grad():
Q_targets_next = torch.max(self.qnetwork_target.forward(next_states), dim=1, keepdim=True)[0]
# distinguish the cases in which next states are terminal and those which are not
# for the first case the targets are only the one-step rewards
Q_targets = rewards + (GAMMA * Q_targets_next * (1 - dones))
# get outputs by forward pass of states in the local network
# Note: our qnetwork for a given state all action values for that state.
# However, for each state we know what action to do, so we gather all corresponding action values
self.qnetwork_local.train()
Q_expected = self.qnetwork_local.forward(states).gather(1, actions)
# compute the mean squared error of the Bellman Eq.
loss = F.mse_loss(Q_expected, Q_targets)
# clear gradients buffer from previous iteration
self.optimizer.zero_grad()
# backprop error through local network
loss.backward()
# update weights of local network by taking one SGD step
self.optimizer.step()
# update target network by copying the latest weights of the locat network
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = tau*θ_local + (1 - tau)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
| [
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
198,
6738,
12351,
13,
19849,
1330,
1195,
26245,
198,
6738,
3384,
4... | 2.477183 | 1,775 |
import pyaudio
import os
import struct
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import time
from tkinter import TclError
# # to display in separate Tk window
# %matplotlib tk
from keras.models import Sequential, Model, model_from_json
from keras import losses
import keras
import pickle
import wave # !pip install wave
# import os
import sys
import warnings
import librosa
import librosa.display
import IPython.display as ipd # To play sound in the notebook
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
# ignore warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
# def mainloop(self):
# while (self.stream.is_active()): # if using button you can set self.stream to 0 (self.stream = 0), otherwise you can use a stop condition
# time.sleep(0.5)
# return self.emotion
| [
11748,
12972,
24051,
198,
11748,
28686,
198,
11748,
2878,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
13,
487,
83,
80... | 3.032468 | 308 |
import unittest
import stringToLong
| [
11748,
555,
715,
395,
198,
198,
11748,
4731,
2514,
14617,
198
] | 3.363636 | 11 |
from sklearn.ensemble import GradientBoostingClassifier
import argparse
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("training", help="File path to the training set")
parser.add_argument("validation", help="File path to the validation set")
parser.add_argument("-n", "--name", help="Name to help describe the output neural net and standardizer", default="")
args = parser.parse_args()
train = np.load(args.training)
val = np.load(args.validation)
train_x = train[:, 1:]
train_y = train[:, 0]
val_x = val[:, 1:]
val_y = val[:, 0]
params = dict(max_depth=8, learning_rate=0.1, n_estimators=1000, min_samples_leaf=0.045, subsample=0.5, min_samples_split=20)
bdt = GradientBoostingClassifier(**params).fit(train_x, train_y)
bdt.score(val_x, val_y)*100
with open("{}_bdt.pkl".format(args.name), 'wb') as f:
pickle.dump(bdt, f) | [
6738,
1341,
35720,
13,
1072,
11306,
1330,
17701,
1153,
45686,
278,
9487,
7483,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610... | 2.780255 | 314 |
from django.contrib import admin
from learning.models import CurrentReadingBook, Course
admin.site.register(CurrentReadingBook)
admin.site.register(Course)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
4673,
13,
27530,
1330,
9236,
36120,
10482,
11,
20537,
198,
198,
28482,
13,
15654,
13,
30238,
7,
11297,
36120,
10482,
8,
198,
28482,
13,
15654,
13,
30238,
7,
49046,
8,
198... | 3.761905 | 42 |
# [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] https://github.com/brianddk/pypaperwallet
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] https://gist.github.com/brianddk/3ec16fbf1d008ea290b0
from winreg import OpenKey, EnumKey, QueryValueEx, QueryInfoKey
from winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE
from os.path import exists, isdir, join
from os import listdir
from os import environ
cairo = 'libcairo-2.dll'
if not in_path(cairo):
libdir = find_msys2_cairo(cairo)
if(libdir):
environ["PATH"] += f";{libdir}"
# print(f"added {libdir}")
# else:
# print("ERROR: cairolib not found")
# else:
# print("cairo is in path")
# print("imported ensure")
| [
2,
685,
28046,
60,
220,
15069,
12131,
31013,
392,
34388,
379,
33084,
3740,
1378,
12567,
13,
785,
14,
65,
380,
392,
34388,
198,
2,
685,
43085,
60,
24843,
362,
13,
15,
13789,
3740,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
... | 2.298103 | 369 |
from ompc import
@mfunction("out, filter")
| [
6738,
267,
3149,
66,
1330,
201,
198,
201,
198,
31,
76,
8818,
7203,
448,
11,
8106,
4943,
201,
198
] | 2.473684 | 19 |
from django.urls import path, re_path
from registers import views
urlpatterns = [
path('itsystem/export/', views.ITSystemExport.as_view(), name='itsystem_export'),
path('itsystem/discrepancy-report/', views.ITSystemDiscrepancyReport.as_view(), name='itsystem_discrepancy_report'),
path('incident/', views.IncidentList.as_view(), name='incident_list'),
path('incident/<int:pk>/', views.IncidentDetail.as_view(), name='incident_detail'),
path('changerequest/', views.ChangeRequestList.as_view(), name='change_request_list'),
path('changerequest/<int:pk>/', views.ChangeRequestDetail.as_view(), name='change_request_detail'),
path('changerequest/<int:pk>/change/', views.ChangeRequestChange.as_view(), name='change_request_change'),
path('changerequest/<int:pk>/endorse/', views.ChangeRequestEndorse.as_view(), name='change_request_endorse'),
path('changerequest/<int:pk>/approval/', views.ChangeRequestApproval.as_view(), name='change_request_approval'),
path('changerequest/<int:pk>/complete/', views.ChangeRequestComplete.as_view(), name='change_request_complete'),
path('changerequest/add/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
path('changerequest/create/', views.ChangeRequestCreate.as_view(), name='change_request_create'),
path('changerequest/create-standard/', views.ChangeRequestCreate.as_view(), name='std_change_request_create', kwargs={'std': True}),
path('changerequest/create-emergency/', views.ChangeRequestCreate.as_view(), name='emerg_change_request_create', kwargs={'emerg': True}),
path('changerequest/calendar/', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
re_path('^changerequest/calendar/(?P<date>\d{4}-\d{1,2}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
re_path('^changerequest/calendar/(?P<date>\d{4}-\d{1,2})/$', views.ChangeRequestCalendar.as_view(), name='change_request_calendar'),
path('changerequest/export/', views.ChangeRequestExport.as_view(), name='change_request_export'),
path('standardchange/', views.StandardChangeList.as_view(), name='standard_change_list'),
path('standardchange/<int:pk>/', views.StandardChangeDetail.as_view(), name='standard_change_detail'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
302,
62,
6978,
198,
6738,
28441,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
896,
6781,
14,
39344,
14,
3256,
5009,
13,
2043,
11964,
43834,
13,
2... | 2.944516 | 775 |
import json, re
dataFileName = 'courses.json'
slotFileName = 'slots.1.txt'
if __name__ == '__main__':
print( searchData( input('Search for: ') ) ) | [
11748,
33918,
11,
302,
198,
198,
7890,
8979,
5376,
796,
705,
66,
39975,
13,
17752,
6,
198,
43384,
8979,
5376,
796,
705,
6649,
1747,
13,
16,
13,
14116,
6,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
... | 2.614035 | 57 |
from construct import (
Struct, Sequence,
PrefixedArray, If, Computed,
this,
)
from distance.bytes import Magic, Section
from distance.construct import (
BaseConstructFragment,
Int, UInt, Bytes, Byte, Float,
DstString, Remainder,
)
from distance.classes import CollectorGroup
from distance._common import (
ModesMapperProperty,
MedalTimesMapperProperty,
MedalScoresMapperProperty,
)
from distance._impl.level_content.levelsettings_base import BaseLevelSettings
Classes = CollectorGroup()
@Classes.fragments.fragment(any_version=True)
# vim:set sw=4 et:
| [
198,
198,
6738,
5678,
1330,
357,
198,
220,
220,
220,
32112,
11,
45835,
11,
198,
220,
220,
220,
3771,
34021,
19182,
11,
1002,
11,
955,
17128,
11,
198,
220,
220,
220,
428,
11,
198,
8,
198,
198,
6738,
5253,
13,
33661,
1330,
6139,
11,... | 3.066667 | 195 |
# Generated by Django 3.0.8 on 2020-11-10 10:01
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
1157,
12,
940,
838,
25,
486,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import random
import re
RE_PROVERKA = re.compile(r'[а-яА-Я]+')
f = open('text', 'r', encoding='utf-8')
text = f.read()
print(text)
bad_chars = [';', ':', '?', '.', ',', '!', '~', '\n', '…', '-']
for i in bad_chars:
text = text.replace(i, ' ')
text = text.split(" ")
slova = [w for w in filter(RE_PROVERKA.match, text)]
print(slova, sep='\n')
i = 0
while i != 20:
i += 1
print(i, random.choice(slova))
f.close() | [
11748,
4738,
198,
11748,
302,
628,
198,
198,
2200,
62,
31190,
5959,
25123,
796,
302,
13,
5589,
576,
7,
81,
6,
58,
16142,
12,
40623,
140,
238,
12,
140,
107,
48688,
11537,
198,
69,
796,
1280,
10786,
5239,
3256,
705,
81,
3256,
21004,
... | 2.118227 | 203 |
#!/usr/bin/env python
# encoding: utf-8
"""
initconftest.py
Created by 黄 冬 on 2007-11-19.
Copyright (c) 2007 __MyCompanyName__. All rights reserved.
"""
import basetest
import logging.config
import os
import pwd
import shutil
import tempfile
import time
import unittest
log = logging.getLogger('xbaydns.tests.initconftest')
#logging.basicConfig(level=logging.DEBUG)
from xbaydns.tools import initconf
from xbaydns.conf import sysconf
from xbaydns.utils import shtools
def suite():
"""集合测试用例"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InitConfTest, 'test'))
return suite
"""
单独运行command的测试用例
"""
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
15003,
1102,
701,
395,
13,
9078,
198,
198,
41972,
416,
16268,
119,
226,
10263,
228,
105,
319,
4343,
12,
1157,
12,
1129,
13,
198,
15... | 2.416949 | 295 |
__version__ = "1.0.0.dev1"
default_app_config = "djangocms_url_manager.apps.UrlManagerConfig"
| [
834,
9641,
834,
796,
366,
16,
13,
15,
13,
15,
13,
7959,
16,
1,
198,
198,
12286,
62,
1324,
62,
11250,
796,
366,
28241,
648,
420,
907,
62,
6371,
62,
37153,
13,
18211,
13,
28165,
13511,
16934,
1,
198
] | 2.435897 | 39 |
# Generated by Django 2.0.5 on 2018-05-07 17:37
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2713,
12,
2998,
1596,
25,
2718,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
from hpctestlib.python.numpy.numpy_ops import numpy_ops_check
@rfm.simple_test
| [
2,
15069,
1584,
12,
1238,
2481,
14780,
2351,
3115,
785,
48074,
9072,
357,
34,
6173,
50,
14,
20702,
43412,
8,
198,
2,
797,
19778,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
38559,
24290,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
... | 3.010101 | 99 |
import os
import ndjson
import pandas as pd
"""
Makes daily language specific files in correct format
"""
# define languages to extract
langs = ["da", "no", "sv"]
# make a function that transforms a pandas DF to ndjson format (found on stackoverflow)
# List file paths from folders with raw data
raw1 = ["/data/001_twitter_hope/raw/nordic-tweets/" + f for f in
os.listdir("/data/001_twitter_hope/raw/nordic-tweets")
if f.endswith(".tsv")]
raw2 = ["/data/001_twitter_hope/raw/nordic-tweets-2/" + f
for f in os.listdir("/data/001_twitter_hope/raw/nordic-tweets-2")
if f.endswith(".tsv")]
# combine file paths
raw_files = raw1 + raw2
# read in logfile to see which files have already been processed
logfile = "processed_files_log/nordic_language_extracted.ndjson"
with open(logfile) as log:
done = ndjson.load(log)
# keep only files that have not been processed yet + sort
raw_files = [f for f in raw_files if f not in done]
raw_files.sort()
# define which variables to keep in the output format
column_list = ['id', 'created_at', 'from_user_id', 'text', 'lang', 'favorite_count', 'retweet_count']
# loop through new filepaths
for path_ in raw_files:
# extract identifiers from the file path
id = path_[-14:-4]
year = id[:4]
month = id[5:7]
day = id[8:10]
print(f"Processing {year}{month}{day}")
# load raw data in tsv format
df = pd.read_csv(path_, sep='\t', skipinitialspace=True, usecols = column_list)
# loop through the desired language list
for language in langs:
print(f"extract {language}")
# filter data for the desired language using twitter lang tag
df_lang = df[df.lang.eq(language)]
# convert data to ndjson and write it down
print("Writing down...")
df_js = iterndjson(df_lang)
output_path=f"/data/001_twitter_hope/preprocessed/{language}/td_{year}{month}{day}_{language}.ndjson"
with open(output_path, "w") as f:
ndjson.dump(df_js, f)
# Add newly processed filenames to the log file
with open(logfile, "a") as out:
writer = ndjson.writer(out, ensure_ascii=False)
for line in raw_files:
writer.writerow(line) | [
11748,
28686,
198,
11748,
299,
67,
17752,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
198,
37811,
198,
44,
1124,
4445,
3303,
2176,
3696,
287,
3376,
5794,
198,
37811,
198,
198,
2,
8160,
8950,
284,
7925,
198,
17204,
82,
796,
14631,
... | 2.527936 | 877 |
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import Slider, RadioButtons
from DSprites_VAE.src.model import VAE
from DSprites_VAE.src.utils import load_data, get_batch, create_categories_map
if __name__ == '__main__':
show()
| [
11748,
4738,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
13,
28029,
11407,
1330,
3454,
1304,
11,
8829,
153... | 2.877358 | 106 |
import datetime
from django.template import engines
from django.test import TestCase
from regulations3k.jinja2tags import ap_date, regs_hide_on_mobile
| [
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
28243,
1330,
11874,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
6647,
18,
74,
13,
18594,
6592,
17,
31499,
1330,
2471,
62,
4475,
11,
842,
82,
62,
24717,
62,... | 3.347826 | 46 |
from typing import Generic, TypeVar, Optional, List
from pydantic import Field
from pydantic.generics import GenericModel
T = TypeVar("T")
| [
6738,
19720,
1330,
42044,
11,
5994,
19852,
11,
32233,
11,
7343,
198,
198,
6738,
279,
5173,
5109,
1330,
7663,
198,
6738,
279,
5173,
5109,
13,
8612,
873,
1330,
42044,
17633,
628,
198,
51,
796,
5994,
19852,
7203,
51,
4943,
628,
198
] | 3.512195 | 41 |
import matplotlib.pyplot as plt
import numpy as np
tray = np.genfromtxt("poblaciones.dat",delimiter=",")
a = tray[:,0]
b = tray[:,1]
c = tray[:,2]
d = tray[:,3]
fig = plt.figure(figsize = (20,20))
plt.subplot(2,3,1)
plt.scatter(a,b)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\beta$' )
plt.subplot(2,3,2)
plt.scatter(a,c)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\gamma$' )
plt.subplot(2,3,3)
plt.scatter(a,d)
plt.xlabel(r'$\alpha$' )
plt.ylabel(r'$\delta$' )
plt.subplot(2,3,4)
plt.scatter(b,c)
plt.xlabel(r'$\beta$' )
plt.ylabel(r'$\gamma$' )
plt.subplot(2,3,5)
plt.scatter(b,d)
plt.xlabel(r'$\beta$')
plt.ylabel(r'$\delta$')
plt.subplot(2,3,3)
plt.scatter(c,d)
plt.xlabel(r'$\gamma$' )
plt.ylabel(r'$\delta$' )
plt.savefig("poblaciones.pdf",dpi = 400)
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2213,
323,
796,
45941,
13,
5235,
6738,
14116,
7203,
79,
45292,
49443,
274,
13,
19608,
1600,
12381,
320,
2676,
28,
2430,
8,
198,
198... | 1.704698 | 447 |
from django.test import TestCase
from model_mommy import mommy
from ..models import Human, Child, Parent, Sibling, Avatar, User
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
2746,
62,
32542,
1820,
1330,
1995,
1820,
198,
6738,
11485,
27530,
1330,
5524,
11,
5932,
11,
16774,
11,
311,
27448,
11,
26703,
11,
11787,
628
] | 3.685714 | 35 |
"""Common color snippets."""
import re
from spacy import registry
from traiter import actions
from traiter import const as t_const
from traiter.patterns import matcher_patterns
from . import common_patterns
from . import term_patterns
from .. import consts
MULTIPLE_DASHES = ["\\" + c for c in t_const.DASH_CHAR]
MULTIPLE_DASHES = rf'\s*[{"".join(MULTIPLE_DASHES)}]{{2,}}\s*'
SKIP = t_const.DASH + common_patterns.MISSING
COLOR = matcher_patterns.MatcherPatterns(
"color",
on_match="mimosa.color.v1",
decoder=common_patterns.COMMON_PATTERNS
| {
"color_words": {"ENT_TYPE": {"IN": ["color", "color_mod"]}},
"color": {"ENT_TYPE": "color"},
"to": {"POS": {"IN": ["AUX"]}},
},
patterns=[
"missing? color_words* -* color+ -* color_words*",
"missing? color_words+ to color_words+ color+ -* color_words*",
],
)
@registry.misc(COLOR.on_match)
| [
37811,
17227,
3124,
45114,
526,
15931,
198,
11748,
302,
198,
198,
6738,
599,
1590,
1330,
20478,
198,
6738,
1291,
2676,
1330,
4028,
198,
6738,
1291,
2676,
1330,
1500,
355,
256,
62,
9979,
198,
6738,
1291,
2676,
13,
33279,
82,
1330,
2603,
... | 2.394737 | 380 |
# Be name khoda
from .node import Node as Node
from . import tools as tools
| [
2,
1355,
1438,
479,
2065,
64,
198,
6738,
764,
17440,
1330,
19081,
355,
19081,
198,
6738,
764,
1330,
4899,
355,
4899,
628
] | 3.5 | 22 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Board, Reply
admin.site.register(Board)
admin.site.register(Reply)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
5926,
11,
14883,
... | 2.968254 | 63 |
# Generated by Django 2.2.2 on 2020-03-22 05:15
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
17,
319,
12131,
12,
3070,
12,
1828,
8870,
25,
1314,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.567568 | 37 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("SurveyToTransforms")
#process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.MessageLogger.cout.enable = cms.untracked.bool(True)
#process.MessageLogger.cout.threshold = cms.untracked.string('INFO')
#process.MessageLogger.debugModules = cms.untracked.vstring('*')
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.EcalTestBeam.idealGeomPlusEE_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.source = cms.Source("EmptySource")
process.cga = cms.EDAnalyzer("SurveyToTransforms" )
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('survey.root')
)
process.testendcap = cms.ESProducer( "testEcalEndcapGeometryEP",
applyAlignment = cms.bool(False) )
process.es_prefer_endcap = cms.ESPrefer( "testEcalEndcapGeometryEP", "testendcap" )
process.p1 = cms.Path(process.cga)
| [
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
628,
198,
14681,
796,
269,
907,
13,
18709,
7203,
14214,
3304,
2514,
8291,
23914,
4943,
198,
198,
2,
14681,
13,
2220,
7203,
24160,
14055,
13,
12837,
11187,
1362,
13,
... | 2.408497 | 612 |
class PlainText:
"""A wrapper class for representing plaintext.
Typical format of plaintext data would be [x0, x1, x2...] where xi represents
coefficients of the polynomial.
Attributes:
data: A 1-dim list representing plaintext coefficient values.
"""
| [
4871,
28847,
8206,
25,
198,
220,
220,
220,
37227,
32,
29908,
1398,
329,
10200,
8631,
5239,
13,
628,
220,
220,
220,
48752,
5794,
286,
8631,
5239,
1366,
561,
307,
685,
87,
15,
11,
2124,
16,
11,
2124,
17,
22345,
810,
2124,
72,
6870,
... | 3.241379 | 87 |
import numpy as np
import math as mt
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
10688,
355,
45079,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
201,
198,
201... | 1.346667 | 75 |
import pkg_resources
from .parser import Parser
from .generate import generate | [
11748,
279,
10025,
62,
37540,
198,
198,
6738,
764,
48610,
1330,
23042,
263,
198,
6738,
764,
8612,
378,
1330,
7716
] | 3.95 | 20 |
#!/usr/bin/env python
# encoding: utf-8
"""
tl_tweets.py
Copyright (c) 2015 Rob Mason
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Twitter: @Teslaliving
Blog: http://teslaliving.net
Description:
Twitter Helper Functions
Dependencies: twython: https://github.com/ryanmcgrath/twython
You need to get application keys for Twitter at https://apps.twitter.com
Provide them via environment variables:
TL_APP_KEY
TL_APP_SECRET
TL_OAUTH_TOKEN
TL_OAUTH_TOKEN_SECRET
Or via init function.
Note: The logging stuff is as Twython emits a bunch of stuff during its work that I wanted to suppress
"""
import os
import sys
import time
import random
import logging
import sys
basepath = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(basepath, 'twython'))
from twython import Twython, TwythonAuthError
# Initialize Twitter Keys
APP_KEY = None
APP_SECRET = None
OAUTH_TOKEN = None
OAUTH_TOKEN_SECRET = None
# Cache self ID
MYSELF = None
if 'TL_APP_KEY' in os.environ:
APP_KEY = os.environ['TL_APP_KEY']
if 'TL_APP_SECRET' in os.environ:
APP_SECRET = os.environ['TL_APP_SECRET']
if 'TL_OAUTH_TOKEN' in os.environ:
OAUTH_TOKEN = os.environ['TL_OAUTH_TOKEN']
if 'TL_OAUTH_TOKEN_SECRET' in os.environ:
OAUTH_TOKEN_SECRET = os.environ['TL_OAUTH_TOKEN_SECRET']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
28781,
62,
83,
732,
1039,
13,
9078,
198,
198,
15269,
357,
66,
8,
1853,
3851,
14737,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
147... | 3.094595 | 740 |
# Module: Anomaly Detection
# Author: Moez Ali <moez.ali@queensu.ca>
# License: MIT
# Release: PyCaret 2.2.0
# Last modified : 25/10/2020
import logging
import pandas as pd
import numpy as np
from pycaret.internal.pycaret_experiment import AnomalyExperiment, ClusteringExperiment
from pycaret.internal.utils import check_if_global_is_not_none
from typing import List, Tuple, Any, Union, Optional, Dict
import warnings
warnings.filterwarnings("ignore")
_EXPERIMENT_CLASS = AnomalyExperiment
_CURRENT_EXPERIMENT = None
_CURRENT_EXPERIMENT_EXCEPTION = (
"_CURRENT_EXPERIMENT global variable is not set. Please run setup() first."
)
_CURRENT_EXPERIMENT_DECORATOR_DICT = {
"_CURRENT_EXPERIMENT": _CURRENT_EXPERIMENT_EXCEPTION
}
def setup(
data,
preprocess: bool = True,
imputation_type: str = "simple",
iterative_imputation_iters: int = 5,
categorical_features: Optional[List[str]] = None,
categorical_imputation: str = "mode",
categorical_iterative_imputer: Union[str, Any] = "lightgbm",
ordinal_features: Optional[Dict[str, list]] = None,
high_cardinality_features: Optional[List[str]] = None,
high_cardinality_method: str = "frequency",
numeric_features: Optional[List[str]] = None,
numeric_imputation: str = "mean",
numeric_iterative_imputer: Union[str, Any] = "lightgbm",
date_features: Optional[List[str]] = None,
ignore_features: Optional[List[str]] = None,
normalize: bool = False,
normalize_method: str = "zscore",
transformation: bool = False,
transformation_method: str = "yeo-johnson",
handle_unknown_categorical: bool = True,
unknown_categorical_method: str = "least_frequent",
pca: bool = False,
pca_method: str = "linear",
pca_components: Optional[float] = None,
ignore_low_variance: bool = False,
combine_rare_levels: bool = False,
rare_level_threshold: float = 0.10,
bin_numeric_features: Optional[List[str]] = None,
remove_multicollinearity: bool = False,
multicollinearity_threshold: float = 0.9,
remove_perfect_collinearity: bool = False,
group_features: Optional[List[str]] = None,
group_names: Optional[List[str]] = None,
n_jobs: Optional[int] = -1,
use_gpu: bool = False,
custom_pipeline: Union[
Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]
] = None,
html: bool = True,
session_id: Optional[int] = None,
system_log: Union[bool, logging.Logger] = True,
log_experiment: bool = False,
experiment_name: Optional[str] = None,
log_plots: Union[bool, list] = False,
log_profile: bool = False,
log_data: bool = False,
silent: bool = False,
verbose: bool = True,
profile: bool = False,
profile_kwargs: Dict[str, Any] = None,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It
takes one mandatory parameter: ``data``. All the other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> anomaly = get_data('anomaly')
>>> from pycaret.anomaly import *
>>> exp_name = setup(data = anomaly)
data: pandas.DataFrame
Shape (n_samples, n_features), where n_samples is the number of samples and
n_features is the number of features.
preprocess: bool, default = True
When set to False, no transformations are applied except for custom
transformations passed in ``custom_pipeline`` param. Data must be
ready for modeling (no missing values, no dates, categorical data encoding),
when preprocess is set to False.
imputation_type: str, default = 'simple'
The type of imputation to use. Can be either 'simple' or 'iterative'.
iterative_imputation_iters: int, default = 5
Number of iterations. Ignored when ``imputation_type`` is not 'iterative'.
categorical_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
categorical_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are categorical.
categorical_imputation: str, default = 'constant'
Missing values in categorical features are imputed with a constant 'not_available'
value. The other available option is 'mode'.
categorical_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in categorical features.
Ignored when ``imputation_type`` is not 'iterative'.
ordinal_features: dict, default = None
Encode categorical features as ordinal. For example, a categorical feature with
'low', 'medium', 'high' values where low < medium < high can be passed as
ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
high_cardinality_features: list of str, default = None
When categorical features contains many levels, it can be compressed into fewer
levels using this parameter. It takes a list of strings with column names that
are categorical.
high_cardinality_method: str, default = 'frequency'
Categorical features with high cardinality are replaced with the frequency of
values in each level occurring in the training dataset. Other available method
is 'clustering' which trains the K-Means clustering algorithm on the statistical
attribute of the training data and replaces the original value of feature with the
cluster label. The number of clusters is determined by optimizing Calinski-Harabasz
and Silhouette criterion.
numeric_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
numeric_features param can be used to overwrite or define the data types.
It takes a list of strings with column names that are numeric.
numeric_imputation: str, default = 'mean'
Missing values in numeric features are imputed with 'mean' value of the feature
in the training dataset. The other available option is 'median' or 'zero'.
numeric_iterative_imputer: str, default = 'lightgbm'
Estimator for iterative imputation of missing values in numeric features.
Ignored when ``imputation_type`` is set to 'simple'.
date_features: list of str, default = None
If the inferred data types are not correct or the silent param is set to True,
date_features param can be used to overwrite or define the data types. It takes
a list of strings with column names that are DateTime.
ignore_features: list of str, default = None
ignore_features param can be used to ignore features during model training.
It takes a list of strings with column names that are to be ignored.
normalize: bool, default = False
When set to True, it transforms the numeric features by scaling them to a given
range. Type of scaling is defined by the ``normalize_method`` parameter.
normalize_method: str, default = 'zscore'
Defines the method for scaling. By default, normalize method is set to 'zscore'
The standard zscore is calculated as z = (x - u) / s. Ignored when ``normalize``
is not True. The other options are:
- minmax: scales and translates each feature individually such that it is in
the range of 0 - 1.
- maxabs: scales and translates each feature individually such that the
maximal absolute value of each feature will be 1.0. It does not
shift/center the data, and thus does not destroy any sparsity.
- robust: scales and translates each feature according to the Interquartile
range. When the dataset contains outliers, robust scaler often gives
better results.
transformation: bool, default = False
When set to True, it applies the power transform to make data more Gaussian-like.
Type of transformation is defined by the ``transformation_method`` parameter.
transformation_method: str, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is
set to 'yeo-johnson'. The other available option for transformation is 'quantile'.
Ignored when ``transformation`` is not True.
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in unseen data are replaced by the
most or least frequent level as learned in the training dataset.
unknown_categorical_method: str, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in ``pca_method`` parameter.
pca_method: str, default = 'linear'
The 'linear' method performs uses Singular Value Decomposition. Other options are:
- kernel: dimensionality reduction through the use of RBF kernel.
- incremental: replacement for 'linear' pca when the dataset is too large.
pca_components: int or float, default = None
Number of components to keep. if pca_components is a float, it is treated as a
target percentage for information retention. When pca_components is an integer
it is treated as the number of features to be kept. pca_components must be less
than the original number of features. Ignored when ``pca`` is not True.
ignore_low_variance: bool, default = False
When set to True, all categorical features with insignificant variances are
removed from the data. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, frequency percentile for levels in categorical features below
a certain threshold is combined into a single level.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Ignored when
``combine_rare_levels`` is not True.
bin_numeric_features: list of str, default = None
To convert numeric features into categorical, bin_numeric_features parameter can
be used. It takes a list of strings with column names to be discretized. It does
so by using 'sturges' rule to determine the number of clusters and then apply
KMeans algorithm. Original values of the feature are then replaced by the
cluster label.
remove_multicollinearity: bool, default = False
When set to True, features with the inter-correlations higher than the defined
threshold are removed. When two features are highly correlated with each other,
the feature that is less correlated with the target variable is removed. Only
considers numeric features.
multicollinearity_threshold: float, default = 0.9
Threshold for correlated features. Ignored when ``remove_multicollinearity``
is not True.
remove_perfect_collinearity: bool, default = True
When set to True, perfect collinearity (features with correlation = 1) is removed
from the dataset, when two features are 100% correlated, one of it is randomly
removed from the dataset.
group_features: list or list of list, default = None
When the dataset contains features with related characteristics, group_features
parameter can be used for feature extraction. It takes a list of strings with
column names that are related.
group_names: list, default = None
Group names to be used in naming new features. When the length of group_names
does not match with the length of ``group_features``, new features are named
sequentially group_1, group_2, etc. It is ignored when ``group_features`` is
None.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that supports parallel
processing) -1 means using all processors. To run all functions on single
processor set n_jobs to None.
use_gpu: bool or str, default = False
When set to True, it will use GPU for training with algorithms that support it,
and fall back to CPU if they are unavailable. When set to 'force', it will only
use GPU-enabled algorithms and raise exceptions when they are unavailable. When
False, all algorithms are trained using CPU only.
GPU enabled algorithms:
- None at this moment.
custom_pipeline: (str, transformer) or list of (str, transformer), default = None
When passed, will append the custom transformers in the preprocessing pipeline
and are applied on each CV fold separately and on the final fit. All the custom
transformations are applied before pycaret's internal transformations.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
system_log: bool or logging.Logger, default = True
Whether to save the system logging file (as logs.log). If the input
already is a logger object, that one is used instead.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLFlow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
To change the type of plots to be logged, pass a list containing plot IDs. Refer
to documentation of ``plot_model``. Ignored when ``log_experiment`` is not True.
log_profile: bool, default = False
When set to True, data profile is logged on the ``MLflow`` server as a html file.
Ignored when ``log_experiment`` is not True.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
silent: bool, default = False
Controls the confirmation input of data types when ``setup`` is executed. When
executing in completely automated mode or on a remote kernel, this must be True.
verbose: bool, default = True
When set to False, Information grid is not printed.
profile: bool, default = False
When set to True, an interactive EDA report is displayed.
profile_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ProfileReport method used
to create the EDA report. Ignored if ``profile`` is False.
Returns:
Global variables that can be changed using the ``set_config`` function.
"""
exp = _EXPERIMENT_CLASS()
set_current_experiment(exp)
return exp.setup(
data=data,
preprocess=preprocess,
imputation_type=imputation_type,
iterative_imputation_iters=iterative_imputation_iters,
categorical_features=categorical_features,
categorical_imputation=categorical_imputation,
categorical_iterative_imputer=categorical_iterative_imputer,
ordinal_features=ordinal_features,
high_cardinality_features=high_cardinality_features,
high_cardinality_method=high_cardinality_method,
numeric_features=numeric_features,
numeric_imputation=numeric_imputation,
numeric_iterative_imputer=numeric_iterative_imputer,
date_features=date_features,
ignore_features=ignore_features,
normalize=normalize,
normalize_method=normalize_method,
transformation=transformation,
transformation_method=transformation_method,
handle_unknown_categorical=handle_unknown_categorical,
unknown_categorical_method=unknown_categorical_method,
pca=pca,
pca_method=pca_method,
pca_components=pca_components,
ignore_low_variance=ignore_low_variance,
combine_rare_levels=combine_rare_levels,
rare_level_threshold=rare_level_threshold,
bin_numeric_features=bin_numeric_features,
remove_multicollinearity=remove_multicollinearity,
multicollinearity_threshold=multicollinearity_threshold,
remove_perfect_collinearity=remove_perfect_collinearity,
group_features=group_features,
group_names=group_names,
n_jobs=n_jobs,
use_gpu=use_gpu,
custom_pipeline=custom_pipeline,
html=html,
session_id=session_id,
system_log=system_log,
log_experiment=log_experiment,
experiment_name=experiment_name,
log_plots=log_plots,
log_profile=log_profile,
log_data=log_data,
silent=silent,
verbose=verbose,
profile=profile,
profile_kwargs=profile_kwargs,
)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def create_model(
    model: Union[str, Any],
    fraction: float = 0.05,
    verbose: bool = True,
    fit_kwargs: Optional[dict] = None,
    **kwargs,
):
    """Train an anomaly-detection model from the model library.

    ``model`` may be a library ID ('abod', 'cluster', 'cof', 'histogram',
    'knn', 'lof', 'svm', 'pca', 'mcd', 'sod', 'sos' -- see ``models()`` for
    the full table) or an untrained estimator object consistent with the
    scikit-learn API.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')

    model: str or scikit-learn compatible object
        ID of a model in the model library or an untrained model object.

    fraction: float, default = 0.05
        Expected proportion of outliers in the data set; used when fitting
        to define the threshold on the decision function.

    verbose: bool, default = True
        Status update is not printed when verbose is set to False.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    **kwargs:
        Additional keyword arguments to pass to the estimator.

    Returns:
        Trained Model
    """
    # Delegate to the active experiment; the decorator guarantees that
    # ``setup`` has already populated _CURRENT_EXPERIMENT.
    training_args = {
        "estimator": model,
        "fraction": fraction,
        "fit_kwargs": fit_kwargs,
        "verbose": verbose,
    }
    # Double-star unpacking of both dicts preserves the original TypeError
    # on duplicate keyword arguments.
    return _CURRENT_EXPERIMENT.create_model(**training_args, **kwargs)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def assign_model(
    model, transformation: bool = False, score: bool = True, verbose: bool = True
) -> pd.DataFrame:
    """Attach anomaly labels from a trained model to the setup dataset.

    The returned frame carries an anomaly label column (1 = outlier,
    0 = inlier) and, when ``score`` is True, the raw outlier score.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_df = assign_model(knn)

    model: scikit-learn compatible object
        Trained model object.

    transformation: bool, default = False
        Apply the labels to the transformed dataset instead of the raw one.

    score: bool, default = True
        Whether to include the outlier score column.

    verbose: bool, default = True
        Status update is not printed when verbose is set to False.

    Returns:
        pandas.DataFrame
    """
    labelling_options = {
        "transformation": transformation,
        "score": score,
        "verbose": verbose,
    }
    return _CURRENT_EXPERIMENT.assign_model(model, **labelling_options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def plot_model(
    model,
    plot: str = "tsne",
    feature: Optional[str] = None,
    label: bool = False,
    scale: float = 1,
    save: bool = False,
    display_format: Optional[str] = None,
):
    """Render an analysis plot for a trained anomaly model.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> plot_model(knn, plot = 'tsne')

    model: scikit-learn compatible object
        Trained model object.

    plot: str, default = 'tsne'
        Plot ID. Available plots:

        * 'tsne' - t-SNE (3d) Dimension Plot
        * 'umap' - UMAP Dimensionality Plot

    feature: str, default = None
        Column used as hover tooltip and/or data label when ``label`` is
        True. The first column of the dataset is used when None.

    label: bool, default = False
        Use the ``feature`` column values as data labels.

    scale: float, default = 1
        Resolution scale of the figure.

    save: bool, default = False
        Save the plot to the current working directory.

    display_format: str, default = None
        Set to 'streamlit' to display plots in Streamlit
        (https://www.streamlit.io/). Not all plots are supported there.

    Returns:
        None
    """
    # The public ``feature`` argument maps to ``feature_name`` on the
    # experiment API.
    rendering_options = {
        "plot": plot,
        "feature_name": feature,
        "label": label,
        "scale": scale,
        "save": save,
        "display_format": display_format,
    }
    return _CURRENT_EXPERIMENT.plot_model(model, **rendering_options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def evaluate_model(
    model, feature: Optional[str] = None, fit_kwargs: Optional[dict] = None,
):
    """Display an interactive UI for analysing a trained model.

    Delegates to ``plot_model`` internally, so the available plots are the
    same.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> evaluate_model(knn)

    model: scikit-learn compatible object
        Trained model object.

    feature: str, default = None
        Column used as hover tooltip and/or label when the ``label`` param
        is set to True. The first column of the dataset is used when None.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    Returns:
        None

    Warnings
    --------
    - This function only works in IPython enabled Notebook.
    """
    ui_options = {"feature_name": feature, "fit_kwargs": fit_kwargs}
    return _CURRENT_EXPERIMENT.evaluate_model(estimator=model, **ui_options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def tune_model(
    model,
    supervised_target: str,
    supervised_type: Optional[str] = None,
    supervised_estimator: Union[str, Any] = "lr",
    method: str = "drop",
    optimize: Optional[str] = None,
    custom_grid: Optional[List[int]] = None,
    fold: int = 10,
    fit_kwargs: Optional[dict] = None,
    groups: Optional[Union[str, Any]] = None,
    round: int = 4,
    verbose: bool = True,
):
    """Tune the ``fraction`` parameter of an anomaly model against a
    supervised objective.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> juice = get_data('juice')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = juice)
    >>> tuned_knn = tune_model(model = 'knn', supervised_target = 'Purchase')

    model: str
        ID of a tunable model in the model library: 'abod', 'cluster',
        'cof', 'histogram', 'knn', 'lof', 'svm', 'pca', 'mcd', 'sod',
        'sos' (see ``models()`` for names).

    supervised_target: str
        Name of the target column containing labels.

    supervised_type: str, default = None
        Type of task: 'classification' or 'regression'. Automatically
        inferred when None.

    supervised_estimator: str, default = 'lr'
        Estimator ID used to score each candidate fraction. Accepts the
        estimator IDs of pycaret's classification module (e.g. 'lr', 'knn',
        'nb', 'dt', 'svm', 'rbfsvm', 'gpc', 'mlp', 'ridge', 'rf', 'qda',
        'ada', 'gbc', 'lda', 'et', 'xgboost', 'lightgbm', 'catboost') for
        classification tasks, or of the regression module (e.g. 'lr',
        'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
        'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et',
        'ada', 'gbr', 'mlp', 'xgboost', 'lightgbm', 'catboost') for
        regression tasks.

    method: str, default = 'drop'
        'drop' removes the detected outliers from the training dataset;
        'surrogate' instead feeds the decision function and label in as
        features during training.

    optimize: str, default = None
        Metric to optimize. Classification: Accuracy (default), AUC,
        Recall, Precision, F1, Kappa. Regression: MAE, MSE, RMSE,
        R2 (default), RMSLE, MAPE.

    custom_grid: list, default = None
        Fraction values to iterate over; a pre-defined list is used when
        None.

    fold: int, default = 10
        Number of folds used in Kfold CV. Must be at least 2.

    fit_kwargs: dict, default = {} (empty dict)
        Dictionary of arguments passed to the fit method of the model.

    groups: str or array-like, default = None
        Group labels forwarded to the underlying cross-validation.
        # NOTE(review): passed straight through to the experiment's
        # tune_model -- confirm exact semantics there.

    round: int, default = 4
        Number of decimal places metrics are rounded to.

    verbose: bool, default = True
        Status update is not printed when verbose is set to False.

    Returns:
        Trained Model with optimized ``fraction`` parameter.
    """
    tuning_args = {
        "model": model,
        "supervised_target": supervised_target,
        "supervised_type": supervised_type,
        "supervised_estimator": supervised_estimator,
        "method": method,
        "optimize": optimize,
        "custom_grid": custom_grid,
        "fold": fold,
        "fit_kwargs": fit_kwargs,
        "groups": groups,
        "round": round,
        "verbose": verbose,
    }
    return _CURRENT_EXPERIMENT.tune_model(**tuning_args)
# not using check_if_global_is_not_none on purpose
def predict_model(model, data: pd.DataFrame) -> pd.DataFrame:
    """Generate anomaly labels for new data using a trained model.

    Unlike most functions in this module, this one deliberately works
    without a prior ``setup`` call: when no experiment is active, a fresh
    one is created on the fly.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> knn_predictions = predict_model(model = knn, data = unseen_data)

    model: scikit-learn compatible object
        Trained model object.

    data : pandas.DataFrame
        Shape (n_samples, n_features) where n_samples is the number of
        samples and n_features is the number of features.

    Returns:
        pandas.DataFrame

    Warnings
    --------
    - The behavior of the predict_model is changed in version 2.1 without
      backward compatibility. As such, pipelines trained with version
      <= 2.0 may not work for inference with version >= 2.1. Either retrain
      with a newer version or downgrade the version for inference.
    """
    # Explicit is-None check: an existing (even "falsy") experiment object
    # must be reused, not replaced.
    active = _EXPERIMENT_CLASS() if _CURRENT_EXPERIMENT is None else _CURRENT_EXPERIMENT
    return active.predict_model(estimator=model, data=data)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def deploy_model(
    model, model_name: str, authentication: dict, platform: str = "aws",
):
    """Deploy the transformation pipeline and trained model to cloud storage.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> # sets appropriate credentials for the platform as environment variables
    >>> import os
    >>> os.environ["AWS_ACCESS_KEY_ID"] = str("foo")
    >>> os.environ["AWS_SECRET_ACCESS_KEY"] = str("bar")
    >>> deploy_model(model = knn, model_name = 'knn-for-deployment', platform = 'aws', authentication = {'bucket' : 'S3-bucket-name'})

    Platform credentials
    --------------------
    * 'aws': AWS Access Key ID and Secret Key must be set as environment
      variables. See
      https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables
    * 'gcp': a GCP project and service-account key (JSON) must be set up in
      the local environment. See
      https://cloud.google.com/docs/authentication/production
    * 'azure': AZURE_STORAGE_CONNECTION_STRING must be set as an environment
      variable (found under the storage account settings on the Azure
      portal). See
      https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?toc=%2Fpython%2Fazure%2FTOC.json

    model: scikit-learn compatible object
        Trained model object.

    model_name: str
        Name of model.

    authentication: dict
        Applicable authentication tokens:

        * 'aws': {'bucket' : 'S3-bucket-name', 'path': (optional) folder name under the bucket}
        * 'gcp': {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
        * 'azure': {'container': 'azure-container-name'}

    platform: str, default = 'aws'
        Target platform: 'aws', 'gcp' or 'azure'.

    Returns:
        None
    """
    deployment_args = {
        "model": model,
        "model_name": model_name,
        "authentication": authentication,
        "platform": platform,
    }
    return _CURRENT_EXPERIMENT.deploy_model(**deployment_args)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_model(
    model, model_name: str, model_only: bool = False, verbose: bool = True, **kwargs
):
    """Pickle the transformation pipeline and trained model to disk.

    The file is written into the current working directory for later use.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> knn = create_model('knn')
    >>> save_model(knn, 'saved_knn_model')

    model: scikit-learn compatible object
        Trained model object.

    model_name: str
        Name of the model.

    model_only: bool, default = False
        Save only the trained model object instead of the entire pipeline.

    verbose: bool, default = True
        Success message is not printed when verbose is set to False.

    **kwargs:
        Additional keyword arguments to pass to joblib.dump().

    Returns:
        Tuple of the model object and the filename.
    """
    persist_args = {
        "model": model,
        "model_name": model_name,
        "model_only": model_only,
        "verbose": verbose,
    }
    # Unpacking both dicts keeps the TypeError on duplicated keywords.
    return _CURRENT_EXPERIMENT.save_model(**persist_args, **kwargs)
# not using check_if_global_is_not_none on purpose
def load_model(
    model_name,
    platform: Optional[str] = None,
    authentication: Optional[Dict[str, str]] = None,
    verbose: bool = True,
):
    """Load a previously saved pipeline, locally or from cloud storage.

    Works without a prior ``setup`` call: a throwaway experiment is
    created when none is active.

    Example
    -------
    >>> from pycaret.anomaly import load_model
    >>> saved_knn = load_model('saved_knn_model')

    model_name: str
        Name of the model.

    platform: str, default = None
        Cloud platform to load from: 'aws', 'gcp' or 'azure'. None loads
        from the local filesystem.

    authentication: dict, default = None
        Applicable authentication tokens:

        * 'aws': {'bucket' : 'S3-bucket-name'}
        * 'gcp': {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}
        * 'azure': {'container': 'azure-container-name'}

    verbose: bool, default = True
        Success message is not printed when verbose is set to False.

    Returns:
        Trained Model
    """
    active = _EXPERIMENT_CLASS() if _CURRENT_EXPERIMENT is None else _CURRENT_EXPERIMENT
    loading_args = {
        "model_name": model_name,
        "platform": platform,
        "authentication": authentication,
        "verbose": verbose,
    }
    return active.load_model(**loading_args)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def models(internal: bool = False, raise_errors: bool = True) -> pd.DataFrame:
    """Return the table of models available in the model library.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> all_models = models()

    internal: bool, default = False
        If True, will return extra columns and rows used internally.

    raise_errors: bool, default = True
        If False, will suppress all exceptions, ignoring models that
        couldn't be created.

    Returns:
        pandas.DataFrame
    """
    catalogue_options = {"internal": internal, "raise_errors": raise_errors}
    return _CURRENT_EXPERIMENT.models(**catalogue_options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:
    """Return a table of experiment logs.

    Only works when ``log_experiment`` was True when initializing the
    ``setup`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly, log_experiment = True)
    >>> knn = create_model('knn')
    >>> exp_logs = get_logs()

    experiment_name: str, default = None
        When None, the current active run is used.

    save: bool, default = False
        When set to True, a csv file is saved in the current working
        directory.

    Returns:
        pandas.DataFrame
    """
    log_options = {"experiment_name": experiment_name, "save": save}
    return _CURRENT_EXPERIMENT.get_logs(**log_options)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_config(variable: str):
    """Retrieve a global variable created by the ``setup`` function.

    Accessible variables:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> X = get_config('X')

    Returns:
        Global variable
    """
    return _CURRENT_EXPERIMENT.get_config(variable=variable)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def set_config(variable: str, value):
    """Reset one of the global variables created by ``setup``.

    Accessible variables:

    - X: Transformed dataset (X)
    - data_before_preprocess: data before preprocessing
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline configured through setup
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment set through setup
    - logging_param: log_experiment param set through setup
    - log_plots_param: log_plots param set through setup
    - USI: Unique session ID parameter set through setup
    - gpu_param: use_gpu param configured through setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> set_config('seed', 123)

    Returns:
        None
    """
    return _CURRENT_EXPERIMENT.set_config(variable=variable, value=value)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def save_config(file_name: str):
    """Pickle all global variables to a file.

    Allows a later session to resume without rerunning ``setup``
    (see ``load_config``).

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> anomaly = get_data('anomaly')
    >>> from pycaret.anomaly import *
    >>> exp_name = setup(data = anomaly)
    >>> save_config('myvars.pkl')

    Returns:
        None
    """
    return _CURRENT_EXPERIMENT.save_config(file_name=file_name)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def load_config(file_name: str):
    """Load global variables from a pickle file into the Python environment.

    Counterpart of ``save_config``.

    Example
    -------
    >>> from pycaret.anomaly import load_config
    >>> load_config('myvars.pkl')

    Returns:
        Global variables
    """
    return _CURRENT_EXPERIMENT.load_config(file_name=file_name)
def get_outliers(
    data,
    model: Union[str, Any] = "knn",
    fraction: float = 0.05,
    fit_kwargs: Optional[dict] = None,
    preprocess: bool = True,
    imputation_type: str = "simple",
    iterative_imputation_iters: int = 5,
    categorical_features: Optional[List[str]] = None,
    categorical_imputation: str = "mode",
    categorical_iterative_imputer: Union[str, Any] = "lightgbm",
    ordinal_features: Optional[Dict[str, list]] = None,
    high_cardinality_features: Optional[List[str]] = None,
    high_cardinality_method: str = "frequency",
    numeric_features: Optional[List[str]] = None,
    numeric_imputation: str = "mean",  # method 'zero' added in pycaret==2.1
    numeric_iterative_imputer: Union[str, Any] = "lightgbm",
    date_features: Optional[List[str]] = None,
    ignore_features: Optional[List[str]] = None,
    normalize: bool = False,
    normalize_method: str = "zscore",
    transformation: bool = False,
    transformation_method: str = "yeo-johnson",
    handle_unknown_categorical: bool = True,
    unknown_categorical_method: str = "least_frequent",
    pca: bool = False,
    pca_method: str = "linear",
    pca_components: Optional[float] = None,
    ignore_low_variance: bool = False,
    combine_rare_levels: bool = False,
    rare_level_threshold: float = 0.10,
    bin_numeric_features: Optional[List[str]] = None,
    remove_multicollinearity: bool = False,
    multicollinearity_threshold: float = 0.9,
    remove_perfect_collinearity: bool = False,
    group_features: Optional[List[str]] = None,
    group_names: Optional[List[str]] = None,
    n_jobs: Optional[int] = -1,
    session_id: Optional[int] = None,
    system_log: Union[bool, logging.Logger] = True,
    log_experiment: bool = False,
    experiment_name: Optional[str] = None,
    log_plots: Union[bool, list] = False,
    log_profile: bool = False,
    log_data: bool = False,
    profile: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """One-shot anomaly detection: setup, train and label in a single call.

    Callable from any external environment without requiring setup
    initialization. Preprocessing parameters mirror ``setup``; ``model``,
    ``fraction``, ``fit_kwargs`` and ``**kwargs`` mirror ``create_model``.
    A local experiment is used, so the module-level experiment state is
    left untouched.

    Returns:
        pandas.DataFrame with anomaly labels assigned by the trained model.
    """
    experiment = _EXPERIMENT_CLASS()

    # Non-interactive defaults: no HTML monitor, no dtype confirmation
    # prompt, no printed output.
    setup_params = dict(
        data=data,
        preprocess=preprocess,
        imputation_type=imputation_type,
        iterative_imputation_iters=iterative_imputation_iters,
        categorical_features=categorical_features,
        categorical_imputation=categorical_imputation,
        categorical_iterative_imputer=categorical_iterative_imputer,
        ordinal_features=ordinal_features,
        high_cardinality_features=high_cardinality_features,
        high_cardinality_method=high_cardinality_method,
        numeric_features=numeric_features,
        numeric_imputation=numeric_imputation,
        numeric_iterative_imputer=numeric_iterative_imputer,
        date_features=date_features,
        ignore_features=ignore_features,
        normalize=normalize,
        normalize_method=normalize_method,
        transformation=transformation,
        transformation_method=transformation_method,
        handle_unknown_categorical=handle_unknown_categorical,
        unknown_categorical_method=unknown_categorical_method,
        pca=pca,
        pca_method=pca_method,
        pca_components=pca_components,
        ignore_low_variance=ignore_low_variance,
        combine_rare_levels=combine_rare_levels,
        rare_level_threshold=rare_level_threshold,
        bin_numeric_features=bin_numeric_features,
        remove_multicollinearity=remove_multicollinearity,
        multicollinearity_threshold=multicollinearity_threshold,
        remove_perfect_collinearity=remove_perfect_collinearity,
        group_features=group_features,
        group_names=group_names,
        n_jobs=n_jobs,
        html=False,
        session_id=session_id,
        system_log=system_log,
        log_experiment=log_experiment,
        experiment_name=experiment_name,
        log_plots=log_plots,
        log_profile=log_profile,
        log_data=log_data,
        silent=True,
        verbose=False,
        profile=profile,
    )
    experiment.setup(**setup_params)

    trained = experiment.create_model(
        model=model, fraction=fraction, fit_kwargs=fit_kwargs, verbose=False, **kwargs,
    )
    return experiment.assign_model(trained, verbose=False)
| [
2,
19937,
25,
1052,
24335,
46254,
198,
2,
6434,
25,
46488,
89,
12104,
1279,
76,
2577,
89,
13,
7344,
31,
4188,
641,
84,
13,
6888,
29,
198,
2,
13789,
25,
17168,
198,
2,
13868,
25,
9485,
34,
8984,
362,
13,
17,
13,
15,
198,
2,
458... | 2.550115 | 18,687 |
# URL configuration for this Django app: maps the 'evaluate-architecture'
# endpoint to the EvaluateArchitecture class-based view.
from django.urls import path
from . import views

urlpatterns = [
    path('evaluate-architecture', views.EvaluateArchitecture.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
49786,
12,
998,
5712,
495,
3256,
5009,
13,
36,
2100,
4985,
19895,
5712,
495,
13,
29... | 2.84 | 50 |
# Generated by Django 3.1.4 on 2021-09-10 15:43
from django.db import migrations
import markdownx.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
2931,
12,
940,
1315,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
1317,
2902,
87,
13,
27530,
628
] | 2.891892 | 37 |
# Public API of this module.
__all__ = ['CollectorManager']

import concurrent.futures
from spaceone.core.manager import BaseManager
from datetime import datetime, timedelta
from spaceone.inventory.error.custom import *
from spaceone.inventory.model.server import *
from spaceone.inventory.libs.schema.base import ReferenceModel
from pprint import pprint

# NOTE(review): `logging` is referenced here but no `import logging` is
# visible in this chunk -- confirm it is imported elsewhere in the file.
_LOGGER = logging.getLogger(__name__)

# Module-level tuning constants.
# presumably the aggregation modes applied when collapsing collected
# samples ('max' / 'avg') -- verify against usage.
COLLECTIVE_STATE = ['max', 'avg']
# 86400 seconds = 24 hours.
DEFAULT_INTERVAL = 86400
# assumed upper bound on concurrent workers (concurrent.futures is
# imported above) -- TODO confirm where it is applied.
MAX_WORKER = 20
# assumed maximum number of chunks a workload is divided into -- TODO
# confirm where it is applied.
MAX_DIVIDING_COUNT = 20
| [
834,
439,
834,
796,
37250,
31337,
273,
13511,
20520,
198,
198,
11748,
24580,
13,
69,
315,
942,
198,
6738,
2272,
505,
13,
7295,
13,
37153,
1330,
7308,
13511,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
2272,
5... | 3.206897 | 145 |
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertModel
from ..esim.layers import Seq2SeqEncoder
from ..esim.utils import replace_masked
class BERTBaseline(BertPreTrainedModel):
"""
ab、ac交互并编码
"""
@staticmethod
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
13,
20471,
1330,
6372,
14539,
28338,
43,
793,
198,
6738,
6121,
364,
13,
4666,
10809,
62,
4835,
1330,
22108,
6719,
2898,... | 2.779528 | 127 |
# Cash-dispenser breakdown: split the requested amount into R$50/20/10/5
# notes and R$1 coins, printing only the denominations actually used.
valor_total = int(input('Qual valor ira sacar? R$'))

# A non-positive amount dispenses nothing (mirrors the original greedy
# loop, which broke immediately in that case).
restante = max(valor_total, 0)
cedula50, restante = divmod(restante, 50)
cedula20, restante = divmod(restante, 20)
cedula10, restante = divmod(restante, 10)
cedula5, restante = divmod(restante, 5)
moeda1 = restante

if cedula50 > 0:
    print(f'Cedulas R$50: {cedula50}')
if cedula20 > 0:
    print(f'Cedulas R$20: {cedula20}')
if cedula10 > 0:
    print(f'Cedulas R$10: {cedula10}')
if cedula5 > 0:
    print(f'Cedulas R$5: {cedula5}')
if moeda1 > 0:
    print(f'Moedas R$1: {moeda1}')
| [
2100,
273,
62,
23350,
796,
493,
7,
15414,
10786,
46181,
1188,
273,
4173,
64,
5360,
283,
30,
371,
3,
6,
4008,
198,
771,
4712,
1120,
796,
269,
276,
4712,
1238,
796,
269,
276,
4712,
940,
796,
269,
276,
4712,
20,
796,
6941,
18082,
16,... | 1.87799 | 418 |
#!/usr/bin/env python3
# Minimal smoke test for the PyKDL bindings: construct a 3-D vector and
# print it.
import PyKDL as kdl

test = kdl.Vector(0, 0, -0.2)  # components (x=0, y=0, z=-0.2)
print(test)
# 
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
9485,
42,
19260,
355,
479,
25404,
628,
198,
9288,
796,
479,
25404,
13,
38469,
7,
15,
11,
657,
11,
532,
15,
13,
17,
8,
198,
4798,
7,
9288,
8,
198,
2,
220,
220,
2... | 1.978723 | 47 |
import pytest

# Opt the helper module into pytest's assertion rewriting so its bare
# `assert` statements produce detailed failure messages. Must run before
# the module is imported anywhere.
pytest.register_assert_rewrite('tests.testing_dataframe')
| [
11748,
12972,
9288,
628,
198,
9078,
9288,
13,
30238,
62,
30493,
62,
1809,
6525,
10786,
41989,
13,
33407,
62,
7890,
14535,
11537,
198
] | 3.217391 | 23 |
# Author: Betterman
# -*- coding = utf-8 -*-
# @Time : 2020/8/27 14:56
# @File : acc_topK.py
# @Software : PyCharm
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf

tf.random.set_seed(2467)


def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in ``topk``.

    Fix: the original script called ``accuracy`` without ever defining or
    importing it, which raised NameError at runtime.

    output: [batch, classes] tensor of class scores/probabilities.
    target: [batch] tensor of integer class labels.
    Returns a list of floats, one accuracy value per requested k.
    """
    maxk = max(topk)
    batch_size = target.shape[0]

    # Indices of the maxk best-scoring classes per sample, transposed to
    # [maxk, batch] so row j holds every sample's (j+1)-th best guess.
    pred = tf.math.top_k(output, maxk).indices
    pred = tf.transpose(pred, perm=[1, 0])
    target_ = tf.broadcast_to(target, pred.shape)
    correct = tf.equal(pred, target_)

    res = []
    for k in topk:
        # A sample is top-k correct if its label appears among the first
        # k predictions (at most one row can match per sample).
        correct_k = tf.cast(tf.reshape(correct[:k], [-1]), dtype=tf.float32)
        correct_k = tf.reduce_sum(correct_k)
        acc = float(correct_k * (100.0 / batch_size))
        res.append(acc)
    return res


# compute accuracy
# 10 samples drawn from a normal distribution, 6 classes
output = tf.random.normal([10, 6])
# softmax so the 6 class probabilities of each sample sum to 1
output = tf.math.softmax(output, axis=1)
# maxval=6: draw 10 random labels from the range 0-5
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1,2,3,4,5,6))
print('top-1-6 acc:', acc)
print('top-1-6 acc:', acc) | [
2,
6434,
25,
11625,
805,
201,
198,
2,
532,
9,
12,
19617,
796,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2488,
7575,
1058,
12131,
14,
23,
14,
1983,
1478,
25,
3980,
201,
198,
2,
2488,
8979,
1058,
697,
62,
4852,
42,
13,
9078,
... | 1.897959 | 343 |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""database plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, transaction, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import domains
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
@util.export
class Plugin(plugin.PluginBase):
"""database plugin."""
class DBTransaction(transaction.TransactionElement):
"""yum transaction element."""
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
name=oengcommcons.Stages.DB_CONNECTION_AVAILABLE,
)
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
)
# vim: expandtab tabstop=4 shiftwidth=4
| [
2,
198,
2,
19643,
2265,
12,
18392,
12,
40406,
1377,
19643,
2265,
3113,
9058,
198,
2,
15069,
357,
34,
8,
2211,
12,
4626,
2297,
10983,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
... | 3.12963 | 486 |
# Generated by Django 2.0.3 on 2018-07-25 05:57
from django.db import migrations
import django.db.models.manager
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
18,
319,
2864,
12,
2998,
12,
1495,
8870,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
37153,
628
] | 2.875 | 40 |
#!/usr/bin/env python3
import base64
import getpass
import os
import pprint
import sys
import time
from contextlib import suppress
from pathlib import Path
from time import sleep
from typing import Dict, List
from broker import cfg
from broker._utils._log import br, log, ok
from broker._utils.tools import _remove, exit_after, mkdir, read_json
from broker._utils.web3_tools import get_tx_status
from broker.config import env, logging, setup_logger
from broker.errors import QuietExit
from broker.imports import connect
from broker.lib import (
calculate_size,
eblocbroker_function_call,
is_dir,
remove_files,
run,
run_stdout_to_file,
state,
subprocess_call,
)
from broker.libs import _git, eudat, gdrive, slurm
from broker.utils import (
WHERE,
StorageID,
byte_to_mb,
bytes32_to_ipfs,
eth_address_to_md5,
is_dir_empty,
print_tb,
read_file,
remove_empty_files_and_folders,
)
Ebb = cfg.Ebb
connect()
class Common:
"""Prevent "Class" to have attribute "method" mypy warnings."""
@exit_after(900) # timeout in 15 minuntes
if __name__ == "__main__":
kwargs = {
"job_key": sys.argv[1],
"index": sys.argv[2],
"received_block_number": sys.argv[3],
"folder_name": sys.argv[4],
"slurm_job_id": sys.argv[5],
}
try:
cloud_storage = ENDCODE(**kwargs)
cloud_storage.run()
except QuietExit:
pass
except Exception as e:
print_tb(e)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
2779,
2414,
198,
11748,
651,
6603,
198,
11748,
28686,
198,
11748,
279,
4798,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4732,
8019,
1330,
18175,
198,
6738,
3108,
8019... | 2.437398 | 615 |
from typing import Optional, Protocol
import pytest
EXPECTED_URLS = [
"https://habr.com/kek/v2/articles/?hub=python&sort=all&fl=ru&hl=ru&page=1",
None,
]
@pytest.mark.parametrize(
"page_number, expected_url", ([1, EXPECTED_URLS[0]], [100, EXPECTED_URLS[1]])
)
| [
6738,
19720,
1330,
32233,
11,
20497,
198,
198,
11748,
12972,
9288,
628,
198,
49864,
9782,
1961,
62,
4261,
6561,
796,
685,
198,
220,
220,
220,
366,
5450,
1378,
5976,
81,
13,
785,
14,
365,
74,
14,
85,
17,
14,
26845,
20924,
40140,
28,
... | 2.224 | 125 |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import RegressionProblems as RP
import time
from multiprocessing import Lock
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,
311,
15922,
198,
11748,
3310,
2234,
2964,
22143,
355,
25812,
198,
11748,
640,
198,
6... | 3.666667 | 48 |
import gzip
import os
import json
from .model import Sitting, Action, Teacher, IPupil, APupil, Storyline, StorylineItem, Course, Challenge
class RoboDAO:
"""Gives access to the Robo model objects as defined in the model module. Objects
are preloaded from the ano-directory which contains the Robomind Academy
sitting datafiles"""
TYPE2NAME = {
'Action': None,
'Sitting': None,
'APupil': "apupil",
'IPupil': "ipupil",
'Teacher': "teacher",
'Challenge': "challenge",
'Course': "course",
'Storyline': "storyline",
'StorylineItem': "storylineitem"
}
TYPE2CLASS = {
'Action': Action,
'Sitting': Sitting,
'APupil': APupil,
'IPupil': IPupil,
'Teacher': Teacher,
'Challenge': Challenge,
'Course': Course,
'Storyline': Storyline,
'StorylineItem': StorylineItem
}
def preload(self):
"""Preload model objects as defined in the model module from the
anonymised sittings file in the ano-directory."""
# load the objects
for otype, fname in self.TYPE2NAME.items():
if fname:
path = os.path.join(self.anodir, fname + ".gz")
if os.path.isfile(path):
with gzip.open(path, "rt") as handler:
for line in handler:
omap = json.loads(line)
cls = self.TYPE2CLASS[otype]
item = cls.from_map(omap, self)
self.caches[otype][item.id] = item
| [
11748,
308,
13344,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
764,
19849,
1330,
46547,
11,
7561,
11,
32019,
11,
6101,
929,
346,
11,
3486,
929,
346,
11,
8362,
1370,
11,
8362,
1370,
7449,
11,
20537,
11,
13879,
628,
198,
4871,
3970... | 2.067259 | 788 |
import datetime
import json
import time
import pytz
from sphinxsearch.lookups import sphinx_lookups
from django.core import exceptions
from django.db import models
class SphinxField(models.TextField):
""" Non-selectable indexed string field
In sphinxsearch config terms, sql_field_string or rt_field.
"""
class_lookups = sphinx_lookups.copy()
class SphinxDateTimeField(models.FloatField):
""" Sphinx timestamp field for sql_attr_timestamp and rt_attr_timestamp.
NB: sphinxsearch doesn't store microseconds, if necessary, describe
field as sql_attr_float in config.
"""
# noinspection PyMethodMayBeStatic,PyUnusedLocal
# noinspection PyUnusedLocal,PyMethodMayBeStatic
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
640,
198,
198,
11748,
12972,
22877,
198,
198,
6738,
599,
20079,
87,
12947,
13,
5460,
4739,
1330,
599,
20079,
87,
62,
5460,
4739,
198,
6738,
42625,
14208,
13,
7295,
1330,
13269,
198,
6738... | 3.037657 | 239 |
from gym.envs.registration import register
from d4rl.gym_mujoco import gym_envs
HOPPER_RANDOM_SCORE = -20.272305
HALFCHEETAH_RANDOM_SCORE = -280.178953
WALKER_RANDOM_SCORE = 1.629008
ANT_RANDOM_SCORE = -325.6
HOPPER_EXPERT_SCORE = 3234.3
HALFCHEETAH_EXPERT_SCORE = 12135.0
WALKER_EXPERT_SCORE = 4592.3
ANT_EXPERT_SCORE = 3879.7
# Single Policy datasets
register(
id='hopper-medium-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_hopper_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HOPPER_RANDOM_SCORE,
'ref_max_score': HOPPER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium.hdf5'
}
)
register(
id='halfcheetah-medium-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_cheetah_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HALFCHEETAH_RANDOM_SCORE,
'ref_max_score': HALFCHEETAH_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium.hdf5'
}
)
register(
id='walker2d-medium-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_walker_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': WALKER_RANDOM_SCORE,
'ref_max_score': WALKER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium.hdf5'
}
)
register(
id='hopper-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_hopper_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HOPPER_RANDOM_SCORE,
'ref_max_score': HOPPER_EXPERT_SCORE,
'dataset_url': 'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_expert.hdf5'
}
)
register(
id='halfcheetah-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_cheetah_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HALFCHEETAH_RANDOM_SCORE,
'ref_max_score': HALFCHEETAH_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_expert.hdf5'
}
)
register(
id='walker2d-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_walker_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': WALKER_RANDOM_SCORE,
'ref_max_score': WALKER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_expert.hdf5'
}
)
register(
id='hopper-random-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_hopper_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HOPPER_RANDOM_SCORE,
'ref_max_score': HOPPER_EXPERT_SCORE,
'dataset_url': 'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_random.hdf5'
}
)
register(
id='halfcheetah-random-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_cheetah_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HALFCHEETAH_RANDOM_SCORE,
'ref_max_score': HALFCHEETAH_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_random.hdf5'
}
)
register(
id='walker2d-random-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_walker_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': WALKER_RANDOM_SCORE,
'ref_max_score': WALKER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_random.hdf5'
}
)
# Mixed datasets
register(
id='hopper-medium-replay-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_hopper_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HOPPER_RANDOM_SCORE,
'ref_max_score': HOPPER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_mixed.hdf5'
},
)
register(
id='walker2d-medium-replay-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_walker_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': WALKER_RANDOM_SCORE,
'ref_max_score': WALKER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker_mixed.hdf5'
}
)
register(
id='halfcheetah-medium-replay-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_cheetah_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HALFCHEETAH_RANDOM_SCORE,
'ref_max_score': HALFCHEETAH_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_mixed.hdf5'
}
)
# Mixtures of random/medium and experts
register(
id='walker2d-medium-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_walker_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': WALKER_RANDOM_SCORE,
'ref_max_score': WALKER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium_expert.hdf5'
}
)
register(
id='halfcheetah-medium-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_cheetah_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HALFCHEETAH_RANDOM_SCORE,
'ref_max_score': HALFCHEETAH_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium_expert.hdf5'
}
)
register(
id='hopper-medium-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_hopper_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': HOPPER_RANDOM_SCORE,
'ref_max_score': HOPPER_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium_expert_v1.hdf5'
}
)
register(
id='ant-medium-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium_expert.hdf5'
}
)
register(
id='ant-medium-replay-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_mixed.hdf5'
}
)
register(
id='ant-medium-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium.hdf5'
}
)
register(
id='ant-random-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random.hdf5'
}
)
register(
id='ant-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_expert.hdf5'
}
)
register(
id='ant-random-expert-v0',
entry_point='d4rl.gym_mujoco.gym_envs:get_ant_env',
max_episode_steps=1000,
kwargs={
'ref_min_score': ANT_RANDOM_SCORE,
'ref_max_score': ANT_EXPERT_SCORE,
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random_expert.hdf5'
}
)
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
6738,
288,
19,
45895,
13,
1360,
76,
62,
76,
23577,
25634,
1330,
11550,
62,
268,
14259,
198,
198,
39,
3185,
18973,
62,
49,
6981,
2662,
62,
6173,
6965,
796,
532,
1238,
13,... | 1.890629 | 4,087 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Draw boxes on images processed using gigadetector pipeline.
click n to keep going, escape to stop.
If you press q I'm not sure what will happen
"""
# Import stuff
import sys
import os
import joblib
import cv2
base_path = os.path.expanduser("~") + r"/gigadetector/"
sys.path.append(base_path + r'/gigadetector/')
import utils
#%% set path to final results file, and load data
# includes bboxes, scores, areas, and image paths
# note image paths might change if someone moves images but final node in path
# shouldn't.
processed_image_folder = base_path + r'data/processed/'
# Final bbox and confidence output of faster-rcnn + bbox trimming (bb_analysis_folder.py)
results_file = r'gigafolder_bb_results.pkl' #1801-2648
results_path = processed_image_folder + results_file
with open(results_path, 'rb') as f:
analysis_data = joblib.load(results_path)
#%% Extract it all
all_bboxes = analysis_data['all_bboxes']
all_scores = analysis_data['all_scores']
all_areas = analysis_data['all_areas']
image_paths = analysis_data['all_filepaths']
num_images = len(image_paths)
print(f"There are {num_images} images for which you have detection data.")
print(image_paths)
#%% optional test case
"""
OPTIONAL -- uncomment following to run
This is to run on a single image just to make sure it works for one image
"""
# print("\ngigaviewer Tester\nClick escape to break out, n to move on to next image.\n")
# image_ind = 1
# bboxes = all_bboxes[image_ind]
# scores = all_scores[image_ind]
# image_path = image_paths[image_ind]
# image = cv2.imread(image_path)
# utils.draw_bboxes_scores(image.copy(), bboxes, scores, bb_color = (255, 255, 255),
# name = 'ViewTester', line_width = 10, text_thickness = 3,
# shape = (900, 1000), xy = (130, 50))
#%% If test case seems ok, start from ind you want, and cycle through images
print("\ngigaimage inspector\nClick escape to break out, n to move on to next image.\n")
start_image_ind = 0
window_open = False
for ind in range(start_image_ind, num_images):
print(f"Working on image {ind} out of {num_images-1}")
bboxes = all_bboxes[ind]
scores = all_scores[ind]
image_path = image_paths[ind]
print(f"\tLoading{image_path}")
boxed_image = utils.put_bboxes_scores(cv2.imread(image_path), bboxes, scores,
bb_color = (255, 255, 255),
line_width = 10, text_thickness = 3)
if window_open:
cv2.destroyWindow(str(ind-1))
else:
window_open = True
utils.cv_loopshow(boxed_image,
name = str(ind),
shape = (950, 950),
xy = (130, 40))
k = cv2.waitKey()
if k == 27:
break
elif k == ord('n'):
continue
cv2.destroyAllWindows()
print("\nDONE!!!")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
25302,
10559,
319,
4263,
13686,
1262,
12526,
324,
316,
9250,
11523,
13,
198,
198,
12976,
299,
284,
1394... | 2.425941 | 1,195 |
import pydiffvg_tensorflow as pydiffvg
import tensorflow as tf
import skimage
import numpy as np
canvas_width, canvas_height = 256, 256
num_control_points = tf.constant([2, 2, 2])
points = tf.constant([[120.0, 30.0], # base
[150.0, 60.0], # control point
[ 90.0, 198.0], # control point
[ 60.0, 218.0], # base
[ 90.0, 180.0], # control point
[200.0, 65.0], # control point
[210.0, 98.0], # base
[220.0, 70.0], # control point
[130.0, 55.0]]) # control point
path = pydiffvg.Path(num_control_points = num_control_points,
points = points,
is_closed = True)
shapes = [path]
path_group = pydiffvg.ShapeGroup( shape_ids = tf.constant([0], dtype=tf.int32),
fill_color = tf.constant([0.3, 0.6, 0.3, 1.0]))
shape_groups = [path_group]
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.render
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(0), # seed
*scene_args)
# The output image is in linear RGB space. Do Gamma correction before saving the image.
pydiffvg.imwrite(img, 'results/single_curve_tf/target.png', gamma=2.2)
target = tf.identity(img)
# Move the path to produce initial guess
# normalize points for easier learning rate
points_n = tf.Variable([[100.0/256.0, 40.0/256.0], # base
[155.0/256.0, 65.0/256.0], # control point
[100.0/256.0, 180.0/256.0], # control point
[ 65.0/256.0, 238.0/256.0], # base
[100.0/256.0, 200.0/256.0], # control point
[170.0/256.0, 55.0/256.0], # control point
[220.0/256.0, 100.0/256.0], # base
[210.0/256.0, 80.0/256.0], # control point
[140.0/256.0, 60.0/256.0]]) # control point
color = tf.Variable([0.3, 0.2, 0.5, 1.0])
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(1), # seed
*scene_args)
pydiffvg.imwrite(img, 'results/single_curve_tf/init.png', gamma=2.2)
optimizer = tf.compat.v1.train.AdamOptimizer(1e-2)
for t in range(100):
print('iteration:', t)
with tf.GradientTape() as tape:
# Forward pass: render the image.
path.points = points_n * 256
path_group.fill_color = color
# Important to use a different seed every iteration, otherwise the result
# would be biased.
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(t+1), # seed,
*scene_args)
loss_value = tf.reduce_sum(tf.square(img - target))
print(f"loss_value: {loss_value}")
pydiffvg.imwrite(img, 'results/single_curve_tf/iter_{}.png'.format(t))
grads = tape.gradient(loss_value, [points_n, color])
print(grads)
optimizer.apply_gradients(zip(grads, [points_n, color]))
# Render the final result.
path.points = points_n * 256
path_group.fill_color = color
scene_args = pydiffvg.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
img = render(tf.constant(256), # width
tf.constant(256), # height
tf.constant(2), # num_samples_x
tf.constant(2), # num_samples_y
tf.constant(101), # seed
*scene_args)
# Save the images and differences.
pydiffvg.imwrite(img, 'results/single_curve_tf/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/single_curve_tf/iter_%d.png", "-vb", "20M",
"results/single_curve_tf/out.mp4"])
| [
11748,
279,
5173,
733,
45119,
62,
83,
22854,
11125,
355,
279,
5173,
733,
45119,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
1341,
9060,
198,
11748,
299,
32152,
355,
45941,
198,
198,
5171,
11017,
62,
10394,
11,
21978,
62,
1701... | 1.996465 | 2,263 |
#!/usr/tce/bin/python
import sys
import random
if len(sys.argv) < 2 :
usage = '''
usage: add_overlap.py list_base_name number_of_lists overlap_percent
example: if your lists are t0_list.txt, t1_list.txt and t2_list.txt
you want 30 percent overlap you would run as:
add_overlap.py list.txt 2 30
The output lists names in this example would be:
t0_list.txt.overlap=30 (etc)
The output list will contain 30% more samples;
specifically, t0 will receive 15% of randomly selected
samples from t1 and t2.
The input lists are unchanged
The "excluded" counts in the output files are all set to -1,
because I haven't taken the time to get them correct.
I don't think these are used anyplace in lbann, so this should
be OK.
'''
print usage
exit(9)
#============================================================================
# the List class parses and encapsulate a sample list
# the constructor parses the sample list
#returns a list that contains random samples
# add random samples from some other List to this List
# write final output (sample list file)
#============================================================================
# parse cmd line
base = sys.argv[1]
count = int(sys.argv[2])
overlap = int(sys.argv[3])
the_lists = []
random_samples = []
for j in range(count) :
# instantiate a List object; this holds all information from a sample list
c = List('t' + str(j) + '_' + base)
the_lists.append(c)
# get the random samples from the list; this is the overlap that
# will be added to the other lists
n = c.num_samples()
p = int( (overlap / (count-1))* n / 100)
random_samples.append(c.get_random_samples(p))
# add overlap to the samples
for j in range(count) :
for k in range(count) :
if j != k :
the_lists[j].add_samples(random_samples[k])
# write output files
for x in the_lists :
x.write(overlap)
| [
2,
48443,
14629,
14,
83,
344,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
4738,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
362,
1058,
198,
220,
8748,
796,
705,
7061,
198,
220,
8748,
25,
751,
62,
2502,
37796... | 2.847025 | 706 |
from psiopic2.app.setupwiki import SetupWiki
from psiopic2.app.createcorpus import CreateCorpus
import sys
import logging
from psiopic2.app.ui.logutils import getLogger
from appdirs import AppDirs
from docopt import docopt
import traceback
DOC="""Psiopic2 CLI Tool
Usage:
psiopic2 <command> [options]
Available Commands:
setupwiki
help
buildcorpus
For more information run:
psiopic2 <command> --help
"""
if __name__ == '__main__':
sys.exit(main())
| [
6738,
46231,
16603,
17,
13,
1324,
13,
40406,
15466,
1330,
31122,
32603,
198,
6738,
46231,
16603,
17,
13,
1324,
13,
17953,
10215,
79,
385,
1330,
13610,
45680,
385,
198,
11748,
25064,
198,
11748,
18931,
198,
6738,
46231,
16603,
17,
13,
13... | 2.955975 | 159 |
from asip.services.asip_service import AsipService
import sys | [
6738,
355,
541,
13,
30416,
13,
292,
541,
62,
15271,
1330,
1081,
541,
16177,
198,
11748,
25064
] | 3.588235 | 17 |
"""
* Knight 1.0.0
* https://github.com/RobertoPrevato/Knight
*
* Copyright 2015, Roberto Prevato
* http://ugrose.com
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/MIT
"""
import argparse
separator = "******************************************************\n"
parser = argparse.ArgumentParser(description= "Packs .html templates into .js files, possibly for Angular or Knockout.",
epilog = "{}\n{}".format("author: Roberto Prevato roberto.prevato@gmail.com", separator))
parser.add_argument("-p", "--path", dest= "path",
required=True, help="path to root folder from where to start the research of .html files")
parser.add_argument("-v", "--variable", dest= "templates_variable",
required=False, help="when generating templates in custom mode (no), the name of the global variable where to store templates. For example: $.templates.")
parser.add_argument("-c", "--comment", dest= "comment",
required=False, help="allows to add an extra comment line to generated templates files.")
parser.add_argument("-m", "--mode", dest="mode",
required=False, choices=["ko", "ng", "no"], help="no for custom (default); ng to generate Angular templates; ko to generate Knockout templates")
parser.add_argument("-a", "--appname", dest="appname",
default="app", help="when generating templates for Angular, the name of the application")
parser.add_argument("-u", "--underscoreJsCompile",
dest="underscore_js_compile", default="", help="allows to run UnderscoreJs compilation on templates using the given global variable/function")
args = parser.parse_args()
from lib import ScriptsHelper
main(args)
| [
37811,
198,
1635,
6700,
352,
13,
15,
13,
15,
198,
1635,
3740,
1378,
12567,
13,
785,
14,
15924,
1462,
36854,
5549,
14,
44242,
198,
1635,
198,
1635,
15069,
1853,
11,
32076,
43280,
5549,
198,
1635,
2638,
1378,
1018,
13698,
13,
785,
198,
... | 2.937294 | 606 |
"""cli_core.py: basic metaclass for handling generic tool layout
Acts as global namespace + parent-framework for CLI apps
"""
from os import path
import platform
from datetime import datetime
import warnings
import uuid
from plumbum import cli
import prosper.common.prosper_logging as p_logger
import prosper.common.prosper_config as p_config
import navitron_crons._version as _version
DEFAULT_LOGGER = p_logger.DEFAULT_LOGGER
HERE = path.abspath(path.dirname(__file__))
CONFIG = p_config.ProsperConfig(path.join(HERE, 'navitron_crons.cfg'))
def generate_metadata(
source_name,
source_version
):
"""if you're gonna use noSQL, you gotta have provenance! Adds reliable metadata to records
Args:
source_name (str): name of source script
source_version (str): semantic version of source script
Returns:
:obj:`dict`: specific metadata
"""
now = datetime.utcnow()
write_recipt = str(uuid.uuid1())
metadata_obj = {
'write_recipt': write_recipt,
'data_source': source_name,
'machine_source': platform.node(),
'version': source_version,
'package_version': _version.__version__,
'cron_datetime': now.isoformat()
}
return metadata_obj
def update_which_sde_data(
current_sde_df,
latest_esi_df,
index_key
):
"""validate if current table needs an update
Args:
current_sde_df (:obj:`pandas.DataFrame`): current data (from mongodb)
latest_esi_df (:obj:`pandas.DataFrame`): latest data from REST/ESI
index_key (str): name of column to match on
Returns:
(:obj:`list`): list of keys that need to be updated
"""
pass
class NavitronApplication(cli.Application):
"""parent metaclass for CLI applications
Load default args and CLI environment variables here
"""
logger = DEFAULT_LOGGER
config = CONFIG
conn = None
debug = cli.Flag(
['d', '--debug'],
help='debug mode: run without writing to db'
)
verbose = cli.Flag(
['v', '--verbose'],
help='enable verbose messaging'
)
@cli.switch(
['--config'],
str,
help='Override default config with a local config')
def override_config(self, config_path):
"""override config object with local version"""
self.config = p_config.ProsperConfig(config_path)
@cli.switch(
['--dump-config'],
help='Dump global config, for easy custom setup')
def dump_config(self):
"""dumps config file to stdout for piping into config file"""
with open(path.join(HERE, 'navitron_crons.cfg'), 'r') as cfg_fh:
base_config = cfg_fh.read()
print(base_config)
exit()
def load_logger(self, progname):
"""build a logging object for the script to use"""
log_builder = p_logger.ProsperLogger(
progname,
self.config.get('LOGGING', 'log_path'),
config_obj=self.config
)
if self.verbose:
log_builder.configure_debug_logger()
if not self.debug:
try:
log_builder.configure_discord_logger()
except Exception:
warnings.warn('Unable to config discord logger', RuntimeWarning)
self.logger = log_builder.logger
if __name__ == '__main__':
NavitronApplication.run()
| [
37811,
44506,
62,
7295,
13,
9078,
25,
4096,
1138,
330,
31172,
329,
9041,
14276,
2891,
12461,
198,
198,
6398,
82,
355,
3298,
25745,
1343,
2560,
12,
30604,
329,
43749,
6725,
198,
198,
37811,
198,
6738,
28686,
1330,
3108,
198,
11748,
3859,... | 2.412429 | 1,416 |
import RPi.GPIO as GPIO
import time
import settings
PistonTravelTime = settings.Piston_Reistijd
PinUp = settings.Pin_Omhoog
PinDown = settings.Pin_Omlaag
if __name__ == '__main__':
try:
setup()
except KeyboardInterrupt:
close()
| [
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
11748,
6460,
198,
198,
47,
36363,
33074,
7575,
796,
6460,
13,
47,
36363,
62,
3041,
396,
2926,
67,
198,
28348,
4933,
796,
6460,
13,
28348,
62,
46,
76,
8873,
51... | 2.19685 | 127 |
import socket
import time
import random
TCP_IP1 = '127.0.0.1'
TCP_PORT1 = 5007
transmit = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit.connect((TCP_IP1, TCP_PORT1))
TCP_PORT2 = 5006
transmit2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transmit2.connect((TCP_IP1, TCP_PORT2))
while True:
transmit.send(str(random.randint(0,5000)).encode())
transmit2.send(random.randint(0,5000).encode())
time.sleep(0.5) | [
11748,
17802,
198,
11748,
640,
198,
11748,
4738,
198,
4825,
47,
62,
4061,
16,
220,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
198,
4825,
47,
62,
15490,
16,
796,
5323,
22,
198,
7645,
2781,
796,
17802,
13,
44971,
7,
44971,
13,
85... | 2.383333 | 180 |
from unittest import TestCase
import os
import shutil
from foster.build import Build
from foster.test import Test
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
6738,
17016,
13,
11249,
1330,
10934,
198,
6738,
17016,
13,
9288,
1330,
6208,
628
] | 3.866667 | 30 |
fname = input('Please enter a valid file name: ')
try:
fhand = open(fname)
except:
print('Please enter an existing file name')
exit()
counts = dict()
for line in fhand:
line = line.rstrip()
words = line.split()
if not line.startswith('From ') or len(words) < 1: continue
for word in words:
if word.find(':') == -1:continue
hour, min, sec = word.split(':')
if hour not in counts:
counts[hour] = 1
else:
counts[hour] += 1
t = counts.items()
dl = list()
check = sorted(t)
# This approach uses the sorted method instead of using a list of tuples and the sort method used by list to sort the items.
for key,val in check:
print(key,val)
| [
69,
3672,
796,
5128,
10786,
5492,
3802,
257,
4938,
2393,
1438,
25,
705,
8,
198,
28311,
25,
198,
220,
220,
220,
277,
4993,
796,
1280,
7,
69,
3672,
8,
198,
16341,
25,
198,
220,
220,
220,
3601,
10786,
5492,
3802,
281,
4683,
2393,
143... | 2.508651 | 289 |
[
{
'date': '2011-01-01',
'description': 'Año Nuevo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-01-06',
'description': 'Epifanía del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-02-28',
'description': 'Día de Andalucía',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-03-01',
'description': 'Día de las Illes Balears',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'F'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-03-19',
'description': 'San José',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RF'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-21',
'description': 'Jueves Santo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-04-22',
'description': 'Viernes Santo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-23',
'description': 'Fiesta de Castilla y León',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'F'
},
{
'date': '2011-04-23',
'description': 'San Jorge / Día de Aragón',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-04-24',
'description': 'Pascua',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RV'
},
{
'date': '2011-04-25',
'description': 'Lunes de Pascua',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'RV'
},
{
'date': '2011-05-01',
'description': 'Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-05-02',
'description': 'Fiesta de la Comunidad de Madrid',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-05-02',
'description': 'Lunes siguiente a la Fiesta del Trabajo',
'locale': 'es-ES',
'notes': '',
'region': 'VC',
'type': 'F'
},
{
'date': '2011-05-17',
'description': 'Día de las Letras Gallegas',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'F'
},
{
'date': '2011-05-30',
'description': 'Día de Canarias',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'F'
},
{
'date': '2011-05-31',
'description': 'Día de Castilla-La Mancha',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de la Región de Murcia',
'locale': 'es-ES',
'notes': '',
'region': 'MC',
'type': 'F'
},
{
'date': '2011-06-09',
'description': 'Día de La Rioja',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'F'
},
{
'date': '2011-06-13',
'description': 'Lunes de Pascua Granada',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'F'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'CM',
'type': 'RV'
},
{
'date': '2011-06-23',
'description': 'Corpus Christi',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RV'
},
{
'date': '2011-06-24',
'description': 'San Juan',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'MD',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol',
'locale': 'es-ES',
'notes': '',
'region': 'RI',
'type': 'RF'
},
{
'date': '2011-07-25',
'description': 'Santiago Apóstol / Día Nacional de Galicia',
'locale': 'es-ES',
'notes': '',
'region': 'GA',
'type': 'RF'
},
{
'date': '2011-07-28',
'description': 'Día de las Instituciones de Cantabria',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'F'
},
{
'date': '2011-08-15',
'description': 'Asunción de la Virgen',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-09-08',
'description': 'Día de Asturias',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'F'
},
{
'date': '2011-09-08',
'description': 'Día de Extremadura',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'F'
},
{
'date': '2011-09-15',
'description': 'La Bien Aparecida',
'locale': 'es-ES',
'notes': '',
'region': 'CB',
'type': 'RF'
},
{
'date': '2011-10-12',
'description': 'Fiesta Nacional de España',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-10-25',
'description': 'Día del País Vasco-Euskadiko Eguna',
'locale': 'es-ES',
'notes': '',
'region': 'PV',
'type': 'F'
},
{
'date': '2011-11-01',
'description': 'Todos los Santos',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-07',
'description': 'Fiesta del Sacrificio (Aid El Kebir)',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RV'
},
{
'date': '2011-11-07',
'description': 'Lunes siguiente a la Fiesta del Sacrificio (Eidul Adha)',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RV'
},
{
'date': '2011-12-06',
'description': 'Día de la Constitución Española',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-12-08',
'description': 'Inmaculada Concepción',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-25',
'description': 'Natividad del Señor',
'locale': 'es-ES',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AR',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'AS',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CE',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CL',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CN',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'CT',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'EX',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'IB',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'ML',
'type': 'RF'
},
{
'date': '2011-12-26',
'description': 'San Esteban',
'locale': 'es-ES',
'notes': '',
'region': 'NC',
'type': 'RF'
}
] | [
58,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
705,
4475,
10354,
705,
9804,
12,
486,
12,
486,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
705,
11213,
10354,
705,
32,
31329,
399,
518,
13038,
3256,
198,
220,... | 1.705638 | 9,135 |
""" Plotting tools for the simulation framework
Styling tools:
* :py:class:`set_plot_style`: Plot style context manager
* :py:class:`colorwheel`: Custom color palettes
Plotting Functions:
* :py:func:`plot_3d_sphere_cloud`: Plot a sphere cloud in 3D
Axis element functions:
* :py:func:`add_lineplot`: Add lineplots to an axis
* :py:func:`add_histogram`: Add a histogram to an axis
Utilities:
* :py:func:`bootstrap_ci`: Bootstrap estimate of confidence intervals
* :py:func:`get_histogram`: Get a kernel smoothed histogram from binned data
"""
# Imports
import itertools
from contextlib import ContextDecorator
from typing import List, Tuple, Optional, Dict, Callable
import pathlib
# 3rd party imports
import numpy as np
from scipy.stats import gamma, gaussian_kde
from scipy.integrate import simps
import pandas as pd
import seaborn as sns
import matplotlib.cm as mplcm
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
from mpl_toolkits.mplot3d import Axes3D
# Our own imports
from .consts import (
PALETTE, RC_PARAMS_DARK, RC_PARAMS_LIGHT
)
# Styling
class set_plot_style(ContextDecorator):
""" Context manager for styling matplotlib plots
Basic usage as a context manager
.. code-block:: python
with set_plot_style('dark') as style:
# In here, plots are 'dark' styled
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
# Save the plot with correct background colors
style.savefig('some_fig.png')
Can also be used as a decorator
.. code-block:: python
@set_plot_style('dark')
def plot_something():
# In here, plots are 'dark' styled
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
plt.show()
For more complex use, see the
`Matplotlib rcParam <http://matplotlib.org/users/customizing.html>`_
docs which list all the parameters that can be tweaked.
:param str style:
One of 'dark', 'minimal', 'poster', 'dark_poster', 'default'
"""
_active_styles = []
@property
@classmethod
def get_active_style(cls) -> Optional[str]:
""" Get the currently active style, or None if nothing is active """
if cls._active_styles:
return cls._active_styles[-1]
return None
def twinx(self, ax: Optional = None):
""" Create a second axis sharing the x axis
:param Axes ax:
The axis instance to set to off
"""
if ax is None:
ax = plt.gca()
ax2 = ax.twinx()
# Fix up the defaults to make sense
ax2.spines['right'].set_visible(True)
ax2.tick_params(axis='y',
labelcolor=self.axis_color,
color=self.axis_color,
left=True)
return ax2
def set_axis_off(self, ax: Optional = None):
""" Remove labels and ticks from the axis
:param Axes ax:
The axis instance to set to off
"""
if ax is None:
ax = plt.gca()
# Blank all the things
ax.set_xticks([])
ax.set_yticks([])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_axis_off()
def rotate_xticklabels(self, ax,
rotation: float,
horizontalalignment: str = 'center',
verticalalignment: str = 'center',
rotation_mode: str = 'default'):
""" Rotate the x ticklabels
:param float rotation:
Rotation of the text (in degrees)
:param str rotation_mode:
Either "default" or "anchor"
"""
for tick in ax.get_xticklabels():
plt.setp(tick,
rotation=rotation,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation_mode=rotation_mode)
def rotate_yticklabels(self, ax,
rotation: float,
horizontalalignment: str = 'center',
verticalalignment: str = 'center',
rotation_mode: str = 'default'):
""" Rotate the y ticklabels
:param float rotation:
Rotation of the text (in degrees)
:param str rotation_mode:
Either "default" or "anchor"
"""
for tick in ax.get_yticklabels():
plt.setp(tick,
rotation=rotation,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation_mode=rotation_mode)
def show(self,
outfile: Optional[pathlib.Path] = None,
transparent: bool = True,
tight_layout: bool = False,
close: bool = True,
fig: Optional = None):
""" Act like matplotlib's show, but also save the file if passed
:param Path outfile:
If not None, save to this file instead of plotting
:param bool transparent:
If True, save with a transparent background if possible
:param bool tight_layout:
If True, try and squish the layout before saving
"""
if tight_layout:
plt.tight_layout()
if outfile is None:
plt.show()
else:
print('Writing {}'.format(outfile))
self.savefig(outfile, transparent=transparent, fig=fig)
if close:
plt.close()
def update(self, params: Dict):
""" Update the matplotlib rc.params
:param dict params:
rcparams to fiddle with
"""
self.params.update(params)
def savefig(self,
savefile: pathlib.Path,
fig: Optional = None,
**kwargs):
""" Save the figure, with proper background colors
:param Path savefile:
The file to save
:param fig:
The figure or plt.gcf()
:param \\*\\*kwargs:
The keyword arguments to pass to fig.savefig
"""
if fig is None:
fig = plt.gcf()
savefile = pathlib.Path(savefile)
savefile.parent.mkdir(exist_ok=True, parents=True)
savefig_params = dict(self.savefig_params)
savefig_params.update(kwargs)
fig.savefig(str(savefile), **kwargs)
class colorwheel(object):
""" Generate colors like a matplotlib color cycle
.. code-block:: python
palette = colorwheel(palette='some seaborn palette', n_colors=5)
for item, color in zip(items, colors):
# In here, the colors will cycle over and over for each item
# Access by index
color = palette[10]
:param str palette:
A palette that can be recognized by seaborn
:param int n_colors:
The number of colors to generate
"""
@classmethod
def from_colors(cls,
colors: List[str],
n_colors: Optional[int] = None):
""" Make a palette from a list of colors
:param str colors:
A list of matplotlib colors to use
"""
if n_colors is None:
n_colors = len(colors)
palette = []
for _, color in zip(range(n_colors, itertools.cycle)):
palette.append(mplcolors.to_rgba(color))
return cls(palette, n_colors=n_colors)
@classmethod
def from_color_range(cls,
color_start: str,
color_end: str,
n_colors: int):
""" Make a color range """
palette = []
color_start = mplcolors.to_rgba(color_start)
color_end = mplcolors.to_rgba(color_end)
red_color = np.linspace(color_start[0], color_end[0], n_colors)
green_color = np.linspace(color_start[1], color_end[1], n_colors)
blue_color = np.linspace(color_start[2], color_end[2], n_colors)
for r, g, b in zip(red_color, green_color, blue_color):
palette.append((r, g, b, 1.0))
return cls(palette, n_colors=n_colors)
# Dynamic color palettes
# These aren't as good as the ones that come with matplotlib
def wheel_blackwhite(self) -> List[Tuple]:
""" Colors from black to white in a linear ramp """
colors = np.linspace(0, 1, self.n_colors)
return [(c, c, c, 1.0) for c in colors]
def wheel_greyblack(self) -> List[Tuple]:
""" Colors from grey to black in a linear ramp """
colors = np.linspace(0.75, 0, self.n_colors)
return [(c, c, c, 1.0) for c in colors]
def wheel_greywhite(self) -> List[Tuple]:
""" Colors from grey to white in a linear ramp """
colors = np.linspace(0.25, 1, self.n_colors)
return [(c, c, c, 1.0) for c in colors]
def wheel_lightgreywhite(self) -> List[Tuple]:
""" Colors from grey to white in a linear ramp """
colors = np.linspace(0.608, 1, self.n_colors)
return [(c, c, c, 1.0) for c in colors]
def wheel_redgrey(self) -> List[Tuple]:
""" Grey to red color space """
red = np.linspace(155/255, 228/255, self.n_colors)
green = np.linspace(155/255, 26/255, self.n_colors)
blue = np.linspace(155/255, 28/255, self.n_colors)
return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]
def wheel_bluegrey(self) -> List[Tuple]:
""" Grey to blue color space """
red = np.linspace(155/255, 70/255, self.n_colors)
green = np.linspace(155/255, 130/255, self.n_colors)
blue = np.linspace(155/255, 180/255, self.n_colors)
return [(r, g, b, 1.0) for r, g, b in zip(red, green, blue)]
@property
next = __next__
# Helper Functions
def bootstrap_ci(data: np.ndarray,
n_boot: int = 1000,
random_seed: Optional[int] = None,
ci: float = 95,
func: Callable = np.mean,
axis: int = 0) -> Tuple[np.ndarray]:
""" Calculate a confidence interval from the input data using bootstrapping
:param ndarray data:
The data to bootstrap sample
:param int n_boot:
Number of times to sample the frame
:param int random_seed:
Seed for the random number generator
:param float ci:
Confidence interval to calculate (mean +/- ci/2.0)
:param Callable func:
Function to calculate the ci around (default: np.mean)
:param int axis:
Which axis to sample over
:returns:
The upper and lower bounds on the CI
"""
n = data.shape[axis]
rs = np.random.RandomState(random_seed)
boot_dist = []
for i in range(n_boot):
resampler = rs.randint(0, n, n)
sample = data.take(resampler, axis=axis)
boot_dist.append(func(sample, axis=axis))
boot_dist = np.array(boot_dist)
return np.percentile(boot_dist, [50 - ci/2, 50 + ci/2], axis=0)
def get_histogram(data: np.ndarray,
bins: int,
range: Optional[Tuple[int]] = None,
kernel_smoothing: bool = True,
kernel_bandwidth: Optional[str] = None,
kernel_samples: int = 100) -> Tuple[np.ndarray]:
""" Get a histogram and a kernel fit for some data
:param ndarray data:
The data to fit
:param int bins:
The number of bins to generate
:param tuple[float] range:
The range to fit bins to (argument to np.histogram)
:param bool kernel_smoothing:
If True, also generate a kernel-smoothed fit. If False, xkernel, ykernel are None
:param str kernel_bandwidth:
If not None, the method to use to estimate the kernel smoothed fit
:param int kernel_samples:
The number of samples to draw for the kernel fit
:returns:
xbins, ybins, xkernel, ykernel
"""
bins_y, bins_x = np.histogram(data, bins=bins, range=range)
# Estimate the kernel smoothed fit
if kernel_smoothing:
kernel = gaussian_kde(data, bw_method=kernel_bandwidth)
kernel_x = np.linspace(bins_x[0], bins_x[-1], kernel_samples)
kernel_y = kernel(kernel_x)
# Rescale for equal areas
bin_width = bins_x[1:] - bins_x[:-1]
hist_area = np.sum(bin_width * bins_y)
kernel_area = simps(kernel_y, kernel_x)
kernel_y = kernel_y * hist_area / kernel_area
else:
kernel_x = kernel_y = None
return bins_x, bins_y, kernel_x, kernel_y
# Plot functions
def add_lineplot(ax,
data: pd.DataFrame,
x: str, y: str,
hue: Optional[str] = None,
order: Optional[List[str]] = None,
hue_order: Optional[List[str]] = None,
palette: str = PALETTE,
savefile: Optional[pathlib.Path] = None,
label: Optional[str] = None,
err_style: str = 'band'):
""" Add a seaborn-style lineplot with extra decorations
:param Axes ax:
The matplotlib axis to add the barplot for
:param DataFrame data:
The data to add a barplot for
:param str x:
The column to use for the categorical values
:param str y:
The column to use for the real values
:param str palette:
The palette to use
:param Path savefile:
If not None, save the figure data to this path
"""
bins = {}
data = data.dropna()
if order is None:
order = np.sort(np.unique(data[x]))
if hue is None:
hue_order = [None]
elif hue_order is None:
hue_order = np.sort(np.unique(data[hue]))
for cat in order:
for hue_cat in hue_order:
if hue_cat is None:
mask = data[x] == cat
else:
mask = np.logical_and(data[x] == cat, data[hue] == hue_cat)
# Handle missing categories
n_samples = np.sum(mask)
if n_samples >= 3:
catdata = data[mask]
ydata = catdata[y].values
ymean = np.mean(ydata)
ylow, yhigh = bootstrap_ci(ydata)
else:
ymean = ylow = yhigh = np.nan
if hue is None:
bins.setdefault(x, []).append(cat)
bins.setdefault(f'{y} Mean', []).append(ymean)
bins.setdefault(f'{y} CI Low', []).append(ylow)
bins.setdefault(f'{y} CI High', []).append(yhigh)
bins.setdefault('Samples', []).append(n_samples)
else:
bins.setdefault(x, []).append(cat)
bins.setdefault(hue, []).append(hue_cat)
bins.setdefault(f'{y} Mean', []).append(ymean)
bins.setdefault(f'{y} CI Low', []).append(ylow)
bins.setdefault(f'{y} CI High', []).append(yhigh)
bins.setdefault('Samples', []).append(n_samples)
# Save the background data
bins = pd.DataFrame(bins)
if savefile is not None:
if savefile.suffix != '.xlsx':
savefile = savefile.parent / (savefile.stem + '.xlsx')
bins.to_excel(str(savefile))
# Now draw the plots
palette = colorwheel(palette, len(hue_order))
for i, hue_cat in enumerate(hue_order):
if hue_cat is None:
xcoords = bins[x].values
ymean = bins[f'{y} Mean'].values
ylow = bins[f'{y} CI Low'].values
yhigh = bins[f'{y} CI High'].values
hue_label = label
else:
hue_bins = bins[bins[hue] == hue_cat]
xcoords = hue_bins[x].values
ymean = hue_bins[f'{y} Mean'].values
ylow = hue_bins[f'{y} CI Low'].values
yhigh = hue_bins[f'{y} CI High'].values
if label is None:
hue_label = hue_cat
else:
hue_label = f'{hue_cat} {label}'
color = palette[i]
if err_style in ('band', 'bands'):
ax.fill_between(xcoords, ylow, yhigh, facecolor=color, alpha=0.5)
ax.plot(xcoords, ymean, '-', color=color, label=hue_label)
elif err_style in ('bar', 'bars'):
ax.errorbar(xcoords, ymean, np.stack([ymean-ylow, yhigh-ymean], axis=0),
capsize=15, linewidth=3, color=color, label=hue_label)
else:
raise ValueError(f'Unknown error style: "{err_style}"')
return ax
def add_histogram(ax,
data: np.ndarray,
xlabel: Optional[str] = None,
ylabel: str = 'Counts',
title: Optional[str] = None,
bins: int = 10,
draw_bars: bool = True,
bar_width: float = 0.7,
range: Optional[Tuple[float]] = None,
fit_dist: Optional[str] = None,
fit_dist_color: str = 'r',
kernel_smoothing: bool = True,
label_kernel_peaks: Optional[str] = None,
kernel_smoothing_color: str = 'c',
kernel_bandwidth: Optional[str] = None,
vlines: Optional[List[np.ndarray]] = None,
vline_colors: str = 'b'):
""" Add a histogram plot
Basic Usage:
.. code-block:: python
fig, ax = plt.subplots(1, 1)
histogram(ax, np.random.rand(64, 64),
draw_bars=True,
kernel_smoothing=True,
fit_dist='poisson',
vlines=[0.25, 0.75])
This will draw the histogram with a kernel smoothed fit, a poisson fit,
and vertical lines at x coordinates 0.25 and 0.75.
:param Axis ax:
The axis to add the histogram to
:param ndarray data:
The data to make the histogram for
:param str xlabel:
Label for the x axis
:param str ylabel:
Label for the y axis
:param str title:
Title for the axis
:param int bins:
Number of bins in the histogram
:param bool draw_bars:
If True, draw the histogram bars
:param float bar_width:
The width of the bars to plot
:param tuple[float] range:
The range to fit bins to (argument to np.histogram)
:param str fit_dist:
The name of a distribution to fit to the data
:param str fit_dist_color:
The color of the fit dist line
:param bool kernel_smoothing:
If True, plot the kernel smoothed line over the bars
:param str label_kernel_peaks:
Any of min, max, both to label extrema in the kernel
:param str kernel_smoothing_color:
The color of the kernel smoothed fit line
:param str kernel_bandwidth:
The method to calculate the kernel width with
:param list vlines:
x coords to draw vertical lines at
:param list vline_colors:
The color or list of colors for the spectra
"""
# Estimate the histogram
data = data[np.isfinite(data)]
xbins, hist, kernel_x, kernel_y = get_histogram(
data, bins=bins, range=range,
kernel_smoothing=kernel_smoothing,
kernel_bandwidth=kernel_bandwidth)
width = bar_width * (xbins[1] - xbins[0])
center = (xbins[:-1] + xbins[1:])/2
# Add bars for the histogram
if draw_bars:
ax.bar(center, hist, align='center', width=width)
# Estimate the kernel smoothed fit
if kernel_smoothing:
# Add a kernel smoothed fit
ax.plot(kernel_x, kernel_y, color=kernel_smoothing_color)
if label_kernel_peaks in ('max', 'both', True):
maxima = (np.diff(np.sign(np.diff(kernel_y))) < 0).nonzero()[0] + 1
kx_maxima = kernel_x[maxima]
ky_maxima = kernel_y[maxima]
ax.plot(kx_maxima, ky_maxima, 'oc')
for kx, ky in zip(kx_maxima, ky_maxima):
ax.text(kx, ky*1.05, "{}".format(float("{:.2g}".format(kx))),
color="c", fontsize=12)
if label_kernel_peaks in ('min', 'both', True):
minima = (np.diff(np.sign(np.diff(kernel_y))) > 0).nonzero()[0] + 1
kx_minima = kernel_x[minima]
ky_minima = kernel_y[minima]
ax.plot(kx_minima, ky_minima, 'oy')
for kx, ky in zip(kx_minima, ky_minima):
ax.text(kx, ky*0.88, "{}".format(float("{:.2g}".format(kx))),
color="y", fontsize=12)
# Fit an model distribution to the data
if fit_dist is not None:
opt_x = np.linspace(xbins[0], xbins[-1], 100)
if fit_dist == 'gamma':
fit_alpha, fit_loc, fit_beta = gamma.fit(data + 1e-5)
# print(fit_alpha, fit_loc, fit_beta)
opt_y = data = gamma.pdf(opt_x, fit_alpha, loc=fit_loc, scale=fit_beta) * data.shape[0]
else:
raise KeyError(f'Unknown fit distribution: {fit_dist}')
ax.plot(opt_x, opt_y, fit_dist_color)
# Add spectral lines
if vlines is None:
vlines = []
if isinstance(vline_colors, (str, tuple)):
vline_colors = [vline_colors for _ in vlines]
if len(vlines) != len(vline_colors):
raise ValueError(f'Number of colors and lines needs to match: {vlines} vs {vline_colors}')
ymin, ymax = ax.get_ylim()
for vline, vline_color in zip(vlines, vline_colors):
ax.vlines(vline, ymin, ymax, colors=vline_color)
# Label the axes
if xlabel not in (None, ''):
ax.set_xlabel(xlabel)
if ylabel not in (None, ''):
ax.set_ylabel(ylabel)
if title not in (None, ''):
ax.set_title(f'{title} (n={data.shape[0]})')
else:
ax.set_title(f'n = {data.shape[0]}')
# Complete Plots
def plot_3d_sphere_cloud(centers: List[Tuple[np.ndarray]],
colors: List[str] = None,
cmap: str = 'inferno',
cvalues: Optional[List[np.ndarray]] = None,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
radii: List[float] = 1.0,
title: Optional[str] = None,
marker: str = 'o',
markersize: float = 10,
figsize: Tuple[int] = (16, 16),
outfile: Optional[pathlib.Path] = None,
add_colorbar: bool = False):
""" Plot the raw points we sampled
:param list[tuple[ndarray]] points:
A list of x, y, z tuples for each population
:param list[str] colors:
A list of colors for each population
:param str title:
The title for the plot
:param Path outfile:
The path to write the output file to
:param str marker:
Matplotlib marker shape to plot
:param int markersize:
Size for the markers to draw
"""
if isinstance(radii, (int, float)):
radii = [radii for _ in centers]
if colors is None and cvalues is None:
raise ValueError('Pass one of "colors" or "cvalues" to plot_3d_sphere_cloud')
# Convert the color values into a heatmap
if colors is None:
if vmin is None:
vmin = np.nanmin(cvalues)
if vmax is None:
vmax = np.nanmax(cvalues)
norm = mplcolors.Normalize(vmin=vmin, vmax=vmax)
cmapper = mplcm.get_cmap(cmap)
colors = []
for cvalue in cvalues:
colors.append(cmapper(norm(cvalue)))
mappable = mplcm.ScalarMappable(norm=norm, cmap=cmap)
else:
mappable = None
# Check that the shapes make sense
assert Axes3D is not None
if len(centers) != len(colors):
raise ValueError('Got {} centers but {} colors'.format(len(centers), len(colors)))
if len(centers) != len(radii):
raise ValueError('Got {} centers but {} radii'.format(len(centers), len(radii)))
# Plot everything
all_x = []
all_y = []
all_z = []
if add_colorbar:
figsize = (figsize[0]*1.4, figsize[1])
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
for center, color, radius in zip(centers, colors, radii):
px, py, pz = center
ax.scatter(px, py, pz,
marker=marker,
c=color,
s=radius*50, # Convert radius from um to dpi
depthshade=False,
cmap=cmap)
all_x.append(px)
all_y.append(py)
all_z.append(pz)
all_x = np.concatenate(all_x)
all_y = np.concatenate(all_y)
all_z = np.concatenate(all_z)
# Work out the bounding box
min_x = np.min(all_x)
max_x = np.max(all_x)
min_y = np.min(all_y)
max_y = np.max(all_y)
min_z = np.min(all_z)
max_z = np.max(all_z)
range_x = max_x - min_x
range_y = max_y - min_y
range_z = max_z - min_z
range_max = max([range_x, range_y, range_z])
center_x = (min_x + max_x)/2
center_y = (min_y + max_y)/2
center_z = (min_z + max_z)/2
ax.set_xlim([center_x - range_max/2, center_x+range_max/2])
ax.set_ylim([center_y - range_max/2, center_y+range_max/2])
ax.set_zlim([center_z - range_max/2, center_z+range_max/2])
if title is not None:
fig.suptitle(title)
if add_colorbar and mappable is not None:
plt.colorbar(mappable, ax=ax, fraction=0.15, pad=0.05)
if outfile is None:
plt.show()
else:
outfile.parent.mkdir(exist_ok=True, parents=True)
fig.savefig(str(outfile), transparent=True)
plt.close()
| [
37811,
28114,
889,
4899,
329,
262,
18640,
9355,
198,
198,
18716,
1359,
4899,
25,
198,
198,
9,
1058,
9078,
25,
4871,
25,
63,
2617,
62,
29487,
62,
7635,
63,
25,
28114,
3918,
4732,
4706,
198,
9,
1058,
9078,
25,
4871,
25,
63,
8043,
22... | 2.069295 | 12,454 |
import sys
import logging
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append("../xmpp_bot")
logging.basicConfig(level=logging.DEBUG)
server = 'localhost'
port = 5222
from xmpp_bot.controllers.copernicus import DashboardController
if __name__ == '__main__':
if len(sys.argv) >= 4:
jid = sys.argv[1] # dashboard1@localhost
password = sys.argv[2] # 1234
pubsub_server = sys.argv[3] # pubsub.localhost
if len(sys.argv) >= 5:
server = sys.argv[4] # localhost
if len(sys.argv) >= 6:
port = sys.argv[5] # 5222
xmpp = DashboardController(jid, password, pubsub_server)
xmpp.connect(address = (server, port), use_tls=False)
xmpp.process(threaded=False)
else:
print("Invalid number of arguments.\n" +
"Usage: python %s " +
"<jid> <pass> <pubsub> [host] [port]" % sys.argv[0])
| [
11748,
25064,
198,
11748,
18931,
198,
198,
361,
25064,
13,
9641,
62,
10951,
1279,
357,
18,
11,
657,
2599,
198,
220,
220,
220,
18126,
7,
17597,
8,
198,
220,
220,
220,
25064,
13,
2617,
12286,
12685,
7656,
10786,
40477,
23,
11537,
198,
... | 2.090129 | 466 |
import logging
from amitools.vamos.profiler import MainProfiler, Profiler
from amitools.vamos.cfgcore import ConfigDict
| [
11748,
18931,
198,
6738,
716,
270,
10141,
13,
85,
321,
418,
13,
5577,
5329,
1330,
8774,
15404,
5329,
11,
4415,
5329,
198,
6738,
716,
270,
10141,
13,
85,
321,
418,
13,
37581,
7295,
1330,
17056,
35,
713,
628,
628,
628,
628,
198
] | 3.047619 | 42 |
N = int(input())
i = 1
distance = 1
road = 1
while road < N:
road += i * 6
i += 1
print(i) | [
45,
796,
493,
7,
15414,
28955,
198,
72,
796,
352,
198,
30246,
796,
352,
198,
6344,
796,
352,
198,
4514,
2975,
1279,
399,
25,
198,
220,
220,
220,
2975,
15853,
1312,
1635,
718,
198,
220,
220,
220,
1312,
15853,
352,
198,
4798,
7,
72,... | 2.177778 | 45 |
#!/bin/bash/env python3
import os, sys
# sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from .. import *
from ..model.minimal_model import pbc
# test cases of periodic boundary conditions on a random matrix
test = np.random.rand(111,111,3)
# trivial tests, do nothing/ slots agree
(pbc(test,1,2)==test[1,2]).all()
assert(not (pbc(test,2,1)==test[1,2]).all())
#test each pbc boundary
assert((pbc(test,-1,2)==test[110,2]).all()) # test left
assert((pbc(test,111,2)==test[0,2]).all() ) # test right
assert((pbc(test,11,112)==test[11,0]).all() ) # test top
assert((pbc(test,12,-1)==test[12,110]).all() ) # test bottom
assert((pbc(test,-1,-1)==test[110,110]).all() ) #test bottom left corner
#padded spiral tips are produced with at pixel percision of about 13 digits.
# note that this is not the same as accuracy, which will depend on sigma, threshold, and V_threshold
# test functions for unpad
# assert(0==unpad(X=20, pad=20, width=500, rejection_distance=10))
# assert(unpad(X=19, pad=20, width=500, rejection_distance=10)==499)
# assert(280==unpad(X=300, pad=20, width=500, rejection_distance=10))
# assert(499==unpad(X=519, pad=20, width=500, rejection_distance=10))
# assert(10==unpad(X=530, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=531, pad=20, width=500, rejection_distance=10))
# assert(490==unpad(X=10, pad=20, width=500, rejection_distance=10))
# assert(-9999==unpad(X=9, pad=20, width=500, rejection_distance=10))
| [
2,
48443,
8800,
14,
41757,
14,
24330,
21015,
18,
198,
11748,
28686,
11,
25064,
198,
2,
25064,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
366,
8019,
48774,
198,
6738... | 2.687956 | 548 |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Operations for working with string Tensors.
"""
from __future__ import print_function as _print_function
from tensorflow.python import as_string
from tensorflow.python import reduce_join_v2 as reduce_join
from tensorflow.python import regex_full_match
from tensorflow.python import regex_replace
from tensorflow.python import string_format as format
from tensorflow.python import string_join as join
from tensorflow.python import string_length_v2 as length
from tensorflow.python import string_lower as lower
from tensorflow.python import string_strip as strip
from tensorflow.python import string_to_hash_bucket as to_hash_bucket
from tensorflow.python import string_to_hash_bucket_fast as to_hash_bucket_fast
from tensorflow.python import string_to_hash_bucket_strong as to_hash_bucket_strong
from tensorflow.python import string_to_number as to_number
from tensorflow.python import string_upper as upper
from tensorflow.python import substr_v2 as substr
from tensorflow.python import unicode_script
from tensorflow.python import unicode_transcode
from tensorflow.python.ops.ragged.ragged_string_ops import string_bytes_split as bytes_split
from tensorflow.python.ops.ragged.ragged_string_ops import string_split_v2 as split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_decode_with_offsets
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_encode
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split
from tensorflow.python.ops.ragged.ragged_string_ops import unicode_split_with_offsets
del _print_function
| [
2,
770,
2393,
318,
337,
16219,
8881,
24700,
1137,
11617,
0,
2141,
407,
4370,
13,
198,
2,
2980,
515,
416,
25,
11192,
273,
11125,
14,
29412,
14,
31391,
14,
15042,
14,
8612,
1352,
14,
17953,
62,
29412,
62,
15042,
13,
9078,
4226,
13,
... | 3.309125 | 537 |
from .. import db
from .modelHelpers import (
findAll, findById, deleteById, findByName, formatId, assignId, updateDocument, formatDocuments,
)
from bson import ObjectId
| [
6738,
11485,
1330,
20613,
198,
6738,
764,
19849,
12621,
19276,
1330,
357,
198,
220,
220,
220,
1064,
3237,
11,
1064,
48364,
11,
12233,
48364,
11,
1064,
3886,
5376,
11,
5794,
7390,
11,
8333,
7390,
11,
4296,
24941,
11,
5794,
38354,
11,
1... | 3.296296 | 54 |
import pytest
from test_helper import get_test_data
from texthandler import CommentClassifier
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
1332,
62,
2978,
525,
1330,
651,
62,
9288,
62,
7890,
198,
198,
6738,
48659,
400,
392,
1754,
1330,
18957,
9487,
7483,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198
] | 3 | 43 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x18\x08\x06\x00\x00\x00\x9b\x53\xff\x34\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc2\x00\x00\x0e\xc2\x01\x15\x28\
\x4a\x80\x00\x00\x03\xcf\x49\x44\x41\x54\x48\x4b\xed\x56\x5b\x68\
\x1c\x65\x18\x3d\xff\x5c\xb2\x9b\x6d\xb3\x9b\x64\x77\x33\xcd\x15\
\xa1\x22\xbe\x58\x90\x78\x41\xad\x22\x22\x14\xec\x83\x54\x85\x3c\
\x59\x0a\x52\x28\x3e\x08\x55\xe8\xab\x6f\xd2\x07\x41\xf4\xd9\xbe\
\x09\x5e\x09\x58\x04\x8d\xa8\x01\x51\x8b\xd5\x74\xd3\x98\xcb\xa6\
\x26\xcd\xa6\xcd\x65\x2f\xb3\x6b\x76\x93\xdd\xec\xce\xcc\xce\x78\
\xfe\xd9\x0d\x36\xb0\x95\x9a\xf8\x98\x03\xdf\xcc\xce\xbf\xdf\xfc\
\xdf\x99\xf3\x9d\xfd\x66\x85\x47\x60\x8f\x18\x1d\x1d\x85\x6d\xdb\
\x18\x19\x19\x69\xae\xfc\x77\x28\xcd\xf3\x9e\x90\xc9\x64\x90\xcd\
\x66\x9b\x57\x7b\xc3\xbe\x08\xa8\xaa\x0a\x45\xd9\xd7\x16\xfb\x23\
\xf0\x7f\xe0\x80\xc0\x01\x81\x03\x02\x07\x04\xee\x8d\x80\x63\x63\
\x71\x61\x01\xa9\xd4\x92\x1f\x4b\x37\x6f\xfa\xcb\x72\x0a\x0a\x21\
\xfc\xcf\x7b\xc5\x3d\xbd\x8c\x5e\x7e\xf7\x43\x84\xfb\x87\x00\xcf\
\xe5\xf8\x55\x30\xb9\xb8\x8c\xf7\x5f\x38\x8e\xd9\xab\x3f\x63\xab\
\x5c\xc1\xf9\xf3\x6f\xc0\xae\x03\xba\xca\x64\xd7\x81\x67\x3b\x24\
\xc6\x74\x5e\x0a\x4d\x27\x53\xf9\x45\x6b\xfc\x2b\x81\x5b\xcb\xb7\
\x21\xec\x0d\x3c\xf7\xf5\x12\xee\x1f\xec\x85\xeb\x92\x00\x37\x9c\
\x9e\x98\xc0\x17\x27\x7b\x90\xb8\x3a\x81\x72\xb5\x86\x73\xa7\x5f\
\x42\x89\x44\x96\x53\xeb\xb8\x7c\x3b\x88\x50\x28\xc8\xbb\x3d\xd6\
\x55\x31\xf3\x67\x0a\x1f\x5d\x38\xd7\xd8\xb0\x05\x76\xb5\xc0\xe5\
\xab\x75\x6e\xea\x37\x24\x27\xbf\x45\x32\xf1\x39\x7a\x23\xcb\xe8\
\xef\xab\x00\x75\x01\x85\x3c\x77\xa2\xe6\x00\x87\x23\x21\x04\x02\
\x01\xb4\xb5\x05\x10\xea\x09\xe3\xc8\x60\x37\x7e\xdc\x0e\x61\x3c\
\x38\x80\x1f\xdc\x08\xa3\x0b\x63\xb5\x0e\xcc\x9a\x39\x64\x6f\x5c\
\xc6\xe2\xec\x38\xe6\xe7\x26\x51\xae\x54\x9b\xd5\x1a\xf0\x15\x28\
\x97\x6b\xb8\xf1\xc7\xf7\xb8\x78\x25\x8d\x70\x2c\x46\xa5\x15\x68\
\x9a\x86\x6f\x7e\x49\x60\xfa\xed\xa7\xf0\xec\xa5\x25\x0c\xf6\xc9\
\x75\x17\x2e\xe5\x0c\x66\x16\xf0\xd9\x85\x57\x28\x73\xbf\xbf\x89\
\xe7\xad\x01\xb5\x34\x3e\x18\xbf\x85\x2f\xb7\xfa\xd0\x2e\xc8\x90\
\x3d\xa8\xb3\x2d\xe9\x5f\xbf\x43\xe2\xe2\x49\xc0\x96\x42\xbb\xc8\
\xa4\xf3\x28\x56\xda\x61\x0c\x3d\x82\x48\x47\x08\x5a\xa1\x50\x80\
\xa8\x24\x10\xe8\xf0\x30\x17\x7d\x10\x46\xa0\xd1\x11\xf9\xaa\xfd\
\x4b\x28\x38\x44\x22\xbf\xbf\xf5\x10\x55\x70\xfd\x75\x1f\x5a\x1f\
\x0f\x15\xbc\x7a\xfa\x14\xfd\xc9\x62\x28\xd3\x00\x2c\xc8\xfe\x5b\
\x75\x07\x2a\xea\xfe\x03\x98\x65\x0b\x0f\x74\xb2\xff\x1e\x85\x76\
\x98\xe7\x09\x18\xf1\x38\x0c\xd5\xe3\x7f\x89\x9f\xe0\x58\xc3\x50\
\x8a\xb9\x49\x74\x19\x31\xb8\xfc\x32\xa8\x29\x08\xea\x1a\x02\x7e\
\xe8\x34\x96\x80\x23\x0b\xdb\xac\xe1\xd0\x55\x3b\x51\xe5\xa3\xd5\
\x2a\x38\x7b\xe6\x45\xbc\xfe\xda\x29\xd6\xdf\xc0\x3b\x9f\x24\x91\
\xce\x2b\x78\xac\xba\x8a\x61\xc7\xc4\xc6\x95\x71\x5c\x7a\xb4\x84\
\x4f\xdf\x3c\xc1\x5c\x49\x92\x90\x3f\x18\x69\x39\xaa\x61\x18\x71\
\x94\xcc\xeb\x50\x84\xef\x55\xae\x59\x0e\x56\x57\xd6\x19\x69\xac\
\x31\x16\x53\xab\x78\xbe\x57\x40\xef\x3c\x4c\xe5\x5a\xf8\xd4\xb2\
\xf1\xf4\xe3\xc7\x70\xfc\x99\x61\x5c\x9b\x5f\xc3\xc7\xb5\x01\x24\
\xf4\x18\xa6\x83\x06\xae\x07\x68\xd0\xa2\x83\x63\x47\x7b\xa5\x17\
\x1b\xd1\x02\x82\x2d\x11\xa6\x99\xf3\xea\x5b\xd7\xd0\x33\x60\x70\
\x85\x4c\xef\x2c\xa6\xd1\xcd\x9b\xb5\xe6\xc5\x5d\x40\xd5\xa6\xe6\
\x57\x70\x36\xd9\x83\xb8\xde\x68\x93\x4a\xf9\xc7\xbe\x1a\xc3\xf6\
\x7b\x27\x58\x84\xf2\xfb\x24\xe4\x81\x12\x48\xdb\xf3\x9e\x5c\x36\
\x07\x11\x7a\x18\x4a\x34\x1a\x43\xa0\xeb\x49\xcc\x4c\x65\x50\xca\
\x14\xf9\x64\x4c\xb2\x98\x55\x65\x6c\x59\xff\xdc\xec\x6f\xd0\x1a\
\x96\xe3\xc2\xac\x09\xe4\x79\x9f\xc9\x58\xdf\x16\xb8\x8f\xc2\x89\
\x36\xce\x00\x59\x54\x4a\xcf\xf9\x01\xcd\xc3\xe6\x66\x11\x33\xc9\
\x1c\xf4\xc8\x13\x88\xc5\xe2\xbb\xe7\x40\x2e\x5f\x44\xd1\x5c\xe0\
\x2c\x29\x20\x1a\xd1\x49\xb4\x8e\x48\x67\x98\x1b\x48\x23\x31\x61\
\xe7\x29\xee\x04\xdd\xee\x72\x52\xae\xac\x65\xa0\xc9\x22\x84\xc3\
\x81\x35\x34\x24\x15\x55\x51\xca\x6f\xc0\x76\x55\x14\x8a\x36\x87\
\x52\x37\x22\xd1\xa3\x88\xc7\x3a\xfd\x3c\x89\xbb\x0e\xa2\x52\xa9\
\xcc\xc1\x63\xa1\x90\x4f\xf3\x17\x50\xe1\x94\xab\xb3\x3b\xd2\xc9\
\x74\xa4\x3c\xb3\x7f\x3b\xbd\x15\x74\x92\xa6\x6a\x7e\x41\x21\x74\
\x3f\xaa\x36\x89\x89\x76\x74\x77\x1f\xe1\x40\x6a\x43\x38\x7c\xa8\
\x91\xbc\x0b\xc0\xdf\xcb\x70\xa6\xef\x92\xee\x15\xda\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x33\x0b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xff\x00\x00\x00\x79\x08\x06\x00\x00\x00\xf6\xe1\xf7\x0f\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x09\xda\x00\x00\
\x09\xda\x01\xb8\x5e\x7a\x24\x00\x00\x32\x49\x49\x44\x41\x54\x78\
\xda\xed\x7d\x79\x78\x54\xd5\xdd\xff\xe7\xdc\x99\xec\x21\x99\x04\
\x42\x00\x59\x02\x44\x40\x04\x09\xe0\x06\xca\xa6\x68\xed\x6b\xad\
\x28\xb5\x76\x51\x09\xda\xda\xc5\x2d\xb4\x56\x5c\xde\x9f\xe2\x6b\
\x15\x68\x6b\x85\xfa\xf6\x6d\x6d\x51\xa1\xb5\xb5\x6a\x5b\x02\xad\
\x4a\x55\x20\x08\x2a\x6a\x2b\x41\x16\x85\x24\x24\x61\x09\x61\x49\
\x32\x93\x65\x66\x32\xcb\x3d\xbf\x3f\xee\xb9\x73\xcf\x3d\xf7\xde\
\xc9\x10\xb2\x21\xe7\xfb\x3c\xe7\x99\xbb\x6f\x73\x3e\xdf\xfd\x7b\
\x8e\x42\x29\x85\x6c\xb2\xc9\x76\xf6\x35\x05\x92\x24\x49\x3a\x2b\
\x49\x82\x5f\x92\x24\x09\x7e\x49\x92\x24\x49\xf0\x4b\x92\x24\x49\
\x82\xff\x4c\xa0\x69\xeb\xeb\x46\x4c\x2c\x3d\x52\x32\xfa\x6f\x87\
\x57\x0d\x7a\xed\x50\xd9\x39\xaf\x1d\x2a\x73\xff\xa9\xa6\xc9\xf5\
\x72\x6d\x93\xfb\xe5\x5a\xe3\xf7\x95\x83\x4d\xae\x57\x0e\x35\xb9\
\x5e\x39\xd4\x34\xa2\xb4\x6e\xc7\xd8\xf5\x47\xcb\x26\xfd\xb3\x7e\
\xed\xdc\x7f\x9d\x28\xf9\xca\x3b\x27\x2f\x97\xdd\x41\x92\x04\x7f\
\x1f\xa6\xa2\xb5\x47\x46\x0c\x7f\xed\xd0\xd2\x9c\x97\x6b\xca\x92\
\xd6\x54\x35\x91\xdf\xef\x57\xb7\xd7\xb5\xd4\xec\x3e\xd6\xf6\xcc\
\x81\x06\xff\x1d\xc7\x9a\x02\xb3\xea\xbc\xc1\x59\x51\x7f\xd8\xa3\
\xfa\xc3\x9e\xa8\x3f\xe2\x51\xfd\x11\x4f\x34\x10\xf1\xa8\xfe\xb0\
\x47\x0d\x84\x3d\x6a\x20\xe2\x39\xe8\x6b\x2f\xda\xef\x6d\x9f\xf5\
\x69\x53\x70\xde\xc6\xe3\xfe\x67\x5e\x3f\xda\xb6\x95\xbc\x7c\x48\
\x4d\xfd\x7b\x5d\x53\xd1\x1b\xc7\xcb\x6e\xdc\xd8\x30\xef\x6b\x9b\
\x1a\x3c\xb2\x8b\x48\x92\xe0\xef\x45\x1a\xfb\xd7\x43\x97\xe7\xbe\
\x5c\xb3\xd6\xbd\x6a\xbf\x7f\x67\x5d\x73\xcd\xa1\x13\xad\x0f\x7a\
\x9b\x82\xb3\x22\xfe\xb0\x07\x11\x95\x68\x47\x51\x80\xb2\x13\x28\
\x8d\x6d\x8a\x6d\xa4\xc6\xa2\xfd\x36\x00\x51\x95\xb4\x07\x22\x9e\
\x9d\xde\xf6\x59\x07\xda\x23\x6b\x01\x34\xdd\xb4\xb9\xa1\xfc\xe6\
\xcd\x0d\x25\xdf\x28\x6b\x2c\x90\xdd\x45\x92\x04\x7f\x0f\x50\xe1\
\x2b\xb5\x97\x67\xbd\x74\x60\x6d\xd2\xef\xf6\xf9\xf7\x1f\x6d\xde\
\xda\xd4\xe0\x9f\x17\x0d\x46\xd2\x38\x84\xdb\x9c\x45\x9d\x37\x11\
\x61\x9d\xc6\x39\x8f\x52\xec\x0d\x46\x59\x3c\x14\x93\x54\xe0\x19\
\x4a\x69\xf5\x37\xca\x1a\xcb\xbf\xb9\xa5\xb1\xe4\x5b\xef\x36\x49\
\x46\x20\x49\x82\xbf\xab\x69\xe8\x4b\x07\x4a\xb2\x56\xed\xab\xac\
\xaa\xf3\x6d\x6d\x39\xd9\x36\x2f\x12\x08\xa5\x81\x52\x43\x9a\x3b\
\x01\xd8\x76\x1b\xb7\x91\xd7\x0a\x78\x0d\x40\x5c\x66\xeb\xe1\x60\
\x14\x41\x95\x42\x65\xb7\x56\x01\xa8\xc0\x24\x4a\x35\x46\xf0\xad\
\x77\x9b\x56\xdf\xf2\x6e\xd3\x6c\xd9\x85\x24\x49\xf0\x9f\x26\x0d\
\xf9\x43\x65\x49\xe6\xff\xed\x6e\x3a\x52\xef\x7b\xa6\xc5\x17\x18\
\x0d\xaa\x42\x6b\x1c\x88\x79\xe0\xf2\xbf\x26\x35\x5f\x00\xbe\x1d\
\xd3\x30\xe9\xfb\xd4\x51\x0b\xa8\x08\x46\x41\x61\x30\x00\x4a\x29\
\x54\xb6\x8f\x50\xba\x00\xc0\xe6\x5b\xb7\x36\x95\x2d\xd8\xda\x34\
\x4f\x76\x25\x49\x12\xfc\xa7\x48\xe7\xac\xae\x28\x49\x7f\xf6\xd3\
\xa6\xa3\x47\xbd\xcf\xb4\xb5\x86\x3c\x31\x29\xaf\x83\x5a\x5f\x56\
\x55\x06\x64\xa1\x11\xf1\x8a\x4e\xaa\x7f\x82\x9a\x03\x8c\xfb\x56\
\x04\xa3\x1a\xd0\xb9\x9d\x44\x33\x05\x74\x4d\x00\x94\x62\x96\x0a\
\xac\xbd\x6d\x9b\xb7\xac\x78\x9b\x77\xb6\xec\x52\x92\x24\xf8\x3b\
\xa0\x91\xab\xf7\x5f\x9e\xba\xa2\xbc\xa9\xee\x48\xe3\x33\x81\x96\
\x76\x8f\x86\x24\x95\x03\xbc\xce\x00\x38\x0d\x00\xb0\x08\x6d\x8b\
\x93\x0f\x0e\xdb\x10\x87\x01\xd8\x9a\x01\x00\xc2\x2a\x5a\xc3\x2a\
\x03\xb9\x05\xf4\x31\x4d\x40\xd5\x98\xc2\x2c\x00\x9b\x17\xbe\xe7\
\x2b\xbb\xfd\x7d\x5f\x81\xec\x5a\x92\x24\xf8\x6d\x68\xc0\xaf\x77\
\x95\xd5\x1c\x6a\xd8\xda\xde\x12\xf4\x68\x98\x13\x54\x7c\x51\xea\
\x9b\x18\x02\x75\xb6\xdb\x6d\xcd\x02\x6a\xbf\xdd\xba\x62\xcb\x27\
\x76\x04\xa3\x06\xe8\x39\xf5\x5f\x27\x42\x29\x88\x89\x29\xd0\x59\
\x6f\x1f\xf6\x56\x8f\xfa\xf3\x81\x32\xd9\xbd\x24\x49\xf0\x33\x1a\
\xf6\xbb\xbd\x37\xbb\x96\x7d\xe4\x6f\x38\xd9\x32\x2b\x2e\xa8\x4d\
\x8c\x20\x1e\x03\x70\xf2\xfc\xd3\x8e\x1f\x86\xda\x88\x7e\x6a\x65\
\x26\x0d\xc1\x48\x0c\xec\xbc\xfa\x1f\x4f\x13\x18\x90\x92\x84\xea\
\x06\xff\xac\xec\xe7\xf6\xfa\x67\xbe\x5a\x73\xb3\xec\x66\x92\xce\
\x6a\xf0\x0f\xfc\x55\xf9\xda\xc3\x87\x4e\xfe\x45\xe5\xbd\xf7\x94\
\x6a\xe2\x54\x07\xa3\x1a\x4f\x03\xb0\x03\x3e\x75\x70\xf6\x39\x30\
\x00\x27\xdb\xdf\x72\x38\xc7\x10\x54\xa0\x21\xa2\xc6\x80\xee\x04\
\x7a\x9d\x08\xa5\xc8\x49\x76\x01\x00\x9a\x83\xe1\xb4\xad\x87\x9b\
\xfe\x72\xfe\x1f\x2a\xd6\xca\xae\x26\xe9\xac\x03\xff\xe8\xdf\xee\
\x1e\x91\xb9\xec\xc3\xa3\x27\x8e\x79\xe7\x99\x1c\x77\x26\x1b\x9d\
\x03\x7a\x4c\xb7\x56\x05\x75\x9d\xdf\xa7\x03\x1f\xf6\xd7\x12\xf0\
\x6b\xda\x6e\x07\x7a\x0b\x9f\x20\xdc\x3e\x8a\x0f\xfc\x11\x33\xe8\
\xa9\x01\x74\x9d\x74\x46\xa0\x0a\x97\x00\x80\xbd\xc7\x9a\xe7\xe5\
\xff\x76\xf7\xd1\xab\xfe\x52\x35\x42\x76\x39\x49\x67\x05\xf8\x47\
\x3e\x5b\x7e\x73\x75\xbd\xf7\xb3\x36\x6f\xdb\xa0\x98\x3a\x0f\x74\
\xa0\xc2\xf3\xfb\x55\x2b\xe8\xd5\x38\x3e\x01\x3b\xa0\xdb\xad\x73\
\xc0\x36\x40\x4a\x1d\x1d\x7f\x6a\x48\x15\x58\x83\xb3\xca\xaf\x31\
\x05\x8e\x03\x10\xad\x1d\xf7\x05\x06\x7d\x78\xac\xf9\xb3\x19\x2f\
\x55\x48\x33\x40\xd2\x17\x1b\xfc\x85\xbf\xda\x71\x73\x4d\x5d\xe3\
\xcb\xd4\xdf\x9e\x66\x02\xa0\xa3\xf3\x8e\x03\xb7\xca\xab\xfe\x36\
\x1a\x80\xc9\x37\x40\xcd\x66\x82\x05\xe1\x76\x9b\x68\x07\xeb\xc2\
\x0a\x05\x2a\xda\xa3\x16\xe9\x6f\xa7\xf2\x53\x0a\x24\xbb\x88\x01\
\xfc\xd8\x4e\x82\x16\x7f\x38\x6d\xdb\xc1\x86\x97\x8b\xd6\xec\x5b\
\x2c\xbb\x9e\xa4\x2f\x24\xf8\x47\x3c\xfd\xf1\xd2\xaa\x23\x0d\x2f\
\x23\x12\x25\x26\xbb\x5c\x8d\xe7\xc0\x83\x8d\x3a\x0f\x33\x63\xb0\
\x30\x10\xd8\x6f\x73\xc2\x3f\xa5\xce\x8c\xc1\xe2\x27\x30\xef\xff\
\x8c\x79\xfd\xed\x9c\x7f\xbc\xca\xaf\x02\x18\x90\x9e\x64\x02\xbd\
\xc6\x04\x18\x23\x88\xaa\x64\x67\x45\xfd\xb2\x31\xab\xf6\xfe\x4d\
\x76\x3f\x49\x5f\x28\xf0\x17\x2c\xff\x70\xd5\xc1\x23\x8d\x0f\x22\
\x12\x25\x66\xc7\x9e\xca\x31\x00\xd1\x9b\xaf\xda\x83\x1d\x36\xce\
\x3d\xdb\x26\x98\x13\x80\x35\xbc\x67\xb1\xf9\x89\xf5\xb8\xd8\xb1\
\xe2\x3a\x05\x42\x2a\xc2\xec\x1a\x89\x48\x7f\x10\x08\x92\x9f\x35\
\xaa\x9d\x58\x51\x71\xf4\xc6\x91\xbf\xdd\xbd\x45\x76\x41\x49\x5f\
\x08\xf0\x17\x2c\xdd\xbe\xaa\xb6\xae\xf1\x0e\xab\x5d\x4e\x63\x9d\
\xde\x04\x28\x27\xaf\x7e\x0c\x61\x76\xa0\xe7\x01\x0c\x33\xe8\x79\
\xcd\x81\x47\x2f\x11\x8e\xd5\xb7\xc7\x2d\xee\xb1\x6e\xda\xd7\x6e\
\x48\x7f\xcd\x5d\xe0\x2c\xfd\x63\xc0\xe7\x84\x3e\x08\x31\xee\x4f\
\x08\x6a\x0e\xd4\xcf\x1c\xfd\x9b\x5d\x92\x01\x48\x3a\xb3\xc1\x7f\
\xee\xcf\x3f\xba\xb9\xf6\x84\xf7\x76\xb3\x94\x86\x35\x9f\xde\x49\
\xdd\x77\x0a\xdf\x59\x8e\x53\x05\xe7\x9f\x83\xc9\xc0\x03\xbd\xa3\
\xb0\xbf\x9d\x39\x60\xe3\x08\xac\x0e\x9a\x1d\x7f\xf1\xa4\x3f\x5c\
\x0a\x03\xbe\x62\xa8\xfe\x31\x86\x60\xac\x1f\xa8\x3d\x31\xb3\xe8\
\xc5\xcf\xa5\x0f\x40\xd2\x99\x09\xfe\x71\xcb\x3f\xbc\xb9\xf2\xf0\
\xc9\x97\x11\x8a\x10\x1e\xe7\x56\x90\x3a\xc5\xea\x39\x10\xda\xed\
\x13\x1d\x7a\xf1\x62\xfe\x1d\x69\x08\x66\xd4\x1b\x60\xb4\x65\x12\
\x02\x23\x88\xa8\xf0\x45\xd5\x84\xa4\xbf\x2b\xc5\x6d\x0b\x76\xcb\
\xba\xaa\x62\xe7\xe7\x47\x96\x4d\x7e\xe1\x33\xc9\x00\x24\x9d\x59\
\xe0\x3f\xef\xe7\x1f\x8d\xd8\x77\xc2\xf7\x22\x89\x44\x89\x05\x98\
\x10\x80\x67\x91\xd2\x1c\x63\x50\xed\x18\x05\xe2\xd8\xf9\xb0\xb1\
\xeb\x01\x8b\x73\x50\xdf\x69\x31\x13\xe2\xad\xc3\xde\xac\x00\xf0\
\x61\x20\x6a\xda\xe9\x24\xfd\x29\xe1\x3c\xfe\x84\x00\x0a\xe7\xf8\
\x23\x56\x06\x50\x5e\x73\xe2\xa9\xe9\x7f\xdc\x3f\x51\x76\x49\x49\
\x67\x0c\xf8\x2b\x8e\x37\x95\x27\xb7\x05\xd3\x14\xca\xd4\x5d\x31\
\x7b\xcf\x2e\x17\xdf\x04\x28\x51\xd5\x77\x48\xf3\x85\x18\x09\x88\
\x93\x33\x00\x9b\x68\x00\x71\x78\x81\x84\xec\x7e\xe3\xe4\xf6\x90\
\xca\xf1\x24\x2d\xaf\xdf\x4e\xfa\x53\x1e\xf0\xa2\xe4\x57\xc4\x7d\
\x0a\x10\x68\x57\x76\x1c\x38\xfe\x89\xec\x92\x92\xce\x08\xf0\xe7\
\xfd\xbf\xcd\x65\x29\xcd\x01\x0f\xa1\x14\x0a\xa5\x9c\x99\x6c\x03\
\x42\x0b\x50\xf9\x6d\x80\xbd\xad\x0f\x98\x55\x7f\x24\xa8\x11\x00\
\x16\xbf\x83\x45\xe3\x8f\x97\x08\x44\x9d\xfd\x04\x2a\xc5\xc1\xb0\
\x20\xf9\x61\xf0\x39\x3d\xeb\x8f\xc2\x06\xf8\x31\x89\xaf\x18\xda\
\x80\xa2\xc4\xb6\x07\x5b\x02\xee\x61\x2b\x3e\xa9\x91\xdd\x52\x52\
\x9f\x06\xff\xa8\xff\xd9\x5a\xd2\x5e\xdf\x34\x4b\xa1\x80\x8b\x01\
\x9f\x50\x0a\x97\x1a\x2f\x6d\x17\xf6\x51\x00\xbb\x7c\xfe\x78\x61\
\x3e\xc4\x89\xed\x8b\x1a\x84\x08\x6c\xea\x90\xe0\x63\x67\xf7\x13\
\x58\x39\x07\x01\x76\x07\x23\xa0\x2c\xcb\x4f\x8c\xfb\xeb\xcc\x20\
\x27\x2d\xc9\xac\xee\x13\x2d\xe5\xc1\xa2\xf2\x13\x30\x06\xa0\x2d\
\x1f\x3e\x7c\x72\xc4\x84\x55\x7b\xff\x57\x76\x4d\x49\x7d\x12\xfc\
\xe3\x9e\x7c\x6f\xc4\x89\x63\x4d\x3f\xd7\xbd\x7b\x04\x80\x42\x29\
\x5c\x14\x50\x40\x39\xf5\x1f\x0e\x00\x86\x43\x86\x1f\x9c\x93\x7b\
\xc4\xf8\xbd\xdd\x08\x3f\x4e\x59\x83\x8e\x19\x80\xb0\x32\x2a\xd3\
\x36\x61\x99\x1d\xa3\x86\x55\xcb\xa9\x62\xce\x3f\x01\xb5\xda\xfb\
\x62\x8b\x49\x7e\xf3\xf2\x9e\xea\x63\x77\x5d\xf6\x52\x85\xb4\xff\
\x25\xf5\x3d\xf0\x1f\x6d\xf6\x97\x2a\xc1\xb0\x1b\xba\xba\x4f\x29\
\x5c\x6c\x19\x14\xb0\xd8\xff\x40\xc7\xb1\x78\x3e\xa6\x6f\x07\x5a\
\x8b\x24\xb7\xbb\x2e\xcc\xfb\xed\x7c\x0a\x16\x22\x09\x55\x00\x8b\
\x4c\x61\x0f\x1b\xe2\x8b\x9a\x94\x02\x23\xfd\xd7\x50\xfb\x39\x70\
\x53\x08\x40\xd7\xf7\x09\xcb\xa1\x08\xaa\x8e\x37\x6f\x94\xdd\x53\
\x52\x9f\x02\xff\xc8\xc7\xdf\xbd\x99\x1e\xf7\x16\xf1\x12\xd8\xc2\
\x00\x40\xa1\x74\xe4\x5d\x87\x83\x94\xb6\x55\xe9\xed\xea\x02\x20\
\x48\x7f\xd5\x7e\x3c\xbf\xb8\x29\xbd\x82\x4a\x4f\xc5\x61\x82\xe0\
\xe0\x2f\x00\xaa\x39\xe9\xaf\x87\xfd\x9c\x78\x0b\x14\x85\x35\x02\
\x28\x2e\xb3\xfd\x1f\xdb\xae\x70\x5a\x80\x82\x63\xf5\x4d\x79\x53\
\x57\xcb\x1a\x00\x49\x7d\x08\xfc\x4d\x0d\xcd\xbf\x35\x9b\xe3\xd4\
\xc2\x00\xdc\x2a\x85\x02\x5d\x13\x10\xbc\xff\xaa\x53\xdc\x1f\x66\
\xff\x80\x08\x60\x0a\x7b\xe6\x60\xd9\xcf\x1d\xa7\xd2\xf8\x0c\xc3\
\x02\x54\x62\x55\xf5\x89\x03\xa0\xc3\x14\x5a\xce\x8f\x7d\xd8\x6f\
\x6c\xba\xdb\xb8\xa6\xae\x01\xa8\xd4\x0c\x74\xdd\x1c\x10\x19\x00\
\x63\x02\x9f\x1d\x6e\xfc\xa9\xec\xa2\x92\xfa\x04\xf8\x07\x3d\xf4\
\xce\x2a\xd5\xd7\xe6\x89\x17\x6f\x57\x74\x7b\x97\x6d\x23\x16\xc9\
\x29\x48\x54\xbb\xd4\x5e\x3b\xf5\x1f\x36\x0e\x43\x38\xc4\xf3\xe3\
\x39\x01\x45\xa2\x36\xc0\xb6\x4d\xf6\xb1\x9e\xbb\xb7\x3d\x6a\x0a\
\xfb\x69\xa7\x1b\x8e\x3f\x4b\x52\x8f\xa2\x98\x99\x01\xbf\x6e\x62\
\x00\xda\x6f\xa0\x35\xe0\x1e\xfd\xdc\x1e\x99\xfe\x2b\xa9\xf7\xc1\
\x8f\x93\xbe\x05\xaa\xe0\x88\xe3\x13\x5b\xf4\x70\x9f\xc2\xec\x7e\
\xde\x0c\x88\x9f\xb8\x83\x04\x92\x79\xc4\x08\x00\xec\x43\x88\x10\
\xa4\xbf\xa5\xd0\x27\xde\xb8\x7d\x34\xb1\x11\xc1\xd8\x33\xd5\x85\
\xec\x1d\x7f\xf6\x5f\x9a\xe5\xf5\xf3\x00\x27\x8a\x96\x06\x1c\x47\
\x03\x38\x70\xb4\x71\xa6\xec\xa6\x92\x7a\x15\xfc\xf9\x8b\xdf\x5a\
\xd5\x1a\x08\xb9\x41\x69\x4c\x2b\xd7\x01\x46\x55\x5e\xd2\x53\xe6\
\xf9\xd7\xe3\xfe\xba\xf7\x1f\x36\xb1\x78\xd8\xa4\xf8\xa2\x63\x4d\
\x41\x04\xb7\x5d\xa1\x90\xe9\x1c\x41\xa3\xb0\x93\xec\xb6\x3c\x41\
\x98\xee\xc7\x94\x34\x44\x80\x28\x45\x7d\x84\x82\x0a\x27\xeb\x96\
\x8d\x25\xac\x67\xb2\xfd\x39\x06\x60\x6b\x02\xb0\x16\x8a\x62\xe8\
\xef\xf7\xbe\x27\xbb\xaa\xa4\x5e\x03\x3f\x3d\xd1\xbc\xc0\x58\xa1\
\x50\x85\xf9\xf0\x28\x35\xab\xfa\x7a\xf8\x4f\x89\x15\xd5\x39\x48\
\x78\xc0\x26\x36\xef\x74\x0c\xb5\x32\x0c\x4b\xb9\xae\x83\x77\xdf\
\xb1\xde\xdf\x2e\x03\x50\x9c\xe3\x0b\x8e\x1a\xc1\x67\x4c\xfa\x8b\
\xaa\xff\x90\x14\x17\x8c\xf4\x5e\x68\x25\xcd\x16\xe7\x9f\xc8\x00\
\xf4\xe6\x32\x96\x29\xc5\x91\xca\xfa\xe9\xb2\xab\x4a\xea\x15\xf0\
\xe7\xff\xe4\x5f\xab\xda\x82\x9a\xd4\xe7\x25\xb7\x2a\xac\xf3\x9d\
\x5f\xd7\x00\x14\x21\x0a\x60\x0b\x76\xa7\x12\x5f\x13\x43\x10\xc0\
\x2a\x3a\x00\xc5\xeb\x02\xce\x75\x06\xa0\xce\x63\xfa\xc1\x0e\xe4\
\x7c\x3c\xcf\x3c\xe9\x5f\x9b\x43\xcc\x3f\x29\x56\xd5\xc7\xef\x25\
\xce\x0c\x80\x97\xf8\xba\x06\x00\x02\x84\xa3\x40\x38\x8a\xfe\xbf\
\xd9\xf3\xb9\xec\xae\x92\x7a\x1c\xfc\xc9\xcd\x6d\xf3\x41\xf9\x8c\
\x36\x43\xf2\xaa\xd4\x1e\xc4\x84\x17\xa8\xba\x36\x10\x37\x59\xc7\
\x81\x39\x58\x7e\xed\xf2\x05\xec\x62\xfd\xb0\x99\xad\xd7\xc6\xec\
\xb0\xad\xe8\x23\x56\xae\x40\x04\x06\xa3\x93\x0a\x54\x84\x54\x5b\
\xd5\xdf\x74\xb2\xaa\xab\x40\x36\x0c\xc0\x25\x48\x7c\x17\x73\x04\
\x86\x23\xb1\xbc\x80\xc6\xe3\xbe\x31\xb2\xbb\x4a\xea\x51\xf0\x0f\
\x7b\xe8\xed\x9b\x7d\xde\x36\x8f\x16\xca\x53\xad\x80\xe5\x7d\x00\
\x9c\xfa\xcf\x4b\x7e\x43\xfa\xdb\x14\xff\x38\xda\xf4\xb0\x8f\xe3\
\xf3\x24\xd6\x04\xe8\xa8\xa3\x36\xde\x79\x71\x5c\x01\xc0\x41\x2b\
\x80\xd9\x3c\xb0\x35\x07\xcc\xc7\x1e\x08\xdb\xab\xfe\x16\xc6\xa1\
\x03\x1f\xa2\x0f\x80\xb7\xf3\xd9\x7a\x38\xaa\xdd\x4e\x67\x0e\xad\
\x01\x32\xea\xa5\xaa\x95\xb2\xcb\x4a\xea\x31\xf0\xc3\xe7\x7f\x12\
\x30\x66\xa6\xd1\x3d\xfc\xd4\xae\x2a\x4f\x30\xb7\x75\x7b\xdf\xc5\
\x36\xe8\x0c\xc0\x02\xca\x78\x75\xf9\x80\x8d\x44\xb7\x63\x12\x88\
\x13\xde\xb3\xab\xf1\x77\xd0\xf5\x1d\x47\xfa\x75\xfe\x44\xe1\x30\
\x45\xc8\xae\x7c\x20\xd9\xc5\xe5\xf5\x0b\x92\x1f\xd0\xb6\xeb\x52\
\xdf\xe5\x32\x96\xa3\x2a\xf3\x11\x70\xd9\x7f\x8a\x82\xea\xa3\xbe\
\x1f\xc8\x2e\x2b\xa9\xc7\xc0\xdf\xda\xd4\x32\x4a\x1f\x96\x9a\xe8\
\xe0\xa5\xd6\x4a\x3b\xd5\x21\x24\x47\xa8\x91\xfb\xaf\x13\x71\x1a\
\xb2\x9b\x67\x06\xfc\xba\x69\xbb\x8d\xe3\x0f\x71\x4a\x83\x4d\x38\
\xa7\xd6\x59\x79\x2c\x37\x8a\xb7\xd9\xc1\x01\x48\x81\x7d\x21\x15\
\x62\xc2\x8f\x51\xb1\xc7\x81\x5d\x64\x00\x10\x24\x3e\x00\x84\x22\
\x36\x9a\x81\x02\x78\x5b\x93\x64\x97\x95\xd4\x23\xe0\x1f\xf2\xe3\
\x37\x97\xd2\xb0\x4a\x74\x7b\x5d\x05\xd1\x6a\xd8\xf5\x3e\x2f\x38\
\xf1\x54\x1e\x63\x0c\xac\xba\xda\x7f\x4e\x46\x0a\xbe\x55\xd0\x1f\
\x4f\x4c\x2b\xc0\x93\xd3\x46\x62\xe9\xa5\x05\x78\x8a\xb5\xbb\xc7\
\x0c\xc4\x57\xf2\xb3\x1c\x32\xf8\x1c\x46\xf6\x81\x4d\xc8\x90\x3a\
\xe0\xd8\x71\x70\x50\x9b\x97\xe6\x1d\x7a\x76\xaa\xbf\x03\x8f\xa8\
\x0d\xab\xc8\x72\x11\x4c\xc9\x4a\xc2\xfc\x21\xa9\xb8\x75\x78\x3a\
\x9e\x1d\x9b\x85\x9f\x8f\xf3\xe0\xe7\xe3\x3c\xb8\x73\x68\x26\x46\
\x66\x25\x0b\xa5\xbd\x82\x19\x40\x08\x10\x0c\x9b\x43\x7e\x42\x6d\
\x80\x67\x75\xc5\x07\x67\x70\x7f\x2b\x00\x30\x9b\xfd\x4a\xea\xcb\
\xe0\x4f\x6a\x0b\xde\x44\xa8\x1a\x4b\xda\xd1\x6b\xd5\x09\x93\xe8\
\x31\x06\x60\x1b\x7a\xa3\x18\xde\x2f\x15\x4f\x5d\x35\x0e\xe5\x8f\
\xce\x45\xe5\xd3\xd7\xe0\xc5\xc5\x97\xe1\xa1\x5b\x27\xe2\xa1\x5b\
\x26\xe0\xc1\x5b\x26\xe0\xa1\x6f\x9f\x8f\x87\xbe\x35\x1e\xcf\xfe\
\x70\x2a\xfe\xf1\xe0\x74\xd0\x67\xae\xc1\x27\xdf\xbb\x18\x0f\x4f\
\x1c\x02\xb7\x5b\x31\xc0\x66\x51\xd7\x29\x96\x4e\x1e\x02\xba\xe4\
\x32\xd0\x47\xa7\x83\xfe\xf7\x34\xd0\x47\x2e\x05\x7d\xf8\x62\xd0\
\x07\x2f\x02\x7d\x60\x2a\xe8\x4f\xa6\x82\xfe\x78\x32\xe8\xa2\x49\
\x5a\xbb\x6f\x22\xe8\x3d\x13\x40\xef\x3a\x1f\xf4\x87\xe3\x41\xbf\
\x3f\x0e\xf4\x7b\x63\x40\xbf\x7b\x2e\xe8\x77\x0a\x41\xef\x28\x04\
\xbd\x7d\x34\xe8\xc2\x51\xa0\x0b\x47\x61\x6e\x6e\x0a\x57\xd2\x6b\
\x83\x7c\x9d\x21\xb8\x15\x3c\x5e\x90\x81\x3d\x33\xf3\xf0\xd2\x95\
\x79\x78\xec\x12\x0f\x6e\x1d\x9f\x89\x1b\xcf\x4d\xc7\xdd\x53\x3d\
\xb8\xff\xd2\x5c\xdc\x7f\x69\x2e\x9e\xbb\x6e\x28\x0e\xdc\x56\x08\
\xef\x1d\x63\xb1\xfe\x8a\xa1\xb8\x69\x70\x86\x55\x0b\x08\x86\x59\
\x22\x90\x62\x13\x11\xd0\x96\x7d\x4d\xfe\xa9\x9d\x04\xdd\x0a\x00\
\x65\xb0\x1a\x4a\x14\x40\x39\x80\xd5\x00\x8a\x01\x78\xba\xa8\x6f\
\x79\xd8\xf5\x4a\x01\xd4\xb0\xfb\x54\x03\xd8\xcc\x7e\x29\x00\x2f\
\x7b\xa6\x25\xdd\xc4\x10\x96\xb0\xeb\x97\x75\xd1\xf5\x67\xb3\x6b\
\x76\xa6\xcd\x3e\xc5\x6f\xbb\xa4\x9b\xda\xec\x84\xc0\x1f\x68\xf6\
\x8f\xd0\x80\xaf\x32\x6f\xbd\x31\x34\x35\xb1\xa8\xff\x34\x16\xff\
\x2f\xec\x97\x8a\xd2\x3b\xa7\x61\xd7\x8a\x6b\x71\xd7\xd7\xc7\x63\
\xd4\xd0\x7e\x09\xbf\xf1\xe4\x31\x39\x78\x72\xc1\x04\x9c\x78\x64\
\x26\x96\x5e\x34\x0c\x49\x49\x2e\x98\xb5\x81\x5e\x60\x91\xa6\xba\
\x7f\x03\xf4\xbf\x1c\xd3\x0f\xbe\xeb\x06\xe1\xd1\x4b\x3c\x18\x3f\
\x20\x31\x8d\x3c\x3b\xcd\x85\xeb\xce\xcf\xc6\xab\x5f\x2b\x40\xf5\
\xb7\xce\xc5\x4d\x43\x32\xb4\x8b\x86\x23\x5a\x8b\x31\x17\x31\xd7\
\x9f\xfd\xb6\x06\x4f\x45\xf5\x2f\x60\xe0\xab\x06\x70\x1f\x80\x59\
\x0e\xc7\x4d\x02\xb0\x00\xc0\x8b\x00\x9a\x18\x23\xe8\x2c\x58\x3c\
\xec\xfc\x1a\x76\xbd\xeb\x01\x8c\x70\xfa\x1c\xec\x99\x1e\x63\xcf\
\x58\xc6\x77\xce\xd3\xa4\x02\x76\xdd\x59\xac\x2d\xe9\x22\xf0\x3f\
\xd6\xc9\xb6\x99\x7d\xdb\x52\xc6\x14\x3b\xa2\xc7\xba\xa9\x75\x0c\
\xfe\xa1\xf7\x6f\xb8\x19\xfe\x90\x9b\x07\xbd\xa2\x0f\x5b\xc5\xd9\
\xf2\x1a\x2c\x0c\xd1\xfc\xc8\x8c\x42\x7c\xf2\xec\x75\xb8\xe2\xd2\
\x73\x4e\x4f\x6c\x64\x26\xe1\xc1\x9b\xc6\x62\xdf\x5d\x17\xe3\xda\
\x98\x49\xc0\x40\x48\x7b\x83\x03\x18\xba\xff\xad\x03\x53\xe1\xfb\
\xea\x60\x2c\x9a\x92\x8d\xac\x94\xce\x0f\x86\x54\xd0\x3f\x19\xaf\
\x7e\xad\x00\x5b\xfe\x6b\x38\x10\x89\x18\xa0\x17\x66\xfa\x31\x49\
\xff\x88\x8a\x82\x57\x0f\x26\xe2\xf5\x2f\x82\x26\xd1\xaf\x67\xeb\
\x3e\x00\xeb\x00\x3c\x0e\x60\x0e\x6b\x37\xb0\xf5\x75\x6c\xbf\x4e\
\x0b\xd8\xb9\xa7\x0a\x98\x12\x06\xfa\x05\x0c\xd8\x00\xb0\x13\xc0\
\x4a\x76\xaf\x39\xc2\xbd\x57\xb2\xfd\x3a\xcd\x62\x20\x29\xc5\xe9\
\x6b\x20\x22\xc0\x16\xa0\x6b\xb5\x8b\x9d\x00\xb6\x24\xd0\x6a\x85\
\xf3\xae\x67\x4c\xb1\x2c\xc1\xe7\xa9\x4d\xf0\x3e\x89\xb6\x9a\x0e\
\xc1\xef\x0a\x86\xbe\xa1\x3b\xea\x08\x57\xb5\xa7\x27\xf0\x50\x4a\
\xa1\xa8\x5a\xd3\xf1\xf8\x7f\xf3\x8b\xf0\xd0\x77\xa7\x74\x29\xe4\
\x46\x0e\x4e\xc7\x3f\x17\x5d\x84\xbb\x0a\xf3\x62\xd8\xef\x31\xe9\
\x6f\x73\x9f\x47\x47\x66\xe2\x0f\x73\xf3\x4e\x0b\xf4\x22\xcd\x2c\
\xec\x87\xc6\x7b\x8a\x90\x97\x93\x66\x2d\x06\xe2\x79\x0f\x63\x04\
\x47\x5b\xdb\x6f\x4a\x00\xf8\x65\x1c\x00\x1f\x67\x1d\x6d\x1e\xcc\
\xaa\x70\x29\x5b\x9f\xc7\xc0\x76\x03\xeb\x20\x60\xe7\x3e\xc6\x8e\
\x4b\x84\x56\x03\x78\x86\xbb\xe7\x3a\x00\x93\xd9\xb3\x94\xb0\x7b\
\x95\x09\xf7\x2e\x61\xfb\x47\x02\x58\x23\x00\xa4\x86\xed\xeb\x0c\
\x79\xd8\xb5\xc1\xbd\x8f\x1d\x43\x38\x1d\x2a\x81\x26\x45\x3b\x6a\
\x05\xec\xdf\x9b\xc3\xde\xd1\xc7\x31\xba\xf2\x04\xde\x71\x75\x82\
\xf7\x49\xb4\xad\xee\x10\xfc\xd4\xdf\x7e\x29\x5f\xa8\xa3\x6b\x00\
\x0a\x97\xb3\xaf\xea\x08\xa1\xc0\x3f\xee\x9a\x81\xdb\xe6\x8d\xeb\
\x36\x1c\x36\xb7\x87\x7b\x49\xe2\x1b\xf4\xbb\x09\x1e\x3c\x3e\x3d\
\xb7\x5b\xae\x9d\x93\xe1\xc6\xbe\xdb\xc7\x23\xcf\x93\xea\x3c\xcc\
\x37\xa3\x50\x4b\x30\x3f\x81\x0e\xa3\x83\x70\x21\x03\xb8\x37\x81\
\xc7\x28\x65\x1d\x64\x21\xd7\x49\x13\x39\x6f\x35\x34\xc9\x0a\x76\
\xde\x1c\xc6\x50\xca\x13\x7c\xfd\x1a\x06\xcc\xc9\x30\x34\x81\x6c\
\xc6\x24\x3a\x03\xd8\x12\xee\xfd\x8b\x19\x23\xd2\xb7\x7b\x7a\xa9\
\xfb\xe8\xef\x52\xc0\x3d\x8f\xfe\x8e\xbd\xf2\x4c\x8e\xe0\x0f\xb5\
\x06\x06\xe8\x7e\x3c\xc2\x06\xe7\x50\x28\x05\x51\x61\xb1\xff\x7f\
\x73\xd3\x64\x5c\x31\x7d\x68\xb7\x3d\xe4\xc3\x7f\xdc\x8b\x3f\x1e\
\x6c\xe2\x59\x53\x8f\x33\x82\x7b\x86\xa6\xe3\xbb\x45\xd9\xdd\x7a\
\x0f\x9d\x01\x68\xf9\x01\x70\x1e\xef\x3f\x10\x8a\xa7\x76\xcc\x83\
\x66\xc3\x03\x9a\xc4\x5f\xdd\x89\x47\x59\x0d\x4d\x22\xad\x49\x00\
\x7c\x3c\xf0\x77\xb2\xce\x5d\xd6\xc9\x4f\x50\xce\xdd\x17\x1c\x80\
\x3b\x03\x7e\x30\x90\xd5\x40\x73\x76\xea\xd7\x2b\x41\xef\x92\x97\
\xfd\x47\xfc\x3b\xae\xe8\x33\xe0\x1f\x7e\xff\x86\x11\x6e\x7f\xc8\
\xad\xab\xfa\x3a\x03\xd0\x42\xd6\x34\x16\xef\x27\x94\xe2\xeb\x85\
\x03\x71\xdb\x8d\xe7\x75\x78\xa3\x03\x87\x5a\xb0\x6c\xf5\x4e\xcc\
\x7f\x74\x13\xb2\xee\x2c\x45\xc6\xf7\xd7\x21\xfb\xbe\xd7\x71\xc3\
\x13\xef\xe2\x91\x55\xe5\xa8\x3e\xd2\x6a\x7b\xde\x4b\x1b\x0f\x62\
\xe9\xae\x3a\x87\x91\x79\xec\xe9\x4f\xef\xd7\xe3\xca\x17\xf6\x62\
\xee\x8b\x9f\xe1\xca\x35\xfb\x30\xf7\x0f\xfb\x71\xd5\x4b\xfb\x31\
\xf7\x4f\x95\xb8\xf2\xcf\x55\x98\xfb\x97\x03\xb8\xf2\x95\x6a\xcc\
\x7d\xad\x06\x57\xbd\x56\x8b\xb9\x7f\x3b\x88\x2b\xd6\x1e\xc2\x95\
\xa5\x87\x31\x77\xdd\x11\x5c\xbd\xee\x08\xde\x69\x0e\xc7\xbc\xf1\
\x63\xfb\x25\xe1\x57\x73\xf2\x3a\xbc\x6f\x73\xbb\x8a\xbf\x57\xf8\
\xb1\xe4\x23\x1f\x7e\xf8\x6e\x23\xa6\xbe\x7e\x0c\xe4\x4f\x07\x31\
\x77\xed\x61\x3c\xb4\xf1\x18\x6a\x9b\xc2\x09\x31\x80\x8d\xd7\x8d\
\x66\x60\x87\x90\x23\xc0\x5a\x38\x8a\x4b\xd6\xd7\x4d\x8f\x03\x7e\
\x9d\x4e\xa7\x53\xd5\x30\xe0\x7b\x3b\x60\x34\x3c\xf0\x67\x23\x31\
\x4d\xa1\x23\x2a\x86\xe6\x0f\x58\xd8\x09\xe6\x55\xcc\x31\x0d\xfd\
\xfd\xcb\x60\xa8\xff\xc5\xe8\x1b\x54\x0c\xc3\x1f\xd0\xd5\xfe\x88\
\xce\x83\x9f\x84\x23\x37\x80\x53\xf3\x15\xce\xc3\x1f\x6b\xa0\xc8\
\x4f\x71\xe3\xe9\x07\x3a\x2e\x37\x5f\xf6\xfc\x0e\xcc\x7c\x6a\x23\
\x96\x6e\xad\xc4\xc6\x3a\x2f\x28\x1b\xe5\x27\xd4\x1e\xc1\xeb\x47\
\xbd\x58\xbe\xe3\x10\x46\x2d\x2d\xc3\xf5\x4f\x6d\x43\x75\x9d\xc1\
\x04\x76\x54\x78\x71\xeb\x9b\x9f\xc3\x94\x02\xcc\xff\x3a\xd0\x21\
\x5f\x08\x9b\x4e\xb4\x61\xe3\x09\x3f\xfb\x6d\xc3\x3b\x27\xfc\xda\
\xfa\x49\x3f\x36\x9e\xf4\x63\x53\x43\x00\x1b\x4f\x06\xf0\x4e\x63\
\x10\x1b\x1b\x83\xd8\xdc\xd0\x8e\x4d\x0d\xed\xd8\xd8\x10\xc4\xdb\
\x8d\xed\x40\xac\x60\x87\xe2\x85\x0b\x3b\x56\xf5\xdf\xaa\x0e\x20\
\xfb\x8d\xe3\x98\xbf\xd3\x87\x9f\xd5\x07\x70\xa4\x3d\x1a\xdb\xb7\
\xb1\x31\x88\x65\x55\x3e\x14\xbc\x56\x8d\xef\xbf\x59\x07\x5f\x50\
\x8d\x7b\xad\x2b\xc6\x7b\x70\xeb\xa8\x1c\xee\x0f\x11\x1a\x80\xda\
\x60\xe4\x27\x0e\xa7\xeb\x9d\x68\x4b\x17\x01\xd1\x89\x3c\x1c\x30\
\x7d\x09\x30\x8a\x53\xa5\x12\x74\x4e\x6b\x59\xc2\xbd\x7f\x19\xb7\
\x5d\xbf\xd6\x88\x3e\xc4\x00\x96\x70\xcb\xb3\xfb\x04\xf8\x93\x42\
\x91\x59\x00\x4c\xde\x7d\x23\x4f\x9f\x49\x7f\x15\xb8\x63\xda\x68\
\xe4\x64\xa7\xc6\xbd\xc1\xbd\x3f\xdb\x86\xa7\xb6\x56\xa2\x25\x68\
\x84\xb1\x5c\xd4\xb8\x79\xcc\xa7\x00\x60\xfd\x51\x1f\xc6\xfe\xf2\
\x3d\xbc\xf4\x56\x0d\xaa\x8f\xb6\xe1\xe2\xe7\xff\x6d\x80\x9d\x76\
\xc6\xd3\x47\x9d\xe7\xeb\xb3\x1b\x3f\x80\x58\x8f\xb9\x2a\x37\x05\
\xd3\x87\xa5\xc5\xff\x07\x3f\xf4\xe2\x4b\x1f\x37\x02\x11\xad\x6c\
\x37\x10\xa6\x08\xb3\x71\x3b\xb4\x19\x79\x68\x4c\x75\x7f\xee\x50\
\x2b\xc6\xbe\xb4\x1f\x5e\x7f\x24\xfe\x35\xe7\x0e\xe3\xa6\xf6\x16\
\x1e\x8c\x00\x4d\xfe\xd0\x25\xbd\xdc\x71\x4b\x60\x96\xb0\xe5\x7d\
\x00\x4c\xc5\x30\xc2\x8a\x2b\x84\x7d\xab\x61\x48\xda\x25\x7d\x04\
\xfc\xa5\xdc\x72\x51\x9f\x00\x3f\x89\xaa\x39\x8a\x4a\xd9\x10\x76\
\x94\x9b\x5d\x9a\xc6\xec\x7f\x02\x8a\x9b\xae\x8d\xef\xe0\x7b\xe4\
\x57\xdb\xb1\x7a\x4f\x9d\x15\x70\x71\x40\x19\x0e\x45\x71\xeb\x1b\
\x9f\xe1\xca\x5f\x7f\x88\x48\x28\xca\x3d\x14\xb1\x9f\x84\x33\x0e\
\xee\x8d\xf3\x9c\x8a\x7c\xb8\x15\x87\xeb\x2d\x29\xca\x89\x7b\x9b\
\xe7\x77\xb5\xe0\xf1\x9a\x36\x4b\x75\xe0\x81\x08\x45\x0a\x6f\xab\
\xeb\xbf\xed\x61\x1c\x6b\xf0\x63\xcc\x9a\x7d\x71\x19\xc0\xa8\x81\
\xa9\xb8\x6a\x68\x96\x39\x23\x90\x13\xff\x51\x95\xa6\x77\xf0\xdf\
\x76\x77\x67\x2a\x61\xbf\xb5\x7d\x08\x4c\xc5\xdc\x33\x95\xda\xfd\
\x9d\xec\xb7\xaf\x48\x7f\x6f\x9f\x03\x7f\xa8\x35\x38\x49\x9f\x84\
\x03\x7c\x5c\x9f\x9b\xa2\x6a\xde\x98\x41\x18\x5d\xe0\xec\x8f\xd9\
\xf4\xc1\x61\xac\xfc\xb8\xc6\x0a\xb6\x98\x0f\x01\xb1\x5c\x01\xdd\
\x8f\xc0\x8b\xe1\xea\xb6\x90\xf9\x5c\xa7\x89\x37\xec\x98\x97\xa5\
\x1e\x20\x1e\x77\xb0\xdb\xa5\xed\xcb\x48\x75\xc5\x95\xfa\x87\x9b\
\x23\xf8\xce\x1e\x9f\xed\xe5\xf6\x85\x28\x06\x24\x29\xe6\x91\x7f\
\x22\x51\xc0\x1f\x02\x40\x70\xa2\x39\x84\x25\x6f\x1d\x89\xfb\x1e\
\xdf\x2c\xf4\xd8\x8f\xf7\x4f\x80\x68\x5b\xc8\x29\x73\xaa\x9c\xfd\
\x66\xa3\xfb\x54\xc9\x62\x18\x52\x7f\x75\x1f\x01\xfe\x6c\x18\x49\
\x4c\x4e\xcc\xa8\x14\x46\x14\xa3\x2f\x80\xbf\x57\xc9\x16\xfc\x2a\
\x38\x15\x9f\xab\xcf\xe7\x47\xe6\x99\x32\x66\x60\xdc\x0b\xff\xa6\
\x74\x37\x07\x24\x1a\xab\x03\xd0\x31\xa2\x70\x0c\xc0\x16\xb4\x16\
\x30\x9e\xae\x77\xbf\x03\x8d\x81\x58\xc7\xef\x2f\xce\x8b\x6f\xd2\
\xfc\xa2\xdc\xe7\xc4\x7e\x00\x95\xc2\xaf\xd7\x3a\x2b\x4c\xf2\xb7\
\x06\x4d\x47\xad\xdc\xd7\x88\x03\x27\xda\x1d\xaf\x7f\xc3\xe4\xfe\
\xc6\xf5\x20\x4c\xf9\xa5\x52\x12\xa7\x83\x83\x03\xa6\xa7\x9b\x80\
\x86\x3e\x06\x7e\x1d\xf0\xbe\x38\xcf\xe4\x85\x61\x0e\xcc\x42\x2f\
\xd8\xd9\x71\xa8\xbc\x4f\x80\x9f\x77\xf4\x29\xb1\xac\x56\x23\xa3\
\x8f\x50\x8a\x71\xe7\x0e\x70\xbc\x68\x55\x6d\x33\xde\x3c\xdc\x24\
\xc2\xc1\x02\x2e\x42\xa9\x66\xf7\x53\xce\xaa\xb5\x8c\xe1\x1f\x4f\
\x65\xb7\xa7\x69\x23\xfb\x61\xd9\xa4\x7c\xad\x5d\x90\x8f\xe5\x17\
\x0c\xc4\xb2\x89\x03\xb1\x6c\x62\x1e\x96\x4f\xcc\xc3\xb2\x09\x03\
\xb0\x6c\x7c\x7f\x2c\x1f\x9f\x8b\xe5\xe3\x73\xb1\xec\xbc\x1c\x2c\
\x3f\xcf\x83\xe5\xe7\x65\x23\x25\xd5\x18\x57\x7f\x46\x7e\x7c\xf0\
\xaf\x3c\xe2\x77\x7e\x39\x00\xbb\xc2\x9c\x79\xd3\x1a\x30\xe1\x58\
\xa7\x75\xbb\x1a\x1d\xaf\xef\x49\x77\x63\x80\x27\xc5\xca\x58\x4c\
\x55\x81\x16\x2a\x83\xe1\xd9\x1e\xc1\x3a\x55\x57\x77\x72\xfd\x7a\
\x3b\xc1\x65\x8c\xf5\x22\x15\xc0\x90\xfa\x2b\x3a\x38\x76\x85\x0d\
\xc3\xe8\x2d\x9a\xc7\x2d\xf7\xf8\x77\xb4\x77\xf8\x9d\x6c\xf1\x50\
\x93\xba\x4f\x63\x43\x70\x2b\x2c\xbd\xfe\xca\x19\xc3\x1d\x2f\xba\
\xe1\xdd\x1a\x67\xa0\xb2\x6d\x6b\xef\xbc\x14\xeb\xbe\x7b\x29\xfe\
\x79\xc7\x25\x78\xf3\x8e\x8b\xf1\xf6\xc2\x8b\xb0\x69\xe1\x45\xd8\
\xbc\xf0\x42\x6c\x5e\x30\x15\x9b\x6f\x9b\x8a\xcd\xb7\x4e\xc1\xe6\
\x5b\x26\x63\xf3\x2d\x93\x51\xf6\xed\x22\xfc\x7a\xc6\xc8\x78\x9d\
\x3e\x46\x33\xcf\xcb\xc1\xe2\x2f\x0f\xc7\xe2\x6b\x86\x61\xf1\x97\
\x86\xe2\x81\xab\x87\x62\xf1\x55\xe7\x60\xf1\x95\x83\xf1\xc0\x15\
\x83\xb1\x78\xce\x20\x2c\x9e\x33\x08\x0f\xcc\xca\xc7\x03\x33\x07\
\x62\xf1\x8c\x3c\x3c\x70\xd9\x00\x3c\x30\x7d\x00\x66\xe8\xe3\xed\
\x03\x18\x9a\xe5\x9c\x46\xff\xc1\xe1\xa0\xc3\x28\x40\x06\xb8\xeb\
\x75\xaf\x5f\x20\x64\x44\x0f\x4c\xd5\x7c\xc0\xef\x6b\x7c\x71\xdf\
\x65\x60\x8a\xdb\xea\xed\x27\x00\x5a\xdb\x49\x07\x9d\x6a\x27\xc7\
\x00\x36\xc3\x48\x32\xf1\x74\x41\xbf\xd1\x9d\x6a\xe5\x7d\x00\xf8\
\x3c\x88\x7d\x09\x80\xdf\x0b\x23\xc6\x3e\x0b\xbd\x5b\x61\x58\xc2\
\x2d\x97\xf6\x09\xf0\xc7\x06\xe5\x84\xe1\x8d\xd7\x9d\x7f\x89\x50\
\x4b\x5b\x7b\x4c\x72\xab\x7c\xf5\x9f\x7e\x71\x4a\x31\xe7\xa2\xc1\
\xac\x0d\xc2\x9c\x0b\xb5\x36\x7b\x6a\x3e\x66\x4f\xd1\xdb\x40\xcc\
\x9e\xcc\x5a\x51\x1e\x66\x4d\xca\xc3\xf9\xc3\xb2\x7a\xb0\xb0\x87\
\x20\x3b\x4e\x0a\xef\x07\xc7\xda\x9d\x4e\x33\x7f\xc7\xf6\xb0\x06\
\x7e\xbd\x82\x0f\x9c\xf3\x4f\x21\xf8\xec\x64\x20\xee\x53\x5c\xd5\
\x3f\xcd\x3a\xd7\x9f\x78\x1f\xfb\x0e\x3e\x1b\x46\x26\x99\xde\xd1\
\xf5\xc2\x9d\x32\x74\xae\xd2\x0c\x30\x6b\x11\x35\x7d\x00\xf8\x05\
\x30\x72\x0d\x4a\x91\x58\xb8\x71\x89\xc3\x72\x4f\x52\x31\x0c\x6d\
\x65\x4b\x9c\x6f\x59\x80\xae\x49\xeb\x2d\x48\x08\xfc\x7a\x28\x2f\
\x36\xee\x5e\x2c\x9f\x5f\xeb\xd1\x4a\x07\x0e\xb7\xff\x54\x9d\xe4\
\x98\x08\x04\x3b\x9b\xa0\x2f\x13\xe5\xd2\x68\x27\xe4\xa7\x9c\x02\
\xaf\x20\xf6\x5c\x34\x4a\x81\xb4\x64\xc0\xed\xe2\xb0\x4f\x3a\x38\
\xcf\x20\x8f\x69\x14\x60\x7d\xfc\x3f\x62\xf8\x11\xe2\x33\x80\x79\
\xd0\x52\x6d\xd7\x09\xfb\xf4\x4a\x3a\xbd\xd2\xac\x1c\x9a\xc4\x2c\
\xea\xd3\x7f\x8e\x3d\x95\x74\x02\xc8\x35\xdc\x37\xe9\x8d\x04\x9b\
\x62\x68\x8c\x18\x30\x72\x24\x9c\x68\x01\xfb\x9f\x4e\xb7\x15\x27\
\x06\x7e\x7e\x4c\x3e\x18\xb9\xfd\x89\xd2\x90\x9c\x34\x87\x1b\xf4\
\x64\x55\xce\xe9\x93\xaf\x5d\xed\xdc\x89\x04\x28\x4c\x55\x90\x4e\
\x55\x20\x23\x05\x97\x8c\xee\x0f\x8c\xe8\x0f\x9c\x93\x0b\xf4\xef\
\x07\x64\xa5\x01\xe9\x29\x40\x72\x92\xc6\x14\x4e\x89\xa9\x38\xc4\
\xfe\x9d\xa9\x8c\x31\x81\x91\x00\x16\xc1\x5a\xc1\x07\x68\xe9\xc0\
\xf7\x01\xd8\x01\x23\xb3\xef\x4c\x20\x0f\xf7\xac\x6b\x70\x6a\x9a\
\xc8\x0a\x07\x06\xd2\x9d\x54\x04\x4d\x3b\x79\x51\xb8\x77\x0d\x7a\
\x81\x1c\xf5\xda\x98\x9d\xcf\x77\xb9\x04\x99\xc0\xe0\xdc\x0c\xc1\
\x81\xd7\x95\xd4\x73\x79\xfd\x7b\x8e\x3b\x7b\xe2\xcf\xcf\x49\x36\
\x7f\x1c\x00\x50\x08\x46\xa6\x28\xf8\xaf\x74\x05\x99\x94\xc2\x1f\
\x88\x00\x14\xf8\xc4\x1f\xc1\xfc\xec\x64\x5c\xde\x3f\x0d\xc8\xc9\
\x00\xf2\xb2\x80\x01\x59\x40\xff\x4c\x8c\x19\xe6\x89\xfb\x0c\xaf\
\xd4\xb5\x9e\x22\xd6\x1d\xa9\x86\x75\xf8\x79\x0c\x34\x93\xa1\xa5\
\xcf\xae\x81\xb9\xec\x74\x04\xeb\x9c\x35\xe8\xfb\x9a\x40\x09\xac\
\xa9\xbc\x89\x52\x19\xcc\x29\xbf\x9e\x4e\x00\x79\x76\x02\xad\x18\
\x46\x12\xd4\x0e\x98\x4b\xac\x6f\x40\xc7\xd1\x92\x35\x30\x97\x42\
\x77\xb6\xad\x4e\x08\xfc\x94\x9b\x28\x93\x38\x0c\x71\x5f\x55\xed\
\x75\x7c\xda\x8b\x2f\x18\xec\x3c\x6d\xd5\x69\xe1\x96\x8b\x99\xc7\
\xeb\xe5\x27\x82\x78\xf7\x73\x2f\xb6\xee\xf3\x69\x6d\x7f\x33\xb6\
\x56\x34\x63\x6b\x45\x0b\xb6\x56\xb4\x60\x5b\x65\x0b\xb6\x55\xb5\
\x62\xdb\x81\x36\x6c\xab\x6e\xc3\xb6\x6a\x3f\xde\xab\xf1\xe3\xbd\
\x5a\x3f\x6a\xc3\x89\x49\xfb\x69\xe7\xa4\x98\xc0\x3f\x3c\x45\xc1\
\x35\xe9\x0a\xc6\xb9\x09\x82\x14\x28\x6f\x0e\xc5\x76\x87\x83\x51\
\xb4\x45\x54\x0c\x4a\x22\xb8\xc9\x93\x84\xa2\x4c\x37\x90\xe2\x06\
\x32\x52\xf1\xd5\x61\xf1\x6b\x57\xf2\xb2\xd2\x80\xcc\x34\x20\x2d\
\x05\x48\x76\x03\x6e\xa5\xab\x98\x41\x39\x8c\xd1\x7b\x0a\x18\x33\
\xe0\x4b\x4e\x47\x30\x80\x14\xd9\x9c\xa7\x53\x41\x1f\x00\x3f\x18\
\x88\xcb\x3b\x71\xbe\x0e\x88\xce\x14\xfc\x3c\x83\xc4\xd4\xed\x17\
\xa1\x69\x55\x93\x04\x40\x17\x20\x31\x27\x5f\x0d\xcc\xa5\xd0\x9d\
\x6d\x35\x09\x81\x5f\xcd\x49\x0f\xc4\xeb\x5b\x84\x02\xbb\xf6\x1e\
\x77\xdc\x7f\xc5\xf4\xa1\xc8\x4f\xd6\xbc\xe6\x2a\xef\x9f\xe2\x80\
\x3f\xff\xd1\x8d\xf8\xda\x92\xcd\x98\xff\xf8\x66\xdc\xf8\x78\x19\
\x6e\x78\xe2\x5d\xdc\xf0\xd3\x77\x31\xef\xc9\xad\xf1\xee\x9c\xd0\
\xbf\xf2\xca\xc7\xc7\x30\x6b\xed\x7e\xcc\x2c\xdd\x8f\x99\xeb\x2a\
\x31\x73\x7d\x25\x66\xfe\xa3\x0a\x33\x5f\x3f\x80\x99\x6f\x54\x63\
\xc6\x9b\xb5\x98\xb1\x81\xb5\x7f\x1d\xc2\x8c\xb7\x0f\xe1\xf2\xb7\
\x8f\xe0\xf2\x77\xea\x50\xd9\x12\x8e\xdd\xea\xfd\xba\xa0\xe3\x3d\
\xb2\x52\x14\xdc\x91\x9f\x8a\xc1\xa9\x0a\xae\x48\x77\x61\x5c\x92\
\xf1\x6c\xff\x6e\x0b\x6b\x23\xf0\x72\x83\x88\x6e\x6e\x35\x8a\x7a\
\xc6\xa5\x28\xb8\xc9\x93\x84\xd1\xe9\x2e\x7c\x69\x70\x7c\xbf\xc2\
\xd6\xe6\x30\x90\x9a\x04\x64\xa4\x00\xd9\xe9\x40\x4e\x26\x90\xdb\
\x0f\xc8\xcb\xea\x6a\xf5\xa7\x1c\x89\x95\x9c\x7a\x39\x06\x51\xd4\
\x8b\xc0\x2f\x86\x21\xf5\x97\x74\xf2\x1a\xab\x61\x68\x3d\x25\xdd\
\xf8\xac\xfa\x40\x2a\x8b\x00\xe4\xa0\xeb\xeb\x20\x3a\x45\xb6\xe0\
\x8f\xba\x5d\xed\xe2\x41\xb1\x51\x78\xf5\x0e\xbe\xab\x2e\xee\x85\
\xef\x9c\x36\x4a\x5b\x10\x7c\x7d\xba\xd0\xde\x58\xe7\xc3\x3b\x75\
\x5e\xfc\xab\xce\x87\x37\xeb\x9b\xf1\xfa\x51\x1f\xd6\xd7\x37\x63\
\x7d\xbd\xaf\x83\x47\xee\x42\x87\xa1\x38\x58\x86\x40\x6f\x1d\x8f\
\xef\x89\xbf\x77\x7c\x16\xc6\x25\x99\x3f\xe1\xb1\x88\x8a\x66\x7f\
\xc4\x32\x89\x68\x7b\x30\x8a\xd6\x08\x9b\xc1\x97\x02\x49\x04\xf8\
\x76\xff\x64\xcc\x1d\xee\x0c\xfe\x77\x6a\x83\x40\x66\x2a\x90\xe4\
\x32\x3f\xab\x42\x80\xd4\xa4\x68\x37\xf5\x09\x2f\xac\x25\xa7\x22\
\x30\xca\xd8\xef\x24\xf4\x9e\xf4\x5f\xc2\x01\x0b\xe8\xbc\x17\xbc\
\x94\x7b\xcf\xe2\x53\xb8\xff\x22\x74\xac\x6a\x8f\x64\x3d\xcb\xc3\
\xbe\xe9\x8a\xbe\x00\xfa\xb8\xe0\xd7\xb3\xf8\x4d\x5b\x88\x39\xd0\
\xb7\x69\xcf\xd1\xb8\x17\xfe\xfe\x37\x26\xe2\xbc\x7e\xa9\xa7\xfc\
\x40\xb4\x0b\xc0\x4d\xe2\x5d\x23\xae\xc9\x60\xce\xc5\x7f\xbb\xb1\
\x1d\x07\x7d\xce\xf9\xf7\x17\xe4\x27\x63\xde\x20\xf3\x3b\x7e\xe2\
\x6b\x77\x98\x7c\x94\x62\x83\xd7\xec\x43\xb8\x7d\x4c\x46\xdc\xf7\
\x78\xfb\x64\x04\xc8\x48\xd5\x9c\x84\x03\xfa\x01\x59\xe9\x40\x6a\
\xb2\x29\x4f\xa0\x1b\xa9\x84\x03\xd6\x3c\x07\xf0\xdb\xed\xeb\x09\
\x2a\x86\x91\x6b\x90\x8d\xd3\xf3\x82\xdf\x67\xc3\x50\x12\xa1\x72\
\x74\x42\xd5\xee\x4b\x64\x0b\x7e\x77\x46\x6a\x8d\x19\xf8\x56\x53\
\xbd\xaa\x39\x88\x4d\x5b\x0f\x3a\x5e\xd8\x93\x9d\x82\xe7\xee\xbe\
\x1c\x03\x52\xdc\xdd\xf2\xe0\x24\xd1\x03\xc8\x69\x5c\x8d\x10\xbc\
\xba\xaf\x25\xee\x99\x77\x4c\xcc\xc2\x84\x0c\xed\x1d\x3f\x6a\x0b\
\x6b\x95\x7d\xe2\xf0\xe2\xfa\xd7\x0b\x45\x71\x98\x95\xfa\xfe\xb0\
\x30\x03\x23\xb2\x9c\x3d\xfd\xad\x21\x8a\xdf\x36\x84\x8c\x18\xbf\
\xdb\xa5\x69\x01\xb9\x99\xc0\x20\x0f\x2e\x1e\xd8\xaf\xaa\x9b\xfb\
\x86\x17\x86\x54\x9c\x24\xec\x2b\x15\x98\x44\x6f\x80\xbf\x3b\xa8\
\x2f\x95\xfb\xf6\x0e\xf8\x15\x97\xcb\x67\xe7\x54\xd2\x99\x00\x65\
\x82\xe7\xb9\xd7\xca\xe3\x5e\x7c\xca\x84\x3c\xfc\x7d\xd1\x2c\x5c\
\x32\x20\xb3\x43\xb8\xe9\xeb\xb3\x72\xd3\x13\x7b\xf2\xd3\x91\x7c\
\xc4\x61\xa3\xcd\x43\xfd\xee\x50\x1b\xda\x42\xce\x4e\xc0\x8c\x64\
\x05\x4f\x4d\xcf\xc5\xfc\xc1\xa9\x68\x6c\x0b\x9b\xc1\x6e\x9a\x5f\
\x40\xdb\xb6\xcf\x1f\xc6\xe2\xf3\xfa\xe1\xb2\x73\xe2\xdb\xfa\xeb\
\x0f\xb6\xa3\x55\x05\x37\xbd\x37\x17\xe3\x77\x29\x18\x98\x9e\x54\
\xdf\x03\xfd\xa3\x26\xce\xf6\x35\x1c\x60\x7a\x92\x01\xcc\x86\x91\
\x1c\xb3\x08\xf6\xf9\x8f\xa7\xda\x72\x70\x16\x16\xfc\xd8\x82\x3f\
\x92\x9e\x52\xaa\x81\x9c\x98\x00\x2f\x1e\xbc\xf9\x60\x23\x36\xc6\
\x91\xfe\x3a\x03\x78\xfb\x97\xd7\xe2\xa1\x99\x85\x18\xdd\x2f\x15\
\x26\xc3\x9f\xc3\xe1\xf4\xdc\x74\xac\xf9\xca\x78\x94\x3d\x3e\xe7\
\xb4\x5f\x8a\x12\xe1\xea\xb6\xaa\x3e\x3f\x34\x96\xf5\xab\x24\xa7\
\xb8\x30\x21\xc3\x8d\x34\x02\xbc\xb4\x2b\xbe\x1f\x22\x23\x59\xc1\
\x9d\x45\xd9\xd8\x36\x7b\x10\xbe\x99\x9f\x66\x33\x85\x18\x05\x5c\
\x04\xbf\x18\x93\x85\xea\xff\x1a\x84\x29\xf9\xf1\x47\xdf\x6e\x08\
\xaa\xd8\x72\x32\x84\xdb\x3c\x49\x18\x9d\xa2\x98\x32\xfc\x14\xd6\
\x72\x5d\xa4\x27\x26\xef\xf0\xc4\xd9\xb7\x44\x58\x2e\xe8\xa1\x3e\
\xab\xdf\x37\x5e\x01\xcf\xa9\x92\x17\x86\x36\xd3\xd7\x0a\x7e\x7a\
\x16\xfc\xe8\x97\xb6\x3f\x86\x0d\x10\x66\xef\x93\x18\xb0\x74\x66\
\x40\x09\xf0\xc8\xf3\xdb\xd1\xe4\x0b\x76\x78\xa3\x07\xef\x98\x82\
\x4f\x7e\x75\x1d\x36\xdd\x3f\x0b\x6b\xbe\x3d\x15\x8b\x67\x14\xe2\
\xd9\xeb\x27\x62\xfd\x77\x2e\xc1\xd1\x9f\x5d\x83\xcd\x3f\xbd\x12\
\xb7\x5c\x33\xaa\xe3\x27\x4e\x40\xe2\x2f\xfe\xf2\x70\xd0\x07\x2f\
\x06\x5d\x7c\xa1\x31\x79\xc7\x8f\x26\x83\x96\x4c\x02\xbd\x77\x22\
\xe8\xdd\x13\x40\xef\x1a\x0f\xfa\x83\xf3\x40\xbf\x37\x16\xf4\xbb\
\x63\xb4\x89\x3b\xf4\x49\x3b\x6e\x1b\x89\x1f\x70\x15\x7d\xbf\x3d\
\xd8\x86\x9d\xf5\xed\x1d\xde\xf7\xb2\x11\x69\xf8\xf3\x97\x07\x81\
\x16\x8f\xc4\x7b\x73\xf2\xf1\xde\x15\xf9\x78\x6f\x4e\x3e\x76\x7f\
\x69\x08\xe8\x37\x87\xe3\xc7\x17\x79\x12\x1a\xf5\xf7\xb9\x7d\x9a\
\xa3\x31\x85\x00\x57\x67\x28\x58\x90\xed\xc6\x39\xc9\x0a\x14\x85\
\x40\x51\x08\x5c\x0a\x90\xee\x22\xe5\x0e\xa7\xaf\x46\xd7\x79\xe1\
\x75\x10\xd4\xda\xec\xab\x81\x36\x46\x20\xa0\xd9\xdd\xa5\xe8\xda\
\x0a\xc2\x62\x1b\x70\x17\xc0\x5c\xc0\xe3\xed\x06\xa6\x22\x2e\x9f\
\x5d\xe0\xaf\x7e\xf2\xca\x37\x28\x21\x50\x05\xc9\xaf\xb2\x01\x25\
\xf8\x14\xd8\x8a\x96\x00\xee\x5f\xbe\x25\xe1\x1b\x4e\x19\x3f\x00\
\xd7\xcf\x19\x81\x07\x17\x5c\x80\x5b\xaf\x2d\xc4\x9c\x8b\x06\xc3\
\xd3\x2f\xf9\xd4\x9e\x9a\x90\x1e\xfb\x30\xba\x16\xf1\xdf\xff\x6e\
\x40\x55\x63\x38\xe1\xf3\xa7\x0f\x4f\xc3\xf4\x61\x5a\x3b\x7f\x60\
\xe2\xef\xf7\xc7\xfd\x01\x54\x07\xcc\x8e\xfc\x0c\x05\x98\x97\xa9\
\xe0\x86\x7e\x2e\xf4\x53\xb4\xf7\xff\xcd\x94\xf4\x57\x1c\x80\xbf\
\x00\x5d\x33\xf9\xc5\x6c\x18\xb6\x7e\x69\x1c\xc0\xe8\x05\x44\x93\
\xd0\x75\x23\xd1\x2e\x81\x16\x1f\x5f\x00\xe7\x2a\xbc\xd5\x5d\xfc\
\x97\xd7\xa0\xef\x14\xfc\xf4\x1e\xf8\x01\x20\x92\xd7\xcf\xab\x00\
\x8c\x01\x10\x10\x9d\x11\x10\x7d\x54\x1f\xb6\x0d\xc0\x5f\x2b\x8e\
\xa1\xe4\xa7\x9b\xba\xf5\x41\xbd\xad\x61\x3c\xbd\xa9\xba\xd7\x3e\
\x54\x7d\x84\x62\xd1\x07\x27\x4f\x89\x01\x9c\x2a\xad\xd9\xe7\xc7\
\x96\x86\x30\x63\x3a\x56\x06\x37\xd8\x4d\x70\x5b\xb6\x1b\xf3\xfb\
\xb9\x76\x3a\x5c\x42\x07\x9e\xee\x01\x5f\xd2\x49\x30\x16\x09\x80\
\x5f\xd1\x01\x93\x10\x19\x40\x51\x27\x3f\x81\x87\xdd\xf7\x31\xb6\
\xce\xab\xf6\x05\x30\x0a\x78\x4e\x35\x95\x37\x51\x5a\xed\xc0\x68\
\xce\x2e\xf0\xa7\x64\xa5\xed\x54\x99\xd4\x57\x63\xde\x7e\x62\x52\
\xf9\x63\x7e\x01\x10\xbc\xb0\xab\x0e\x5f\xff\xf1\xeb\xf0\x26\x60\
\x02\x9c\x2a\xfd\xe3\x83\x3a\x0c\x59\xb6\x0d\xff\x38\xd6\xd2\x23\
\x52\x5f\x24\xc2\xee\x59\x1f\x51\xf1\xa3\x0f\x4e\x62\xfb\xe1\xae\
\x7d\xc7\x40\x84\xe2\xe9\x4f\x5b\x63\xc0\xef\xe8\xcf\x1a\xe4\x26\
\xa5\x0e\x87\xcc\x83\xa1\x8a\x83\x81\xa8\x1c\xa7\x96\xbe\x5a\x0c\
\xf3\x84\x1f\x8b\x3a\x00\x9a\xd7\x86\x01\xec\x60\x0c\xa3\xe0\x14\
\x40\xbf\x84\xdd\x47\x4f\x7f\xad\x65\xd7\x2d\xb7\x01\x63\x77\x01\
\xb3\x0c\x46\xca\x6f\xaf\x8c\xa8\xdb\x27\xc0\x1f\x4c\x4f\x2d\x55\
\x99\xea\x0f\x4e\xea\x83\x81\x9d\x10\x82\x28\x63\x00\x3a\x1e\x37\
\x1c\xf1\x62\xca\x7d\xeb\xf1\x87\xb5\x9f\x77\xc9\xc3\x55\x1f\x6d\
\xc3\xf5\xbf\xd8\x8e\xaf\xae\xdd\x83\x40\x58\xcc\x69\xe9\x9d\xea\
\xc0\x63\x11\x8a\xfb\x77\x34\xe2\x99\x8f\x9b\x3a\x1c\x85\x37\x11\
\x7a\xbf\x2e\x84\x9f\x7c\xdc\x8c\xdd\x2d\x51\xc7\x08\x88\x0d\x95\
\xc7\xb9\xe4\x12\x68\x09\x26\x7c\x3d\xbf\x5e\xca\x5b\x0a\xa3\x94\
\x57\x6c\x2b\x60\xcc\xaf\xa7\x03\x7f\x25\x12\xcb\x99\xd7\x19\x00\
\x3f\x8d\xd8\x7d\xd0\xe6\xdf\x2b\x85\x31\xbb\x0d\xcf\x80\x66\xc3\
\xb0\xeb\x6b\x18\xa3\xe2\x67\xfb\x29\xe2\xde\xd3\x03\x23\x9f\x60\
\x0b\xba\x37\x7e\xbe\xa2\x07\x98\xcc\xa9\x50\x31\xba\x26\xbd\x57\
\x6f\x2b\x3a\x04\xff\xa1\xa7\xbf\xbc\x42\x25\x46\xe9\xa8\xc9\x07\
\xc0\x49\x7d\x95\x18\x8e\x40\x10\xe0\x44\x28\x82\x1f\xfe\x75\x07\
\x2e\xfa\x61\x29\xfe\x58\xba\x0f\x5e\x2e\xc7\x3d\x51\x5a\xbf\xed\
\x08\xae\xff\xd9\xfb\x18\xf5\x8b\x6d\x58\x5f\xdf\x6c\x45\x43\x0f\
\xe2\x5e\x8f\xc4\xc7\x86\x1f\x63\xf7\x7e\x70\xbf\x17\xb9\x7f\xae\
\xc2\xc3\x09\x8e\xc7\xcf\x93\xaf\x5d\xc5\xab\x7b\x5b\x90\xf3\xd7\
\xc3\xf8\xef\x3d\xcd\x68\x8d\x3a\x0d\x2d\x66\xbf\xf9\x99\xc9\xe9\
\xa5\x1d\xdc\xa2\x8c\x81\x67\x21\xcc\xce\xba\xeb\x61\x94\xf2\xda\
\x25\xbb\xe8\x89\x33\xb5\xd0\x8a\x4e\x4a\x4e\xe1\xb5\xbc\xec\xf8\
\x39\x30\x4f\x91\x75\x3d\x8c\x3c\xf8\x26\x18\x31\x10\x3d\xef\x9d\
\x9f\xd7\x6f\x0b\x8c\xd9\x7e\xbc\xdc\x35\x4a\x70\xfa\xa9\xbc\x89\
\x52\x29\xf7\xcd\xe6\xa1\xf7\x66\xf8\xd1\x69\x04\x8c\xc9\x46\xbb\
\xa2\x15\x75\x08\x7e\x00\x48\x1a\xec\xa9\x37\x81\x5c\x77\xfa\x09\
\x52\x9f\x12\xa2\x45\x05\x74\x90\x10\x60\x5f\x4b\x10\x77\xfd\xbd\
\x1c\x23\xee\x2d\xc5\xed\x4f\x94\x61\xf9\x0b\xe5\xd8\xfc\x61\x1d\
\x36\x7f\x54\x87\xea\xc3\x5a\xe2\xcc\x8e\xcf\x1b\x51\xf6\xef\x7a\
\xac\x7f\xf7\x10\x96\xfd\x69\x0f\x8a\x7f\xb9\x1d\xa4\xe4\x0d\x5c\
\xff\xb7\x5d\x58\x5f\xdf\x02\x23\x2c\x48\x2c\xea\xfe\xaa\xcf\x4f\
\xe0\xba\xff\xdb\x81\xeb\x7e\xb3\x13\xd7\x3d\xf7\x29\xae\xfd\xdd\
\x2e\x5c\xf3\xfb\xdd\xb8\x7a\xd5\x1e\x5c\xfd\xc2\x5e\x5c\xfd\xe2\
\x67\xb8\x76\xcd\xe7\x58\xf0\x97\x4a\xdc\xf5\xb7\x03\xb8\xfb\xef\
\xd5\xb8\xa7\xb4\x06\xf7\xae\xab\xc5\x3d\xeb\x0f\xe2\x9e\x7f\x1e\
\xc2\x3d\x6f\x1c\xc1\xbd\x6f\x1e\xc1\x3d\x1b\x8e\xe2\xde\xb7\xea\
\x71\xef\x3b\xc7\x70\xdf\x3b\xc7\xb0\xe8\x9d\xe3\xf8\xb4\xd5\x0a\
\x68\x3d\xe2\x71\x3c\xa4\x22\xd4\x1a\x82\x1a\x56\xb1\x94\x8d\xc7\
\x7f\xee\x2b\xb5\x78\x64\xd3\x71\xfc\xe5\x53\x1f\xde\xab\xd5\x8a\
\x84\x00\xa0\xb6\x29\x8c\xf7\x6a\x03\x78\xaf\x36\x80\x5f\x6c\x6f\
\xc4\xb7\x37\xd4\xc3\xf3\xca\x41\xdc\xfc\x9f\x06\x78\x03\x11\x6c\
\xf6\xda\x9b\x10\x8a\x65\x3d\xf6\xfe\xeb\x4e\xa1\xe3\xac\x86\x51\
\xb4\x23\x4e\x8a\x69\xe1\x49\xec\xda\x0b\x91\x78\xd1\x89\x13\xe3\
\x99\xcd\xdd\xb3\xb6\x83\xe3\x6b\xd9\x71\x93\xd9\x79\x65\x36\xc7\
\x14\x31\xc6\xb0\x06\x9d\x9f\x0d\xe8\x54\xa8\x04\x46\xb1\xd0\x6c\
\x6e\x7b\x0d\x8c\x09\x2f\xbd\xdd\xfc\x0c\x5b\xba\xa9\x95\x27\x04\
\x7e\x35\x2b\xfd\x75\x0d\xf8\x88\x01\x9f\xb0\x75\x35\xa6\x09\x90\
\x98\xd4\x8f\xc5\xa2\x19\x58\x09\x03\xee\xdf\xab\x4f\x62\xe9\xb6\
\x2a\xdc\xf8\xfb\x0f\x70\xe3\xef\xb7\x63\xd2\xff\xbc\x83\xcc\xef\
\x97\x62\xfa\xca\x6d\xf8\xd2\x0b\x1f\xe3\x86\x57\x77\xe2\xa1\xed\
\x35\x58\x53\xdb\x68\x96\x7a\x16\x3d\xd8\x88\x77\x57\xfa\x43\xf8\
\xe7\xf1\x56\xad\x1d\x6b\xc5\x1b\xc7\x5a\xf1\xaf\x13\xad\x78\xfb\
\x44\x2b\xde\x3e\xd9\x86\x8f\x9b\xdb\x51\xd9\x16\xc2\x07\xde\x00\
\xde\x6a\x08\xe0\x5f\x0d\x01\x6c\x68\x08\xe2\xcd\xc6\x20\x36\x34\
\x05\xb1\xa1\xa9\x1d\x1b\x1a\x03\x78\xb3\xa9\x1d\x6f\x36\xb5\x6b\
\xeb\x4d\xed\xd8\xe0\x0d\x61\x83\xaf\x1d\x47\x23\xaa\xe5\xe3\xe8\
\x93\x85\x57\x34\x05\x2c\x03\x95\x54\xfa\xda\xf1\x54\x95\x0f\xdf\
\xfc\xf0\x38\x2e\x7f\xbb\x0e\x97\xbf\x53\x07\xf2\x7c\x25\x0a\xd6\
\x1e\xc4\xe5\x1b\xeb\x70\xf9\xa6\xa3\xf8\xc9\xe7\x5e\xfc\xb9\x3e\
\x60\x9e\xda\x3c\x18\xc5\xe1\x60\xd4\x59\x99\xb1\xfa\x38\x3a\x03\
\xca\x72\x18\x93\x62\xea\x49\x2d\x7c\x0e\x7a\x0e\x0c\xd5\x7a\x75\
\x17\x75\x5e\xfd\x9e\x05\x30\x26\xaa\x14\x1b\x61\xfb\x4b\x10\xdf\
\x94\x99\x07\xc3\x4c\xe8\x09\x2a\x85\x35\xf7\x5f\x67\xa6\xfa\xf6\
\xf2\x6e\x7e\x86\xd9\xdd\xd4\x4a\x12\x02\x7f\x72\x46\xea\x13\x51\
\x85\x80\x2a\x0c\xe8\x00\xa2\x04\x31\xf5\x9f\xf0\xb9\x00\x2c\x04\
\xa8\x98\xa6\x98\x12\x96\xb9\x51\x68\x29\xe7\x3c\x34\xf6\xb3\x05\
\xdb\xf1\x2b\xe2\x0c\x5f\xc5\x31\x9f\xdc\x14\x37\xc6\xa4\x25\x61\
\xa0\x5b\x0f\x53\x9a\x9d\x94\x3c\x51\x68\x1a\x8b\x62\xb3\x4f\x11\
\x40\xa7\xab\xfb\xe5\x2d\xed\x40\x38\x6a\x06\xb0\x38\x62\x11\xa5\
\xd6\xd9\x85\x4c\xc3\x98\xc1\x34\x87\xc1\xc6\xc6\x80\xe9\xbd\x94\
\xf8\xf8\xef\x0c\xf8\x45\xf2\xc2\x6c\x07\x76\xb7\x14\x03\xec\xed\
\x4f\x49\xbd\x48\x71\xc1\x5f\xf1\xd3\x2b\x6a\xfb\xe5\x67\xd5\xeb\
\x12\x5f\x67\x02\xaa\xee\xf8\xe3\x24\x3f\x15\xe6\x94\xd3\xa5\xbe\
\xee\x29\xe7\xe7\x9c\xd0\x22\x08\x06\xc8\x4d\xc0\x13\xd5\x7c\x27\
\x91\x48\xb8\x51\x6c\x09\x41\x5e\x8a\x0b\x63\xd3\xdc\x18\xe0\x56\
\x38\xd0\x5b\x4f\xd6\x98\x16\x89\x65\xca\xc6\xfb\x28\xfa\x73\xe9\
\x76\x7f\x73\x54\x45\xb0\x85\x4b\xf6\xb1\x05\xbb\xb8\x8d\xda\xcc\
\x0e\x44\xcd\x4c\xa0\x3d\x82\x9d\xcd\xa1\xb8\x91\x0c\xf6\x2c\xeb\
\x56\x4c\x49\xef\x09\xa0\x4a\x3a\xdb\xc1\x0f\x00\xa1\xac\x8c\xd7\
\x75\xcf\xbe\x15\xf8\x30\xab\xfb\x31\xa0\x13\xeb\x24\x93\x4c\x33\
\x88\xea\xe7\x81\x33\x1d\x60\x1e\xd1\x56\x3c\x0f\x44\xe1\x06\xaf\
\xe4\x18\x83\x42\x30\x30\xd9\x85\x71\x69\x6e\xe4\xba\x95\x58\xe5\
\x21\x0f\xfa\x58\x6e\x82\x9e\x1a\x6b\xa9\x56\x84\x29\x83\xd1\x65\
\xe1\x31\xc6\xf1\x9f\x37\xf8\xb5\x31\xf9\xc4\x49\x48\x20\x82\xdc\
\x81\x01\xf0\xb3\x14\x99\xb6\x03\x9f\x34\x05\x6c\xff\x10\x81\x1d\
\x94\xca\x2e\x2b\xa9\xc7\xc0\x7f\xf0\x17\xd7\x7c\x47\x4d\x76\x53\
\x3b\xe0\x13\x18\xa0\x56\x78\x46\xc0\x80\x4b\x62\x5a\x00\x5b\x87\
\x96\x33\x10\x25\x4a\x2c\x4a\xa0\x9a\x86\xa4\x23\xd6\x1a\x7b\xd1\
\xf6\x67\xd7\x1a\x98\xe2\xc2\xb8\xb4\x24\x0e\xf4\xc4\x54\x83\x60\
\xb8\x07\x74\xc0\x73\x0c\x41\x00\x3d\x75\xf8\x18\xbc\xa7\xbf\xd2\
\x1f\x06\x6d\x8f\xc0\x24\xb5\x79\x06\x20\x4a\x79\xbb\x7d\xb6\xcc\
\x82\xfd\x46\x54\x7c\xea\x6b\x37\x69\x41\x02\xf0\x7d\x2b\xa7\x66\
\xac\x96\x5d\x56\x52\x8f\x81\x1f\x00\xd2\x87\xe4\xbc\x1b\x8d\xc5\
\xfb\x0d\xe0\xeb\xcb\x0a\x27\xfd\x63\x80\x87\xae\xf6\x23\x96\x27\
\x10\x15\xd4\x5a\x95\x08\xaa\x39\x3f\x23\x8d\xdd\x3c\x77\x0a\x41\
\x5e\xb2\x0b\xe7\xa5\xb9\xd1\xdf\xa5\x30\x90\x13\xee\x14\xdd\xd9\
\x08\x93\xbd\x2f\x82\x1e\x36\xa0\x57\x88\x51\xc8\x24\x7e\x98\xa0\
\x4a\xd1\xc4\x24\xb3\x69\xd2\xd0\x78\x0c\xc0\x6e\x7e\x40\xea\xc0\
\x10\xd8\x39\xff\x66\xf7\xb0\xcb\xee\x53\x7a\x69\x0e\x77\x49\x67\
\x39\xf8\x33\xb3\x33\x16\x10\x0e\x1c\x66\xe0\x13\x01\xf8\x10\x24\
\xb6\x21\x75\x55\x02\x50\x26\xf5\xb5\x5a\x01\x18\x65\xaa\xb6\x52\
\x9f\x6d\x57\x14\xf4\x4f\x52\x70\x5e\xaa\x1b\x79\x6e\xa6\xb8\x13\
\x23\xb4\x68\x48\x71\x98\xec\xfd\x58\x4e\x02\xdf\x18\x5b\x72\x11\
\x2d\x7c\x96\x88\xb3\xaf\xa2\xd1\x6f\x0c\xcb\xc5\x83\x9b\xc6\x61\
\x00\x80\xa0\xfe\xdb\x1c\x2f\xe0\x1f\x61\x4d\xfa\xf3\x3c\x8f\x23\
\x09\x7e\x49\x3d\x0f\xfe\xbd\x8f\xcd\xaa\xcd\x18\x94\x53\x45\x39\
\xc7\x1f\x55\x88\x03\xf0\x45\x75\x5f\x6b\x51\x26\x6d\xa3\x0c\x6c\
\x51\xde\xd6\x07\x8c\x7a\x75\xde\xb6\x57\x14\xe4\xa7\xb8\x70\x6e\
\xaa\x1b\x03\x62\x43\x59\x89\x8e\x46\xb3\x14\xd7\x41\x6f\x68\x02\
\x66\x7b\x9f\x9f\x64\x47\xb4\xf7\xf9\x0f\xa2\x33\x84\xfa\x40\x18\
\xa1\xd6\x76\x0d\xfc\xaa\xaa\x8d\xc1\x45\x29\xfb\x55\xb5\xa6\x52\
\xab\x54\x77\x9c\x60\x94\x67\x1e\x3c\x03\xd0\xd6\x3f\x6a\x0c\x18\
\x7f\x0a\x7b\x58\x05\x64\xcd\xca\xa9\x19\x5e\xd9\x5d\x25\xf5\x38\
\xf8\x01\x20\x77\x40\x56\xb1\x21\xfd\x99\x8d\xcf\x26\x8f\xb0\x00\
\xdf\xa4\xee\x1b\x4e\x3e\x5d\xcd\x57\x4d\xce\x42\xc0\x62\xd8\x2b\
\x9a\x4d\x5f\x98\xea\x46\x96\x5b\xd1\x6e\x03\xb3\x1d\xaf\x4b\x7f\
\x5e\xe5\x27\xec\xb9\x08\xe1\x9d\x7e\xfa\x3c\xc0\x06\xa0\x9d\xec\
\x7d\xd1\xd9\x17\x52\x29\x8e\x1e\x6f\xd6\x40\x4f\x99\xe4\x57\x55\
\x63\x5d\x15\x98\x80\x45\xd2\xdb\x7c\x48\x51\x33\x30\x99\x10\x14\
\x08\xab\xd8\xe9\x33\x22\x0a\x0c\xff\x4b\x64\x57\x95\xd4\x6b\xe0\
\xdf\xfd\xe8\xcc\x6d\x99\xc3\xf2\xb6\x18\xce\x3d\x07\x89\x0f\x02\
\xa2\x68\xa0\xd3\x33\x03\x4d\x4e\x3e\xa2\x17\x0a\xd9\x48\x7d\x97\
\x82\xdc\x14\x17\x46\xa5\xba\xd1\xcf\xa5\x18\xe0\xe5\xb4\x01\x95\
\x57\xef\x4d\xd2\xdf\x46\xe5\x87\x59\xe5\xd7\xd1\xc4\xc7\xf7\xf5\
\x0c\x66\xd1\xde\x77\x01\x38\xe8\xf5\xb3\x61\xb9\x18\xc8\x55\x6e\
\x44\x5e\x1d\xf4\xba\x36\xa0\x33\x05\xd5\xe1\x78\x91\x01\x88\xce\
\x08\x6e\xfb\xf6\x06\x7f\x8c\x6d\x01\x58\xf9\xab\x0b\x33\x6a\x64\
\x57\x95\xd4\x6b\xe0\x07\x80\x7c\x4f\xc6\x82\xb4\x64\x37\x8d\x07\
\x7c\xd1\x6b\xaf\x12\x9d\x11\x68\x00\x8c\x58\xd4\x7c\x6d\xa4\x9a\
\x9c\x64\x17\x46\x26\xbb\xe0\xe1\x1d\x79\x0a\x89\x49\x7d\x05\xbc\
\xe4\xe6\xbc\xfb\xba\x6b\x91\x3d\x83\x49\x3b\x20\xa2\xca\x6f\xc4\
\xf7\x75\xa6\x21\x7e\x0c\xdd\xde\x6f\x08\x46\xd0\xda\xd0\x66\x05\
\xbc\x4a\x0d\xfb\x3f\x66\x02\x70\x5a\x00\xaf\x19\x50\x36\x5d\x97\
\xca\x1d\x6b\x49\xf6\xe1\xb4\x00\x7d\x7c\xf3\xb0\x8a\x4f\xb5\xc1\
\x3e\x7d\x90\x52\x5f\x52\x5f\x00\xff\x8e\x87\x2f\xab\xcd\x1c\x92\
\xbb\x2e\x1e\xf0\x09\x57\x05\xa8\xdb\xf5\x11\xc5\xac\xee\xc7\x40\
\xa7\x28\xc8\x4e\x76\x61\x78\x8a\x1b\xd9\x2e\x23\x96\x6f\x80\x92\
\x98\xb6\xe9\xaa\xbe\x49\x19\x50\x38\xfb\x1f\x44\xf0\xf2\x9b\x55\
\x7e\xbb\x10\x1f\x05\xb1\xa8\xfc\x94\x00\x87\xeb\xbd\x82\xaa\xcf\
\x01\x9d\xf2\x0c\x80\x63\x0e\x54\x3c\x4e\x35\xb4\x03\xaa\x5a\x1d\
\x7f\x54\x74\x0c\x1a\xcf\xf0\xfe\x49\x3f\x00\x2c\xf9\xdf\x0b\x33\
\xbd\xb2\x9b\x4a\xea\x75\xf0\x03\x40\xf5\x93\x57\xde\x90\x9c\x95\
\x1e\xd0\x40\xaf\x98\x54\x7d\xc2\x4d\x26\x29\xda\xf9\x51\x85\x30\
\x6f\x3f\x01\x71\x69\xa0\x1f\x96\xe2\x36\x4b\x7a\xce\x76\xb7\x93\
\xfa\x31\x47\x1f\x8c\xe4\x20\x23\xc3\x96\xbb\x9f\x2e\xd9\x15\x62\
\xf6\xf2\x33\x2d\xa0\x23\x95\xff\x70\x43\x2b\x68\x30\x6c\x63\xe3\
\x0b\x00\xe6\x27\xe6\xb0\x63\x10\xbc\xb4\x37\x69\x07\x76\xa6\x80\
\xc0\x14\x5c\x24\xf0\xeb\x8b\x32\x57\xc8\x2e\x2a\xa9\xcf\x80\x1f\
\x00\x86\x0e\xc9\x5d\x48\x38\xef\xbc\x6e\xe3\xeb\xc0\x8f\xc4\x24\
\xbf\x66\xe7\xeb\xc0\x57\x14\x05\xb9\xc9\x2e\x0c\x4d\x76\x21\x3b\
\x06\x7a\xc3\x2b\x4f\x4d\xb1\xfd\x8e\xa5\xbe\x7e\x2e\x14\x03\xcc\
\x44\x07\x79\xcc\xae\x37\xbc\xfc\xa2\xf4\x17\x3f\x84\x42\x08\x5a\
\x42\x51\x34\x9d\x68\x36\x83\xd5\xa2\xe2\x8b\xa0\x17\xc1\x0e\xc1\
\xe6\xe7\x7f\x05\x6d\x40\xb5\x61\x02\x00\xe6\x66\xa7\x2c\x94\xdd\
\x53\x52\x9f\x03\x7f\xf9\xc3\x97\xbd\x92\x7d\x4e\xff\x2d\x7c\xf6\
\x9e\x2e\x65\x45\xe0\x53\x02\xb8\x98\x4d\x3f\x24\xc5\x85\x4c\x97\
\x12\x93\xee\x94\x1b\x95\x56\x0f\x1d\xea\x1a\x80\xe8\xe1\x87\xa2\
\xb0\x61\xc3\x88\x39\x6a\x00\x98\x35\x01\xdd\xa1\x48\x60\x5a\x8e\
\x25\xf6\x98\xb2\x01\xad\x2a\xff\xa1\x23\x0d\x9a\x93\x4f\xa5\x66\
\x80\x52\x07\x67\x1e\x6f\xeb\x53\x1b\xa7\x5f\xd4\x46\xe2\xab\x7c\
\xd8\x90\x0b\x15\x32\xe0\x8f\xf2\xa4\x6c\x79\xfb\x9a\x81\xaf\xc8\
\xee\x29\xa9\xcf\x81\x1f\x00\x6a\x9f\x98\x33\x3b\x92\x93\xe9\x05\
\x51\x62\x52\x34\xc2\xec\xef\x88\xa2\x01\x5f\x71\x11\xf4\x4b\x71\
\x23\x3f\xc5\x85\x0c\x06\xfa\x58\x96\xa0\x00\x76\x5e\xdd\x77\xf2\
\xf0\xab\x82\xad\x2f\x4a\x7d\xdd\xff\xa0\x70\x2a\xbe\xe8\xe8\x03\
\x17\x0e\xd4\xb5\x0d\x5d\xe5\x3f\xe6\xf3\x23\xd2\x16\xe4\xc2\x6f\
\x76\x92\x9f\x03\xac\xaa\x3a\x3b\xfc\x4c\xe1\x41\x01\xf8\x3a\xd0\
\x79\x06\xc0\x34\x06\x25\xcd\xed\xad\xba\x7e\xc8\x6c\xd9\x35\x25\
\xf5\x59\xf0\x03\xc0\xd8\x21\xb9\x45\xd1\x24\x17\x55\x89\xe6\xd4\
\x03\x73\xee\xb9\x14\x05\xd9\x29\x2e\x0c\x48\x71\x23\xc3\x65\x4e\
\xfb\xa5\x31\x50\x82\x93\xf2\x24\x26\x9d\x01\x6d\x78\x6a\x85\x73\
\x2a\x12\x21\x75\x58\xb4\xf5\x79\xa9\x6f\x9b\xf1\x27\x26\x04\x09\
\x85\x3c\x0a\x80\x88\x4a\x71\xe2\xf0\x49\xc1\x63\x6f\x27\xa9\x45\
\x06\x20\x66\xfc\x89\xea\xbf\x6a\xe3\x38\x14\x19\x00\x6b\x84\xd2\
\x2f\x79\x52\x8a\x64\xb7\x94\xd4\xe7\xc1\xff\x9f\x1f\x5d\x54\x5b\
\x58\x30\xf0\x47\x7a\xda\x2e\xdc\x0a\xb2\x92\x5d\xc8\x4b\x71\x21\
\xcd\xa5\x98\xa4\x31\x0f\x7c\x3e\x14\x17\x4b\x1a\x82\x91\xa4\xc3\
\xab\xfb\xfc\xf0\x61\xb1\xe4\x20\x4e\xea\x13\xbe\x70\x88\x6b\x0a\
\xbf\xae\x33\x1a\x18\x05\x48\x0a\x31\x57\xec\x1d\x3a\xdc\x00\x44\
\xa2\x42\x58\x4f\x75\x50\xd7\x05\x53\xc0\x14\xeb\x77\xf0\x05\x88\
\xcc\xc3\x86\x99\x4c\x1b\x90\xf6\xa3\x37\xae\xc9\xaf\x3d\x83\xfb\
\x93\xa4\xb3\x05\xfc\x00\xf0\xc9\x03\x97\xae\x28\x18\x96\xf7\x6c\
\x66\xb2\x0b\xfd\x93\xdd\x48\x75\x29\x5c\xb9\xaf\x33\xf0\xcd\xdb\
\x38\xef\xbe\xa0\xee\x9b\x8b\x7d\x10\xcb\xea\xd3\x8b\x87\x78\xa6\
\xa0\x92\x0e\xa4\xbe\x4d\x3a\x2f\x21\x40\x43\x4b\x00\x81\xc6\x16\
\xfb\x44\x1e\x93\x33\x0f\x82\x07\x9f\x67\x00\xd1\xf8\x4e\x42\xea\
\xe4\x30\xd4\x7e\x2f\x18\x90\xfe\xfc\xfb\x5f\x3d\x67\x85\xec\x92\
\x92\xce\x18\xf0\x03\xc0\xee\x47\x2e\xbb\x37\xcd\xad\xac\xa4\x5c\
\xe6\x1f\xed\x00\xf8\x44\x11\x81\x6f\x5f\x27\x80\xd8\xd0\x61\x86\
\x93\x8f\x1f\x42\x4c\xf7\xf0\x23\xa6\x3d\x28\x16\xa9\xaf\x87\xf7\
\x78\xdb\x9f\x10\x02\x17\x80\x70\x44\xc5\xb1\x9a\xe3\x82\xda\xae\
\x5a\xc3\x74\x54\x48\xe3\x55\x85\xd0\x5c\x4c\x8d\x8f\xda\xc4\xfb\
\x61\x63\x3a\x18\x0c\x60\x68\x76\x6a\xf9\xce\xf9\xc3\xbf\x23\xbb\
\xa3\xa4\x33\x0e\xfc\x00\x50\xf3\xf4\x97\x4b\xa0\x90\x35\x44\x04\
\xb9\x03\xf0\xed\x1c\x7c\x0a\x5f\x0c\xa4\x28\x5c\x7a\x30\xb1\x75\
\xf2\xe9\xf6\x01\x11\xe2\xfa\x26\x0d\xc0\x14\xfb\x37\x46\x1b\x06\
\xb4\x63\x8e\x1f\xf7\x02\xa1\x88\xd5\x23\x6f\x17\xd7\x57\xed\x24\
\xbe\x68\xcf\xc3\x9a\xea\x4b\x55\x1b\x8d\x41\xdb\xde\x3f\x2b\xa5\
\xea\xd0\xb7\x47\x4d\x96\x5d\x51\xd2\x19\x0b\x7e\x00\x38\xf8\xf3\
\x6b\x8a\xa1\x90\x35\x2a\x27\x79\xa9\xc2\xc0\xae\x30\x89\xac\x98\
\x6d\x73\xb3\x83\x0f\x6c\x66\x20\x63\xb4\x60\x22\x8c\x16\x04\x9b\
\x70\x5f\xec\x7e\x5c\x8c\x1f\x82\xad\x6f\x27\xf5\x5b\xdb\x82\xf0\
\x1d\x69\x30\x67\xe3\x89\x99\x78\x3c\xb8\xa9\xe8\x9d\x8f\xd3\x54\
\x07\x2d\x82\xdb\x9f\x97\x95\x56\x75\x72\xc1\xb9\x85\xb2\x1b\x4a\
\x3a\xe3\xc1\x0f\x00\xb5\xcb\xaf\x2e\x86\xa2\xac\x21\x8a\x62\xf2\
\xda\x13\x02\xa8\xa2\xc4\x27\x66\xe0\x53\x62\x67\xe7\x6b\xcd\x08\
\xf7\x39\x3b\xf9\x88\x62\x48\x7a\x70\xa6\x01\xff\xab\x13\x25\xc0\
\xe1\x03\xc7\x6c\xa4\x71\x1c\xb5\x5f\xb5\xf3\x05\x08\xd2\xdf\x16\
\xf8\xaa\x50\x04\x44\x31\xc8\x93\x56\x75\x7c\xe1\x18\x09\x7c\x49\
\x5f\x1c\xf0\x03\xc0\xc1\xa5\x73\x8b\x09\xc1\x1a\x1e\xa8\xa2\x73\
\x4f\x94\xf8\x94\x58\x52\xf7\x4c\x00\x86\x83\xba\x2f\x3a\xf9\x08\
\x67\x3a\xf0\x71\x7d\xde\xc3\xef\x02\x70\xbc\xae\x11\xf0\x07\xad\
\xd2\x5a\xb5\xf1\xc6\x5b\x34\x00\x6a\xef\xd8\xb3\xe4\xfa\xc3\xa6\
\xb8\x47\xc5\x10\x4f\x5a\xd5\xd1\x3b\xc6\x49\xe0\x4b\xfa\xe2\x81\
\x1f\x00\xaa\x9f\xbc\xb2\x58\x21\x64\xa1\x49\xe5\x4f\x14\xf8\x8a\
\xa1\xce\xeb\x19\x39\xc4\xc1\xf1\x67\xeb\xe4\x63\x8c\x46\x15\x4c\
\x04\x9d\xda\xdb\xc3\x68\x8c\xc5\xf4\xa9\x83\x3d\x2e\x86\xfa\x10\
\xff\xf8\x78\x61\x3e\x18\x36\xfe\x84\x41\xd9\xcf\x1f\xb9\x73\xbc\
\x04\xbe\xa4\x2f\x2e\xf8\x01\xe0\xc0\x13\x73\x56\x13\x42\xe6\x10\
\x42\x7c\x44\x51\x62\x5e\xfd\x44\x80\x0f\x0e\xf8\xe0\x1c\x7f\x10\
\xa3\x02\xcc\xa7\x60\xe7\xe4\x23\x31\x1f\x83\x21\xf5\x15\x02\xd4\
\x54\x1c\x01\x22\x11\xb3\x07\xdf\x12\x77\x77\xb0\xf9\xa9\x6a\xaf\
\xfa\xf3\x49\x3b\x76\xe6\x82\x42\xe8\x85\x43\x73\x17\xed\x2a\x1e\
\x2b\xbd\xfa\x92\xbe\xf8\xe0\x07\x80\xca\x25\xb3\xca\x40\x48\x11\
\x21\x64\xa7\x42\xcc\x5e\x7d\xe2\x08\x7c\x23\x5d\x17\xbc\x43\x4f\
\xdf\xc6\x00\x4d\xf5\x72\x5e\x98\x2b\x0a\x45\x27\x1f\x9f\xc3\xdf\
\x54\xdf\x04\xb5\xd9\xcf\x79\xe5\xed\x0a\x71\x44\x13\x80\x0a\x1a\
\x80\x2a\xa4\xf8\xc2\xde\x2f\xc0\xae\x95\x95\x96\x14\x98\x36\x34\
\x67\xe6\xc7\x0b\xc6\xae\x90\x5d\x4e\xd2\x59\x03\x7e\x00\xa8\x78\
\x74\x46\xcd\xfe\xff\x77\x79\x91\x42\xc8\x4a\x45\x50\xe1\xed\x80\
\x6f\x80\xde\x90\xe0\xb1\xb4\x3f\xbe\x34\x18\x82\xc3\x8f\x53\xf7\
\x79\x27\x9f\x9e\xc3\x1f\x8e\xaa\x38\x56\x5d\x6f\xa3\xba\xb3\xd9\
\x76\x2d\x36\xbb\xae\xb2\x27\x60\xe7\xdb\x69\x0e\x2a\xc5\xe8\xfe\
\x99\x1f\xfa\x1e\xb8\x38\xfd\xfd\xdb\xc6\x6e\x93\xdd\x4d\xd2\x59\
\x07\x7e\x9d\xf6\x3e\x72\x59\x09\x21\x64\x0e\x08\x6a\xa9\xa2\x40\
\x6b\x4e\xc0\xd7\x26\xeb\xe0\x81\xaf\x0f\x09\x46\xf4\xb1\x04\xb8\
\xb8\xbf\x9d\xba\x2f\x3a\xf9\x0e\x7e\x76\x88\x9b\x6a\xcb\x2e\xa4\
\x07\x7b\x90\x9b\x42\x75\x6a\x07\xfb\xb4\xeb\xa6\x24\xbb\x22\x53\
\x86\xf7\x5f\x54\x79\xef\xe4\x4b\x65\x37\x93\x74\xd6\x83\x1f\x00\
\xf6\x3c\x34\xad\x6c\xcf\x83\xd3\x0a\x54\x42\x1e\x07\x81\x0f\xb1\
\x01\x3e\x09\x74\x86\xa0\xcd\x0f\x68\x05\x3e\x9f\xc5\xa7\x03\x3f\
\x56\x9b\x2f\x24\x16\x89\xea\xbe\xcf\xdb\x8a\x50\x63\xb3\x90\x8b\
\x6f\xe3\xe1\xb7\xf8\x01\xec\xe2\xfc\xd4\x5c\xf9\xc7\x33\x02\x97\
\x42\xc7\x0c\xcc\xde\x12\x7c\x6c\x46\xd2\x7f\xee\x9c\xb0\x42\x76\
\x31\x49\x12\xfc\x02\x7d\xf6\x93\x8b\x97\x80\x90\x22\x10\xb2\x86\
\x77\xee\xa9\x6c\x5c\x3f\x3b\xe0\x13\xce\xde\x57\x63\xe3\xfe\xf3\
\xc0\x57\x84\x48\x80\xc6\x00\x22\x91\x28\xea\x3e\x3f\x64\x00\x96\
\x81\x96\xd8\x34\x45\xd5\x1a\x61\xcd\x62\xff\x8b\x6a\x3e\xc7\x0c\
\x46\xf7\xcf\xac\xba\x64\x44\xde\xcc\x7d\x8b\xa6\xce\x96\x5d\x4b\
\x92\x04\x7f\x3c\x06\xf0\xe3\x8b\x6a\x3e\xfb\xd1\x85\xc5\x50\xc8\
\xc8\x18\x13\xe8\x00\xf8\x54\xa8\x1b\x10\xed\x7c\xd1\xbb\xef\x02\
\xd0\x54\x75\x14\x08\x86\xa0\xa8\x2a\x5c\x2a\xd5\x1a\xa5\x70\xab\
\xe6\x96\xc4\xf6\xbb\x55\x0a\x37\xd5\x9a\x4b\xa5\x50\x38\xc6\x40\
\x6c\x06\xf4\x18\x95\x9b\x59\x75\x49\xe1\xa0\x19\x95\x0f\x4e\x2b\
\xdc\xfe\xbd\x89\xd2\xb6\x97\x24\xc1\x9f\x28\xed\xbb\x6f\x4a\xcd\
\xbe\x7b\x27\x17\xab\x84\x8c\x24\x84\xac\x89\x07\x7c\x22\x00\xdf\
\x94\xde\xab\x70\xde\x7e\x06\xfc\xb6\x16\x3f\x1a\xea\x4e\xc2\xa5\
\x52\x10\x0a\x28\x0c\xd0\xda\xba\x06\xec\x24\x55\x85\x8b\x52\x28\
\x14\xec\xd7\x00\xbd\x9b\xed\x4f\x8a\x1d\x0f\x28\xcc\xeb\x3f\xbc\
\x7f\xbf\xaa\x4b\x0a\x07\xcd\xa8\x7a\xe4\xb2\xc2\xed\x3f\x98\x24\
\x41\x2f\x49\x82\xbf\xb3\x54\x71\xf7\xa4\x9a\xfd\x77\x5d\x50\x0c\
\x42\x72\x54\x42\x16\x81\x90\x5a\x5b\xe0\x2b\x06\xf0\x89\x62\xae\
\x04\x24\x9c\x9d\x4f\x08\x70\x70\x6f\x2d\x92\x74\x49\xcf\x40\xed\
\xe6\x40\xed\x56\x55\x10\x4a\xe1\x52\x55\xa3\x51\xad\xb9\xb9\xfd\
\xda\xf1\x14\x48\x76\x47\x26\x9c\x93\x5b\x3a\xfd\xdc\xc1\x05\xb5\
\x8f\xce\x28\xdc\x7e\xf7\x64\x09\x7a\x49\x12\xfc\x5d\x45\xfb\xbf\
\x3f\xd1\x5b\xf9\xbd\x09\x2b\x2a\xef\x3c\xbf\x00\x0a\x99\x4c\x09\
\x59\x49\x38\x46\x60\x0f\x7c\x23\x8b\x4f\xf7\xfc\x37\x56\x1c\x41\
\x72\x6b\x30\x26\xc1\xdd\xba\x84\x67\x20\xd7\x99\x80\x0e\xec\xd8\
\xba\xca\xd6\x59\x53\x92\xdd\x91\xbc\xfc\xec\xf2\x89\xa3\xf3\xbf\
\x11\xf9\xe5\x35\x49\x3b\x1f\x9a\x7e\xc3\x7b\xf7\x4e\x91\x83\x6e\
\x48\x92\xe0\xef\x4e\xaa\xb8\xe3\xbc\xf2\xca\xdb\xc7\x95\x54\x2c\
\x1c\x5b\x00\x85\x8c\x24\x84\x2c\x52\x09\xd6\x11\x85\xd4\x8a\xaa\
\x3f\x9f\xc5\x17\xf5\xb7\xa3\xe1\xf0\x49\x4d\xda\xf3\x6a\xbc\x4a\
\xe1\x62\xea\x3f\xef\xc8\xa3\x6c\x9d\xa8\x14\x6a\x4a\x52\x84\xe4\
\x64\xd6\x0f\x1c\x3a\xa0\x74\xcc\x98\x21\x33\xda\x56\x5e\x9b\x54\
\xb3\x64\xd6\xe4\x8f\xee\xbf\x44\x0e\xaa\x29\x49\x82\xbf\x37\x68\
\xff\x6d\xe7\xd6\xec\xbb\xb5\x70\x45\xc5\x2d\x85\xf3\x2a\xbe\x35\
\xaa\x80\x10\x32\x52\x25\x64\x0e\x08\x79\x5c\x21\x58\x43\x08\xd9\
\xe2\x02\x6a\x15\x00\x95\xbb\xab\x91\x1c\x8e\x68\xde\x7b\x4a\xa1\
\x50\x95\xd9\xea\x86\x07\x9f\xa4\x24\x45\x22\x59\x69\xde\xa4\x01\
\x59\x55\xd9\x83\x73\xb6\x14\x8c\x1a\xb4\x68\xf4\x98\x21\x33\x9a\
\x9f\xfd\x4a\x52\xe3\xf2\xab\x06\xef\x7f\x74\xc6\x0d\x1f\xdd\x7f\
\x89\x54\xeb\x25\x49\xf0\xf7\x39\x66\xf0\x8d\x82\x9a\xca\x9b\x47\
\x94\x55\x7e\x7d\xf8\x92\xfd\x37\x0d\x2f\xde\xf7\xb5\x61\xb3\xf7\
\x7e\x6d\x58\xc1\x9e\xf9\xc3\x88\xba\xfc\x2a\xd2\xfa\xdc\x3c\x52\
\x50\x38\x68\xc6\xf0\x82\xfc\x45\x43\x46\x0e\x5a\x34\xe2\xdc\xc1\
\x33\xbc\xab\x6e\x24\xde\x55\x37\x12\xef\xf3\xf3\x49\xd3\xaf\xaf\
\x4b\x6a\xfd\xe5\x97\x73\x4e\x2c\x9b\x5b\x58\xfb\xc4\x9c\xd9\xe5\
\x0f\x4f\x5f\xf1\xef\x07\x2e\x95\x60\x97\x24\xc1\xff\x45\xa0\x5d\
\x0f\x5c\xba\x6d\xf7\xc3\xd3\x57\xec\x7d\x78\xfa\x8a\x9d\x8b\xa7\
\x49\x60\x4b\x92\x74\xb6\x80\x5f\x92\x24\x49\x12\xfc\x92\x24\x49\
\x92\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x12\xfc\x92\x24\x49\x92\
\xe0\x97\x24\x49\x92\x04\xbf\x24\x49\x92\x24\xf8\x25\x49\x92\xf4\
\x05\xa2\xff\x0f\x62\x20\x19\xf4\x2f\xa5\x49\x2e\x00\x00\x00\x25\
\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\
\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\x35\
\x3a\x30\x35\x2b\x31\x31\x3a\x30\x30\x36\x6e\xa9\x1b\x00\x00\x00\
\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\
\x00\x32\x30\x31\x37\x2d\x30\x33\x2d\x32\x36\x54\x32\x31\x3a\x30\
\x35\x3a\x30\x34\x2b\x31\x31\x3a\x30\x30\xe1\x44\x1a\x13\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x83\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x28\x00\x00\x00\x28\x08\x06\x00\x00\x00\x8c\xfe\xb8\x6d\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x23\x00\x00\
\x2e\x23\x01\x78\xa5\x3f\x76\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe2\x0b\x1e\x00\x33\x3a\x62\x29\xbd\x16\x00\x00\x0b\x10\x49\x44\
\x41\x54\x58\xc3\xed\x98\x7b\x8c\x5d\x57\x75\x87\xbf\xb5\xf6\x3e\
\xf7\x31\xf7\xce\x9d\x19\xcf\xd8\xf1\xcc\x78\xfc\x1a\x3f\xb0\x9d\
\x04\x8c\x13\x12\x12\x4a\x48\x9c\x12\x4a\x13\x09\xe2\xd2\x34\xb4\
\x2a\x12\x69\xa4\x84\x96\x12\x90\x50\xe9\x23\x40\x48\x69\x8b\x48\
\x1f\xa0\x28\x4e\x9d\xb6\x40\xdb\x50\x52\xb5\x51\x4b\x65\xaa\x10\
\x12\x19\x5a\xa7\x20\x3b\x31\xc5\x71\x62\x27\xf1\x6b\x5e\x76\x66\
\x3c\xef\x3b\x73\xef\x3d\xf7\xec\xbd\xfb\xc7\xb9\xf3\xb0\x5d\xbb\
\x49\x09\xfc\xc5\x91\x96\xf6\x3e\xf7\xea\x9c\xf3\x69\xfd\xd6\xda\
\x7b\xad\x0d\x3f\xbb\x7e\xbc\x4b\xfe\xbf\x0f\x16\xff\x60\xbf\xaa\
\x6a\x6b\x08\xac\x51\xa3\x9d\x08\x5d\xd9\xac\x6d\x53\xa3\x38\x2f\
\xe3\x91\xd5\x21\x1b\xe9\xe9\x75\xab\x8b\xc7\x9a\x0b\x76\xe2\x5f\
\xb7\x2f\xf5\x3f\x15\xc0\xc2\x27\xf7\x96\x02\x7a\x75\x10\xb9\x19\
\xd5\x6b\x44\x75\xad\xaa\x36\x89\x91\xac\xaa\xa2\x46\x51\x63\x30\
\x46\x6b\x51\x46\x67\xd7\xaf\x69\x3e\xda\xda\x1c\x3d\x63\x44\xfe\
\xcd\x28\x3f\x50\x91\xe9\x6f\xbc\x73\xc9\x1b\x0f\x58\xbc\x67\x4f\
\x21\xa9\x87\x1b\x83\xca\xdd\xa8\xb9\x16\x63\x8a\x62\x14\x31\x06\
\x55\x45\x8c\xa2\x46\x16\x03\xa2\x46\xe9\xee\xcc\xd3\xbd\x3c\x4f\
\xa4\x32\x6d\x55\xf6\x1a\x91\x9d\x93\x93\xb5\xa7\xbe\x75\x4b\xe7\
\xcc\x1b\x02\x58\xbc\xeb\x09\xf1\x9e\x8d\x5e\xe4\x5e\x1f\xe4\x16\
\x31\xa6\x19\xa3\x60\x2c\x62\x53\x40\x31\x06\x35\x8a\xcc\x01\xea\
\x02\x60\xb1\x18\xb1\x7e\x6d\x91\x5c\xc6\x10\xa9\xa0\x30\xfd\xfc\
\xc1\xe1\xdd\xc3\xa7\xa7\xef\x6f\xca\xe9\x8b\x7d\xf7\x5e\x11\x2e\
\xf6\x7d\x73\xb1\x3f\x73\xb7\xff\xb3\x7a\xe7\x6f\xf2\xce\xef\x0a\
\x9e\xed\x40\x56\x00\x44\x10\x99\x1b\x05\x54\xd3\xb1\x71\xbf\xd8\
\x42\x80\x62\x21\x22\x9b\xd1\xf4\x99\x40\xb6\xaf\x6f\x72\xcb\xcc\
\x54\xf5\xba\xfa\x6c\xed\x98\xef\x7d\xdf\x31\x77\xf0\x1b\x17\x84\
\xd4\x0b\xfd\x91\x79\xff\x3f\xa8\xaf\x27\x37\xfb\x38\xd9\x15\x12\
\x77\x19\xde\x09\xde\x83\xf7\xe0\x16\x8f\x01\xbc\x27\x84\x00\x8b\
\x6c\x6e\xea\x5c\x60\x62\x2a\xc6\x85\x80\xf7\x81\x00\xd8\xc8\x48\
\x80\x4b\xe3\x4a\xbc\x8b\x10\x6e\xce\xdc\xf6\xb8\xbe\x2e\xc0\x25\
\x77\xfc\x8b\xa8\x84\x9b\x70\xfe\x41\x9c\xeb\x39\x0b\xea\x22\x16\
\x42\x68\x18\x04\x1a\x73\x02\xd3\xe5\x84\x38\xf6\xb8\x00\x01\x88\
\x32\x06\x54\xf1\x3e\xf4\x00\x0f\xaa\x91\xf7\xac\xf9\xec\x5e\x79\
\xcd\x80\xd5\xb1\xf2\xe6\x50\x4f\xbe\x88\xf7\x3d\x67\x41\x5c\x00\
\x52\xfc\x9c\x27\xcf\x86\x83\xd4\x8b\x71\xec\x29\xcf\x24\xf8\x10\
\xf0\x80\x18\x25\xa8\x12\x54\xc0\x98\x9e\x80\xfc\xe9\xf0\x89\xb1\
\xcb\x5e\x13\x60\xd3\x2d\x5f\x6d\x76\xb5\xf8\x33\xc1\xb9\xcd\x78\
\x77\x3e\x88\x9b\x03\xf2\x48\x43\xde\x39\x99\x99\xf7\x60\xea\xaa\
\xb9\xb9\xf7\x81\xc9\xe9\x04\xe7\xd3\x79\x36\x67\x09\x46\x41\x0d\
\x58\x4b\x50\xdd\x98\x24\xfe\xd3\xc5\x0f\xef\x2e\x5d\x14\x70\xed\
\xa7\x9e\xc2\xd7\xe2\x9b\x42\x3d\x79\x2f\xce\xcb\xfc\x87\x17\x5b\
\xf0\x0b\x50\xc1\x2f\xb2\xd0\xf8\x6d\x31\xdc\xc2\xbb\x67\x67\x13\
\xaa\xb1\xc7\x13\xb0\x51\x2a\x71\x30\x0a\x91\x85\x28\x92\xa0\xfa\
\x0b\x3e\xf0\xde\x2d\x0f\x3d\x7f\x61\xc0\xd3\xcf\xbd\xd2\x1a\x12\
\xf7\x9b\x38\x5f\xb8\x78\xbc\x85\x79\x59\xa5\x21\xad\xcc\x81\xfa\
\x34\xee\x1a\x02\xcf\x27\x4b\x3d\x09\x0d\x99\xd3\x38\x14\x55\x82\
\xb1\x04\x6b\x53\x48\x6b\x9b\xea\x33\xf1\x47\x5e\xf9\xf7\xe7\x97\
\x5c\x10\xd0\x55\xe3\x6b\x42\x92\x5c\x79\x2e\xcc\x42\x9c\x35\x64\
\x9d\xf3\xd8\xfc\xb8\xe0\x3d\x69\x78\x90\xc6\xc0\x22\xd0\xe9\x69\
\x87\x73\x01\x51\x41\x45\x10\xa3\x10\x45\x84\x28\xc2\x27\xe0\x26\
\x2b\x57\x78\xcf\xcf\x2d\x66\xb2\x73\x93\xd2\xcf\xef\xb4\x71\x25\
\xde\x11\x02\x05\x10\x10\x45\x42\xa0\xbb\x3d\xcf\xe5\xeb\x3a\xe8\
\x5a\x5a\x24\x88\x30\x34\x51\xe3\xd0\xd0\x0c\x43\x53\x75\x14\xd8\
\xd2\x55\xa0\xd4\x9c\x45\x8c\x01\x63\xc0\xa6\xa3\x18\xa5\x1e\x84\
\x17\x47\x63\x92\x74\xc9\xa4\xd5\x0a\x5b\x5b\x23\xda\x3a\x32\x0c\
\x16\x85\x97\x06\x23\x7e\x34\x38\xc3\x99\x31\x8f\x9f\xaa\x12\x30\
\x79\x1f\xe4\xd6\xdc\xaf\xee\xde\x5d\x7d\xf4\x17\x93\xb3\x00\x5d\
\x5c\xef\x08\x89\xbb\x0a\x49\xe1\x32\x59\xe1\x57\x6e\x5c\xc7\x3d\
\x1f\xb8\x9c\x75\xdd\x25\xb2\x51\xba\xa6\x57\xeb\x8e\xa3\xa7\x67\
\x78\x60\xf7\x51\x76\xff\x68\x84\x3f\xda\xb1\x9e\xab\xd7\xb5\x2e\
\x6c\x4a\x8b\x16\x8b\xc1\xa9\x3a\xbf\xf4\x58\x3f\xa3\x55\xcf\x8e\
\x4d\xcd\xdc\xb5\xad\x8d\xde\xb6\x88\x8c\x11\x02\xad\xcc\xc6\x9e\
\x03\x03\x65\xfe\xf0\x1f\x5f\xe2\x3b\x7d\x10\xac\x25\x08\x57\xf9\
\x24\x5c\x02\x0c\x9e\x0d\x58\x8d\xd7\x06\x1f\x56\x62\x0c\x4a\xe0\
\x43\x37\x6d\xe0\x8f\xef\x7e\x3b\x85\x7c\xc4\x89\xd3\x65\x0e\x0f\
\x4c\x91\xcf\x1a\x36\xae\x68\xa1\x77\x79\x81\xee\xb6\x1c\x12\x20\
\x6b\x95\xa6\x8c\xe1\xe8\x48\x85\x57\xa7\xeb\xa9\xab\x44\x40\x85\
\x91\x19\x87\x0b\xf0\xc1\xcb\x5a\xb8\xf7\xba\xa5\x14\x23\x65\xa0\
\x9c\x30\x30\xe3\x70\x71\xc2\xc6\xf6\x2c\xef\x58\x5b\xe2\xaf\xee\
\xdc\xc2\x87\x67\x63\xbe\xf3\xc3\x11\x02\xa1\x3b\xa9\xb9\xd5\xe7\
\x01\x12\xc2\x6a\xbc\x6f\x42\x84\x75\xdd\x2d\x7c\xe2\xf6\xad\x14\
\x9b\x32\x7c\x7b\xdf\x00\x9f\xdc\xb5\x8f\x93\x67\x2a\x98\x8c\x65\
\xc3\xca\x56\xde\xf6\xa6\x65\x3c\xf6\xfd\x21\xbc\x2e\xec\x94\x3b\
\xbf\x37\xc4\xdf\xed\x1b\x81\xc8\x22\xd6\xa4\x81\x6f\x0c\x6b\xdb\
\x73\xfc\xf6\x55\x4b\x28\x46\xca\x53\x27\x67\xf9\x93\xef\x4f\x50\
\x8b\x84\x81\xbe\x49\x36\x64\x3d\x7f\x76\xcb\x4a\xb6\xf4\x14\xf9\
\xbd\x5f\xde\xc8\xb3\xc7\xa6\x18\x2f\xc7\x79\xb1\x66\x0d\xb0\xf7\
\x2c\x40\x21\xac\xc5\x07\x83\x04\xae\xbd\xbc\x93\xd5\xcb\x4b\x8c\
\x4e\x56\xb9\xff\x6b\xcf\xf2\x52\xff\x04\x51\x26\x22\x6f\x84\x93\
\xaf\xce\x70\x72\xb4\x1f\xb5\x06\x5d\x94\x62\x3d\x6d\x59\xde\xd2\
\x53\x48\x63\xb1\x11\x87\x47\x27\x1d\xef\x5a\x5d\xa0\xab\x39\xe2\
\xd5\x99\x84\x2f\xfc\xd7\x18\x2f\x8f\x27\x34\x17\x2c\xe5\xd9\x3a\
\x4f\x1f\x18\xe6\x8b\x3e\x61\xd7\x5d\x97\xb2\xad\xb7\x85\xcd\xab\
\x4a\xec\x7d\x71\xcc\x98\x6c\xb4\xc9\x9d\x9b\x24\x3e\x4e\x56\x80\
\x40\x80\x2d\x6b\xdb\x51\x15\x4e\x9c\x9a\xe6\xe5\xfe\x09\x24\x04\
\xb6\xac\x6a\xe1\x81\xbb\xae\x22\x97\xb5\x20\x42\x2d\xf1\x7c\xe6\
\x9f\x5e\x62\x2e\x57\xef\xbe\xae\x9b\x3b\xdf\xd1\x35\x1f\x83\x3e\
\xc0\xc7\xbe\x35\xc4\x86\xf6\x0c\x2a\xd0\x3f\x95\x70\x72\x22\x01\
\x84\x4a\xd5\xe1\xa6\x62\x40\xd8\x7f\xa2\xcc\x58\x39\x61\x59\x4b\
\x44\xef\xf2\x22\xcf\x1c\x99\x24\xa9\xb9\x8e\xf3\xb2\x18\x1f\x1a\
\x2f\x0f\x24\x2e\x2d\x7e\x23\x2b\x58\x93\x7e\x31\x97\x31\xac\x5a\
\x56\xa0\x90\x8f\x68\x69\x8a\xa8\x25\x9e\x62\xce\xce\x67\x45\xff\
\x58\xf5\xac\x18\xf4\x02\xe3\x55\x87\x6f\xac\x35\x56\x85\x4c\x46\
\xc8\x65\x0d\xc1\x79\x9a\x36\xb6\xa3\x1b\xda\x59\x8a\xc7\x46\x8a\
\x0f\x90\x64\x2c\x5a\xc8\x42\xe2\xf1\xe7\x02\x9a\x6c\x34\x9c\xc4\
\x0e\x10\x0e\xbe\x7c\x06\xe7\x02\x6b\x3a\x4b\x5c\xb1\x71\x19\x4f\
\x1e\x18\xe2\xf9\x13\x13\xdc\x7a\xff\x1e\x96\xb5\x17\xd8\xf9\x91\
\xb7\xd1\xd1\x92\x3d\x2b\x63\x77\x7e\x6f\x90\xbf\xdf\x7f\x06\xac\
\x45\xa2\x74\x0b\xab\xab\x61\x65\x4b\x06\xe7\x03\xbd\x4b\x22\x6e\
\x78\x53\x91\xfd\xc3\x35\x06\xfa\x2b\x58\xab\x6c\xee\x2d\xf1\xbe\
\xd5\x4d\xb4\x35\x59\xa6\x6a\x9e\xd1\x52\x89\xb6\xcd\x0a\xde\x4d\
\x8c\x9c\xbb\x50\x7b\xe7\x5f\x41\xc4\x23\xb0\xf7\xbf\x87\x38\xd2\
\x37\x46\xa9\x90\xe1\x73\x77\x5c\xc9\xf5\x5b\xbb\x89\xac\x32\x38\
\x5a\x21\x6b\x0d\x19\xab\xf3\x2b\xca\x1c\x63\x31\x6b\x69\x2f\x44\
\x74\x14\xd3\xb1\xbd\x10\x71\x49\x73\xc4\x73\x13\x31\xc7\xcb\x09\
\xa5\x8c\xf2\x5b\x97\x96\xd8\x9c\x13\xf2\x40\x51\xe0\xca\x92\xe1\
\xd6\x35\x79\x8c\xc0\xf3\x13\x09\x71\x2e\x4f\xd7\x8a\x25\xbe\xbb\
\xa7\xe3\xf0\xf9\x12\xab\x39\x0e\xa1\x22\xa2\x85\xbe\xe1\x32\x9f\
\xff\xca\x3e\xfe\xfc\xe3\xd7\xf1\xe6\x75\xed\x3c\xfa\xfb\xd7\xf3\
\xca\xd0\x14\x22\x42\x6f\x57\x33\xad\x85\x0c\x43\xe3\x55\xca\x35\
\x37\x4f\xf8\xb1\x1b\x56\x70\xc7\xb5\x9d\xa9\xc4\x8d\xa2\x76\xb8\
\xea\xf8\x9d\x67\xc7\x78\xf8\xf0\x14\x9f\xba\xbc\x95\x0d\x2d\x11\
\x7f\xb1\xfd\x12\x8e\xbd\xb5\x8d\xac\x11\x56\xb5\x66\xc8\x5a\xe1\
\x78\xd9\xf1\xe4\xab\x31\x6b\x3a\x33\xb4\xcf\xfa\xca\x54\xc5\x1f\
\xe5\x7c\x89\x33\x27\xa8\x27\x43\x41\x74\x3d\x08\xdf\xfc\xcf\x13\
\x4c\x55\x1d\x1f\xfd\xc0\x9b\xd9\xb6\x71\x29\x9b\x57\xb5\x02\x30\
\x31\x53\xe7\xbb\x2f\x0c\xf1\xc8\xd3\x7d\x1c\x1a\x9c\x66\xb2\x92\
\x30\x5a\xae\x83\x40\x21\x67\x1a\x95\x75\xfa\xce\x72\x08\x88\xc0\
\x77\x4f\x55\x99\xae\x8c\xb2\xa3\x3b\xcf\xd6\xce\x3c\xeb\x96\x64\
\x00\x38\x33\x9b\xb0\x7f\x38\xe6\x99\x29\xcf\x58\x1c\x88\x8c\xb0\
\xbc\xc5\x0e\x75\xb6\x70\xfc\x89\x73\x7b\x92\x96\x77\xff\x65\x26\
\x2e\x57\xff\x36\x04\x6e\x9b\xdb\xb2\xc4\x58\x8a\xcd\x39\x7a\x7b\
\x5a\x69\x2e\xe6\xc0\x28\x23\x33\x09\xfd\xe3\x35\x6a\x41\xd1\xc8\
\xb2\xba\xb3\x99\x52\x29\x4b\xae\x25\x47\xae\x25\x8b\x64\x2c\xc1\
\x2a\x58\x83\x17\xe5\x74\xc5\x41\x80\x33\x03\x65\x6a\x13\x35\xd6\
\x76\xe4\x68\xc9\x5b\x44\x85\x91\x59\xcf\x78\x02\x5b\x37\xb5\xd1\
\x52\x88\xc8\x18\xc5\xaa\x3c\xa6\xc2\xaf\x7f\x69\x5b\x31\x3e\xaf\
\x69\x6a\x7a\xd7\x43\xb7\x79\xe7\xbf\x86\x6a\x36\xdd\x5b\x1b\x8b\
\xae\x35\x88\xb5\x8d\xbd\x76\x61\x21\xb6\xb9\x88\x7c\x6b\x9e\x7c\
\x5b\x1e\xdb\x14\x35\x4a\x27\x43\x68\x8c\xd6\x28\x91\x0a\xb5\xa9\
\x98\xc1\xa3\x93\x04\x0f\x62\x04\x99\x6f\x4f\x15\x63\x94\xf5\x2b\
\x9b\xd9\xbc\xa6\x44\xc6\x68\xcd\xa8\x7c\xe8\xc1\x2b\x8a\x8f\xfd\
\xaf\xd5\x8c\xc9\x66\xf6\x88\xb5\x87\xe6\x36\x7e\x31\x0a\x46\x41\
\x17\x4c\x54\x30\x19\x4b\xb1\x2d\x47\x7b\x57\x91\xe6\xf6\x3c\x36\
\x6b\x40\xd3\xed\x8d\x86\xc4\x82\x20\x80\xab\x7b\x46\xfa\x26\x71\
\xb5\x3a\xde\x7b\xbc\x0b\x04\xef\xd3\xb9\xf7\x38\x17\x18\x18\xae\
\x30\x53\x71\x00\x87\x20\xec\xb9\x60\xb9\x95\x6d\xce\x0f\x6b\x26\
\xda\x85\x31\xb5\x14\x32\xad\x7a\x45\xd3\x02\xd3\x66\x2d\xcd\x4b\
\xf2\x74\x74\x15\x69\x69\xcf\x63\xb3\x16\x4c\x03\x4c\x15\x44\xd2\
\x32\x5e\x04\xd1\x54\x9e\x89\xc1\x29\x2a\xe3\x15\x70\x0e\x92\x04\
\x5c\x42\x70\x9e\xe0\x52\x58\xef\x3d\x33\x95\x84\x93\xa7\x67\x6a\
\x3e\x84\x47\x08\x0c\x5f\xb0\xed\x9c\x7d\xe1\x71\x72\x9b\x77\x1c\
\x45\xcd\x5b\x51\x5d\x4f\xa3\xe7\xb5\xd9\x88\x62\x5b\x9e\x96\x8e\
\x02\xf9\x52\x0e\xcd\xda\xc6\x76\xa6\x60\x35\x95\x7e\x6e\xde\xb8\
\x57\x55\xea\xd3\x55\xce\xbc\x3c\x8a\x4f\x12\x24\x90\x5a\xa3\x3e\
\x9c\xcf\x25\x11\x82\x08\x33\x55\xf7\x74\x2e\x6b\xee\xfb\xfa\x0d\
\xed\x95\x8b\xf6\x24\xeb\xde\xb9\x69\x42\x33\xd1\x7d\x62\xcd\xf1\
\x28\x1b\x51\x5a\xd2\x44\x47\x57\x89\x52\x7b\x01\x9b\xb3\x67\x49\
\x1e\xe6\xe7\x02\x66\xce\x7b\x8d\x1e\xd9\x79\xc6\x8e\x9e\xc1\xcd\
\xd6\x20\x69\x78\xaf\x61\x92\xb8\xc6\x6f\x0d\x73\xfe\x64\xe2\xc3\
\x7d\x3e\x62\xfc\xff\x6c\xdc\x4f\x3f\xf9\x08\x76\xc3\xad\xa7\x4a\
\x4b\x9a\x86\x5b\x97\x16\xb6\x37\x95\x72\x39\xcd\xd8\x05\x4f\x35\
\x32\x34\x34\x0a\x02\xce\x29\x54\x31\x8a\xaa\x50\xee\x1f\x67\xea\
\xe4\x58\x5a\x85\xcf\x55\xda\x84\x86\x17\xc3\xbc\xf7\x54\x98\xb4\
\x56\x7f\xb7\x9e\x84\xdd\x2f\xfc\x46\x6f\x78\x4d\x27\x0b\xf5\x23\
\x8f\x87\x15\x37\xde\x71\xc4\x64\xa3\x72\xb0\xe6\xed\x58\xcd\xcd\
\xc3\x2d\x82\x5c\x6c\x61\x51\x62\xb9\x72\x8d\xd1\x83\x03\xf8\x5a\
\x7d\xbe\x05\x98\x93\x57\x42\x68\x24\x11\x08\x32\x09\x7c\xda\xa8\
\x7c\x65\xfc\xb3\x5b\x93\xd7\x75\xb2\x70\xf8\x81\x77\xc7\xc1\xe8\
\xc3\x18\xfd\x28\xd6\x9c\x48\xe1\xd2\x18\x0b\x73\x31\x77\x96\xa5\
\x32\xe3\x03\x13\x47\x4e\x53\x9f\xaa\xcc\x4b\x28\xf3\xf2\x3a\x70\
\x0e\x49\x1c\xe2\x5d\xbf\xd5\xf0\x09\xf1\xfe\xe1\xd1\xcf\x6d\x8b\
\x5f\xf7\xd1\x07\xc0\x91\xcf\x5f\x1f\x63\xcd\xd7\x31\xfa\x41\x8c\
\xf9\x36\xd6\xc4\x61\x3e\x31\x16\x2c\xcc\x2f\x41\x4a\x65\x68\x9c\
\xd9\xfe\x33\x88\x6b\x80\xb9\xa4\x01\x99\xde\x8b\x73\x75\x0d\x7e\
\x4f\xa4\xf2\x6b\x21\x4e\xbe\x3a\xf9\xc0\x35\xf1\x8f\x7d\xfc\xb6\
\xf1\x0b\x3f\x00\xa3\xed\xc1\xe8\xed\x18\x73\x27\x56\x37\x61\x4c\
\x84\xd5\xf9\x58\x14\xab\xf8\xd9\x98\xe1\xff\x38\x42\x3c\x31\xd3\
\x78\xb9\x20\xda\x38\x48\x32\x26\x11\xa3\x87\x35\x13\xfd\x4d\xbe\
\x25\xff\x68\xe7\x9a\xf6\xe1\x03\xf7\xbc\xe5\x8d\x3d\xc0\xdc\xf0\
\xe5\x1f\x0a\x46\xbb\xb0\xba\x1d\x63\xde\x1f\xac\xd9\x8a\xd5\x4b\
\x30\x26\xab\x82\x8c\x3f\x77\x9c\xa9\xc3\xa7\xd2\xf6\x33\xcd\x81\
\xaa\xa8\x8e\x88\xd1\x03\x26\x13\x7d\x53\x23\xf3\x44\xae\x98\x1b\
\x3c\xf5\xd0\x7b\xc2\x4f\xf4\x08\x78\xc3\x5f\xbf\x08\xaa\xd9\x60\
\xb4\x07\x6b\xd6\x88\xd1\x4d\x6e\x72\x76\xc3\xe9\x27\x0f\x46\x6e\
\xb6\x86\xaa\x38\x63\xf4\x68\x80\x43\xc6\x9a\x63\x62\xb4\x6f\xfd\
\xd5\xbd\xd5\x7d\x1f\xdf\xf6\xb3\x43\xf7\x9f\xfa\xf5\x3f\xe5\x4a\
\x50\xe4\x07\x90\xdf\x8f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x10\
\x0a\x8a\xcd\x47\
\x00\x6e\
\x00\x65\x00\x61\x00\x72\x00\x65\x00\x73\x00\x74\x00\x5f\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x69\x00\x6e\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x05\xe0\x84\x67\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\xcd\xf0\x47\
\x00\x67\
\x00\x65\x00\x6f\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x5f\x00\x69\x00\x63\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x04\x3e\
\x00\x00\x01\x66\xe8\x91\x28\x26\
\x00\x00\x00\x3a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x67\x0b\x6e\x20\xd0\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x37\x4d\
\x00\x00\x01\x67\x62\x19\xd2\xf4\
"
# Select the rcc payload format matching the running Qt.
# BUG FIX: the pyrcc-generated code compared version components as *strings*
# (['5', '10', '0'] < ['5', '8', '0'] is True because '10' < '8'
# lexicographically), so Qt 5.10+ would wrongly select the v1 resource format.
# Compare the components numerically instead; qVersion() returns a dotted
# numeric "major.minor.patch" string.
qt_version = QtCore.qVersion().split('.')
if [int(v) for v in qt_version] < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
qInitResources()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
20857,
2134,
2438,
198,
2,
198,
2,
15622,
416,
25,
383,
20857,
3082,
5329,
329,
9485,
48,
83,
20,
357,
48,
83,
410,
20,
13,
1157,
13,
17,
8,
198,
2,
198,... | 1.23592 | 58,787 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.abiinspect import *
# Directory holding the abinit reference files used by these tests.
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                         'test_files', "abinit")
try:
    import matplotlib
    matplotlib.use("pdf")  # Use non-graphical display backend during test.
    # NOTE(review): the backend is forced to "pdf" above, yet availability is
    # gated on the DISPLAY env var — presumably to skip plot tests on headless
    # CI boxes. Confirm this is intentional.
    have_matplotlib = "DISPLAY" in os.environ
except ImportError:
    have_matplotlib = False
class YamlTokenizerTest(PymatgenTest):
    """Test YamlTokenizer."""
if __name__ == '__main__':
    # unittest2 backport — this file predates Python 3's unittest features.
    import unittest2 as unittest
    unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
357,
66,
8,
350,
4948,
265,
5235,
7712,
4816,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
... | 2.616949 | 295 |
# -*- coding: utf-8 -*-
#Provide function logic for UI
from UI import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import configparser
import os
from nlp import *
#Initialize options.ini (create it if it does not exist)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
15946,
485,
2163,
9156,
329,
12454,
201,
198,
201,
198,
6738,
12454,
1330,
471,
72,
62,
13383,
27703,
201,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,... | 1.912409 | 137 |
import torch.utils.data as data
import cv2
import numpy as np
import math
from lib.utils import data_utils
from pycocotools.coco import COCO
import os
from lib.utils.tless import tless_utils, visualize_utils, tless_config
from PIL import Image
import glob
| [
11748,
28034,
13,
26791,
13,
7890,
355,
1366,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
9195,
13,
26791,
1330,
1366,
62,
26791,
198,
6738,
12972,
66,
420,
313,
10141,
13,
66,
25634,
133... | 3.185185 | 81 |
import numpy as np
class BinaryHigherOrderModel:
    """Higher order binary model.

    Expects ``self.indices`` (variable keys) and ``self.interactions`` to be
    set on the instance: ``interactions[0]`` maps a single index to its linear
    coefficient, and every later entry maps an index tuple to the coefficient
    of that higher-order term.
    """
    def adj_dict(self):
        """adjacency list of each variables

        Returns:
            dict: key (variables key), value (list of tuple represents connected indices)
        """
        neighbors = {index: [] for index in self.indices}
        # Walk every higher-order term; each variable in a term is adjacent
        # to the remaining variables of that term.
        for interaction in self.interactions[1:]:
            for inds, coefficient in interaction.items():
                for index in inds:
                    partners = list(inds)
                    # Drop a single occurrence of the variable itself
                    # (tuples with repeated indices keep the duplicates).
                    partners.remove(index)
                    neighbors[index].append([partners, coefficient])
        return neighbors
    def energy(self, state):
        """calculate energy of state

        Args:
            state (list of int): list of SPIN or BINARY

        Returns:
            float: energy of state
        """
        if isinstance(state, dict):
            # Convert a mapping into an array ordered by self.indices.
            state = [state[key] for key in self.indices]
        spins = np.array(state)
        # Higher-order contributions (pairwise and beyond).
        total = sum(
            (coefficient * np.prod(spins[list(inds)])
             for interaction in self.interactions[1:]
             for inds, coefficient in interaction.items()),
            0.0,
        )
        # Linear contributions.
        total += sum((h * spins[i] for i, h in self.interactions[0].items()), 0.0)
        return total
    def calc_energy(self, state):
        """alias of `energy`

        Args:
            state (list of int): list of SPIN or BINARY

        Returns:
            float: energy of state
        """
        return self.energy(state)
| [
11748,
299,
32152,
355,
45941,
628,
198,
4871,
45755,
48708,
18743,
17633,
25,
198,
220,
220,
220,
37227,
48708,
1502,
2746,
13,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
825,
9224,
62,
11600,
7,
944,
2599,
198,
220,
220,
220,
... | 2.031165 | 738 |
import logging
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from metashare.edelivery.wsdl_services import download_messages
from metashare.settings import LOG_HANDLER, CONTRIBUTIONS_ALERT_EMAILS
# Module-level logger, wired to the project-wide handler from settings.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
3758,
62,
4529,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
6738,
1138,
1077,
533,
13,
276,
417,
6315,
13,
1850... | 3.165049 | 103 |
#!/usr/bin/env python
import sys
import argparse
import subprocess
from collections import defaultdict as dd
# Unit multipliers relative to megabytes (M base, G = 1024 M, T = 1024**2 M).
size_multipliers = {'M':1, 'G':1024, 'T':1024**2}
# Map short flags to the sacct field names for requested CPUs/nodes.
core_node_keys = {'c':'ReqCPUS', 'n':'ReqNodes'}
# Columns a summary may be sorted on.
avail_sort = ['Jobs', 'Nodes', 'CPUs', 'GPUs', 'RAM']
# Grouping levels a summary may be aggregated by.
avail_levels = ['User', 'Account', 'State', 'Partition']
if __name__ == '__main__':
    # get_args/get_levels/summarize_jobs/print_summary are defined elsewhere
    # in this file (not visible in this chunk).
    args = get_args()
    levels = get_levels(args['levels'])
    job_summary = summarize_jobs(levels)
    # Sorting on GPUs implies GPU columns must be collected/displayed.
    if 'GPUs' in args['sort_on']:
        args['gpu']=True
    print_summary(job_summary, levels, args['gpu'], args['units'], args['sort_on'], args['ascending'])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
6738,
17268,
1330,
4277,
11600,
355,
49427,
198,
198,
7857,
62,
47945,
3183,
796,
1391,
6,
44,
10354,
16,
11,
705,
3... | 2.526749 | 243 |
# Copyright 2020 Tecnativa - Ernesto Tejeda
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import float_compare
| [
2,
15069,
12131,
48257,
32353,
12151,
532,
34705,
78,
1665,
73,
18082,
198,
2,
13789,
13077,
6489,
12,
18,
13,
15,
393,
1568,
357,
5450,
1378,
2503,
13,
41791,
13,
2398,
14,
677,
4541,
14,
363,
489,
737,
198,
198,
6738,
16298,
2238,... | 3.123288 | 73 |
import json
from office365.graph.directory.directoryObject import DirectoryObject
from office365.graph.directory.directoryObjectCollection import DirectoryObjectCollection
from office365.graph.onedrive.driveCollection import DriveCollection
from office365.graph.onedrive.siteCollection import SiteCollection
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.graph.teams.team import Team
def _delete_group_from_directory(target_group):
    """
    Deletes the group from directory

    :type target_group: Group
    """
    # Look the group up in the tenant's deleted-items container by id and
    # purge it permanently.
    target_group.context.directory.deletedGroups[target_group.id].delete_object()
class Group(DirectoryObject):
    """Represents an Azure Active Directory (Azure AD) group, which can be an Office 365 group, or a security group."""

    def add_team(self):
        """Create a new team under a group."""
        new_team = Team(self.context)
        new_team._parent_collection = self.parent_collection
        query = ServiceOperationQuery(self, "team", None, new_team, None, new_team)
        self.context.add_query(query)
        # Hook the pending request so the team-creation payload is built
        # just before the batch is executed.
        self.context.get_pending_request().beforeExecute += self._construct_create_team_request
        return new_team

    def delete_object(self, permanent_delete=False):
        """
        :param permanent_delete: Permanently deletes the group from directory
        :type permanent_delete: bool
        """
        super(Group, self).delete_object()
        if permanent_delete:
            # Once the id is resolved, purge the group from deleted items too.
            self.ensure_property("id", _delete_group_from_directory)

    @property
    def members(self):
        """Users and groups that are members of this group."""
        if not self.is_property_available('members'):
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("members", self.resource_path))
        return self.properties['members']

    @property
    def owners(self):
        """The owners of the group."""
        if not self.is_property_available('owners'):
            return DirectoryObjectCollection(self.context,
                                             ResourcePath("owners", self.resource_path))
        return self.properties['owners']

    @property
    def drives(self):
        """The group's drives. Read-only."""
        if not self.is_property_available('drives'):
            return DriveCollection(self.context, ResourcePath("drives", self.resource_path))
        return self.properties['drives']

    @property
    def sites(self):
        """The list of SharePoint sites in this group. Access the default site with /sites/root."""
        if not self.is_property_available('sites'):
            return SiteCollection(self.context,
                                  ResourcePath("sites", self.resource_path))
        return self.properties['sites']
| [
11748,
33918,
198,
198,
6738,
2607,
24760,
13,
34960,
13,
34945,
13,
34945,
10267,
1330,
27387,
10267,
198,
6738,
2607,
24760,
13,
34960,
13,
34945,
13,
34945,
10267,
36307,
1330,
27387,
10267,
36307,
198,
6738,
2607,
24760,
13,
34960,
13... | 2.669033 | 1,127 |
import sys
import yacc
from cool_lexer import CoolLexer, tokens
from ast import *
#precedence of terminals listed in ascending order
#first string of each tuple shows left, right, or non associativity
# NOTE: `precedence` and `start` are magic module-level names read by PLY's
# yacc.yacc() when the parser tables are built.
precedence = (
    ('right', 'larrow'),
    ('nonassoc', 'not'),
    ('nonassoc', 'lt', 'le', 'equals'),
    ('left', 'plus', 'minus'),
    ('left', 'times', 'divide'),
    ('nonassoc', 'isvoid'),
    ('nonassoc', 'tilde'),
    ('left', 'at'),
    ('left', 'dot'),
    )
#start symbol
start = 'program'
#Empty production
#Put at top so that reduce/reduce conflicts always choose this production
# NOTE: in every p_* function the docstring IS the PLY grammar production —
# it is parsed by yacc.yacc() and must not be edited as documentation.
def p_empty(p):
    'empty :'
    pass #do nothing
#begin program grammar
# A program is one or more semicolon-terminated class definitions.
def p_program(p):
    'program : classdef semi classlist'
    p[0] = AST([p[1]] + p[3])
def p_classlist_head(p):
    'classlist : classdef semi classlist'
    p[0] = [p[1]] + p[3]
def p_classlist_tail(p):
    'classlist : empty'
    p[0] = []
#end program grammar
#begin class grammar
# class TYPE [inherits TYPE] { feature; ... }
def p_classdef(p):
    'classdef : class type optinherits lbrace featurelist rbrace'
    p[0] = ASTClass(
        ASTIdentifier(p.lineno(2),p[2]),
        p[3],
        p[5])
def p_optinherits_nonempty(p):
    'optinherits : inherits type'
    p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_optinherits_empty(p):
    'optinherits : empty'
    # None marks "no superclass named"; the default base is resolved later.
    p[0] = None
##class features (methods and fields)
def p_featurelist_head(p):
    'featurelist : feature semi featurelist'
    p[0] = [p[1]] + p[3]
def p_featurelist_tail(p):
    'featurelist : empty'
    p[0] = []
# Method feature: name(formals) : TYPE { body }
def p_feature_method(p):
    'feature : identifier lparen formalargs rparen colon type lbrace expr rbrace'
    p[0] = ASTMethod(
        ASTIdentifier(p.lineno(1), p[1]),
        p[3],
        ASTIdentifier(p.lineno(6), p[6]),
        p[8])
def p_formalargs_first(p):
    'formalargs : formal formallist'
    p[0] = [p[1]] + p[2]
def p_formalargs_empty(p):
    'formalargs : empty'
    p[0] = []
def p_formallist_head(p):
    'formallist : comma formal formallist'
    p[0] = [p[2]] + p[3]
def p_formallist_tail(p):
    'formallist : empty'
    p[0] = []
# Attribute feature: name : TYPE [<- init]
def p_feature_field(p):
    'feature : identifier colon type optinit'
    p[0] = ASTAttribute(
        ASTIdentifier(p.lineno(1), p[1]),
        ASTIdentifier(p.lineno(3), p[3]),
        p[4])
# A formal parameter is a (name, type) identifier pair.
def p_formal(p):
    'formal : identifier colon type'
    p[0] = (ASTIdentifier(p.lineno(1), p[1]),
        ASTIdentifier(p.lineno(3), p[3]))
#end class grammar
### BEGIN Expression Grammars
#begin dynamic/static dispatch grammar
# expr[@TYPE].method(args): static dispatch when @TYPE is present,
# dynamic dispatch otherwise.
def p_expression_dispatch(p):
    'expr : expr opttype dot identifier lparen funcargs rparen'
    # Static dispatch, class is specified
    if p[2] is not None:
        p[0] = ASTExpression(
            p.lineno(1),
            "static_dispatch",
            (
            p[1],
            p[2],
            ASTIdentifier(p.lineno(4), p[4]),
            p[6]
            ))
    # Dynamic dispatch, no type
    else:
        p[0] = ASTExpression(
            p.lineno(1),
            "dynamic_dispatch",
            (
            p[1],
            ASTIdentifier(p.lineno(4), p[4]),
            p[6]
            ))
def p_opttype_nonempty(p):
    'opttype : at type'
    p[0] = ASTIdentifier(p.lineno(2), p[2])
def p_opttype_empty(p):
    'opttype : empty'
    p[0] = None
# Comma-separated actual-argument list of a call.
def p_funcargs_first(p):
    'funcargs : expr funclist'
    p[0] = [p[1]] + p[2]
def p_funcargs_empty(p):
    'funcargs : empty'
    p[0] = []
def p_funclist_head(p):
    'funclist : comma expr funclist'
    p[0] = [p[2]] + p[3]
def p_funclist_tail(p):
    'funclist : empty'
    p[0] = []
#end dynamic/static dispatch grammar
#begin self dispatch grammar
# method(args) with no receiver — implicit dispatch on self.
def p_expression_selfdispatch(p):
    'expr : identifier lparen funcargs rparen'
    p[0] = ASTExpression(
        p.lineno(1),
        "self_dispatch",
        (
        ASTIdentifier(p.lineno(1), p[1]),
        p[3]
        )
    )
#end self dispatch grammar
##If expression
# if <cond> then <then-branch> else <else-branch> fi
def p_expression_if(p):
    'expr : if expr then expr else expr fi'
    p[0] = ASTExpression(
        p.lineno(1),
        "if",
        (p[2],p[4],p[6]))
##While expression
# while <cond> loop <body> pool
def p_expression_while(p):
    'expr : while expr loop expr pool'
    p[0] = ASTExpression(
        p.lineno(1),
        "while",
        (p[2],p[4])
    )
#begin block statement grammar
# { expr; expr; ... } — payload is the flat list of expressions.
def p_expression_block(p):
    'expr : lbrace expr semi blocklist rbrace'
    p[0] = ASTExpression(
        p.lineno(1),
        "block",
        [p[2]] + p[4])
def p_blocklist_head(p):
    'blocklist : expr semi blocklist'
    p[0] = [p[1]] + p[3]
def p_blocklist_tail(p):
    'blocklist : empty'
    p[0] = []
#end block statement grammar
#begin let statement grammar
# let id : TYPE [<- init] [, id : TYPE [<- init]]* in expr
# Payload is (list of ASTLetBinding, body expression).
def p_expression_let(p):
    'expr : let identifier colon type optinit letlist in expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "let",
        ([ASTLetBinding(
            ASTIdentifier(p.lineno(2), p[2]),
            ASTIdentifier(p.lineno(4), p[4]),
            p[5])] + p[6],
        p[8]))
def p_optinit_nonempty(p):
    'optinit : larrow expr'
    p[0] = p[2]
def p_optinit_empty(p):
    'optinit : empty'
    # None means the binding has no explicit initializer.
    p[0] = None
def p_letlist_head(p):
    'letlist : comma identifier colon type optinit letlist'
    p[0] = [ASTLetBinding(\
        ASTIdentifier(p.lineno(2), p[2]),
        ASTIdentifier(p.lineno(4), p[4]),
        p[5])] + p[6]
def p_letlist_tail(p):
    'letlist : empty'
    p[0] = []
#end let statement grammar
#begin case statement grammar
# case expr of id : TYPE => expr; ... esac
# Payload is (scrutinee, list of ASTCase branches).
def p_expression_case(p):
    'expr : case expr of identifier colon type rarrow expr semi caselist esac'
    p[0] = ASTExpression(
        p.lineno(1),
        "case",
        (p[2],[ASTCase(ASTIdentifier(p.lineno(4),p[4]),
            ASTIdentifier(p.lineno(6),p[6]),
            p[8])] + p[10]))
def p_caselist_head(p):
    'caselist : identifier colon type rarrow expr semi caselist'
    p[0] = [ASTCase(ASTIdentifier(p.lineno(1),p[1]),
        ASTIdentifier(p.lineno(3),p[3]),
        p[5])] + p[7]
def p_caselist_tail(p):
    'caselist : empty'
    p[0] = []
#end case statement grammar
##expressions with unary and binary operators
# Operator associativity/priority comes from the module-level `precedence`
# table; each rule just builds a tagged ASTExpression node.
def p_expression_assign(p):
    'expr : identifier larrow expr'
    p[0] = ASTExpression(p.lineno(1), "assign", (ASTIdentifier(p.lineno(1), p[1]), p[3]))
def p_expression_newtype(p):
    'expr : new type'
    p[0] = ASTExpression(p.lineno(1), "new", ASTIdentifier(p.lineno(2), p[2]))
def p_expression_isvoid(p):
    'expr : isvoid expr'
    p[0] = ASTExpression(p.lineno(1), "isvoid", p[2])
def p_expression_plus(p):
    'expr : expr plus expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "plus",
        (p[1],p[3]))
def p_expression_minus(p):
    'expr : expr minus expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "minus",
        (p[1],p[3]))
def p_expression_times(p):
    'expr : expr times expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "times",
        (p[1],p[3]))
def p_expression_divide(p):
    'expr : expr divide expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "divide",
        (p[1],p[3]))
# Unary arithmetic negation (Cool's '~').
def p_expression_negate(p):
    'expr : tilde expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "negate",
        p[2])
def p_expression_lt(p):
    'expr : expr lt expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "lt",
        (p[1],p[3]))
def p_expression_lte(p):
    'expr : expr le expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "le",
        (p[1],p[3]))
def p_expression_equals(p):
    'expr : expr equals expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "eq",
        (p[1],p[3]))
def p_expression_not(p):
    'expr : not expr'
    p[0] = ASTExpression(
        p.lineno(1),
        "not",
        p[2])
# Parenthesized expression — grouping only, no node of its own.
def p_expression_paren(p):
    'expr : lparen expr rparen'
    p[0] = p[2]
def p_expression_id(p):
    'expr : identifier'
    p[0] = ASTExpression(p.lineno(1),
        "identifier",
        ASTIdentifier(p.lineno(1),p[1]))
##constant expressions
def p_expression_integer(p):
    'expr : integer'
    # Lexeme is converted to a Python int here.
    p[0] = ASTExpression(p.lineno(1),
        "integer",
        int(p[1]))
def p_expression_string(p):
    'expr : string'
    p[0] = ASTExpression(p.lineno(1),
        "string",
        p[1])
# Boolean literals carry an empty payload; the tag encodes the value.
def p_expression_true(p):
    'expr : true'
    p[0] = ASTExpression(p.lineno(1),
        "true",
        "")
def p_expression_false(p):
    'expr : false'
    p[0] = ASTExpression(p.lineno(1),
        "false",
        "")
if __name__ == '__main__':
    # Tokenize the input named on the command line, parse it with position
    # tracking (required for the p.lineno() calls above), and write the
    # serialized AST next to the input, swapping the "-lex" suffix for "-ast".
    lexer = CoolLexer()
    lexer.loadFromFile(sys.argv[1])
    parser = yacc.yacc()
    result = parser.parse(lexer=lexer, tracking=True, debug=False)
    with open(sys.argv[1].replace("-lex",'-ast'), 'w') as outFile:
        outFile.write(str(result))
| [
11748,
25064,
198,
11748,
331,
4134,
198,
6738,
3608,
62,
2588,
263,
1330,
15226,
45117,
263,
11,
16326,
198,
6738,
6468,
1330,
1635,
198,
198,
2,
3866,
771,
594,
286,
30237,
5610,
287,
41988,
1502,
198,
2,
11085,
4731,
286,
1123,
465... | 1.964924 | 4,590 |
#!/home/ubuntu/anaconda3/bin//python
'''
MIT License
Copyright (c) 2018 Riya Dulepet <riyadulepet123@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The code is inspired by https://github.com/erikor/medline project, but the logic to
parse medline XML was substantially modified.
'''
# pre-requisites: pip install elasticsearch
# pip install --upgrade pip
# to execute this code:
# STEP 0: ensure elastic search and kibana are running on port 9200
# and 5601 correspondingly
# STEP 1: make sure you have all the medline XML files downloaded from
# STEP 2: then you run nohup ls *.xml | xargs -n 1 -P 4 python ./parseMedline.py &
# the above step assume quad-core processor, and runs it as daemon process so when
# you exit SSH session, it runs in background.
# this should load the data into elastic search
import pandas as pd
import glob
import sys
import sys, os
# Input files from the Congressional Record corpus: per-congress description
# files, speech text files, and speaker-map files (all pipe-delimited).
descr_filenames = glob.glob("." + "/descr*.txt")
speech_filenames = glob.glob("." + "/speech*.txt")
speakermap_filenames = glob.glob("." + "/*SpeakerMap.txt")
# Sentence labels: N = no party mentioned, R = Republican, D = Democrat,
# B = both parties mentioned.
NO_PARTY_SENTENCE = "N"
REPUBLICAN_SENTENCE = "R"
DEMOCRAT_SENTENCE = "D"
BOTH_PARTY_SENTENCE = "B"
# Keyword lists -- presumably consumed by partyTypeSentence() (defined
# elsewhere in this project) to decide which party a sentence refers to.
republican = ["rnc", "gop", "republican", "republicans", "conservative", "conservatives", "right wing", "alt right", "far right"]
democrat = ["dnc", "democrat", "democrats", "democratic", "liberal", "liberals", "progressive", "progressives", "moderates", "nonconservative", "nonconservatives", "alt left", "far left", "left wing"]
from datetime import datetime
import json
import logging
from collections import deque
from pathlib import Path
import os.path
# Progress/diagnostics go to a local log file rather than stdout.
logging.basicConfig(filename='parse.log',level=logging.INFO)
# All extracted sentences from every congress are appended to this one CSV.
DESTINATION_FILE = "congress_party_affiliation_sentences.csv"
import spacy
import textacy
# spaCy English pipeline; loaded but sentence splitting below actually uses
# NLTK (see the commented-out nlp(...) call in the main loop).
nlp = spacy.load('en_core_web_sm')
import nltk
from nltk.tokenize import sent_tokenize
# Make sure the punkt sentence-tokenizer models are available locally.
nltk.download('punkt')
# For each congress, join descriptions, speeches and speaker metadata, then
# extract every sentence that mentions a single party (R or D) and append it
# to DESTINATION_FILE.
for speakermap_filename in speakermap_filenames:
    try:
        # Congress number is embedded in the filename, e.g. "./107SpeakerMap.txt".
        prefix = speakermap_filename[2:5]
        print("prefix=", prefix)
        descr_filename = "./descr_" + str(prefix) + ".txt"
        speech_filename = "./speeches_" + str(prefix) + ".txt"

        # Pipe-delimited corpus files; malformed rows are skipped, not fatal.
        # (The original wrapped each frame in a one-element list and concat'ed
        # it -- a no-op that has been removed.)
        df_descr = pd.read_csv(descr_filename, sep="|", error_bad_lines=False, header=0, encoding='ISO-8859-1')
        df_speech = pd.read_csv(speech_filename, sep="|", error_bad_lines=False, header=0, encoding='ISO-8859-1')
        df_speakermap = pd.read_csv(speakermap_filename, sep="|", error_bad_lines=False, header=0, encoding='ISO-8859-1')
        print("len df_descr=", len(df_descr))
        print("len df_speech=", len(df_speech))
        print("len df_speakermap=", len(df_speakermap))

        # Join the three tables on speech_id.
        df_descr_speech_speakermap = pd.merge(pd.merge(df_descr, df_speech, on='speech_id'), df_speakermap, on='speech_id')
        # Release the per-file frames; only the merged frame is needed below.
        df_descr = df_speech = df_speakermap = None

        df_descr_speech_speakermap['speech'] = df_descr_speech_speakermap['speech'].fillna('')
        df_descr_speech_speakermap['party'] = df_descr_speech_speakermap['party'].fillna('')

        # Collect matching sentences as dicts and build the DataFrame once.
        # BUG FIX: the original appended a placeholder row and then used
        # chained indexing (df.loc[i]["col"] = value), which assigns into a
        # temporary Series copy and leaves the stored row as "ignore".
        rows = []
        for index, row in df_descr_speech_speakermap.iterrows():
            # NLTK sentence splitting (much cheaper than the full spaCy
            # pipeline, which the original had already switched away from).
            for sent in sent_tokenize(row["speech"]):
                # partyTypeSentence() is defined elsewhere in this project;
                # it labels a sentence R/D/B/N -- TODO confirm its contract.
                party_affiliation = partyTypeSentence(str(sent))
                if party_affiliation in (REPUBLICAN_SENTENCE, DEMOCRAT_SENTENCE):
                    rows.append({
                        'congress': prefix,
                        'speech_id': row["speech_id"],
                        'speaker_party': row["party"],
                        'spoken_party': party_affiliation,
                        'sentence': sent,
                    })
        df_congressPartySentences = pd.DataFrame(
            rows, columns=('congress', 'speech_id', 'speaker_party', 'spoken_party', 'sentence'))
        # BUG FIX: the original passed a format string plus extra arguments
        # to print() without ever calling .format().
        print("CONGRESS={},LENGTH={}".format(prefix, len(df_congressPartySentences)))

        if os.path.exists(DESTINATION_FILE):
            # File exists: append without repeating the header row.
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='a', header=False)
        else:
            # Brand new file: write with header.
            df_congressPartySentences.to_csv(DESTINATION_FILE, mode='w', header=True)
    except Exception as e:
        print("Error reading description file = ", descr_filename)
        print("Error reading speech file = ", speech_filename)
        print("Error reading speakermap file = ", speakermap_filename)
        print(e)        # the repr
        print(str(e))   # just the message
        print(e.args)   # raw arguments the exception was raised with
        # Report where the failure happened (file name and line number).
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
2,
48443,
11195,
14,
32230,
14,
272,
330,
13533,
18,
14,
8800,
1003,
29412,
198,
7061,
6,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
2864,
371,
21008,
360,
2261,
6449,
1279,
380,
88,
324,
2261,
6449,
10163,
31,
14816,
13,
785,
... | 2.55384 | 2,591 |
from unittest import TestCase
from ..functions import permutationtest
import numpy as np
import pandas as pd
import permutation_test.csv_parser as csv_parser
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
11485,
12543,
2733,
1330,
9943,
7094,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9943,
7094,
62,
9288,
13,
40664,
62,
48610,
355,
269,
... | 2.661765 | 68 |
import os
import datetime
import numpy as np
import pandas as pd
class RelionMetaData:
    """RELION metadata handling class.

    Parameters
    ----------
    df_particles : pandas.DataFrame
        DataFrame containing particle data block contents.
    df_optics : pandas.DataFrame, optional
        DataFrame containing optics group data block contents. By default None
    starfile : string, optional
        Name of the star file the metadata was loaded from. By default None
    """

    def __init__(self, df_particles, df_optics=None, starfile=None):
        # BUG FIX: this class previously defined no __init__ even though
        # load() calls cls(df_particles, df_optics, starfile) and iloc()
        # calls self.__class__(...) -- instantiation raised TypeError.
        self.df_particles = df_particles
        self.df_optics = df_optics
        self.starfile = starfile

    @classmethod
    def load(cls, starfile):
        """Load RELION metadata from a particle star file.

        Parameters
        ----------
        starfile : string
            star file

        Returns
        -------
        RelionMetaData
            RelionMetaData class instance.
        """
        with open(starfile, 'r') as f:
            # Detect the RELION version from the first data block name:
            # 3.1+ files start with 'data_optics', older ones with 'data_'.
            relion31 = None
            for line in f:
                words = line.strip().split()
                if len(words) == 0:
                    continue
                elif words[0] == 'data_optics':
                    relion31 = True
                    break
                elif words[0] == 'data_':
                    relion31 = False
                    break
                elif words[0][0] == '#':
                    # Comment line
                    continue
            assert relion31 is not None, f'The starfile {starfile} is invalid.'

        # Load starfile with the version-appropriate parser.
        if relion31:
            df_particles, df_optics = cls._load_relion31(starfile)
        else:
            df_particles = cls._load_relion(starfile)
            df_optics = None
        return cls(df_particles, df_optics, starfile)

    @classmethod
    def _load_relion31(cls, starfile):
        """Load RELION 3.1 style starfile

        Parameters
        ----------
        starfile : string
            RELION 3.1 style star file

        Returns
        -------
        df_particles : pandas.DataFrame
            dataframe containing particle data block
        df_optics : pandas.DataFrame
            dataframe containing optics group data block.
        """
        with open(starfile, 'r') as f:
            # The optics block precedes the particles block in 3.1 files.
            headers_optics, data_optics = cls._read_block(f, 'data_optics')
            headers_particles, data_particles = cls._read_block(
                f, 'data_particles')
        df_optics = pd.DataFrame(data_optics, columns=headers_optics)
        df_particles = pd.DataFrame(data_particles, columns=headers_particles)
        return df_particles, df_optics

    @classmethod
    def _load_relion(cls, starfile):
        """Load RELION 2.x/3.0 style starfile

        Parameters
        ----------
        starfile : string
            RELION 2.x/3.0 style starfile

        Returns
        -------
        pandas.DataFrame
            dataframe containing data block
        """
        with open(starfile, 'r') as f:
            headers, data = cls._read_block(f, 'data_')
        df = pd.DataFrame(data, columns=headers)
        return df

    @classmethod
    def _read_block(cls, f, blockname):
        """Read data block from starfile

        Parameters
        ----------
        f : file-like object
            File-like object of starfile
        blockname : string
            Data block name to read.

        Returns
        -------
        headers : list of strings
            Metadata labels
        body : ndarray
            Metadatas
        """
        # Get to the block (data_, data_optics, data_particles, etc...)
        for line in f:
            if line.startswith(blockname):
                break
        # Get to header loop
        for line in f:
            if line.startswith('loop_'):
                break
        # Get list of column headers (lines like "_rlnLabel #1"; keep the
        # label only). The first non-header line stays in `line` and is the
        # first data row.
        headers = []
        for line in f:
            if line.startswith('_'):
                headers.append(line.strip().split()[0])
            else:
                break
        # All subsequent lines until empty line is the data block body
        body = [line.strip().split()]
        for line in f:
            if line.strip() == '':
                break
            else:
                body.append(line.strip().split())
        body = np.array(body)
        # Every row must have one value per header label.
        assert len(headers) == body.shape[1]
        return headers, body

    def write(self, outdir, outfile_rootname):
        """Save metadata in file

        Parameters
        ----------
        outdir : string
            Output directory.
        outfile_rootname : string
            Output file rootname (".star" is appended).
        """
        os.makedirs(outdir, exist_ok=True)
        outfile = os.path.join(outdir, outfile_rootname + '.star')
        with open(outfile, 'w') as f:
            f.write('# Created by cryoPICLS at {}\n'.format(
                datetime.datetime.now()))
            f.write('\n')
            if self.df_optics is not None:
                # RELION 3.1 layout: optics block, then particles block.
                self._write_block(f, 'data_optics', self.df_optics)
                self._write_block(f, 'data_particles', self.df_particles)
            else:
                # RELION 2.x/3.0 layout: single anonymous data block.
                self._write_block(f, 'data_', self.df_particles)

    def _write_block(self, f, blockname, df):
        """Write data block as star format

        Parameters
        ----------
        f : File-like object
            Star file object
        blockname : string
            Data block name (e.g. data_optics)
        df : pandas.DataFrame
            DataFrame containing metadata labels and metadatas
        """
        f.write(blockname.strip())
        f.write('\n\n')
        f.write('loop_\n')
        f.write('\n'.join(df.columns))
        f.write('\n')
        for i in df.index:
            f.write(' '.join(df.loc[i]))
            f.write('\n')
        f.write('\n')

    def iloc(self, idxs):
        """Fancy indexing.

        Parameters
        ----------
        idxs : array-like
            Indices to select.

        Returns
        -------
        RelionMetaData
            New metadata object with the selected rows.
        """
        df_particles_new = self.df_particles.iloc[idxs]
        return self.__class__(df_particles=df_particles_new,
                              df_optics=self.df_optics)
| [
11748,
28686,
198,
11748,
4818,
8079,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
4871,
4718,
295,
48526,
6601,
25,
198,
220,
220,
220,
37227,
16448,
2849,
20150,
9041,
1398,
13,
628,
220,
... | 2.001307 | 3,060 |
import logging
from collections import UserDict
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
# Module-level logger named after this module, per standard logging practice.
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
17268,
1330,
11787,
35,
713,
198,
198,
6738,
279,
1228,
13645,
13,
805,
10321,
13,
445,
271,
1330,
2297,
271,
13511,
198,
6738,
279,
1228,
13645,
13,
5532,
2978,
525,
1330,
13860,
47429,
198,
198,
6404,
796,
1... | 3.358491 | 53 |
#!/usr/bin/python3
from google.cloud import bigquery
from google.cloud import storage
import flask
from flask import request, jsonify, abort
import json
#for ML
import tensorflow as tf
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import PyPDF2
import pandas as pd
import os
from sklearn.preprocessing import LabelBinarizer
# Load labels: the training queries and job descriptions used to fit the
# tokenizer vocabulary.
filename = 'train_labels.csv'
data = pd.read_csv(filename, header=0, names=['Query'])
filename2 = 'train_descs.csv'
data2 = pd.read_csv(filename2, header=0, names=['Description'])

# Initialize tokenizer on the job descriptions (top 3000 words only).
tokenizer = Tokenizer(num_words=3000)
tokenizer.fit_on_texts(data2['Description'])

# Load the trained Keras model from disk.
model = tf.keras.models.load_model('../saved_model')

app = flask.Flask(__name__)
app.config["DEBUG"] = True
bucketName = "job-flex-storage"


def _predict_from_text(text):
    """Tokenize *text* with the fitted tokenizer and run the model on it."""
    # assumes the saved model expects padded sequences of length 3000
    # (matching num_words above) -- TODO confirm against training code.
    token_list = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=3000)
    return model.predict(token_list, verbose=0)


# BUG FIX: the original stacked four bare @app.route decorators with no
# function underneath (a SyntaxError) and evaluated model.predict() on an
# undefined ``token_list`` at import time. Each route now has a handler and
# prediction happens per-request.
@app.route('/', methods=['GET'])
def index():
    """Health-check / landing endpoint."""
    return jsonify({'status': 'ok'})


@app.route('/search', methods=['POST'])
def search():
    """Run the model against the posted search query."""
    payload = request.get_json(force=True, silent=True) or {}
    query = payload.get('query', '')
    return jsonify({'prediction': _predict_from_text(query).tolist()})


@app.route('/pdfPredict', methods=['POST'])
def pdf_predict():
    """Extract text from an uploaded PDF and run the model on it."""
    uploaded = request.files['file']
    # Legacy PyPDF2 API (getPage/extractText), matching the import era of
    # this file -- TODO confirm installed PyPDF2 version.
    reader = PyPDF2.PdfFileReader(uploaded)
    text = ' '.join(reader.getPage(i).extractText()
                    for i in range(reader.numPages))
    return jsonify({'prediction': _predict_from_text(text).tolist()})


@app.route('/getRecommendation', methods=['POST'])
def get_recommendation():
    """Return model output for the posted profile text."""
    payload = request.get_json(force=True, silent=True) or {}
    text = payload.get('profile', '')
    return jsonify({'prediction': _predict_from_text(text).tolist()})


app.run(host="0.0.0.0", port=8080)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
23645,
13,
17721,
1330,
1263,
22766,
198,
6738,
23645,
13,
17721,
1330,
6143,
198,
11748,
42903,
198,
6738,
42903,
1330,
2581,
11,
33918,
1958,
11,
15614,
198,
11748,
33918,
628,
198,... | 2.848866 | 397 |
# -*- coding: utf-8 -*-
"""
Merge-intervals problem:
Input:
3
1,10;32,45
78,94;5,16
80,100;200,220;16,32
Output:
1,45;78,100;200,220
Created on Sun Aug 12 09:58:08 2018
"""
from __future__ import absolute_import
from __future__ import print_function
class Solution(object):
    """Merge overlapping (or touching) closed intervals.

    The original file only sketched the algorithm inside a class-level
    string, referencing an undefined ``Part`` type; this implementation
    makes it a working method on plain ``[start, end]`` pairs.
    """

    def merge(self, parts):
        """Merge intervals.

        Parameters
        ----------
        parts : sequence of [start, end]
            Intervals; may be unsorted and overlapping.

        Returns
        -------
        list of [start, end]
            Non-overlapping merged intervals, sorted by start.
        """
        if not parts:
            return []
        # Sort by start so overlaps are always with the current interval.
        ordered = sorted(parts, key=lambda iv: iv[0])
        left, right = ordered[0][0], ordered[0][1]
        result = []
        for start, end in ordered[1:]:
            if start <= right:
                # Next interval overlaps or touches the current one: extend.
                right = max(right, end)
            else:
                # Gap found: close out the current interval and start anew.
                result.append([left, right])
                left, right = start, end
        result.append([left, right])
        return result
def merge_intervals(intervals):
    """Merge overlapping/touching [start, end] intervals.

    Returns a new list of merged intervals sorted by start; empty input
    yields an empty list.
    """
    if not intervals:
        return []
    ordered = sorted(intervals, key=lambda iv: iv[0])
    merged = [[ordered[0][0], ordered[0][1]]]
    for start, end in ordered[1:]:
        if start <= merged[-1][1]:
            # Overlaps/touches the last merged interval: extend it.
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([start, end])
    return merged


def main():
    """Read interval groups from stdin and print the merged result.

    Input format (per the module docstring): a count line, then that many
    lines of ';'-separated 'start,end' pairs. Output: one line of merged
    intervals in the same 'start,end;start,end' format.
    """
    n = int(input())
    intervals = []
    for _ in range(n):
        line = input().strip()
        if not line:
            continue
        for pair in line.split(';'):
            a, b = pair.split(',')
            intervals.append([int(a), int(b)])
    merged = merge_intervals(intervals)
    print(';'.join('{},{}'.format(a, b) for a, b in merged))


# BUG FIX: the original guard called main() which was never defined,
# raising NameError on every run.
if __name__ == '__main__':
    main()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
28938,
230,
33176,
114,
44293,
118,
29785,
112,
29785,
106,
165,
95,
246,
171,
120,
248,
198,
164,
122,
241,
17739,
98,
171,
120,
248,
198,
18,
198,
16,
... | 1.46164 | 756 |
import crashstatsutils
import jydoop
import json
from org.python.core.util import StringUtil
# Configure the crash-stats map/reduce job with no extra setup arguments.
setupjob = crashstatsutils.dosetupjob([])
# Use jydoop's keyed output writer to emit (key, value) records.
output = jydoop.outputWithKey
| [
11748,
7014,
34242,
26791,
198,
11748,
474,
88,
4598,
404,
198,
11748,
33918,
198,
6738,
8745,
13,
29412,
13,
7295,
13,
22602,
1330,
10903,
18274,
346,
198,
198,
40406,
21858,
796,
7014,
34242,
26791,
13,
37427,
316,
929,
21858,
26933,
... | 3.092593 | 54 |