content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
import pygraphviz
import wyvern
import random
import pydot
import sys
import csv
import re
prereq_reg = re.compile(r'[A-Z]{2,4}\*[0-9]{4}')
restrict_reg = re.compile(r'[A-Z]{2,4}\*[0-9]{4}')
if __name__ == "__main__":
sys.exit(main(sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
12972,
34960,
85,
528,
198,
11748,
266,
88,
933,
198,
11748,
4738,
198,
11748,
279,
5173,
313,
198,
11748,
25064,
198,
11748,
269,
21370,
198,
11748,
302,
198,
198,
386... | 2.037313 | 134 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@author: alishtory
@site: https://github.com/alishtory
@file: xsadmin_server.py
@time: 2017/3/10 16:33
@description:
'''
import config_xsadmin as config
import requests, hashlib
import time, random, logging
from ServerManager import AbstractServerManager
if __name__ == '__main__':
manager = XsadminServerManager()
while(True):
try:
manager.loop_server()
except Exception as e:
logging.error('loop happens error:%s'% e)
time.sleep(config.API_INTERVAL)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
31,
9800,
25,
435,
680,
83,
652,
198,
31,
15654,
25,
3740,
1378,
12567,
13,
785,
14,
282,
680,
83,
652,
... | 2.482301 | 226 |
from django.urls import path
from orders import views
app_name = 'orders'
urlpatterns = [
path('create/', views.order_create, name='order_create'),
path('<int:order_id>/', views.detail, name='detail'),
path('payment/<int:order_id>/<int:price>', views.payment, name='payment'),
path('verify/', views.verify, name='verify'),
path('apple-coupon/<int:order_id>', views.coupon_apply, name='coupon_apply'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
6266,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
6361,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
17953,
14,
3256,
5009,
13,
2875,
62,
... | 2.613497 | 163 |
import files_list
import files_delete
from datetime import datetime
import time
if __name__ == "__main__":
while True:
files = files_list.get_files_list()
if files == -1:
print("failed files get")
else:
now_time = datetime.now()
for file in files:
file_upload_time = datetime.fromtimestamp(file["timestamp"])
if 30 <= (now_time - file_upload_time).days:
print((now_time - file_upload_time).days)
files_delete.delete(file["file_id"])
files = []
time.sleep(86400)
| [
11748,
3696,
62,
4868,
198,
11748,
3696,
62,
33678,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
981,
6407,
25,
198,
220,
220,
220,
220,
... | 2.060403 | 298 |
import discord
from discord.ext import commands
import asyncio
import pickle
import time
import datetime
import asyncio
ranksRP = [int(5*(i**1.5)+50*i+100) for i in range(50)]
class Profiles(commands.Cog):
"""
Info about yourself or other users. Will use Battle Legion API to check stats when added.
"""
@commands.group(name='profile',invoke_without_command=True)
async def profile(self,ctx,member:discord.Member=None):
"""
Base profile command
Check your profile or that of another member's. Leave the member argument blank to select yourself.
"""
member = member or ctx.author
profiles = pickle.load(open('data/profiles.data','rb'))
embed = discord.Embed(title=f"{member.display_name}'s profile",
colour=member.colour,
description=f"{member.name}#{member.discriminator}")
embed.set_thumbnail(url=member.avatar_url_as(static_format='png'))
embed.set_footer(text=f"Requested by {ctx.author.display_name}",
icon_url=ctx.author.avatar_url_as(static_format='png'))
try:
account = profiles[member.id]['account']
except KeyError:
account = "`N/A`"
try:
clan = profiles[member.id]['clan']
except KeyError:
clan = "`N/A`"
embed.add_field(name="Base info:",
value=f"Primary account: {account} \nClan: {clan}")
try:
remRP, Rank = get_rank_from(profiles[member.id]['rp'])
embed.add_field(name="Level Info:",
value=f"Rank: {Rank}\nTotal RP: {profiles[member.id]['rp']}")
except KeyError:
embed.add_field(name='Level Info:',value="Rank: 0\nTotal RP: 0")
try:
squire = profiles[member.id]['squire']
except KeyError:
squire = "`N/A`"
try:
lord = profiles[member.id]['lord']
except KeyError:
lord = "`N/A`"
try:
rating = profiles[member.id]['rating']
except KeyError:
rating = "`N/A`"
try:
days = int(int(time.time() - (member.joined_at - datetime.datetime.utcfromtimestamp(0)).total_seconds())/86400)
server_date = f"{member.joined_at.ctime()} ({days} days ago)"
except KeyError:
print("How tf did I get this!?")
try:
days = int(int(time.time() - (member.created_at - datetime.datetime.utcfromtimestamp(0)).total_seconds())/86400)
discord_date = f"{member.created_at.ctime()} ({days} days ago)"
except KeyError:
print("wft?")
embed.add_field(name="Some Stats:",
value=f"Amount of Lord titles: {lord} \nAmount of Squire titles: {squire} \nBest :trophy: rating: {rating} \nJoined Discord on: {discord_date} \nJoined server on: {server_date}")
try:
unit = profiles[member.id]['unit']
except KeyError:
unit = "`N/A`"
try:
tactic = profiles[member.id]['tactic']
except KeyError:
tactic = "`N/A`"
embed.add_field(name='Fun Favourites:',
value=f"Favourite unit: {unit} \nFavourite Tactic: {tactic}")
await ctx.send(content="",embed=embed)
@profile.command(name="set")
async def profileSet(self,ctx,element:str=None,*,value:str=None):
"""
Modifies elements of your profile. Leave the element argument blank to see possible arguments.
"""
if element not in self.valid_elements:
await ctx.send("Please select a valid element. Valid elements are: \n`unit` - favourite unit, \n`rating` - top rating achieved (also set with `b!rank set`), \n`account` - Name of account,\n`clan` - Name of you current clan, \n`tactic` - Favourite tactic type, \n`lord` - amount of lord titles. \n`squire` - amount of squire titles.")
return
value = value or "`N/A`"
profiles = pickle.load(open('data/profiles.data','rb'))
if element in ["rating",'lord']:
value = int(value)
profiles[ctx.author.id][element] = value
pickle.dump(profiles,open('data/profiles.data','wb'))
await ctx.send(f"`{element}` has been set to `{value}`.")
@commands.is_owner()
@profile.command(name="reset",hidden=True)
async def profileReset(self,ctx):
"""
Resets all profiles. Do not use unless needed.
"""
profiles = {}
pickle.dump(profiles,(open('data/profiles.data','wb')))
await ctx.send("Deleted all profiles.")
@commands.is_owner()
@profile.command(name="fix",hidden=True)
async def profileFix(self,ctx):
"""
Must be rewritten each time. Fixes a bug with data.
"""
profiles = pickle.load(open('data/profiles.data','rb'))
for profile in profiles:
try:
profiles[profile]['rating'] = int(profiles[profile]['rating'])
except KeyError:
pass
except ValueError:
profiles[profile]['rating'] = 0
pickle.dump(profiles,(open('data/profiles.data','wb')))
await ctx.send("Deleted all accounts. They will now have to re input their data.")
time.sleep(30)
await ctx.send("Fixed the bug. Accounts restored.")
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
30351,
952,
198,
11748,
2298,
293,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
30351,
952,
198,
198,
81,
2283,
20031,
796,
685,
600,
7,
20,
9,
7,
72,
1174,
1... | 2.173253 | 2,505 |
from flask import jsonify, render_template, redirect, current_app, request
import requests_oauthlib
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
from config import FB_AUTHORIZATION_BASE_URL, FB_TOKEN_URL, BASE_URL
from serializer import serialize
from .db import get_schema
from .models import User # trigger migration
| [
6738,
42903,
1330,
33918,
1958,
11,
8543,
62,
28243,
11,
18941,
11,
1459,
62,
1324,
11,
2581,
198,
11748,
7007,
62,
12162,
1071,
8019,
198,
6738,
7007,
62,
12162,
1071,
8019,
13,
47587,
62,
42624,
1330,
23960,
62,
47587,
62,
13049,
19... | 3.597938 | 97 |
from wtforms_alchemy import ModelForm, ModelFormField
from models import * | [
6738,
266,
83,
23914,
62,
282,
26599,
1330,
9104,
8479,
11,
9104,
8479,
15878,
198,
6738,
4981,
1330,
1635
] | 3.894737 | 19 |
from substitution_cipher.decrypt import decrypt_file, decrypt_text, reverse_dict
from substitution_cipher.encrypt import encrypt_file, encrypt_text
key = {
"a": "X",
"b": "B",
"c": "R",
"d": "Z",
"e": "A",
"f": "Y",
"g": "J",
"h": "M",
"i": "C",
"j": "D",
"k": "O",
"l": "U",
"m": "N",
"n": "E",
"o": "T",
"p": "Q",
"q": "H",
"r": "W",
"s": "G",
"t": "L",
"u": "K",
"v": "F",
"w": "P",
"x": "I",
"y": "S",
"z": "V",
}
reversed_key = {
"X": "a",
"B": "b",
"R": "c",
"Z": "d",
"A": "e",
"Y": "f",
"J": "g",
"M": "h",
"C": "i",
"D": "j",
"O": "k",
"U": "l",
"N": "m",
"E": "n",
"T": "o",
"Q": "p",
"H": "q",
"W": "r",
"G": "s",
"L": "t",
"K": "u",
"F": "v",
"P": "w",
"I": "x",
"S": "y",
"V": "z",
}
encryption_samples = {
"this is A test Message 12345 done.": "LMCGCGXLAGLNAGGXJAZTEA",
"?haha": "MXMX",
"...echo ...I": "ARMTC",
}
decryption_samples = {
"LMC2GCGXL3AGL NAGGXJAZTEA": "thisisatestmessagedone",
"? MXMX": "haha",
"ARM12345TC": "echoi",
}
def test_reverse_dict():
"""
Tests :py:func:`substitution_cipher.decrypt.reverse_dict`.
"""
assert reverse_dict(key) == reversed_key
class TestHandleText:
"""
Tests the encryption/decryption of strings.
"""
def test_encrypt_text(self):
"""
Tests :py:func:`substitution_cipher.encrypt.encrypt_text`.
"""
for original, encrypted in encryption_samples.items():
assert encrypt_text(original, key) == encrypted
def test_decrypt_text(self):
"""
Tests :py:func:`substitution_cipher.decrypt.decrypt_text`.
"""
for encrypted, decrypted in decryption_samples.items():
assert decrypt_text(encrypted, reversed_key) == decrypted
class TestHandleFiles:
"""
Tests the encryption/decryption of plaintext (UTF-8) files.
"""
def test_encrypt_file(self, tmpdir):
"""
Tests :py:func:`substitution_cipher.encrypt.encrypt_file`.
"""
input_filename = tmpdir.join("input.txt")
output_filename = tmpdir.join("output.txt")
with open(input_filename, "w", encoding="utf8") as input_file:
input_file.write("\n".join(encryption_samples.keys()))
encrypt_file(input_filename, output_filename, key)
expected = "\n".join(encryption_samples.values()) + "\n"
with open(output_filename, encoding="utf8") as output_file:
assert output_file.read() == expected
def test_decrypt_file(self, tmpdir):
"""
Tests :py:func:`substitution_cipher.decrypt.decrypt_file`.
"""
input_filename = tmpdir.join("input.txt")
output_filename = tmpdir.join("output.txt")
with open(input_filename, "w", encoding="utf8") as input_file:
input_file.write("\n".join(decryption_samples.keys()))
decrypt_file(input_filename, output_filename, reversed_key)
expected = "\n".join(decryption_samples.values()) + "\n"
with open(output_filename, encoding="utf8") as output_file:
assert output_file.read() == expected
| [
6738,
32097,
62,
66,
10803,
13,
12501,
6012,
1330,
42797,
62,
7753,
11,
42797,
62,
5239,
11,
9575,
62,
11600,
198,
6738,
32097,
62,
66,
10803,
13,
12685,
6012,
1330,
34117,
62,
7753,
11,
34117,
62,
5239,
198,
198,
2539,
796,
1391,
1... | 2.073957 | 1,582 |
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Implements image tile identification and fetching from various sources.
The Matplotlib interface can make use of tile objects (defined below) via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_image` method. For example, to add a
:class:`MapQuest Open Aerial tileset <MapQuestOpenAerial>` to an existing axes
at zoom level 2, do ``ax.add_image(MapQuestOpenAerial(), 2)``. An example of
using tiles in this way can be found at the
:ref:`sphx_glr_gallery_eyja_volcano.py` example.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractmethod
import warnings
from PIL import Image
import shapely.geometry as sgeom
import numpy as np
import six
import cartopy.crs as ccrs
class GoogleWTS(six.with_metaclass(ABCMeta, object)):
"""
Implement web tile retrieval using the Google WTS coordinate system.
A "tile" in this class refers to the coordinates (x, y, z).
"""
def _find_images(self, target_domain, target_z, start_tile=(0, 0, 0)):
"""Target domain is a shapely polygon in native coordinates."""
assert isinstance(target_z, int) and target_z >= 0, ('target_z must '
'be an integer '
'>=0.')
# Recursively drill down to the images at the target zoom.
x0, x1, y0, y1 = self._tileextent(start_tile)
domain = sgeom.box(x0, y0, x1, y1)
if domain.intersects(target_domain):
if start_tile[2] == target_z:
yield start_tile
else:
for tile in self._subtiles(start_tile):
for result in self._find_images(target_domain, target_z,
start_tile=tile):
yield result
find_images = _find_images
_subtiles = subtiles
def tile_bbox(self, x, y, z, y0_at_north_pole=True):
"""
Return the ``(x0, x1), (y0, y1)`` bounding box for the given x, y, z
tile position.
Parameters
----------
x
The x tile coordinate in the Google tile numbering system.
y
The y tile coordinate in the Google tile numbering system.
z
The z tile coordinate in the Google tile numbering system.
y0_at_north_pole: optional
Boolean representing whether the numbering of the y coordinate
starts at the north pole (as is the convention for Google tiles)
or not (in which case it will start at the south pole, as is the
convention for TMS). Defaults to True.
"""
n = 2 ** z
assert 0 <= x <= (n - 1), ("Tile's x index is out of range. Upper "
"limit %s. Got %s" % (n, x))
assert 0 <= y <= (n - 1), ("Tile's y index is out of range. Upper "
"limit %s. Got %s" % (n, y))
x0, x1 = self.crs.x_limits
y0, y1 = self.crs.y_limits
# Compute the box height and width in native coordinates
# for this zoom level.
box_h = (y1 - y0) / n
box_w = (x1 - x0) / n
# Compute the native x & y extents of the tile.
n_xs = x0 + (x + np.arange(0, 2, dtype=np.float64)) * box_w
n_ys = y0 + (y + np.arange(0, 2, dtype=np.float64)) * box_h
if y0_at_north_pole:
n_ys = -1 * n_ys[::-1]
return n_xs, n_ys
def tileextent(self, x_y_z):
"""Return extent tuple ``(x0,x1,y0,y1)`` in Mercator coordinates."""
x, y, z = x_y_z
x_lim, y_lim = self.tile_bbox(x, y, z, y0_at_north_pole=True)
return tuple(x_lim) + tuple(y_lim)
_tileextent = tileextent
@abstractmethod
# http://developer.mapquest.com/web/products/open/map for terms of use
# http://devblog.mapquest.com/2016/06/15/
# modernization-of-mapquest-results-in-changes-to-open-tile-access/
# this now requires a sign up to a plan
# http://developer.mapquest.com/web/products/open/map for terms of use
# The following attribution should be included in the resulting image:
# "Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture,
# Farm Service Agency"
# http://developer.mapquest.com/web/products/open/map for terms of use
class Stamen(GoogleWTS):
"""
Retrieves tiles from maps.stamen.com. Styles include
``terrain-background``, ``terrain``, ``toner`` and ``watercolor``.
For a full reference on the styles available please see
http://maps.stamen.com. Of particular note are the sub-styles
that are made available (e.g. ``terrain`` and ``terrain-background``).
To determine the name of the particular [sub-]style you want,
follow the link on http://maps.stamen.com to your desired style and
observe the style name in the URL. Your style name will be in the
form of: ``http://maps.stamen.com/{STYLE_NAME}/#9/37/-122``.
Except otherwise noted, the Stamen map tile sets are copyright Stamen
Design, under a Creative Commons Attribution (CC BY 3.0) license.
Please see the attribution notice at http://maps.stamen.com on how to
attribute this imagery.
"""
class StamenTerrain(Stamen):
"""
**DEPRECATED:** This class is deprecated. Please use
``Stamen('terrain-background')`` instead.
Terrain tiles defined for the continental United States, and include land
color and shaded hills. The land colors are a custom palette developed by
Gem Spear for the National Atlas 1km land cover data set, which defines
twenty-four land classifications including five kinds of forest,
combinations of shrubs, grasses and crops, and a few tundras and wetlands.
The colors are at their highest contrast when fully zoomed-out to the
whole U.S., and they slowly fade out to pale off-white as you zoom in to
leave room for foreground data and break up the weirdness of large areas
of flat, dark green.
References
----------
* http://mike.teczno.com/notes/osm-us-terrain-layer/background.html
* http://maps.stamen.com/
* https://wiki.openstreetmap.org/wiki/List_of_OSM_based_Services
* https://github.com/migurski/DEM-Tools
"""
class MapboxTiles(GoogleWTS):
"""
Implement web tile retrieval from Mapbox.
For terms of service, see https://www.mapbox.com/tos/.
"""
def __init__(self, access_token, map_id):
"""
Set up a new Mapbox tiles instance.
Access to Mapbox web services requires an access token and a map ID.
See https://www.mapbox.com/api-documentation/ for details.
Parameters
----------
access_token
A valid Mapbox API access token.
map_id
An ID for a publicly accessible map (provided by Mapbox).
This is the map whose tiles will be retrieved through this process.
"""
self.access_token = access_token
self.map_id = map_id
super(MapboxTiles, self).__init__()
class MapboxStyleTiles(GoogleWTS):
"""
Implement web tile retrieval from a user-defined Mapbox style. For more
details on Mapbox styles, see
https://www.mapbox.com/studio-manual/overview/map-styling/.
For terms of service, see https://www.mapbox.com/tos/.
"""
def __init__(self, access_token, username, map_id):
"""
Set up a new instance to retrieve tiles from a Mapbox style.
Access to Mapbox web services requires an access token and a map ID.
See https://www.mapbox.com/api-documentation/ for details.
Parameters
----------
access_token
A valid Mapbox API access token.
username
The username for the Mapbox user who defined the Mapbox style.
map_id
A map ID for a map defined by a Mapbox style. This is the map whose
tiles will be retrieved through this process. Note that this style
may be private and if your access token does not have permissions
to view this style, then map tile retrieval will fail.
"""
self.access_token = access_token
self.username = username
self.map_id = map_id
super(MapboxStyleTiles, self).__init__()
class QuadtreeTiles(GoogleWTS):
"""
Implement web tile retrieval using the Microsoft WTS quadkey coordinate
system.
A "tile" in this class refers to a quadkey such as "1", "14" or "141"
where the length of the quatree is the zoom level in Google Tile terms.
"""
def find_images(self, target_domain, target_z, start_tile=None):
"""
Find all the quadtrees at the given target zoom, in the given
target domain.
target_z must be a value >= 1.
"""
if target_z == 0:
raise ValueError('The empty quadtree cannot be returned.')
if start_tile is None:
start_tiles = ['0', '1', '2', '3']
else:
start_tiles = [start_tile]
for start_tile in start_tiles:
start_tile = self.quadkey_to_tms(start_tile, google=True)
for tile in GoogleWTS.find_images(self, target_domain, target_z,
start_tile=start_tile):
yield self.tms_to_quadkey(tile, google=True)
def _merge_tiles(tiles):
"""Return a single image, merging the given images."""
if not tiles:
raise ValueError('A non-empty list of tiles should '
'be provided to merge.')
xset = [set(x) for i, x, y, _ in tiles]
yset = [set(y) for i, x, y, _ in tiles]
xs = xset[0]
xs.update(*xset[1:])
ys = yset[0]
ys.update(*yset[1:])
xs = sorted(xs)
ys = sorted(ys)
other_len = tiles[0][0].shape[2:]
img = np.zeros((len(ys), len(xs)) + other_len, dtype=np.uint8) - 1
for tile_img, x, y, origin in tiles:
y_first, y_last = y[0], y[-1]
yi0, yi1 = np.where((y_first == ys) | (y_last == ys))[0]
if origin == 'upper':
yi0 = tile_img.shape[0] - yi0 - 1
yi1 = tile_img.shape[0] - yi1 - 1
start, stop, step = yi0, yi1, 1 if yi0 < yi1 else -1
if step == 1 and stop == img.shape[0] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
y_slice = slice(start, stop, step)
xi0, xi1 = np.where((x[0] == xs) | (x[-1] == xs))[0]
start, stop, step = xi0, xi1, 1 if xi0 < xi1 else -1
if step == 1 and stop == img.shape[1] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
x_slice = slice(start, stop, step)
img_slice = (y_slice, x_slice, Ellipsis)
if origin == 'lower':
tile_img = tile_img[::-1, ::]
img[img_slice] = tile_img
return img, [min(xs), max(xs), min(ys), max(ys)], 'lower'
| [
2,
357,
34,
8,
3517,
12223,
15069,
2813,
532,
2864,
11,
3395,
4452,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
6383,
11081,
13,
198,
2,
198,
2,
6383,
11081,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
34... | 2.395821 | 4,929 |
# importing the package covid
# can be installed with pip install covid
from covid import Covid
covid= Covid()
# setting a country, here India
india = covid.get_status_by_country_name("india")
print(india) | [
2,
33332,
262,
5301,
39849,
312,
198,
2,
460,
307,
6589,
351,
7347,
2721,
39849,
312,
198,
198,
6738,
39849,
312,
1330,
39751,
312,
198,
66,
709,
312,
28,
39751,
312,
3419,
198,
198,
2,
4634,
257,
1499,
11,
994,
3794,
198,
521,
54... | 3.089552 | 67 |
import datetime
import requests
import schedule
import time
import vk
session = vk.Session(access_token="token1")
admin = vk.Session(access_token="token2")
api = vk.API(session)
human = vk.API(admin)
maingroup = 128947927
maingroup_fix = -128947927
# Уведомления в личные сообщения
schedule.every(1).minutes.do(messages)
# Статус группы и стена
schedule.every(2).hours.do(status)
while True:
# Активация костыля
schedule.run_pending()
# Работа с временем/датой
now_date = datetime.date.today()
now_time = datetime.datetime.now()
day = now_date.isoweekday()
cur_hour = now_time.hour
cur_minute = now_time.minute
cur_second = now_time.second
bb = datetime.date.today()
# Работа с API блокчейна
check = requests.get("https://blockchain.info/ru/ticker").json()
USD = check["USD"]
buy = USD["buy"]
sell = USD["sell"]
message = "Buy: " + str(buy) + "$\nSell: " + str(sell) + "$"
time.sleep(10)
| [
11748,
4818,
8079,
198,
11748,
7007,
198,
11748,
7269,
198,
11748,
640,
198,
11748,
410,
74,
198,
198,
29891,
796,
410,
74,
13,
36044,
7,
15526,
62,
30001,
2625,
30001,
16,
4943,
198,
28482,
796,
410,
74,
13,
36044,
7,
15526,
62,
30... | 2.031513 | 476 |
from flask import Flask
# Internal packages
from configurations import BaseConfig
from flask_base import db, bcrypt, log_mgr
from .admin import admin
from .api import api
from .errors import errors
from .main import main
from .posts import posts
from .user import users
def create_app(*args, **kwargs) -> Flask:
"""Creates a Flask app instance"""
# Config app
config_class = kwargs.pop('config_class', BaseConfig)
app = Flask(__name__, static_folder=config_class.STATIC_DIR_PATH,
template_folder=config_class.TEMPLATE_DIR_PATH)
app.config.from_object(config_class)
# Initialize things that supports app
db.init_app(app)
bcrypt.init_app(app)
log_mgr.init_app(app)
# Register routes
for rt in [admin, api, main, users, posts, errors]:
app.register_blueprint(rt)
return app
| [
6738,
42903,
1330,
46947,
198,
2,
18628,
10392,
198,
6738,
25412,
1330,
7308,
16934,
198,
6738,
42903,
62,
8692,
1330,
20613,
11,
275,
29609,
11,
2604,
62,
76,
2164,
198,
6738,
764,
28482,
1330,
13169,
198,
6738,
764,
15042,
1330,
40391... | 2.82 | 300 |
"""Test loading of additional plugins."""
from nose.tools import eq_
def test_one():
"""Test that the test plugin was initialized."""
from testapp import plugins
eq_(plugins.plugin_began, True)
| [
37811,
14402,
11046,
286,
3224,
20652,
526,
15931,
198,
6738,
9686,
13,
31391,
1330,
37430,
62,
628,
198,
4299,
1332,
62,
505,
33529,
198,
220,
220,
220,
37227,
14402,
326,
262,
1332,
13877,
373,
23224,
526,
15931,
198,
220,
220,
220,
... | 3.370968 | 62 |
"""Notification channels for django-notifs."""
try:
from channels.layers import get_channel_layer
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
from asgiref.sync import async_to_sync
from pydantic import BaseModel
from .base import BaseNotificationProvider
class DjangoChannelsNotificationProvider(BaseNotificationProvider):
"""django-channels websocket provider"""
name = 'django_channels'
package = 'channels'
HAS_DEPENDENCIES = HAS_DEPENDENCIES
@property
@property
| [
37811,
3673,
2649,
9619,
329,
42625,
14208,
12,
1662,
361,
82,
526,
15931,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
9619,
13,
75,
6962,
1330,
651,
62,
17620,
62,
29289,
628,
220,
220,
220,
33930,
62,
46162,
10619,
24181,
11015,
... | 3.039548 | 177 |
import tensorflow as tf
import os
import numpy as np
import cv2
from args import FLAGS
from database import reader, helper, helper_cityscapes
from model import pspnet_mg
from experiment_manager.utils import sorted_str_dict
if __name__ == '__main__':
tf.app.run()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
198,
6738,
26498,
1330,
9977,
4760,
50,
198,
6738,
6831,
1330,
9173,
11,
31904,
11,
31904,
62,
19205,
1416,
7916,... | 3.102273 | 88 |
import unittest
from py4godot import Array, Variant
| [
11748,
555,
715,
395,
198,
6738,
12972,
19,
25344,
313,
1330,
15690,
11,
38215,
628,
197,
628,
197,
198
] | 3.052632 | 19 |
# -*- coding: utf-8 -*-
# Converted from Chango-Regular.ttf using:
# ./font2bitmap.py Chango-Regular.ttf 32 -c 0x20-0x7f
MAP = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
BPP = 1
HEIGHT = 35
MAX_WIDTH = 49
_WIDTHS = \
b'\x0d\x10\x13\x1c\x1a\x30\x21\x0b\x11\x11\x14\x1a\x0c\x10\x0c\x17'\
b'\x1c\x13\x1b\x1a\x1d\x19\x1b\x19\x1a\x1b\x0c\x0c\x18\x18\x16\x16'\
b'\x31\x23\x1f\x1a\x1d\x1b\x1a\x1c\x23\x11\x15\x21\x1b\x24\x1f\x21'\
b'\x1d\x23\x1d\x1a\x1b\x1c\x1f\x2e\x21\x1f\x1b\x11\x17\x11\x16\x18'\
b'\x0e\x1a\x1a\x16\x1a\x18\x14\x1a\x1b\x0e\x11\x1b\x0e\x29\x1b\x1a'\
b'\x1a\x1a\x15\x15\x15\x1b\x19\x24\x1a\x19\x16\x11\x0b\x11\x1b\x16'
OFFSET_WIDTH = 3
_OFFSETS = \
b'\x00\x00\x00\x00\x01\xc7\x00\x03\xf7\x00\x06\x90\x00\x0a\x64\x00'\
b'\x0d\xf2\x00\x14\x82\x00\x19\x05\x00\x1a\x86\x00\x1c\xd9\x00\x1f'\
b'\x2c\x00\x21\xe8\x00\x25\x76\x00\x27\x1a\x00\x29\x4a\x00\x2a\xee'\
b'\x00\x2e\x13\x00\x31\xe7\x00\x34\x80\x00\x38\x31\x00\x3b\xbf\x00'\
b'\x3f\xb6\x00\x43\x21\x00\x46\xd2\x00\x4a\x3d\x00\x4d\xcb\x00\x51'\
b'\x7c\x00\x53\x20\x00\x54\xc4\x00\x58\x0c\x00\x5b\x54\x00\x5e\x56'\
b'\x00\x61\x58\x00\x68\x0b\x00\x6c\xd4\x00\x71\x11\x00\x74\x9f\x00'\
b'\x78\x96\x00\x7c\x47\x00\x7f\xd5\x00\x83\xa9\x00\x88\x72\x00\x8a'\
b'\xc5\x00\x8d\xa4\x00\x92\x27\x00\x95\xd8\x00\x9a\xc4\x00\x9f\x01'\
b'\x00\xa3\x84\x00\xa7\x7b\x00\xac\x44\x00\xb0\x3b\x00\xb3\xc9\x00'\
b'\xb7\x7a\x00\xbb\x4e\x00\xbf\x8b\x00\xc5\xd5\x00\xca\x58\x00\xce'\
b'\x95\x00\xd2\x46\x00\xd4\x99\x00\xd7\xbe\x00\xda\x11\x00\xdd\x13'\
b'\x00\xe0\x5b\x00\xe2\x45\x00\xe5\xd3\x00\xe9\x61\x00\xec\x63\x00'\
b'\xef\xf1\x00\xf3\x39\x00\xf5\xf5\x00\xf9\x83\x00\xfd\x34\x00\xff'\
b'\x1e\x01\x01\x71\x01\x05\x22\x01\x07\x0c\x01\x0c\xa7\x01\x10\x58'\
b'\x01\x13\xe6\x01\x17\x74\x01\x1b\x02\x01\x1d\xe1\x01\x20\xc0\x01'\
b'\x23\x9f\x01\x27\x50\x01\x2a\xbb\x01\x2f\xa7\x01\x33\x35\x01\x36'\
b'\xa0\x01\x39\xa2\x01\x3b\xf5\x01\x3d\x76\x01\x3f\xc9\x01\x43\x7a'
_BITMAPS =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x3f\xf0\x7f\xf8\x7f\xf8\x7f\xf8\x7f\xf8\x7f\xf8\x7f'\
b'\xf8\x3f\xf0\x3f\xf0\x3f\xf0\x3f\xf0\x1f\xe0\x1f\xe0\x1f\xc0\x0f'\
b'\xc0\x00\x00\x01\x00\x0f\xc0\x1f\xe0\x1f\xe0\x1f\xe0\x1f\xe0\x0f'\
b'\xc0\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x80\x7e\x7e\x0f\xcf\xc1'\
b'\xf9\xf8\x3e\x3e\x07\xc7\xc0\xf8\xf8\x1f\x1f\x01\xc1\xe0\x38\x38'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x3c\x00\x0f\x87\xc0\x00\xf8'\
b'\x7c\x00\x0f\x87\xc0\x00\xf8\x7c\x00\xff\xff\xf8\x0f\xff\xff\xc0'\
b'\xff\xff\xfc\x0f\xff\xff\x80\x1f\x0f\x80\x01\xf0\xf8\x00\x1f\x0f'\
b'\x80\x01\xf0\xfe\x01\xff\xff\xf0\x1f\xff\xff\x01\xff\xff\xf0\x1f'\
b'\xff\xff\x00\x3e\x1f\x00\x03\xe1\xf0\x00\x3e\x1f\x00\x03\xc1\xf0'\
b'\x00\x3c\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0f\x80\x00\x03\xe0\x00\x00\xfc\x00'\
b'\x01\xff\xf0\x01\xff\xff\x01\xff\xff\xe0\x7f\xff\xf8\x3f\xff\xfe'\
b'\x0f\xff\xc3\x03\xff\xf0\x00\xff\xfc\x00\x1f\xff\xc0\x07\xff\xfc'\
b'\x00\xff\xff\xc0\x0f\xff\xf8\x00\xff\xff\x00\x1f\xff\xe0\x03\xff'\
b'\xf8\xf0\xff\xfe\x7f\xff\xff\x9f\xff\xff\xc7\xff\xff\xf0\xff\xff'\
b'\xf0\x0f\xff\xf8\x00\x7f\xe0\x00\x03\xf0\x00\x00\xf8\x00\x00\x3e'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x80\x00'\
b'\x00\x00\x00\x7f\xe0\x00\xff\x00\x00\xff\xf8\x01\xff\x00\x01\xff'\
b'\xf8\x03\xfe\x00\x03\xf9\xfc\x07\xfc\x00\x03\xf9\xfc\x0f\xf8\x00'\
b'\x03\xf9\xfc\x0f\xf0\x00\x03\xf9\xfc\x1f\xe0\x00\x03\xf9\xfc\x3f'\
b'\xc0\x00\x03\xff\xfc\x7f\x80\x00\x01\xff\xf8\xff\x0f\xe0\x00\xff'\
b'\xf1\xfe\x3f\xf8\x00\x7f\xe3\xfc\x7f\xfc\x00\x00\x07\xf8\xff\xfe'\
b'\x00\x00\x0f\xf0\xfe\xfe\x00\x00\x1f\xf1\xfe\x7f\x00\x00\x3f\xe1'\
b'\xfc\x7f\x00\x00\x7f\xc1\xfc\x7f\x00\x00\xff\x80\xfe\x7f\x00\x01'\
b'\xff\x00\xff\xfe\x00\x03\xfe\x00\x7f\xfe\x00\x07\xfc\x00\x7f\xfc'\
b'\x00\x07\xf8\x00\x1f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xff\xf0\x00\x03\xff\xfe\x00\x03\xff\xff'\
b'\x80\x03\xff\xff\xc0\x03\xff\xff\xc0\x01\xff\xe0\x40\x00\xff\xf0'\
b'\x00\x00\x7f\xf0\x00\x00\x3f\xfd\x80\x10\x1f\xff\xff\xfc\x07\xff'\
b'\xff\xfe\x01\xff\xff\xff\x01\xff\xff\xff\x81\xff\xff\xff\x01\xff'\
b'\xf1\xff\x00\xff\xf0\xff\x80\x7f\xf8\x7f\xc0\x3f\xfe\x3f\xe0\x1f'\
b'\xff\x9f\xf8\x0f\xff\xff\xff\x03\xff\xff\xff\x80\xff\xff\xff\xc0'\
b'\x3f\xff\x3f\xe0\x01\xf0\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x02\x01\xf8\x3f\x07\xe0\xf8\x1f\x03\xe0'\
b'\x7c\x07\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf0\x01\xfc\x01\xfe\x01\xfe\x01\xfe\x00\xff\x00\xff\x00'\
b'\x7f\x80\x7f\xc0\x3f\xc0\x1f\xe0\x0f\xf0\x0f\xf8\x07\xf8\x03\xfc'\
b'\x01\xfe\x00\xff\x00\x7f\x80\x3f\xc0\x1f\xe0\x0f\xf0\x07\xfc\x01'\
b'\xfe\x00\xff\x00\x7f\x80\x3f\xe0\x0f\xf0\x07\xf8\x01\xfe\x00\xff'\
b'\x00\x3f\xc0\x0f\xf0\x03\xf8\x00\x78\x00\x00\x0f\x00\x0f\xe0\x07'\
b'\xf8\x01\xfe\x00\x7f\x00\x3f\xc0\x0f\xf0\x07\xf8\x03\xfc\x00\xff'\
b'\x00\x7f\x80\x3f\xc0\x1f\xf0\x07\xf8\x03\xfc\x01\xfe\x00\xff\x00'\
b'\x7f\x80\x3f\xc0\x1f\xe0\x0f\xf0\x0f\xf8\x07\xf8\x03\xfc\x01\xfe'\
b'\x01\xfe\x00\xff\x00\x7f\x80\x7f\x80\x3f\x80\x3f\xc0\x3f\xc0\x1f'\
b'\xc0\x07\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\xf0\x00\x0f\x00\x00\xf0\x00\x8f\x10\x1e\xf7\x81\xff'\
b'\xf8\x1f\xff\x81\xff\xf8\x01\xf8\x00\x3f\xc0\x07\xfe\x00\xfb\xf0'\
b'\x07\x9e\x00\x30\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xf8\x00\x00\x3f\x00\x00\x0f\xc0\x00\x03\xf0'\
b'\x00\x00\xfc\x00\x00\x3f\x00\x00\x0f\xc0\x00\xff\xff\xc0\x7f\xff'\
b'\xf8\x1f\xff\xfe\x07\xff\xff\x81\xff\xff\xe0\x00\xfc\x00\x00\x3f'\
b'\x00\x00\x0f\xc0\x00\x03\xf0\x00\x00\xfc\x00\x00\x3f\x00\x00\x0f'\
b'\x80\x00\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xc0'\
b'\x7e\x0f\xf0\xff\x0f\xf0\xff\x07\xe0\x3e\x07\xc0\xf8\x0e\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x03\xfc\x07\xff\x07\xff\x07\xff\x07\xfe\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x07\xe0\xff\x0f\xf0\xff'\
b'\x0f\xe0\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07'\
b'\xc0\x00\x3f\xc0\x00\x7f\x00\x01\xfe\x00\x03\xfc\x00\x07\xf0\x00'\
b'\x1f\xe0\x00\x3f\xc0\x00\x7f\x00\x01\xfe\x00\x03\xfc\x00\x07\xf0'\
b'\x00\x1f\xe0\x00\x3f\xc0\x00\x7f\x00\x01\xfe\x00\x03\xfc\x00\x0f'\
b'\xf0\x00\x1f\xe0\x00\x3f\x80\x00\xff\x00\x01\xfe\x00\x03\xf8\x00'\
b'\x0f\xf0\x00\x1f\xe0\x00\x3f\x80\x00\xff\x00\x01\xfe\x00\x03\xf8'\
b'\x00\x0f\xf0\x00\x1f\xe0\x00\x3f\x80\x00\xff\x00\x00\xf8\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7f\xc0\x00\x3f\xff\x00\x07\xff\xf8\x00\xff'\
b'\xff\xe0\x1f\xff\xfe\x03\xff\xff\xf0\x7f\xff\xff\x87\xfe\x1f\xf8'\
b'\x7f\xe1\xff\x8f\xfe\x1f\xfc\xff\xe0\xff\xcf\xfc\x0f\xfc\xff\xc0'\
b'\xff\xcf\xfe\x1f\xfc\x7f\xe1\xff\xc7\xfe\x1f\xf8\x7f\xf3\xff\x83'\
b'\xff\xff\xf0\x3f\xff\xff\x01\xff\xff\xe0\x0f\xff\xfc\x00\x7f\xff'\
b'\x80\x01\xff\xe0\x00\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xc1\xff\xf8'\
b'\xff\xff\x1f\xff\xf3\xff\xfe\x7f\xff\xc1\xff\xf8\x3f\xff\x07\xff'\
b'\xe0\xff\xfc\x1f\xff\x83\xff\xf0\x7f\xfe\x0f\xff\xc1\xff\xf8\x3f'\
b'\xff\x07\xff\xe0\xff\xfc\x1f\xff\x83\xff\xe0\x7f\xfc\x07\xff\x80'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xff\xc0\x01\xff\xfe\x00\xff\xff\xe0\x3f\xff\xfe\x07\xff'\
b'\xff\xe0\xff\xff\xfc\x0e\x0f\xff\x81\x01\xff\xf0\x00\x3f\xfe\x00'\
b'\x07\xff\x80\x00\xff\xf0\x00\x3f\xfc\x00\x0f\xff\x00\x03\xff\xe0'\
b'\x00\xff\xf0\x00\x3f\xfc\x00\x0f\xff\x00\x03\xff\xff\xf1\xff\xff'\
b'\xfe\x3f\xff\xff\xc7\xff\xff\xf8\x7f\xff\xff\x0f\xff\xff\xe0\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x0f\xf8\x00\x3f\xff\xc0\x1f\xff\xf8'\
b'\x0f\xff\xff\x03\xff\xff\xc0\xff\xff\xf0\x38\x3f\xfc\x00\x0f\xff'\
b'\x00\x03\xff\x80\x07\xff\xe0\x1f\xff\xe0\x07\xff\xfe\x01\xff\xff'\
b'\xc0\x21\xff\xf0\x00\x3f\xfe\x00\x0f\xff\x80\x07\xff\xe1\xff\xff'\
b'\xf8\x7f\xff\xfe\x1f\xff\xff\x07\xff\xff\xc1\xff\xff\xe0\x3f\xff'\
b'\xe0\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f'\
b'\xfc\x00\x07\xff\xe0\x00\x7f\xff\x00\x07\xff\xf8\x00\x7f\xff\xc0'\
b'\x03\xff\xfe\x00\x3f\xff\xf0\x03\xef\xff\x80\x3e\x7f\xfc\x03\xe3'\
b'\xff\xe0\x3f\x1f\xff\x01\xf0\xff\xf8\x1f\xff\xff\xe0\xff\xff\xff'\
b'\x87\xff\xff\xfc\x3f\xff\xff\xe1\xff\xff\xff\x00\x03\xff\xe0\x00'\
b'\x1f\xff\x00\x00\xff\xf8\x00\x07\xff\xc0\x00\x1f\xfc\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x07\xf0\x00\xff\xff\x80\x7f\xff\xc0'\
b'\x3f\xff\xe0\x1f\xff\xf0\x1f\xff\xf0\x0f\xf8\x00\x07\xfc\x00\x03'\
b'\xff\xe0\x01\xff\xfe\x00\xff\xff\x80\x7f\xff\xe0\x1f\xff\xf8\x00'\
b'\x3f\xfc\x00\x1f\xff\x00\x0f\xff\x80\x0f\xff\xc3\xff\xff\xc1\xff'\
b'\xff\xe0\xff\xff\xe0\x7f\xff\xf0\x3f\xff\xf0\x0f\xff\xe0\x00\x1f'\
b'\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x20\x00\x01\xff\xc0\x01\xff\xf8\x00'\
b'\x7f\xff\x80\x3f\xff\xf0\x0f\xff\xfc\x03\xff\xff\x00\x7f\xf8\x00'\
b'\x1f\xfe\x00\x03\xff\xbf\x00\xff\xff\xf8\x1f\xff\xff\x83\xff\xff'\
b'\xf8\x7f\xfb\xff\x0f\xfe\x3f\xf1\xff\xc7\xfe\x3f\xf0\xff\xc3\xff'\
b'\x1f\xf8\x7f\xe7\xfe\x07\xff\xff\xc0\x7f\xff\xf0\x07\xff\xfc\x00'\
b'\x3f\xfe\x00\x00\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xfe\x00\x3f\xff\xfe'\
b'\x1f\xff\xff\x8f\xff\xff\xc7\xff\xff\xe1\xff\xff\xe0\x01\xff\xf0'\
b'\x00\xff\xf0\x00\xff\xf8\x00\x7f\xf8\x00\x7f\xfc\x00\x7f\xfc\x00'\
b'\x3f\xfe\x00\x3f\xfe\x00\x1f\xff\x00\x1f\xff\x00\x0f\xff\x80\x0f'\
b'\xff\x80\x0f\xff\xc0\x07\xff\xc0\x07\xff\xe0\x03\xff\xe0\x01\xff'\
b'\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x80\x01\xff\xf8\x01\xff'\
b'\xff\x80\xff\xff\xf0\x3f\xc7\xfc\x0f\xf1\xff\x07\xfe\x7f\xc0\xff'\
b'\xff\xf0\x3f\xff\xf8\x07\xff\xfc\x00\xff\xff\x80\xff\xff\xf0\x7f'\
b'\xff\xfe\x1f\xff\xff\x8f\xfc\xff\xe3\xff\x1f\xfc\xff\xc3\xff\x3f'\
b'\xf9\xff\x87\xff\xff\xe1\xff\xff\xf0\x3f\xff\xf8\x07\xff\xfc\x00'\
b'\x7f\xfc\x00\x01\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xff'\
b'\x00\x07\xff\xf8\x01\xff\xff\x80\x7f\xff\xf8\x1f\xf9\xff\x83\xfe'\
b'\x3f\xf0\x7f\xc7\xff\x0f\xf8\xff\xe1\xff\x1f\xfc\x3f\xf3\xff\x87'\
b'\xff\xff\xf0\x7f\xff\xfe\x07\xff\xff\xc0\x7f\xff\xf0\x00\x0f\xfe'\
b'\x00\x03\xff\xc0\x00\xff\xf0\x07\xff\xfc\x03\xff\xff\x00\x7f\xff'\
b'\xc0\x0f\xff\xe0\x00\xff\xf0\x00\x0f\xc0\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x81\xf8\x3f\xc3\xfc'\
b'\x3f\xc1\xf8\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f'\
b'\x01\xf8\x3f\xc3\xfc\x3f\xc3\xf8\x1f\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8'\
b'\x1f\x83\xfc\x3f\xc3\xfc\x1f\x80\xf0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xf0\x1f\x83\xfc\x3f\xc3\xfc\x3f\xc1\xf8\x0f\x81'\
b'\xf0\x3e\x03\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1f\x00\x00\x7f\x00\x01\xff\x00\x07\xff\x00\x1f\xfe\x00'\
b'\x7f\xf8\x01\xff\xe0\x01\xff\x80\x01\xfe\x00\x01\xff\x00\x01\xff'\
b'\xc0\x00\xff\xf0\x00\x3f\xfc\x00\x0f\xff\x00\x01\xff\x00\x00\x7f'\
b'\x00\x00\x1f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xff\x83'\
b'\xff\xff\xc3\xff\xff\xc3\xff\xff\xc3\xff\xff\xc1\xff\xff\x80\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xff\xff\xc3\xff\xff'\
b'\xc3\xff\xff\xc3\xff\xff\xc3\xff\xff\xc0\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x01\xe0\x00\x07\xe0\x00\x1f\xe0\x00\x7f\xe0\x01\xff\xe0\x01\xff'\
b'\xf0\x01\xff\xe0\x00\xff\xc0\x00\xff\x00\x0f\xfc\x00\xff\xf0\x0f'\
b'\xff\x00\xff\xf0\x07\xff\x00\x1f\xf0\x00\x7f\x00\x01\xf0\x00\x03'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xe0\x07\xff\xe0\x3f\xff'\
b'\xc1\xff\xff\x87\xff\xfe\x1e\x1f\xf8\x30\x7f\xe0\x01\xff\x80\x0f'\
b'\xfc\x00\xff\xc0\x07\xfc\x00\x3f\xc0\x00\xff\x00\x03\xfc\x00\x07'\
b'\xf0\x00\x00\x00\x00\x08\x00\x01\xf8\x00\x0f\xf0\x00\x3f\xc0\x00'\
b'\xff\x00\x03\xfc\x00\x07\xe0\x00\x06\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xff\xe0\x00\x00\x00\x0f\xff\xff\x80\x00'\
b'\x00\x3f\xff\xff\xf8\x00\x00\x7f\xff\xff\xff\x00\x00\x7f\xe0\x01'\
b'\xff\xc0\x00\xff\x80\x00\x1f\xf8\x00\xff\x80\x00\x07\xfc\x00\xff'\
b'\x80\x00\x01\xff\x00\xff\x87\xe7\xfc\x7f\xc0\x7f\x8f\xfb\xff\x1f'\
b'\xe0\x7f\x8f\xff\xff\x8f\xf0\x3f\xc7\xff\xff\xc7\xf8\x1f\xe7\xfe'\
b'\xff\xe3\xfe\x1f\xf3\xff\x3f\xf1\xff\x0f\xf1\xff\x1f\xf8\xff\x07'\
b'\xf8\xff\x8f\xfc\x7f\x83\xfc\x7f\xc7\xfe\x3f\xc1\xff\x3f\xf3\xff'\
b'\x1f\xe0\xff\x9f\xff\xff\x9f\xe0\x3f\xc7\xff\x7f\xff\xe0\x1f\xe3'\
b'\xff\xbf\xff\xe0\x0f\xf8\xff\x8f\xff\xe0\x07\xfe\x3f\x01\xff\x80'\
b'\x01\xff\x80\x08\x00\x00\x00\x7f\xe0\x1e\x00\x00\x00\x3f\xff\xff'\
b'\x80\x00\x00\x0f\xff\xff\xc0\x00\x00\x01\xff\xff\xc0\x00\x00\x00'\
b'\x7f\xff\x80\x00\x00\x00\x03\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x00\x00\x00\x3f\xe0\x00'\
b'\x00\x0f\xfe\x00\x00\x03\xff\xc0\x00\x00\x7f\xfc\x00\x00\x1f\xff'\
b'\x80\x00\x07\xff\xf8\x00\x00\xff\xff\x80\x00\x3f\xff\xf0\x00\x07'\
b'\xff\xff\x00\x01\xf3\xff\xe0\x00\x3c\x3f\xfe\x00\x0f\x87\xff\xe0'\
b'\x01\xf0\xff\xfc\x00\x7e\x3f\xff\xc0\x1f\xff\xff\xf8\x03\xff\xff'\
b'\xff\x80\xff\xff\xff\xf8\x1f\xff\xff\xff\x07\xe0\x07\xff\xf0\xfc'\
b'\x00\xff\xfe\x3f\x00\x0f\xff\xe7\xe0\x01\xff\xf8\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04'\
b'\x00\x00\xff\xff\xfe\x01\xff\xff\xff\x03\xff\xff\xfe\x07\xff\xc7'\
b'\xfe\x0f\xff\x87\xfc\x1f\xff\x0f\xf8\x3f\xfe\x3f\xf0\x7f\xff\xff'\
b'\xe0\xff\xff\xff\x81\xff\xff\xfe\x03\xff\xff\xff\x07\xff\xff\xff'\
b'\x0f\xff\x87\xfe\x1f\xff\x07\xfe\x3f\xfe\x0f\xfc\x7f\xfc\x1f\xf8'\
b'\xff\xf8\x7f\xf1\xff\xf7\xff\xc3\xff\xff\xff\x87\xff\xff\xfe\x0f'\
b'\xff\xff\xf8\x0f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x07\xff\x00\x0f\xff\xf8\x0f\xff\xff\x07\xff\xff\xc3'\
b'\xff\xff\xf1\xff\xff\xf8\x7f\xfe\x00\x1f\xff\x00\x0f\xff\xc0\x03'\
b'\xff\xe0\x00\xff\xf8\x00\x3f\xfe\x00\x0f\xff\x80\x03\xff\xf0\x00'\
b'\xff\xfc\x00\x3f\xff\x80\x0f\xff\xf8\x01\xff\xff\xfc\x7f\xff\xff'\
b'\x0f\xff\xff\xc1\xff\xff\xf0\x3f\xff\xfc\x03\xff\xfc\x00\x0f\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\xff\xff\xc0\x0f\xff\xff'\
b'\x80\x7f\xff\xfe\x03\xff\xff\xf8\x1f\xff\xff\xe0\xff\xfd\xff\x87'\
b'\xff\xc3\xfc\x3f\xfe\x0f\xe1\xff\xf0\x7f\x0f\xff\x83\xfc\x7f\xfc'\
b'\x1f\xe3\xff\xe0\xff\x1f\xff\x07\xf0\xff\xf8\x7f\x87\xff\xc7\xfc'\
b'\x3f\xff\xff\xc1\xff\xff\xfe\x0f\xff\xff\xe0\x7f\xff\xfe\x03\xff'\
b'\xff\xe0\x1f\xff\xfe\x00\x7f\xff\x80\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\xff\xff\x87\xff\xff\xf0\xff\xff'\
b'\xfe\x1f\xff\xff\xc3\xff\xe0\x00\x7f\xfc\x00\x0f\xff\x80\x01\xff'\
b'\xf0\x00\x3f\xfe\x00\x07\xff\xff\xc0\xff\xff\xf8\x1f\xff\xff\x03'\
b'\xff\xff\xc0\x7f\xfc\x00\x0f\xff\x80\x01\xff\xf0\x00\x3f\xfe\x00'\
b'\x07\xff\xff\xf0\xff\xff\xff\x1f\xff\xff\xe3\xff\xff\xfc\x3f\xff'\
b'\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xff\xff\xf1'\
b'\xff\xff\xfc\x7f\xff\xff\x1f\xff\xff\xc7\xff\xc0\x01\xff\xf0\x00'\
b'\x7f\xfc\x00\x1f\xff\x00\x07\xff\xc0\x01\xff\xff\xf0\x7f\xff\xfc'\
b'\x1f\xff\xff\x07\xff\xff\x81\xff\xf0\x00\x7f\xfc\x00\x1f\xff\x00'\
b'\x07\xff\xc0\x01\xff\xf0\x00\x7f\xfc\x00\x1f\xff\x00\x07\xff\xc0'\
b'\x00\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xff\x00'\
b'\x07\xff\xfe\x01\xff\xff\xe0\x3f\xff\xfe\x07\xff\xff\xe0\xff\xff'\
b'\xfe\x0f\xff\xc0\x01\xff\xf8\x00\x1f\xff\x00\x01\xff\xf0\x00\x3f'\
b'\xff\x00\x03\xff\xf1\xfe\x3f\xff\x1f\xe3\xff\xf1\xfe\x3f\xff\x1f'\
b'\xe1\xff\xf9\xfe\x1f\xff\x9f\xe1\xff\xff\xfe\x1f\xff\xff\xe0\xff'\
b'\xff\xfe\x07\xff\xff\xe0\x3f\xff\xfc\x00\xff\xff\x80\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07'\
b'\xff\x80\xff\xe0\xff\xf8\x1f\xfe\x1f\xff\x07\xff\xc3\xff\xe0\xff'\
b'\xf8\x7f\xfc\x1f\xff\x0f\xff\x83\xff\xe1\xff\xf0\x7f\xfc\x3f\xfe'\
b'\x0f\xff\x87\xff\xc1\xff\xf0\xff\xf8\x3f\xfe\x1f\xff\xff\xff\xc3'\
b'\xff\xff\xff\xf8\x7f\xff\xff\xff\x0f\xff\xff\xff\xe1\xff\xff\xff'\
b'\xfc\x3f\xff\xff\xff\x87\xff\xc1\xff\xf0\xff\xf8\x3f\xfe\x1f\xff'\
b'\x07\xff\xc3\xff\xe0\xff\xf8\x7f\xfc\x1f\xff\x07\xff\x01\xff\xc0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xfe\x1f\xff\x0f'\
b'\xff\x87\xff\xc3\xff\xe1\xff\xf0\xff\xf8\x7f\xfc\x3f\xfe\x1f\xff'\
b'\x0f\xff\x87\xff\xc3\xff\xe1\xff\xf0\xff\xf8\x7f\xfc\x3f\xfe\x1f'\
b'\xff\x0f\xff\x87\xff\xc3\xff\xe0\xff\xe0\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xfc\x03\xff\xe0\x1f\xff'\
b'\x00\xff\xf8\x07\xff\xc0\x3f\xfe\x01\xff\xf0\x0f\xff\x80\x7f\xfc'\
b'\x03\xff\xe0\x1f\xff\x00\xff\xf8\x07\xff\xc0\x3f\xfe\x01\xff\xf0'\
b'\x1f\xff\x81\xff\xf8\x3f\xff\xc3\xff\xfc\x1f\xff\xc0\xff\xfc\x07'\
b'\xff\x80\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xff\x80'\
b'\x7f\x07\xff\xc0\x7f\x03\xff\xe0\x7f\x01\xff\xf0\xff\x00\xff\xf8'\
b'\xff\x00\x7f\xfc\xff\x00\x3f\xfe\xff\x00\x1f\xff\xff\x00\x0f\xff'\
b'\xff\x00\x07\xff\xff\xc0\x03\xff\xff\xf0\x01\xff\xff\xfc\x00\xff'\
b'\xff\xff\x00\x7f\xff\xff\xc0\x3f\xff\xff\xf0\x1f\xff\xff\xfc\x0f'\
b'\xff\xdf\xff\x07\xff\xc7\xff\xc3\xff\xe1\xff\xf1\xff\xf0\xff\xfc'\
b'\xff\xf8\x3f\xfe\x3f\xfc\x0f\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xff\x00\x03\xff\xe0\x00'\
b'\x7f\xfc\x00\x0f\xff\x80\x01\xff\xf0\x00\x3f\xfe\x00\x07\xff\xc0'\
b'\x00\xff\xf8\x00\x1f\xff\x00\x03\xff\xe0\x00\x7f\xfc\x00\x0f\xff'\
b'\x80\x01\xff\xf0\x00\x3f\xfe\x00\x07\xff\xc0\x00\xff\xf8\x00\x1f'\
b'\xff\xff\xe3\xff\xff\xfe\x7f\xff\xff\xcf\xff\xff\xf9\xff\xff\xff'\
b'\x1f\xff\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x3f\xf0\x00\x7f\x83\xff\x80\x0f\xfc\x3f'\
b'\xfc\x00\xff\xc3\xff\xe0\x1f\xfc\x3f\xfe\x03\xff\xc3\xff\xf0\x3f'\
b'\xfc\x3f\xff\x87\xff\xc3\xff\xfc\xff\xfc\x3f\xff\xcf\xff\xc3\xff'\
b'\xff\xff\xfc\x3f\xff\xff\xff\xc3\xff\xff\xff\xfc\x3f\xff\xff\xff'\
b'\xc3\xff\xff\xff\xfc\x3f\xff\xfb\xff\xc3\xfb\xff\xbf\xfc\x3f\x9f'\
b'\xf3\xff\xc3\xf9\xfe\x3f\xfc\x3f\x8f\xe3\xff\xc3\xf8\xfc\x3f\xfc'\
b'\x3f\x87\xc3\xff\x81\xf0\x78\x3f\xf8\x00\x01\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xff\x00\x0f\xe1\xff\x80\x1f\xc3\xff\xc0\x3f\x87\xff\xe0\x7f\x0f'\
b'\xff\xf0\xfe\x1f\xff\xf9\xfc\x3f\xff\xff\xf8\x7f\xff\xff\xf0\xff'\
b'\xff\xff\xe1\xff\xff\xff\xc3\xff\xff\xff\x87\xff\xff\xff\x0f\xff'\
b'\xff\xfe\x1f\xff\xff\xfc\x3f\xff\xff\xf8\x7f\xff\xff\xf0\xfe\x7f'\
b'\xff\xe1\xfc\x3f\xff\xc3\xf8\x1f\xff\x87\xf0\x1f\xff\x0f\xe0\x0f'\
b'\xfe\x0f\xc0\x07\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x3f\xf8\x00\x00\xff\xff\x80\x01\xff\xff'\
b'\xe0\x03\xff\xff\xfc\x03\xff\xff\xff\x03\xff\xff\xff\x81\xff\xff'\
b'\xff\xe1\xff\xff\x7f\xf0\xff\xfc\x07\xfc\x7f\xfc\x03\xfe\x7f\xfe'\
b'\x00\xff\x3f\xfe\x00\x7f\x9f\xff\x80\x7f\xcf\xff\xc0\x3f\xe3\xff'\
b'\xf0\x3f\xf1\xff\xff\xff\xf0\xff\xff\xff\xf8\x3f\xff\xff\xf8\x0f'\
b'\xff\xff\xf8\x03\xff\xff\xf8\x00\xff\xff\xf8\x00\x1f\xff\xf8\x00'\
b'\x03\xff\xe0\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x07\xff\xff\x00\x7f\xff\xfe\x03\xff\xff'\
b'\xf8\x1f\xff\xff\xe0\xff\xff\xff\x07\xff\xff\xfc\x3f\xfe\x1f\xe1'\
b'\xff\xf0\x7f\x0f\xff\x83\xf8\x7f\xfc\x1f\xc3\xff\xe0\xfe\x1f\xff'\
b'\x0f\xf0\xff\xf8\xff\x87\xff\xff\xf8\x3f\xff\xff\x81\xff\xff\xf8'\
b'\x0f\xff\xff\x00\x7f\xfc\x00\x03\xff\xe0\x00\x1f\xff\x00\x00\xff'\
b'\xf8\x00\x03\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x03\xff\x80\x00\x03\xff\xfe\x00\x01\xff'\
b'\xff\xe0\x00\xff\xff\xff\x00\x3f\xff\xff\xf0\x0f\xff\xff\xfe\x01'\
b'\xff\xff\xff\xe0\x7f\xff\xdf\xfc\x0f\xff\xc0\xff\xc3\xff\xf0\x0f'\
b'\xf8\x7f\xfc\x00\xff\x0f\xff\x80\x1f\xe1\xff\xf0\x07\xfc\x3f\xff'\
b'\x00\xff\x83\xff\xf0\x3f\xe0\x7f\xff\xff\xfc\x0f\xff\xff\xff\x80'\
b'\xff\xff\xff\xe0\x0f\xff\xff\xff\x80\xff\xff\xff\xf0\x0f\xff\xff'\
b'\xfe\x00\xff\xff\xff\xc0\x03\xff\xf7\xf8\x00\x03\xc0\x06\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xff'\
b'\xff\x00\x7f\xff\xfe\x03\xff\xff\xf8\x1f\xff\xff\xe0\xff\xff\xff'\
b'\x07\xff\xff\xfc\x3f\xfe\x1f\xe1\xff\xf0\x7f\x0f\xff\x83\xf8\x7f'\
b'\xfc\x1f\xc3\xff\xe0\xfe\x1f\xff\x0f\xf0\xff\xf8\xff\x87\xff\xff'\
b'\xf8\x3f\xff\xff\x81\xff\xff\xf8\x0f\xff\xff\xc0\x7f\xfd\xff\x03'\
b'\xff\xe7\xfc\x1f\xff\x1f\xf0\xff\xf8\xff\xc3\xff\xc3\xfe\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x07\xff\x00\x0f\xff\xf8\x07\xff'\
b'\xff\x03\xff\xff\xc1\xff\xff\xf0\x7f\xff\xfc\x1f\xff\x82\x07\xff'\
b'\xe0\x01\xff\xf8\x00\x3f\xff\x80\x0f\xff\xf8\x01\xff\xff\x80\x1f'\
b'\xff\xf0\x01\xff\xfe\x00\x3f\xff\xc0\x07\xff\xf1\xc1\xff\xfc\xff'\
b'\xff\xff\x3f\xff\xff\x8f\xff\xff\xe1\xff\xff\xf0\x3f\xff\xf8\x03'\
b'\xff\xf0\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xff\xff'\
b'\xf9\xff\xff\xff\x3f\xff\xff\xe7\xff\xff\xfc\xff\xff\xff\x80\xff'\
b'\xfe\x00\x0f\xff\x80\x01\xff\xf0\x00\x3f\xfe\x00\x07\xff\xc0\x00'\
b'\xff\xf8\x00\x1f\xff\x00\x03\xff\xe0\x00\x7f\xfc\x00\x0f\xff\x80'\
b'\x01\xff\xf0\x00\x3f\xfe\x00\x07\xff\xc0\x00\xff\xf8\x00\x1f\xff'\
b'\x00\x03\xff\xe0\x00\x3f\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0f\xff\x03\xf1\xff\xf0\x3f\x9f\xff\x03\xf9\xff'\
b'\xf0\x3f\x9f\xff\x03\xf9\xff\xf0\x3f\x9f\xff\x03\xf9\xff\xf0\x3f'\
b'\x9f\xff\x03\xf9\xff\xf0\x3f\x9f\xff\x03\xf9\xff\xf0\x3f\x9f\xff'\
b'\x03\xf9\xff\xf8\x7f\x8f\xff\xcf\xf8\xff\xff\xff\x0f\xff\xff\xf0'\
b'\x7f\xff\xfe\x03\xff\xff\xe0\x1f\xff\xfc\x00\xff\xff\x00\x03\xff'\
b'\xe0\x00\x01\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x7f\xfc\x01\xfc\xff\xfc\x03\xf1\xff\xf8\x07\xe1\xff\xf0\x1f'\
b'\x83\xff\xf0\x3f\x03\xff\xe0\xfe\x07\xff\xe1\xf8\x07\xff\xc7\xf0'\
b'\x0f\xff\x8f\xc0\x0f\xff\xbf\x80\x1f\xff\xfe\x00\x1f\xff\xfc\x00'\
b'\x3f\xff\xf0\x00\x3f\xff\xe0\x00\x7f\xff\x80\x00\x7f\xff\x00\x00'\
b'\xff\xfc\x00\x00\xff\xf8\x00\x01\xff\xe0\x00\x01\xff\xc0\x00\x03'\
b'\xff\x00\x00\x03\xfc\x00\x00\x00\xe0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xff\xf8\x00\x00\x0f\xe3\xff\xf0\x3f\xe0\x3f\x8f'\
b'\xff\xc1\xff\xe0\xfc\x1f\xff\x07\xff\x87\xf0\x7f\xfc\x3f\xfe\x1f'\
b'\x81\xff\xf8\xff\xfc\x7e\x03\xff\xe3\xff\xf3\xf8\x0f\xff\x9f\xff'\
b'\xcf\xc0\x3f\xff\x7f\xff\x3f\x00\x7f\xfd\xff\xfe\xfc\x01\xff\xff'\
b'\xff\xff\xe0\x03\xff\xff\xff\xff\x80\x0f\xff\xff\xff\xfe\x00\x3f'\
b'\xff\xe7\xff\xf0\x00\x7f\xff\x9f\xff\xc0\x01\xff\xfe\x7f\xff\x00'\
b'\x03\xff\xf0\xff\xf8\x00\x0f\xff\xc3\xff\xe0\x00\x1f\xff\x0f\xff'\
b'\x80\x00\x7f\xf8\x1f\xfc\x00\x00\xff\xe0\x3f\xf0\x00\x01\xff\x00'\
b'\x7f\x80\x00\x00\xe0\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x07\xff\xf8\x1f\xe3\xff\xfc\x1f\xe1\xff\xff\x1f\xe0'\
b'\x7f\xff\xdf\xe0\x1f\xff\xff\xe0\x07\xff\xff\xe0\x03\xff\xff\xe0'\
b'\x00\xff\xff\xe0\x00\x3f\xff\xe0\x00\x0f\xff\xf0\x00\x03\xff\xfc'\
b'\x00\x00\xff\xff\x00\x00\xff\xff\x80\x00\xff\xff\xe0\x00\xff\xff'\
b'\xf8\x00\x7f\xff\xfe\x00\x7f\x7f\xff\x00\x7f\x1f\xff\xc0\x7f\x07'\
b'\xff\xf0\x7f\x03\xff\xf8\x7f\x00\xff\xfe\x7f\x00\x3f\xfe\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1f\xff\xc0\x7e\x3f\xff\x81\xf8\x7f\xff\x87\xf0\x7f\xff'\
b'\x0f\xc0\x7f\xff\x3f\x80\xff\xfe\xfe\x00\xff\xff\xf8\x01\xff\xff'\
b'\xf0\x01\xff\xff\xc0\x01\xff\xff\x00\x03\xff\xfe\x00\x03\xff\xf8'\
b'\x00\x07\xff\xf0\x00\x07\xff\xc0\x00\x0f\xff\x80\x00\x1f\xff\x00'\
b'\x00\x3f\xfe\x00\x00\x7f\xfc\x00\x00\xff\xf8\x00\x01\xff\xf0\x00'\
b'\x03\xff\xe0\x00\x03\xff\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x03\xf0\x7f\xff\xff\x8f\xff\xff\xf1\xff\xff'\
b'\xfe\x3f\xff\xff\xc1\xff\xff\xf8\x01\xff\xfe\x00\x7f\xff\x80\x1f'\
b'\xff\xf0\x03\xff\xfc\x00\xff\xff\x00\x3f\xff\xe0\x0f\xff\xf8\x01'\
b'\xff\xfe\x00\x7f\xff\xc0\x1f\xff\xf0\x03\xff\xfc\x00\xff\xff\x80'\
b'\x1f\xff\xff\xf3\xff\xff\xfe\x7f\xff\xff\xcf\xff\xff\xf8\xff\xff'\
b'\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xe0\x7f\xfc\x3f\xfe\x1f'\
b'\xff\x0f\xff\x07\xf0\x03\xf8\x01\xfc\x00\xfe\x00\x7f\x00\x3f\x80'\
b'\x1f\xc0\x0f\xe0\x07\xf0\x03\xf8\x01\xfc\x00\xfe\x00\x7f\x00\x3f'\
b'\x80\x1f\xc0\x0f\xe0\x07\xf0\x03\xf8\x01\xfc\x00\xfe\x00\x7f\x00'\
b'\x3f\x80\x1f\xc0\x0f\xe0\x07\xff\x83\xff\xe1\xff\xf0\xff\xf8\x1f'\
b'\xf0\x00\x00\x0f\x80\x00\x3f\xc0\x00\x3f\x80\x00\x7f\x00\x00\xff'\
b'\x00\x00\xfe\x00\x01\xfe\x00\x03\xfc\x00\x03\xf8\x00\x07\xf8\x00'\
b'\x0f\xf0\x00\x0f\xe0\x00\x1f\xe0\x00\x3f\xc0\x00\x3f\x80\x00\x7f'\
b'\x80\x00\xff\x00\x00\xff\x00\x01\xfe\x00\x03\xfc\x00\x03\xfc\x00'\
b'\x07\xf8\x00\x07\xf0\x00\x0f\xf0\x00\x1f\xe0\x00\x1f\xc0\x00\x3f'\
b'\xc0\x00\x7f\x80\x00\x7f\x00\x00\xff\x00\x01\xfe\x00\x01\xfc\x00'\
b'\x03\xfc\x00\x01\xf0\x00\x00\x00\x7f\xe0\x7f\xf8\x3f\xfe\x1f\xff'\
b'\x07\xff\x80\x3f\xc0\x1f\xe0\x07\xf0\x03\xf8\x01\xfc\x00\xfe\x00'\
b'\x7f\x00\x3f\x80\x1f\xc0\x0f\xe0\x07\xf0\x03\xf8\x01\xfc\x00\xfe'\
b'\x00\x7f\x00\x3f\x80\x1f\xc0\x0f\xe0\x07\xf0\x03\xf8\x01\xfc\x00'\
b'\xfe\x00\xff\x00\x7f\x83\xff\xc3\xff\xe1\xff\xf0\xff\xf0\x3f\xf0'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x01\xf0\x00\x07\xe0'\
b'\x00\x3f\x80\x00\xff\x00\x07\xfc\x00\x1f\xf8\x00\xfb\xe0\x03\xe7'\
b'\xc0\x1f\x1f\x00\x7c\x3c\x01\xe0\xf8\x0f\x81\xe0\x3e\x07\xc0\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x03\xff\xff\x03\xff\xff\x03\xff\xff\x03'\
b'\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x0f\xe0\x1f\x80\x3f\x00\x7c\x00\x78\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xe0\x00'\
b'\x3f\xff\x80\x1f\xff\xf8\x07\xff\xff\x01\xe1\xff\xc0\x20\x3f\xf8'\
b'\x00\x0f\xfe\x03\xff\xff\x83\xff\xff\xe1\xff\x3f\xf8\x7f\x8f\xfe'\
b'\x3f\xe3\xff\x8f\xf8\xff\xe3\xff\xff\xf8\x7f\xff\xfe\x1f\xff\xff'\
b'\x83\xfe\xff\xc0\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x80\x00\x7f\xe0'\
b'\x00\x1f\xf8\x00\x07\xfe\x00\x01\xff\x80\x00\x7f\xe0\x00\x1f\xf8'\
b'\x78\x07\xfe\x7f\x81\xff\xff\xf0\x7f\xff\xfe\x1f\xff\xff\x87\xff'\
b'\xff\xf1\xff\xcf\xfc\x7f\xe1\xff\x1f\xf8\x7f\xc7\xfe\x1f\xf1\xff'\
b'\x87\xfc\x7f\xf3\xff\x1f\xff\xff\xc7\xff\xff\xe1\xff\xff\xf8\x7f'\
b'\xff\xfc\x0f\xf3\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x00\xff\xf8'\
b'\x0f\xff\xe0\x7f\xff\x83\xff\xfe\x1f\xfe\x78\x7f\xf0\x01\xff\xc0'\
b'\x07\xfe\x00\x1f\xf8\x00\x7f\xf0\x01\xff\xc0\x07\xff\xfe\x1f\xff'\
b'\xfc\x3f\xff\xf0\x7f\xff\xc0\x7f\xfe\x00\x3f\x80\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xfc'\
b'\x00\x03\xff\x00\x00\xff\xc0\x00\x3f\xf0\x00\x0f\xfc\x00\x03\xff'\
b'\x00\xf8\xff\xc0\xff\xbf\xf0\x7f\xff\xfc\x3f\xff\xff\x1f\xff\xff'\
b'\xc7\xff\xff\xf1\xff\x1f\xfc\xff\xc7\xff\x3f\xf1\xff\xcf\xfc\x7f'\
b'\xf3\xff\x1f\xfc\xff\xcf\xff\x1f\xff\xff\xc7\xff\xff\xf1\xff\xff'\
b'\xfc\x3f\xfb\xff\x07\xfc\x7f\x80\x38\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0f\x80\x00\xff\xf0\x03\xff\xfc\x07\xff\xfe\x0f\xf8\xfe\x1f\xf8'\
b'\xfe\x1f\xf8\xfe\x3f\xff\xfe\x3f\xff\xfe\x3f\xff\xf8\x3f\xf8\x00'\
b'\x1f\xf8\x00\x1f\xf8\x06\x1f\xfe\x7e\x0f\xff\xfe\x03\xff\xfe\x01'\
b'\xff\xf8\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1f\x80\x1f\xff\x03\xff\xf8\x7f\xff\x8f\xff\xf8\xff\xe3'\
b'\x8f\xfc\x01\xff\xc0\x1f\xfc\x03\xff\xfc\x3f\xff\xe3\xff\xfe\x3f'\
b'\xff\xc1\xff\xc0\x1f\xfc\x01\xff\xc0\x1f\xfc\x01\xff\xc0\x1f\xfc'\
b'\x01\xff\xc0\x1f\xfc\x01\xff\xc0\x0f\xf8\x00\xff\x80\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x1c\x00\x00\x3f\xef\xf8\x1f\xff\xfe\x0f\xff\xff\x87'\
b'\xff\xff\xe1\xff\xff\xf8\xff\xc7\xfe\x3f\xf1\xff\x8f\xfc\x7f\xe3'\
b'\xff\x1f\xf8\xff\xc7\xff\x3f\xff\xff\xc7\xff\xff\xf1\xff\xff\xfc'\
b'\x3f\xff\xff\x07\xfd\xff\x80\x7c\x7f\xe0\x00\x1f\xf8\x38\x07\xfe'\
b'\x0f\xef\xff\x83\xff\xff\xc0\xff\xff\xe0\x0f\xff\xe0\x00\x04\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xfc\x00\x01\xff\x80\x00\x3f\xf0\x00\x07\xfe\x00\x00\xff'\
b'\xc0\x00\x1f\xf8\x00\x03\xff\x0f\x00\x7f\xe7\xfc\x0f\xff\xff\xc1'\
b'\xff\xff\xfc\x3f\xff\xff\x87\xff\xff\xf8\xff\xe7\xff\x1f\xf8\x7f'\
b'\xe3\xff\x0f\xfc\x7f\xe1\xff\x8f\xfc\x3f\xf1\xff\x87\xfe\x3f\xf0'\
b'\xff\xc7\xfe\x1f\xf8\xff\xc3\xff\x1f\xf8\x7f\xe1\xff\x0f\xf8\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x07'\
b'\xf8\x1f\xf0\x7f\xc1\xff\x07\xf8\x07\xc0\x00\x03\xff\x0f\xfc\x3f'\
b'\xf0\xff\xc3\xff\x0f\xfc\x3f\xf0\xff\xc3\xff\x0f\xfc\x3f\xf0\xff'\
b'\xc3\xff\x0f\xfc\x3f\xf0\x7f\xc0\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3c\x00\x7f'\
b'\x00\x7f\xc0\x3f\xe0\x1f\xf0\x07\xf0\x01\xf0\x00\x00\x01\xff\x01'\
b'\xff\x80\x7f\xe0\x3f\xf0\x1f\xf8\x0f\xfc\x07\xfe\x03\xff\x01\xff'\
b'\x80\xff\xc0\x7f\xe0\x3f\xf0\x1f\xf8\x0f\xfc\x0f\xfe\x07\xfe\x07'\
b'\xff\x0f\xff\x8f\xff\x87\xff\x81\xff\x00\x40\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f'\
b'\xf0\x00\x07\xfe\x00\x00\xff\xc0\x00\x1f\xf8\x00\x03\xff\x00\x00'\
b'\x7f\xe0\x00\x0f\xfc\x00\x01\xff\x83\xf8\x3f\xf0\xff\x07\xfe\x3f'\
b'\xc0\xff\xcf\xf0\x1f\xfb\xf8\x03\xff\xfe\x00\x7f\xff\xe0\x0f\xff'\
b'\xfe\x01\xff\xff\xe0\x3f\xff\xfc\x07\xff\xff\xc0\xff\xdf\xfc\x1f'\
b'\xf9\xff\xc3\xff\x3f\xf8\x7f\xe3\xff\x87\xf8\x3f\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xf0\xff'\
b'\xc3\xff\x0f\xfc\x3f\xf0\xff\xc3\xff\x0f\xfc\x3f\xf0\xff\xc3\xff'\
b'\x0f\xfc\x3f\xf0\xff\xc3\xff\x0f\xfc\x3f\xf0\xff\xc3\xff\x0f\xfc'\
b'\x3f\xf0\xff\xc1\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xe0\x07\x00\x3f'\
b'\xf3\xfe\x1f\xf0\x1f\xff\xff\x9f\xfe\x0f\xff\xff\xff\xff\x07\xff'\
b'\xff\xff\xff\xc3\xff\xff\xff\xff\xe1\xff\xcf\xfe\x3f\xf0\xff\xc3'\
b'\xff\x1f\xf8\x7f\xe1\xff\x8f\xfc\x3f\xf0\xff\xc7\xfe\x1f\xf8\x7f'\
b'\xe3\xff\x0f\xfc\x3f\xf1\xff\x87\xfe\x1f\xf8\xff\xc3\xff\x0f\xfc'\
b'\x7f\xe1\xff\x87\xfe\x3f\xf0\xff\xc3\xff\x1f\xf8\x3f\xc1\xff\x07'\
b'\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\x07\xfe\x7f'\
b'\xc0\xff\xff\xfc\x1f\xff\xff\xc3\xff\xff\xf8\x7f\xff\xff\x8f\xfe'\
b'\x7f\xf1\xff\x87\xfe\x3f\xf0\xff\xc7\xfe\x1f\xf8\xff\xc3\xff\x1f'\
b'\xf8\x7f\xe3\xff\x0f\xfc\x7f\xe1\xff\x8f\xfc\x3f\xf1\xff\x87\xfe'\
b'\x1f\xe0\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\x00\x03\xff\xf0\x03\xff\xff\x01\xff\xff\xe0\xff\xff\xfc\x3f'\
b'\xff\xff\x0f\xfc\x7f\xe7\xfe\x1f\xf9\xff\x87\xfe\x7f\xe1\xff\x9f'\
b'\xf8\x7f\xe3\xff\x1f\xf8\xff\xff\xfe\x3f\xff\xff\x07\xff\xff\x80'\
b'\xff\xff\xc0\x0f\xff\xc0\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x00\xff\x9f\xf0\x3f\xff\xfe\x0f\xff\xff\xc3\xff\xff'\
b'\xf0\xff\xff\xfe\x3f\xf9\xff\x8f\xfc\x3f\xe3\xff\x0f\xf8\xff\xc3'\
b'\xfe\x3f\xf0\xff\x8f\xfe\x7f\xe3\xff\xff\xf8\xff\xff\xfc\x3f\xff'\
b'\xff\x0f\xfd\xff\x83\xff\x3f\x80\xff\xc0\x00\x3f\xf0\x00\x0f\xfc'\
b'\x00\x03\xff\x00\x00\xff\xc0\x00\x0f\xc0\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x00\x00\x7f\xdf\xf0\x3f\xff\xfc\x1f\xff\xff\x0f'\
b'\xff\xff\xc3\xff\xff\xf0\xff\x8f\xfc\x7f\xe3\xff\x1f\xf8\xff\xc7'\
b'\xfe\x3f\xf1\xff\x8f\xfe\x7f\xe7\xff\x8f\xff\xff\xe3\xff\xff\xf8'\
b'\xff\xff\xfe\x1f\xff\xff\x03\xfe\xff\xc0\x1c\x3f\xf0\x00\x0f\xfc'\
b'\x00\x01\xff\x00\x00\x7f\xc0\x00\x1f\xf0\x00\x03\xf8\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x70'\
b'\xff\xcf\xc7\xfe\xff\x3f\xff\xf9\xff\xff\xcf\xff\xfe\x7f\xfb\xe3'\
b'\xff\x86\x1f\xf8\x00\xff\xc0\x07\xfe\x00\x3f\xf0\x01\xff\x80\x0f'\
b'\xfc\x00\x7f\xe0\x03\xff\x00\x0f\xf0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x3f\xfc\x07\xff'\
b'\xf0\x7f\xff\xc3\xff\x9e\x1f\xf8\x20\xff\xe0\x07\xff\xe0\x3f\xff'\
b'\xc0\xff\xff\x00\xff\xf8\x01\xff\xe0\x07\xfe\x3c\x3f\xf1\xff\xff'\
b'\x87\xff\xf8\x1f\xff\x80\x07\x80\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x01\xf8\x00'\
b'\x1f\xc0\x03\xfe\x00\x3f\xf0\x07\xff\xfe\x3f\xff\xf1\xff\xff\x8f'\
b'\xff\xf8\x1f\xf8\x00\xff\xc0\x07\xfe\x00\x3f\xf0\x01\xff\x80\x0f'\
b'\xfe\x00\x7f\xf0\x41\xff\xfe\x0f\xff\xf0\x3f\xff\x80\xff\xfc\x01'\
b'\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\xfc\x1f\xf0'\
b'\xff\xc7\xfe\x1f\xf8\xff\xc3\xff\x1f\xf8\x7f\xe3\xff\x0f\xfc\x7f'\
b'\xe1\xff\x8f\xfc\x3f\xf1\xff\x87\xfe\x3f\xf0\xff\xc7\xfe\x1f\xf9'\
b'\xff\xc3\xff\xff\xf8\x7f\xff\xff\x0f\xff\xff\xe0\xff\xef\xfc\x0f'\
b'\xf8\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07'\
b'\xfe\x01\xf3\xff\x81\xf9\xff\xc0\xf8\x7f\xf0\xfc\x3f\xf8\x7c\x0f'\
b'\xfe\x7e\x07\xff\x3e\x01\xff\xff\x00\xff\xff\x00\x3f\xff\x80\x1f'\
b'\xff\x80\x07\xff\x80\x01\xff\xc0\x00\xff\xc0\x00\x3f\xe0\x00\x1f'\
b'\xe0\x00\x01\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xfc\x0f'\
b'\xe0\x3e\xff\xe0\xff\x07\xe7\xfe\x1f\xf0\x7c\x7f\xe1\xff\x8f\xc7'\
b'\xff\x3f\xf8\xfc\x3f\xf3\xff\xcf\x83\xff\x7f\xfd\xf8\x1f\xff\xff'\
b'\xff\x01\xff\xff\xff\xf0\x1f\xff\xff\xff\x00\xff\xfb\xff\xe0\x0f'\
b'\xff\xbf\xfe\x00\x7f\xf1\xff\xe0\x07\xff\x1f\xfc\x00\x3f\xf0\xff'\
b'\xc0\x03\xfe\x0f\xf8\x00\x03\x00\x0c\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xf0\x7f\x3f'\
b'\xfc\x3f\x87\xff\x9f\xc0\xff\xff\xc0\x1f\xff\xe0\x03\xff\xf0\x00'\
b'\xff\xf8\x00\x1f\xff\x00\x03\xff\xe0\x00\xff\xfc\x00\x7f\xff\x80'\
b'\x3f\xff\xf0\x1f\xbf\xfc\x0f\xc7\xff\x87\xf0\xff\xf3\xf8\x1f\xfc'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xf0\x0f\x9f'\
b'\xfc\x0f\xcf\xfe\x07\xc3\xff\x87\xe1\xff\xc3\xe0\x7f\xf3\xf0\x3f'\
b'\xf9\xf0\x0f\xff\xf8\x07\xff\xf8\x01\xff\xfc\x00\xff\xfc\x00\x3f'\
b'\xfe\x00\x0f\xfe\x00\x07\xff\x00\x01\xff\x00\x00\xff\x80\x00\x7f'\
b'\x80\x07\xff\xc0\x07\xff\xc0\x03\xff\xc0\x00\xff\xc0\x00\x3f\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x3f\xff\xf0\xff\xff\xc3\xff\xff\x0f\xff\xfc'\
b'\x00\xff\xe0\x03\xff\x00\x1f\xf8\x00\xff\xc0\x07\xff\x00\x3f\xf8'\
b'\x01\xff\xc0\x0f\xfe\x00\x7f\xff\xf9\xff\xff\xe7\xff\xff\x9f\xff'\
b'\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0f\x00\x7f\xe0\x7f\xf0\x7f\xf8\x3f\xf8\x3f'\
b'\xe0\x1f\xf0\x0f\xf0\x07\xf8\x03\xfc\x00\xfe\x00\x7f\x00\x1f\x80'\
b'\x0f\xc0\x0f\xe0\x1f\xf0\x1f\xf0\x0f\xf8\x01\xfe\x00\x3f\x00\x1f'\
b'\x80\x1f\xc0\x0f\xe0\x07\xf0\x07\xf8\x03\xfc\x01\xfe\x00\xff\x80'\
b'\x7f\xc0\x1f\xfc\x0f\xff\x03\xff\x80\x7f\x80\x00\x00\x00\x00\xf8'\
b'\x3f\x87\xf0\xfe\x1f\xc3\xf8\x7f\x0f\xe1\xfc\x3f\x87\xf0\xfe\x1f'\
b'\xc3\xf8\x7f\x0f\xe1\xfc\x3f\x87\xf0\xfe\x1f\xc3\xf8\x7f\x0f\xe1'\
b'\xfc\x3f\x87\xf0\xfe\x1f\xc3\xf8\x7f\x0f\xe1\xfc\x1f\x00\x00\x78'\
b'\x00\x7f\xc0\x7f\xf0\x3f\xfc\x0f\xff\x00\xff\x80\x3f\xc0\x1f\xe0'\
b'\x0f\xf0\x07\xf8\x03\xfc\x01\xfc\x00\xfe\x00\x7e\x00\x3f\x80\x0f'\
b'\xf0\x03\xfc\x03\xfe\x03\xfc\x01\xfc\x00\xfc\x00\x7f\x00\x3f\x80'\
b'\x1f\xe0\x0f\xf0\x07\xf8\x03\xfc\x01\xfe\x01\xff\x07\xff\x87\xff'\
b'\x83\xff\x80\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x03\xf8\x03\x80\xff\xc0\xf0\x3f\xfe\x3e\x0f'\
b'\xff\xff\x81\xff\xff\xf0\x3c\x3f\xfc\x07\x01\xff\x00\xc0\x0f\x80'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xff\xc1'\
b'\xff\xff\x07\xfd\xfe\x1e\x00\x78\x70\x00\xe1\x80\x03\x86\x10\x06'\
b'\x1d\xe0\x18\x7f\x80\xe1\xf8\x07\x87\xc0\xfe\x1e\x07\xf8\x7c\x1f'\
b'\xe1\xf0\x7f\x87\xff\xfe\x1f\x8f\xf8\x7c\x1f\xe1\xe0\x7f\x87\xc1'\
b'\xfe\x1f\x0f\xf8\x7f\xff\xe1\xff\xff\x03\xff\xf8\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# Zero-copy views over the packed tables defined above.  Judging by the
# names and the raw byte data, this is presumably a packed bitmap font
# (per-glyph widths, per-glyph offsets into the bitmap blob, and the
# 1-bit-per-pixel glyph data) — TODO confirm against the tool that
# generated this file; _WIDTHS/_OFFSETS are defined before this chunk.
WIDTHS = memoryview(_WIDTHS)
OFFSETS = memoryview(_OFFSETS)
BITMAPS = memoryview(_BITMAPS)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43433,
422,
609,
14208,
12,
40164,
13,
926,
69,
1262,
25,
198,
2,
220,
220,
220,
220,
24457,
10331,
17,
2545,
8899,
13,
9078,
609,
14208,
12,
40164,
13,
926,
69,
... | 1.321681 | 37,705 |
# -*- coding: utf-8 -*
from itertools import chain
from os import getcwd, path
import argparse
from pupy.decorations import tictoc
CD = getcwd()
def fmt_line(line, col, phillup=False):
    """Re-join a split source line, aligning its '#$#' marker.

    :param line: one- or two-element list: ``[code]`` or ``[code, comment]``
    :param col: width of the widest code segment; the marker is padded
        so that it lands one column past that width on every line
    :param phillup: when True, comment-less lines also get a trailing
        bare '#$#' marker (instead of being returned unchanged)
    :return: the reassembled line, or None for any other list length
    """
    if len(line) == 1:
        code = line[0]
        if not phillup:
            return code
        padding = ' ' * (1 + col - len(code))
        return code + padding + '#$#'
    if len(line) == 2:
        code, comment = line
        padding = ' ' * (1 + col - len(code))
        return code + padding + '#$#' + comment
@tictoc()
def funk_docktor(unfunky, clusters=True, phillup=False):
    """Format a multi-line string by re-aligning its '#$#' markers.

    :param unfunky: source text to reformat
    :param clusters: NOTE(review): the default True branch is a stub and
        returns None — only clusters=False currently does any work
    :param phillup: forwarded to fmt_line (pads comment-less lines too)
    """
    if clusters:
        pass
    else:
        # Repair a space-damaged marker, then work line by line.
        lines = unfunky.replace('# $#', '#$#').split('\n')
        # First/last line indices carrying a '#$#' marker: only that
        # span is re-aligned, text outside it is passed through as-is.
        furst = min(i for i, line in enumerate(lines)
                    if '#$#' in line)
        last = max(i for i, line in enumerate(lines)
                   if '#$#' in line)
        doc_lines = [line.split("#$#")
                     for line in lines[furst:last + 1]]
        # Widest code segment among lines that have a comment part;
        # fmt_line pads each marker one column past this width.
        maxcodelength = max(len(line[0]) for line in doc_lines
                            if len(line) == 2)
        lgne = [fmt_line(line,
                        col=maxcodelength,
                        phillup=phillup)
                for line in doc_lines]
        # Avoid emitting a dangling '#$#' on a trailing empty line.
        if phillup and doc_lines[-1][0] == '':
            lgne[-1] = ''
        return '\n'.join(chain.from_iterable((lines[:furst], lgne, lines[last + 1:])))
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
6738,
28686,
1330,
651,
66,
16993,
11,
3108,
198,
11748,
1822,
29572,
198,
6738,
15552,
88,
13,
12501,
273,
602,
1330,
256,
713,
420,... | 1.959635 | 768 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
def parse_handle(handle):
    """
    Parse a connection handle into its subparts.

    A handle has the shape ``[user[:password]@]host[:port][/dbname]``.
    The credential part is only recognised when splitting on '@' yields
    exactly two pieces; the host part is always taken from the segment
    after the last '@'.  Missing subparts come back as None.

    @return dict with keys user, password, host, port, database
    @example
        127.0.0.1                        -> host only
        root:passwd@localhost            -> user, password and host set
        root@db.example.org:8085/test    -> user, host, port and database set
        localhost:8085/test              -> host, port and database set
    """
    pieces = handle.split('@')
    user = passwd = host = port = dbname = None
    if len(pieces) == 2:
        user, passwd = parse_user(pieces[0])
    host, port, dbname = parse_host(pieces[-1])
    return {
        "user": user,
        "password": passwd,
        "host": host,
        "port": port,
        "database": dbname
    }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
628,
628,
198,
4299,
21136,
62,
28144,
7,
28144,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.271218 | 542 |
""" Priority Tests
Test conversions between Todoist and Taskwarrior priorities.
"""
import pytest
from todoist_taskwarrior import utils
| [
37811,
34416,
30307,
198,
198,
14402,
32626,
1022,
309,
24313,
396,
290,
15941,
5767,
7701,
15369,
13,
198,
37811,
198,
11748,
12972,
9288,
198,
6738,
284,
4598,
396,
62,
35943,
5767,
7701,
1330,
3384,
4487,
628,
198
] | 3.756757 | 37 |
# Read the user's full name and print case conversions and letter
# counts (prompt/output strings are Portuguese, as in the original
# exercise; only the comments are translated).
nome = input('Digite seu nome: ').strip()
print('\n Nome em maiúsculas:', nome.upper())
print(' Nome em minúsculas:', nome.lower())
print(' Quantidade de letras:', len(nome.replace(' ', '')))
print(' Quantidade de letras:', len(nome)-nome.count(' ')) # alternative: total length minus the spaces
print(' Q. de letras primeiro nome:', len(nome.split()[0]))
print(' Q. de letras primeiro nome:', nome.find(' ')) # alternative; NOTE(review): yields -1 when the name has no space
| [
77,
462,
796,
5128,
10786,
19511,
578,
384,
84,
299,
462,
25,
705,
737,
36311,
3419,
198,
4798,
10786,
59,
77,
399,
462,
795,
285,
1872,
21356,
1416,
25283,
25,
3256,
299,
462,
13,
45828,
28955,
198,
4798,
10786,
399,
462,
795,
949,... | 2.462025 | 158 |
from typing import Dict
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.display import display
from matplotlib.ticker import FormatStrFormatter, MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
from plotly import offline as plotly, graph_objs as go, tools
# Recognised plot/sample categories.  Stored as a set, presumably for
# membership tests — the consumers are not visible in this chunk.
PLOT_TYPES = {'random', 'grid', 'exploration', 'exploitation', 'initialisation', 'default', 'user-defined'}
| [
6738,
19720,
1330,
360,
713,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6101,
7535,
13,
7295,
13,
13812,
1330,
3359,
198,
6738,
2603,
29487,
8019,
13,
83,
15799,
1... | 3.094488 | 127 |
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
from twisted.web import server
from twisted.web.resource import Resource
from twisted.internet.defer import Deferred
from nox.coreapps.pyrt.pycomponent import *
from nox.lib.core import *
from nox.lib.directory import AuthResult
from nox.lib import config
import os
import types
import urllib
import webserver
from webserver import get_current_session
# Names of the built-in roles the user cannot edit; used by
# Capabilities.register below to validate its immutable_roles argument.
all_immutable_roles = ["Superuser",
                       "Admin",
                       "Demo",
                       "Readonly"]
class Capabilities:
    """Stores and provides info on the entire set of defined capabilities."""
    def register(self, name, description, immutable_roles=None):
        """Register a capability.

        Capabilities used to control visibility and actions in the UI
        should be registered using this method in the component's
        install() method.

        Arguments are:

        name: The name of the capability.  This is the string that will
            be used to refer to the capability subsequently in tests,
            etc.

        description: A user-readable description of the capability.
            This will be displayed in the role definition UI to
            assist the user in determining the appropriate capabilities
            to give to the role.

        immutable_roles: A list of the names of immutable roles that
            should have this capability.  Immutable roles are a default
            set of roles provided by Nicira which the user can not
            edit.  The capabilities for each of those roles are built
            from these lists.  This is needed because the capability
            set may change over time and the editable roles will always
            assume a role does not have a capability if the user did
            not specifically set it.  Note it is not necessary to
            include the 'Superuser' role in this list as the
            implementation guarantees that role will have all
            capabilities.
        """
        if immutable_roles == None:
            immutable_roles = []
        else:
            for r in immutable_roles:
                # Reject role names outside the fixed immutable set
                # declared at module level (all_immutable_roles).
                if r not in all_immutable_roles:
                    # NOTE: Python 2 raise syntax — this file predates Python 3.
                    raise InvalidRoleError, "Only roles in webauth.all_immutable_roles are appropiate."
        # _dict maps capability name -> (description, immutable_roles).
        # NOTE(review): _dict is not initialised anywhere in this chunk —
        # presumably set up in an __init__ not shown here; confirm.
        self._dict[name] = (description, immutable_roles)
# Following ensures there is only ever one capabilities manager: the
# class name is rebound to a singleton instance at import time.
Capabilities = Capabilities()
class Role:
    """Named set of capabilities.

    Base class for the concrete role types below (SuperuserRole,
    NoAccessRole).
    """
class SuperuserRole(Role):
    """Role guaranteed to always have all capabilities (see the note in
    Capabilities.register about the 'Superuser' role)."""
class NoAccessRole(Role):
    """Role guaranteed to never have any capabilities."""
class Roles:
    """Manages defined roles."""
# Following ensures there is only ever one roles manager: as with
# Capabilities above, the class name is rebound to a singleton.
Roles = Roles()
class User:
    """User information class"""
    # NOTE(review): no attributes or methods are visible in this chunk;
    # the full definition presumably lives outside this excerpt.
class AuthResource(Resource):
    """UI resource class handling authentication.

    This is a subclass of twisted.web.resource.Resource that ensures that
    the current session is associated with a user with the required
    capabilities set to interact with the resource.  It is intended to be
    subclassed in the same way as its twisted parent class.  Similar to
    the way the Twisted Resource class uses the isLeaf class variable,
    subclasses of this class can use two class variables to control
    authentication:

    noUser: (default=False) if True, no authentication will be
        done for this resource.

    required_capabilities: (default=set()) a set object of capabilities
        the user must hold to interact with this resource.
        Capabilities in the list are supplied as strings naming the
        capability and must also be registered with the capabilities
        manager (webauth.Capabilities).  Alternatively, can be a
        dictionary keyed by request method containing a set of
        capabilities for each request method implemented by the
        resource.  If a method is implemented but has no entry
        in the dictionary, it is assumed that no capabilities
        are required.

    Note that the capability checking is primarily a convenience to
    handle the most common cases for simple resources.  For more
    complex situations such as a resource that parses request.postpath
    and thus supports many different URIs, it may be appropriate for the
    method specific render methods to check capabilities directly.

    This class also sets up component specific template search paths,
    and provides a convenience function to render templates with global
    site configuration information passed into the template using the
    contents of the webserver component siteConfig dictionary.
    """
    # Subclasses set this to True to skip authentication entirely.
    noUser = False
    # Capability names (or dict: request method -> set of names) a user
    # must hold; the empty set means no capability is required.
    required_capabilities = set()
| [
2,
15069,
3648,
357,
34,
8,
8377,
8704,
11,
3457,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
8005,
55,
13,
198,
2,
198,
2,
8005,
55,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
... | 3 | 1,849 |
from . import HydrusConstants as HC
from . import ClientConstants as CC
from . import ClientData
from . import ClientDefaults
from . import ClientDownloading
from . import ClientDragDrop
from . import ClientExporting
from . import ClientCaches
from . import ClientFiles
from . import ClientGUIACDropdown
from . import ClientGUIFrames
from . import ClientGUICommon
from . import ClientGUIDialogsQuick
from . import ClientGUIImport
from . import ClientGUIListBoxes
from . import ClientGUIListCtrl
from . import ClientGUIPredicates
from . import ClientGUIShortcuts
from . import ClientGUITime
from . import ClientGUITopLevelWindows
from . import ClientImporting
from . import ClientTags
from . import ClientThreading
import collections
import gc
from . import HydrusExceptions
from . import HydrusFileHandling
from . import HydrusNATPunch
from . import HydrusNetwork
from . import HydrusPaths
from . import HydrusSerialisable
from . import HydrusTagArchive
from . import HydrusTags
from . import HydrusThreading
import itertools
import os
import random
import re
import queue
import shutil
import stat
import string
import threading
import time
import traceback
import wx
import wx.lib.agw.customtreectrl
import yaml
from . import HydrusData
from . import ClientSearch
from . import HydrusGlobals as HG
# Option Enums
# wx ids allocated once at import time.  Presumably ID_NULL is a
# placeholder id and ID_TIMER_UPDATE identifies an update timer — the
# consumers are not visible in this chunk; confirm against callers.
ID_NULL = wx.NewId()
ID_TIMER_UPDATE = wx.NewId()
| [
6738,
764,
1330,
15084,
14932,
34184,
1187,
355,
27327,
198,
6738,
764,
1330,
20985,
34184,
1187,
355,
12624,
198,
6738,
764,
1330,
20985,
6601,
198,
6738,
764,
1330,
20985,
7469,
13185,
198,
6738,
764,
1330,
20985,
10002,
278,
198,
6738,... | 1.705376 | 1,395 |
# -*- coding: utf-8 -*-
"""
Created on Jul 21 2017
@author: J. C. Vasquez-Correa
"""
import numpy as np
import sys
def PQ(x, k):
    """
    Perturbation Quotient, in percent, of the signal x.

    For every window of k consecutive samples, the mean absolute
    deviation from the window's centre sample is accumulated; the
    result is normalised by the mean absolute signal level and scaled
    to a percentage.

    :params x: input sequence: F0 values or amplitude values per window
    :params k: averaging factor (must be an odd number)
    :returns: the perturbation quotient, or 0 when x is shorter than k
        or k is even
    """
    n_samples = len(x)
    if n_samples < k or k % 2 == 0:
        return 0
    # Index of the centre sample within each k-sample window.
    mid = int(0.5 * (k - 1))
    total = 0
    for start in range(n_samples - k):
        window_dev = 0
        for offset in range(k):
            window_dev = window_dev + x[start + offset] - x[start + mid]
        total = total + np.abs(window_dev / float(k))
    mean_dev = total / (n_samples - k)
    mean_level = np.mean(np.abs(x))
    quotient = 100 * mean_dev / mean_level
    # Diagnostic kept from the original: dump the input whenever the
    # quotient is NaN (e.g. an all-zero signal gives 0/0).
    if np.sum(np.isnan(quotient)) > 0:
        print(x)
    return quotient
def APQ(PAS):
    """
    Amplitude perturbation quotient (APQ)

    Delegates to PQ with a fixed averaging factor k=11; see PQ for the
    definition and its odd-k / minimum-length constraints (inputs
    shorter than 11 samples yield 0).

    :params PAS: sequence of amplitude periods of the signal
    :returns APQ
    """
    return PQ(PAS,11)
def PPQ(PPS):
    """
    Period perturbation quotient (PPQ)

    Delegates to PQ with a fixed averaging factor k=5; see PQ for the
    definition and its odd-k / minimum-length constraints (inputs
    shorter than 5 samples yield 0).

    :params PPS: sequence of pitch periods of the signal
    :returns PPQ
    """
    return PQ(PPS,5)
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
5979,
2310,
2177,
198,
198,
31,
9800,
25,
449,
13,
327,
13,
23663,
22281,
12,
10606,
21468,
198,
37811,
628,
198,
11748,
299,
32152,
355,
... | 2.023077 | 520 |
import sqlparse
from sqlparse.sql import Identifier, Token, Comparison, Statement
from sqlparse.tokens import Token as TokenType
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
| [
11748,
44161,
29572,
198,
6738,
44161,
29572,
13,
25410,
1330,
11440,
7483,
11,
29130,
11,
34420,
11,
21983,
198,
6738,
44161,
29572,
13,
83,
482,
641,
1330,
29130,
355,
29130,
6030,
198,
198,
6738,
285,
1652,
9319,
13,
1069,
11755,
133... | 3.361111 | 72 |
# Copyright 2011-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import contextlib
import copy
import random
import sys
import pickle
sys.path[0:0] = [""]
from bson.py3compat import MAXSIZE
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
from pymongo.message import _maybe_add_read_preference
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import (ReadPreference, MovingAverage,
Primary, PrimaryPreferred,
Secondary, SecondaryPreferred,
Nearest)
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import readable_server_selector, Selection
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test.test_replica_set_client import TestReplicaSetClientBase
from test import (SkipTest,
client_context,
unittest,
db_user,
db_pwd)
from test.utils import connected, single_client, one, wait_until, rs_client
from test.version import Version
# Read-preference class -> replica-set server type that preference is
# expected to select; Nearest is paired with the sentinel string 'any'
# instead of a concrete server type.
_PREF_MAP = [
    (Primary, SERVER_TYPE.RSPrimary),
    (PrimaryPreferred, SERVER_TYPE.RSPrimary),
    (Secondary, SERVER_TYPE.RSSecondary),
    (SecondaryPreferred, SERVER_TYPE.RSSecondary),
    (Nearest, 'any')
]
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
2813,
12,
5304,
42591,
11012,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
1... | 2.726531 | 735 |
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| [
37811,
198,
15269,
2211,
37927,
13200,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
1639,
743,
7... | 3.93662 | 142 |
# MIT licensed
# Copyright (c) 2022 Pekka Ristola <pekkarr [at] protonmail [dot] com>, et al.
from nvchecker.api import session, GetVersionError
# URL template for a CRAN package's DESCRIPTION file; %s is the package name.
CRAN_URL = 'https://cran.r-project.org/package=%s/DESCRIPTION'
# Line prefix that introduces the version field inside DESCRIPTION.
VERSION_FIELD = 'Version: '
| [
2,
17168,
11971,
198,
2,
15069,
357,
66,
8,
33160,
350,
988,
4914,
371,
396,
5708,
1279,
431,
28747,
3258,
685,
265,
60,
386,
1122,
4529,
685,
26518,
60,
401,
22330,
2123,
435,
13,
198,
198,
6738,
299,
85,
9122,
263,
13,
15042,
13... | 2.767442 | 86 |
import click
from gitkit.util.shell import get_output, run
@click.command()
@click.argument("branches", nargs=-1)
def point_here(branches):
    """ Set the given branch refs to point to the current HEAD. """
    # nargs=-1 makes `branches` a (possibly empty) tuple of names.
    if not branches:
        print("No branches passed.")
        return
    # Resolve HEAD once; every listed branch is then forced to it.
    current = get_output("git rev-parse HEAD")
    for branch in branches:
        # update-ref rewrites the branch pointer without checking it
        # out, so the working tree is untouched.
        run(["git", "update-ref", f"refs/heads/{branch}", current])
        print(branch, "set to", current)
| [
11748,
3904,
198,
198,
6738,
17606,
15813,
13,
22602,
13,
29149,
1330,
651,
62,
22915,
11,
1057,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
49140,
7203,
1671,
12140,
1600,
299,
22046,
10779,
16,
8,
198,
4299,
966,
62,
... | 2.689655 | 174 |
"""
Asynchronous stream with a piping system.
Copyright (C) 2017 The Pylp Authors.
This file is under the MIT License.
"""
import asyncio
class Stream():
    """An asynchronous stream containing a transformer with a piping system."""

    def __init__(self):
        # Files buffered by this stream, in arrival order.
        self.files = []
        # Transformer attached via pipe(); None until this stream is piped.
        self.transformer = None
        # Neighbouring streams in the pipeline (downstream / upstream).
        self.next = None
        self.prev = None
        # True once end_of_stream() has been called on this stream.
        self.ended = False
        # Count of files whose transform() task has completed.
        self.transformed = 0
        # Future resolved (with None) once pipe() has attached a transformer.
        self.onpiped = asyncio.Future()

    def append_file(self, file):
        """Append a new file in the stream and schedule its transformation."""
        self.files.append(file)

        if self.transformer:
            future = asyncio.ensure_future(self.transformer.transform(file))
            future.add_done_callback(self.handle_transform)

    def flush_if_ended(self):
        """Call the transformer's 'flush' if all files have been transformed."""
        if self.ended and self.next and len(self.files) == self.transformed:
            future = asyncio.ensure_future(self.transformer.flush())
            # Once flushed, propagate end-of-stream to the downstream stream.
            future.add_done_callback(lambda x: self.next.end_of_stream())

    def handle_transform(self, task):
        """Handle a completed 'transform' task and forward its result downstream."""
        self.transformed += 1

        file = task.result()
        if file:
            self.next.append_file(file)

        self.flush_if_ended()

    def end_of_stream(self):
        """Tell that no more files will be transformed."""
        self.ended = True
        self.flush_if_ended()

    def pipe(self, transformer):
        """Pipe this stream to another; return the new downstream stream."""
        if self.next:
            # Already piped -- piping a second time is a no-op.
            return

        stream = Stream()
        self.next = stream
        stream.prev = self

        self.transformer = transformer
        transformer.stream = self
        transformer.piped()

        # Schedule transformation of any files buffered before piping.
        for file in self.files:
            future = asyncio.ensure_future(self.transformer.transform(file))
            future.add_done_callback(self.handle_transform)

        self.onpiped.set_result(None)
        self.flush_if_ended()
        return stream
| [
37811,
198,
198,
1722,
31301,
4269,
351,
257,
48426,
1080,
13,
198,
198,
15269,
357,
34,
8,
2177,
383,
350,
2645,
79,
46665,
13,
198,
1212,
2393,
318,
739,
262,
17168,
13789,
13,
198,
198,
37811,
198,
198,
11748,
30351,
952,
628,
19... | 2.900749 | 534 |
def noop():
    """Do nothing; exists only to force a trip through CPython's ceval loop."""
    pass
| [
4299,
645,
404,
33529,
198,
220,
220,
220,
37227,
40613,
2163,
284,
26342,
16932,
7535,
269,
18206,
9052,
526,
15931,
198,
220,
220,
220,
1441,
198
] | 3 | 26 |
import base64
import struct
from typing import List
from binascii import hexlify
from itertools import chain
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
| [
11748,
2779,
2414,
198,
11748,
2878,
198,
6738,
19720,
1330,
7343,
198,
6738,
9874,
292,
979,
72,
1330,
17910,
75,
1958,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
18360,
563,
13,
15952,
2611,
13,
19199,
13,
85,
17,
13,
... | 3.351852 | 54 |
# MIT License
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""An API interface for the factory marshaller."""
from __future__ import annotations
__all__: tuple[str, ...] = ("FactoryInterface",)
import abc
import typing
if typing.TYPE_CHECKING:
import collections.abc as collections
from aiobungie import traits
from aiobungie import typedefs
from aiobungie.crate import activity
from aiobungie.crate import application
from aiobungie.crate import character
from aiobungie.crate import clans
from aiobungie.crate import components
from aiobungie.crate import entity
from aiobungie.crate import fireteams
from aiobungie.crate import friends
from aiobungie.crate import milestones
from aiobungie.crate import profile
from aiobungie.crate import records
from aiobungie.crate import season
from aiobungie.crate import user
class FactoryInterface(abc.ABC):
"""An API interface that documents and describes the implementation of the marshaller factory."""
__slots__ = ()
if typing.TYPE_CHECKING:
_net: traits.Netrunner
# Users, Memberships.
@abc.abstractmethod
def deserialize_user(self, data: typedefs.JSONObject) -> user.User:
"""Deserialize a raw JSON results of fetched user memebrships and Bungie.net user its their id.
Parameters
----------
data : `aiobungie.typedefs.JSONObject`
The JSON data/payload.
Returns
-------
`aiobungie.crate.User`
A user object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_bungie_user(self, data: typedefs.JSONObject) -> user.BungieUser:
"""Deserialize a raw JSON Bungie.net user only payload into a user object.
.. note::
This only returns the Bungie.net user and not the Destiny memberships.
Parameters
----------
data : `aiobungie.typedefs.JSONObject`
The JSON data/payload.
Returns
-------
`aiobungie.crate.BungieUser`
A Bungie user object of the deserialized payload.
"""
@abc.abstractmethod
def deseialize_found_users(
self, payload: typedefs.JSONObject
) -> collections.Sequence[user.DestinyUser]:
"""Deserialize a raw JSON of prefix searched users.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.DestinyUser]`
A sequence of the deserialized Destiny users/memberships.
"""
@abc.abstractmethod
def deserialize_partial_bungie_user(
self, payload: typedefs.JSONObject
) -> user.PartialBungieUser:
"""Deserialize a raw JSON of a partial `bungieNetUserInfo`.
A partial user is a bungie.net user payload with missing information from
the main `BungieUser` object.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.PartialBungieUser`
A partial bungie user object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_destiny_user(
self, payload: typedefs.JSONObject
) -> user.DestinyUser:
"""Deserialize a raw JSON of `destinyUserInfo` destiny memberships information.
A destiny user is just destiny memberships, i.e., Xbox membershio, Steam membership. etc.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.user.DestinyUser`
A destiny membership/user object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_destiny_members(
self, data: typedefs.JSONArray
) -> collections.Sequence[user.DestinyUser]:
"""Deserialize a raw JSON payload/array of `destinyUserInfo`.
Parameters
----------
payload : `aiobungie.typedefs.JSONArray`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.user.DestinyUser]`
A sequence of destiny membership/user object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_user_themes(
self, payload: typedefs.JSONArray
) -> collections.Sequence[user.UserThemes]:
"""Deserialize a raw JSON array of Bungie user themes.
Parameters
----------
payload : `aiobungie.typedefs.JSONArray`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.user.UserThemes]`
A sequence of bungie user themes object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_players(
self, payload: typedefs.JSONArray, /
) -> collections.Sequence[user.DestinyUser]:
"""Deserialize a raw JSON sequence of players.
Parameters
----------
payload : `aiobungie.typedefs.JSONArray`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.user.DestinyUser]`
A sequence of players object of the deserialized payload.
.. note::
This typically returns just 1 element
but keeping it a sequence to match the JSON array signature.
"""
@abc.abstractmethod
def deserialize_user_credentials(
self, payload: typedefs.JSONArray
) -> collections.Sequence[user.UserCredentials]:
"""Deserialize a JSON array of Bungie user credentials.
Parameters
----------
payload : `aiobungie.typedefs.JSONArray`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.UserCredentials]`
A sequence of user's credentials.
"""
# Clans, Groups.
@abc.abstractmethod
def deseialize_clan_owner(self, data: typedefs.JSONObject) -> clans.ClanMember:
"""Deserialize a raw JSON payload of clan founder information.
Parameters
----------
data : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ClanMember`
A clan owner object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_clan(self, payload: typedefs.JSONObject) -> clans.Clan:
"""Deserialize a raw JSON payload of Bungie clan information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.Clan`
A clan owner object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_group_member(
self, payload: typedefs.JSONObject
) -> typedefs.NoneOr[clans.GroupMember]:
"""Deserialize a JSON payload of group information for a member.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.typedefs.NoneOr[aiobungie.crate.GroupMember]`
A group member object of the deserialized payload. This can return `None` if nothing was found.
"""
@abc.abstractmethod
def deserialize_clan_admins(
self, payload: typedefs.JSONObject
) -> collections.Sequence[clans.ClanAdmin]:
"""Deserialize a JSON payload of clan admins/owners information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.ClanAdmin]`
A sequence of clan admins object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_clan_member(self, data: typedefs.JSONObject, /) -> clans.ClanMember:
"""Deserialize a JSON payload of a clan member information.
Parameters
----------
data : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ClanMember`
A clan member object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_clan_members(
self, data: typedefs.JSONObject, /
) -> collections.Sequence[clans.ClanMember]:
"""Deserialize a JSON payload of a clan members information.
Parameters
----------
data : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.ClanMember]`
A sequence of clan members of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_clan_convos(
self, payload: typedefs.JSONArray
) -> collections.Sequence[clans.ClanConversation]:
"""Deserialize a JSON array of a clan conversations information.
Parameters
----------
payload : `aiobungie.typedefs.JSONArray`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.ClanConversation]`
A sequence of clan conversations of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_clan_banners(
self, payload: typedefs.JSONObject
) -> collections.Sequence[clans.ClanBanner]:
"""Deserialize a JSON array of a clan banners information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.ClanBanner]`
A sequence of clan banners of the deserialized payload.
"""
# Application
@abc.abstractmethod
def deserialize_app_owner(
self, payload: typedefs.JSONObject
) -> application.ApplicationOwner:
"""Deserialize a JSON payload of Bungie Developer portal application owner information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.application.ApplicationOwner`
An application owner object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_app(self, payload: typedefs.JSONObject) -> application.Application:
"""Deserialize a JSON payload of Bungie Developer portal application information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.application.Application`
An application object of the deserialized payload.
"""
# Characters.
@abc.abstractmethod
def deserialize_character_component(
self, payload: typedefs.JSONObject
) -> components.CharacterComponent:
"""Deserialize a JSON payload of Destiny 2 character component.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.CharacterComponent`
A character component object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_character_render_data(
self, payload: typedefs.JSONObject, /
) -> character.RenderedData:
"""Deserialize a JSON payload of a profile character render data component.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.RenderedData`
A character rendered data profile component.
"""
@abc.abstractmethod
def deserialize_character_minimal_equipments(
self, payload: typedefs.JSONObject
) -> character.MinimalEquipments:
"""Deserialize a singular JSON peer view of equipment found in character render data profile component.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.character.MinimalEquipments`
A minimal equipment object.
"""
@abc.abstractmethod
def deserialize_character_dye(self, payload: typedefs.JSONObject) -> character.Dye:
"""Deserialize a JSON payload of a character's dye information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.character.Dye`
Information about a character dye object.
"""
@abc.abstractmethod
def deserialize_character_customazition(
self, payload: typedefs.JSONObject
) -> character.CustomizationOptions:
"""Deserialize a JSON payload of a character customization information found in character
render data profile component.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.character.CustomizationOptions`
Information about a character customs object.
"""
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
# Profiles.
@abc.abstractmethod
def deserialize_profile_progression(
self, payload: typedefs.JSONObject
) -> profile.ProfileProgression:
"""Deserialize a JSON payload of a profile progression component.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ProfileProgression`
A profile progression component object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_profile(
self, payload: typedefs.JSONObject, /
) -> typing.Optional[profile.Profile]:
"""Deserialize a JSON payload of Bungie.net profile information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`typing.Optional[aiobungie.crate.Profile]`
A profile object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_profile_items(
self, payload: typedefs.JSONObject, /
) -> typing.Optional[collections.Sequence[profile.ProfileItemImpl]]:
"""Deserialize a JSON payload of profile items component information.
This may deserialize `profileInventories` or `profileCurrencies` or any
other alternatives.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`typing.Optional[collections.Sequence[aiobungie.crate.ProfileItemImpl]]`
A profile component object that contains items of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_linked_profiles(
self, payload: typedefs.JSONObject
) -> profile.LinkedProfile:
"""Deserialize a JSON payload of Bungie.net hard linked profile information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.LinkedProfile`
A hard linked profile object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_profile_item(
self, payload: typedefs.JSONObject
) -> profile.ProfileItemImpl:
"""Deserialize a JSON payload of a singular profile component item.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ProfileItemImpl`
A concerete implementation of a profile component item.
"""
# Components
@abc.abstractmethod
def deserialize_components(
self, payload: typedefs.JSONObject
) -> components.Component:
"""Deserialize a JSON payload of Bungie.net profile components information.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON payload.
Returns
-------
`aiobungie.crate.Component`
A component implementation that includes all other components
of the deserialized payload.
"""
# Records
@abc.abstractmethod
def deserialize_records(
self,
payload: typedefs.JSONObject,
scores: typing.Optional[records.RecordScores] = None,
**nodes: int,
) -> records.Record:
"""Deserialize a JSON object of a profile record component.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON object payload
scores: `typing.Optional[records.RecordScores]`
The records scores object.
This exists only to keep the signature of `aiobungie.crate.CharacterRecord` with the record object.
As it will always be `None` in that object.
**nodes: `int`
An int kwargs use to grab the node hashes while deserializing components.
Returns
-------
`aiobungie.records.Record`
A standard implementation of a profile record component.
"""
@abc.abstractmethod
def deserialize_character_records(
self,
payload: typedefs.JSONObject,
scores: typing.Optional[records.RecordScores] = None,
record_hashes: typing.Optional[list[int]] = None,
) -> records.CharacterRecord:
"""Deserialize a JSON object of a profile character record component.
This almost does the same this as `deserialize_records` but
has more fields which can only be found in a character record.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON object payload
scores: `typing.Optional[records.RecordScores]`
The records scores object.
record_hashes: `typing.Optional[list[int]]`
A list of record hashes that's included during deserializing the component.
Returns
-------
`aiobungie.records.CharacterRecord`
A standard implementation of a profile character record component.
"""
@abc.abstractmethod
@abc.abstractmethod
@abc.abstractmethod
def deserialize_objectives(self, payload: typedefs.JSONObject) -> records.Objective:
"""Deserialize a JSON payload of an objective found in a record profile component.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON payload.
Returns
-------
`aiobungie.crate.records.Objective`
A record objective object.
"""
# Inventory entities and Definitions.
@abc.abstractmethod
def deserialize_inventory_entity(
self, payload: typedefs.JSONObject, /
) -> entity.InventoryEntity:
"""Deserialize a JSON payload of an inventory entity item information.
This can be any item from `DestinyInventoryItemDefinition` definition.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.InventoryEntity`
An entity item object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_inventory_results(
self, payload: typedefs.JSONObject
) -> collections.Sequence[entity.SearchableEntity]:
"""Deserialize results of searched Destiny2 entities.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.SearchableEntity]`
A sequence of the found searched entities.
"""
@abc.abstractmethod
def deserialize_objective_entity(
self, payload: typedefs.JSONObject, /
) -> entity.ObjectiveEntity:
"""Deserialize a JSON payload of an objetive entity information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ObjectiveEntity`
An objetive entity object of the deserialized payload.
"""
# Activities.
@abc.abstractmethod
def deserialize_activity(
self, payload: typedefs.JSONObject, /
) -> activity.Activity:
"""Deserialize a JSON payload of an activity history information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.Activity`
An activity object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_activities(
self, payload: typedefs.JSONObject, /
) -> collections.Sequence[activity.Activity]:
"""Deserialize a JSON payload of an array of activity history information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.Activity]`
A sequence of activity objects of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_post_activity(
self, payload: typedefs.JSONObject
) -> activity.PostActivity:
"""Deserialize a JSON payload of a post activity information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.PostActivity`
A post activity object.
"""
@abc.abstractmethod
def deserialize_available_activity(
self, payload: typedefs.JSONObject
) -> activity.AvailableActivity:
"""Deserialize a JSON payload of an available activities.
This method is used to deserialize an array of `aiobungie.crate.CharacterActivity.available_activities`.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.AvailableActivity`
An available activity object.
"""
@abc.abstractmethod
def deserialize_character_activity(
self, payload: typedefs.JSONObject
) -> activity.CharacterActivity:
"""Deserialize a JSON payload of character activity profile component.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.CharacterActivity`
A character activities component object.
"""
@abc.abstractmethod
def deserialize_extended_weapon_values(
self, payload: typedefs.JSONObject
) -> activity.ExtendedWeaponValues:
"""Deserialize values of extended weapons JSON object.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.ExtendedWeaponValues`
Information about an extended weapon values.
"""
# Milestones.
@abc.abstractmethod
def deserialize_public_milestone_content(
self, payload: typedefs.JSONObject
) -> milestones.MilestoneContent:
"""Deserialize a JSON payload of milestone content information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.MilestoneContent`
A milestone content object of the deserialized payload.
"""
@abc.abstractmethod
# Social and friends.
@abc.abstractmethod
def deserialize_friend(self, payload: typedefs.JSONObject, /) -> friends.Friend:
"""Deserialize a JSON payload of a Bungie friend information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.Friend`
A friend object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_friends(
self, payload: typedefs.JSONObject
) -> collections.Sequence[friends.Friend]:
"""Deserialize a JSON sequence of Bungie friends information.
This is usually used to deserialize the incoming/outgoing friend requests.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.Friend]`
A sequence of friends object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_friend_requests(
self, payload: typedefs.JSONObject
) -> friends.FriendRequestView:
"""Deserialize a JSON sequence of Bungie friend requests information.
This is used for incoming/outgoing friend requests.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.FriendRequestView]`
A sequence of incoming and outgoing friends object of the deserialized payload.
"""
# Fireteams.
@abc.abstractmethod
def deserialize_fireteams(
self, payload: typedefs.JSONObject
) -> typedefs.NoneOr[collections.Sequence[fireteams.Fireteam]]:
"""Deserialize a JSON sequence of Bungie fireteams information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`collections.Sequence[aiobungie.crate.Fireteam]`
A sequence of fireteam object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_fireteam_destiny_users(
self, payload: typedefs.JSONObject
) -> fireteams.FireteamUser:
"""Deserialize a JSON payload of Bungie fireteam destiny users information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.FireteamUser`
A fireteam user object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_fireteam_members(
self, payload: typedefs.JSONObject, *, alternatives: bool = False
) -> typing.Optional[collections.Sequence[fireteams.FireteamMember]]:
"""Deserialize a JSON sequence of Bungie fireteam members information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
alternatives : `bool`
If set to `True`, Then it will deserialize the `alternatives` data in the payload.
If not the it will just deserialize the `members` data.
Returns
-------
`typing.Optional[collections.Sequence[aiobungie.crate.FireteamUser]]`
An optional sequence of the fireteam members object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_available_fireteams(
self, data: typedefs.JSONObject, *, no_results: bool = False
) -> typing.Union[
fireteams.AvalaibleFireteam, collections.Sequence[fireteams.AvalaibleFireteam]
]:
"""Deserialize a JSON payload of a sequence of/fireteam information.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
no_results : `bool`
Whether to deserialize the data from `results` in the payload or not.
Returns
-------
`typing.Union[aiobungie.crate.fireteams.AvalaibleFireteam, collections.Sequence[aiobungie.crate.fireteams.AvalaibleFireteam]]` # noqa: E501
An available fireteam or a sequence of available fireteam object of the deserialized payload.
"""
@abc.abstractmethod
def deserialize_fireteam_party(
self, payload: typedefs.JSONObject
) -> fireteams.FireteamParty:
"""Deserialize a JSON payload of `profileTransitory` component response.
Parameters
----------
payload : `aiobungie.typedefs.JSONObject`
The JSON payload.
Returns
-------
`aiobungie.crate.FireteamParty`
A fireteam party object of the current fireteam.
"""
# Seasonal content.
@abc.abstractmethod
def deserialize_seasonal_artifact(
self, payload: typedefs.JSONObject
) -> season.Artifact:
"""Deserialize a JSON payload of a Destiny 2 seasonal artifact information.
Parameters
----------
payload : `aiobungie.internal.helpers.JsonObject`
The JSON payload.
Returns
-------
`aiobungie.crate.Artifact`
A seasonal artifact object of the deserialized payload.
"""
| [
2,
17168,
13789,
198,
2,
15069,
357,
66,
8,
12131,
532,
21662,
299,
742,
5439,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696... | 2.450355 | 12,670 |
import logging
import time
import adb
from .. import app_test
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
640,
198,
198,
11748,
512,
65,
198,
6738,
11485,
1330,
598,
62,
9288,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.090909 | 33 |
# -*- coding: utf-8 -*-
from mmvae_hub.utils.utils import save_and_log_flags
from torch.utils.tensorboard import SummaryWriter
from mmvae_hub.base.BaseCallback import BaseCallback
from mmvae_hub.base.BaseTrainer import BaseTrainer
from mmvae_hub.celeba.CelebaLogger import CelebALogger
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
8085,
33353,
62,
40140,
13,
26791,
13,
26791,
1330,
3613,
62,
392,
62,
6404,
62,
33152,
198,
6738,
28034,
13,
26791,
13,
83,
22854,
3526,
1330,
21293,
34379,
198,
... | 3.06383 | 94 |
#!/usr/bin/env python3
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
# -*- coding: utf-8 -*-
# BSD LICENSE
#
# Copyright (c) 2016, Boying Xu All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# PEP 484 -- Type Hints
# https://www.python.org/dev/peps/pep-0484/
# https://docs.python.org/3/library/typing.html
import argparse
import asyncio
import asyncio.subprocess
import functools
import logging
import os
import re
import sys
from pathlib import Path
from typing import List
LOOP = None # type: asyncio.AbstractEventLoop
LOG = None # type: logging.Logger
CFG = None # type: MyConfig
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='my sync')
parser.add_argument('--local_dir', type=str, nargs=1, help='local_dir(Only on this machine)', required=True)
parser.add_argument('--target_dir', type=str, nargs=1,
help='target_dir(On this machine or remote machine)', required=True)
parser.add_argument('--remote', help='If the target_dir is on remote machine, use this flag',
required=False, action='store_true')
parser.add_argument('--ssh_tunnel_port', type=int, nargs=1, help='ssh tunnel port option', required=False)
parser.add_argument('--gitignore', help='Ignore the file in local_dir/.gitignore',
required=False, action='store_true')
parser.add_argument('--init', help='List all the files in local dir and sync to target',
required=False, action='store_true')
parser.add_argument('--dry_run', help='Dry run, not really do the sync', required=False, action='store_true')
arguments = parser.parse_args()
local_dir = arguments.local_dir[0]
target_dir = arguments.target_dir[0]
gitignore = None
if arguments.gitignore:
P = Path(local_dir)
gitignore_path = P / '.gitignore'
if gitignore_path.is_file():
gitignore = GitIgnore(fn=str(gitignore_path))
if not os.path.isdir(local_dir):
parser.error("local_dir: %s does not exist" % local_dir)
if local_dir[-1:] != "/":
local_dir += "/"
if not arguments.remote:
if not os.path.isdir(target_dir):
parser.error("target_dir: %s does not exist" % target_dir)
if target_dir[-1:] != "/":
target_dir += "/"
ssh_tunnel_port = arguments.ssh_tunnel_port[0] if arguments.ssh_tunnel_port else None
CFG = MyConfig(local_dir_=local_dir, target_dir_=target_dir, remote_=arguments.remote,
ssh_tunnel_port_=ssh_tunnel_port, gitignore_=gitignore, dry_run_=arguments.dry_run)
if sys.platform == 'win32':
print("Windows Platform is not supported")
sys.exit(1)
# LOOP = asyncio.ProactorEventLoop() # type: asyncio.windows_events.ProactorEventLoop
# asyncio.set_event_loop(LOOP)
else:
LOOP = asyncio.get_event_loop() # type: asyncio.AbstractEventLoop
LOG = prepare_logging("program")
if arguments.init:
LOOP.run_until_complete(init())
LOOP.close()
else:
tasks = asyncio.gather(asyncio.ensure_future(main()))
try:
# asyncio.ensure_future(main())
LOOP.run_until_complete(tasks)
LOOP.run_forever()
except KeyboardInterrupt:
tasks.cancel()
LOOP.run_forever()
tasks.exception()
finally:
LOOP.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
43907,
25,
8295,
521,
298,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
4292,
8658,
2705,
8658,
11338,
28,
19,
2393,
4906,
28,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
... | 2.671203 | 1,837 |
from keras.layers import Lambda, SimpleRNN, Concatenate, Subtract, Reshape
| [
6738,
41927,
292,
13,
75,
6962,
1330,
21114,
6814,
11,
17427,
49,
6144,
11,
1482,
9246,
268,
378,
11,
3834,
83,
974,
11,
1874,
71,
1758,
628
] | 2.814815 | 27 |
# Import Pytest Package
import pytest
from solution import alternatingCharacters | [
2,
17267,
9485,
9288,
15717,
198,
11748,
12972,
9288,
198,
198,
6738,
4610,
1330,
39623,
48393
] | 5.0625 | 16 |
from django.urls import path
from . import views
app_name = 'articles'
urlpatterns = [
path(
'',
views.ArticlesListView.as_view(),
name = 'list'),
path(
'create/',
views.ArticleCreateView.as_view(),
name = 'create'),
path(
'created/',
views.ArticleCreatedTemplateView.as_view(),
name = 'created'),
path(
'<int:pk>/',
views.ArticleDetailView.as_view(),
name = 'detail'),
path(
'<int:pk>/delete/',
views.ArticleDeleteView.as_view(),
name = 'delete'),
path(
'deleted/',
views.ArticleDeletedTemplateView.as_view(),
name = 'deleted'),
path(
'<int:pk>/update/',
views.ArticleUpdateView.as_view(),
name = 'update'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
26845,
6,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7,
198,
220,
220,
220,
220,
220,
220,
220,
705,... | 2.024938 | 401 |
from django.conf.urls.defaults import *
urlpatterns = patterns('browse.views',
url(r'^spell/(?P<slug>[\w-]+)/$', 'spell_detail', name='spell_detail'),
url(r'^feat/(?P<slug>[\w-]+)/$', 'feat_detail', name='feat_detail'),
url(r'^monster/(?P<slug>[\w-]+)/$', 'monster_detail', name='monster_detail'),
url(r'^favorites/$', 'favorites', name='favorite_lists'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
25367,
325,
13,
33571,
3256,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
46143,
29006,
30,
47,
27,
6649,
1018,
36937,
... | 2.35625 | 160 |
from django.shortcuts import render
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198
] | 4 | 9 |
# Generated by Django 3.2.9 on 2021-11-03 17:25
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33448,
12,
1157,
12,
3070,
1596,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from collections import defaultdict, deque
import re
CHALLENGE_DAY = "3"
REAL = open(CHALLENGE_DAY + ".txt").read()
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()
SAMPLE_EXPECTED = 2
sample = solve(SAMPLE)
if sample != SAMPLE_EXPECTED:
print("SAMPLE FAILED: ", sample, " != ", SAMPLE_EXPECTED)
assert sample == SAMPLE_EXPECTED
print("\n*** SAMPLE PASSED ***\n")
solved = solve(REAL)
print("SOLUTION: ", solved)
import pandas as pd
df=pd.DataFrame([str(solved)])
df.to_clipboard(index=False,header=False)
print("COPIED TO CLIPBOARD")
| [
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
11748,
302,
198,
198,
3398,
7036,
1677,
8264,
62,
26442,
796,
366,
18,
1,
198,
2200,
1847,
796,
1280,
7,
3398,
7036,
1677,
8264,
62,
26442,
1343,
27071,
14116,
11074,
961,
3419,
198,... | 2.607656 | 209 |
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
| [
6738,
42625,
14208,
1330,
5107,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
201,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
201,
198,
6738,
42625,
... | 3.186667 | 75 |
from .base import TestExternalDatabase
from .compat import long_, unicode_
| [
6738,
764,
8692,
1330,
6208,
41506,
38105,
198,
6738,
764,
5589,
265,
1330,
890,
62,
11,
28000,
1098,
62,
198
] | 3.75 | 20 |
# SPDX-License-Identifier: MIT
# (c) 2019 The TJHSST Director 4.0 Development Team & Contributors
from django.apps import AppConfig
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
2,
357,
66,
8,
13130,
383,
41852,
7998,
2257,
5890,
604,
13,
15,
7712,
4816,
1222,
25767,
669,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.35 | 40 |
from pyclist.admin import BaseModelAdmin, admin_register
from django.contrib import admin
from tg.models import Chat, History
@admin_register(Chat)
@admin_register(History)
| [
6738,
12972,
565,
396,
13,
28482,
1330,
7308,
17633,
46787,
11,
13169,
62,
30238,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
256,
70,
13,
27530,
1330,
24101,
11,
7443,
628,
198,
31,
28482,
62,
30238,
7,
30820,
8,
... | 3.403846 | 52 |
# Generated by Django 3.2.8 on 2021-11-09 14:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
2931,
1478,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
from . import video_transforms as T
from . import object_transforms as OT
| [
6738,
764,
1330,
2008,
62,
7645,
23914,
355,
309,
198,
6738,
764,
1330,
2134,
62,
7645,
23914,
355,
21676,
628
] | 3.75 | 20 |
import copy
import os
import torch
import wandb
from soundbay.utils.logging import Logger
from soundbay.inference import predict
from soundbay.trainers import Trainer
from pathlib import Path
from soundbay.utils.app import App
from omegaconf import DictConfig
| [
11748,
4866,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
11569,
65,
198,
6738,
2128,
24406,
13,
26791,
13,
6404,
2667,
1330,
5972,
1362,
198,
6738,
2128,
24406,
13,
259,
4288,
1330,
4331,
198,
6738,
2128,
24406,
13,
27432,
364,
13... | 3.71831 | 71 |
#!/usr/bin/env python3
# Copyright Yuri Astrakhan <YuriAstrakhan@gmail.com>
import json
import logging
import time
from typing import Dict
import argparse
import requests
from datetime import datetime
from utils import stringify, chunks, query_status, set_status_query, parse_utc
from sparql import Sparql
info_keys = [
'count_all', 'count_all_fraction', 'count_nodes', 'count_nodes_fraction', 'count_ways', 'count_ways_fraction',
'count_relations', 'count_relations_fraction', 'values_all', 'users_all'
]
if __name__ == '__main__':
UpdateUsageStats().run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
38450,
8304,
17716,
7637,
1279,
56,
9900,
32,
12044,
74,
7637,
31,
14816,
13,
785,
29,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
19720,
1330,... | 2.928934 | 197 |
from OpenGL import GL
# Pyopengl shader compilation errors are unreadable, so we'll define our own
# routine.
| [
6738,
30672,
1330,
10188,
198,
198,
2,
9485,
404,
1516,
75,
33030,
23340,
8563,
389,
555,
46155,
11,
523,
356,
1183,
8160,
674,
898,
198,
2,
8027,
13,
628,
198
] | 3.766667 | 30 |
# encoding: utf-8
"""
Read images and corresponding labels.
"""
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import random
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
5569,
4263,
290,
11188,
14722,
13,
198,
37811,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
... | 3.468085 | 47 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# constant values which we define at the first of the code.
# url to create user
CREATE_USER_URL = reverse('user:create')
# url to generate the user token
TOKEN_URL = reverse('user:token')
# update user endpoint url user/"me" => the account of the user who is authenticated
ME_URL = reverse('user:me')
# helper function
# public api is not authenticated (without creating user and logging in)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful"""
# the payload is an object which you pass to an API when you make the request.
payload = {
'email': 'falkel@gmail.com',
'password': '123eret4',
'name': 'fateme'
}
# lets make our request (http post request):
res = self.client.post(CREATE_USER_URL, payload) # this res return "a created user"
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {'email': 'falkel@gmail.com', 'password': '1234er'}
# we put ** before the name of dictionary to make it cleaner as arguments of function.
# it means we created a user with this info before we send post request to create page.
create_user_customized(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {
'email': 'falkel@gmail.com',
'password': 'pw'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
# --------------------------------------- user token tests
def test_create_token_for_user(self):
"""Test that a token created for the user"""
payload = {'email': 'falkel@gmail.com', 'password': '12344w5'}
# we are going to create a user by helper function that match this authentication, so we can test it.
create_user_customized(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user_customized(email='fateme@test.com', password='testpass')
payload = {'email': 'fateme@test.com', 'password': 'passnotthesame'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user does not exist"""
payload = {'email': 'fateme@test.com', 'password': 'passtest'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# ----------------------------------------------------manage user tests
# the authentication is required for the endpoint (it's an important part)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
# authentication is required before you can use those points.
# private api is like modifying user or making some change in user.
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
"""Set the Authentication for whole tests in this class"""
self.user = create_user_customized(
email='test3@hotmail.com',
password='testpass2',
name='fateme3'
)
# Just make reusable client
self.client = APIClient()
# to forcte to authenticate any requests that the client makes with our sample user
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
# res does not send the hash password it only sends email and name
self.assertEqual(res.data, {
'email': self.user.email,
'name': self.user.name
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new_name', 'password': 'new_password'}
res = self.client.patch(ME_URL, payload)
# a helper function which refresh database by latest values
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
34... | 2.637056 | 2,364 |
"""Provide the common bit-vector operators."""
import collections
import functools
import itertools
import math
from sympy.core import cache
from sympy.printing import precedence as sympy_precedence
from cascada.bitvector import context
from cascada.bitvector import core
zip = functools.partial(zip, strict=True)
def _cacheit(func):
"""Cache functions if `CacheContext` is enabled."""
cfunc = cache.cacheit(func)
return cached_func
# noinspection PyUnresolvedReferences
class Operation(core.Term):
"""Represent bit-vector operations.
A bit-vector operation is a mathematical function that takes some bit-vector
operands (i.e. `Term`) and some scalar operands (i.e. `int`),
and returns a single bit-vector term. Often, *operator* is used to
denote the Python class representing the mathematical function (without operands)
and *operation* is used to denote the application of an operator to some operands.
A particular operator (a subclass of `Operation`) can be evaluated
by instantiating an object with the operands as the object arguments.
The instantiation internally calls the method `eval`, containing the logic
of the operator. This behaviour is similar as those of the SymPy classes
`Add <https://docs.sympy.org/latest/modules/core.html?highlight=add#sympy.core.add.Add>`_
or `Mul <https://docs.sympy.org/latest/modules/core.html?highlight=mul#sympy.core.mul.Mul>`_.
Unless the `Simplification` context is disabled (enabled by default),
many of the operations automatically simplify complex expressions
by applying basic rules from `Boolean algebra
<https://en.wikipedia.org/wiki/Bitwise_operation#Boolean_algebra>`_.
Moreoever, operations are also affected by other context managers
such as `Cache`, `PrimaryOperationEvaluation`, `SecondaryOperationEvaluation`,
`Validation` or `Memoization`.
This class is not meant to be instantiated but to provide a base
class for the two types of operations: `PrimaryOperation`
and `SecondaryOperation`.
.. note::
New bit-vector operations can be defined by subclassing `SecondaryOperation`
(see `BvMaj` as example). It is also possible to define new bit-vector
operations by subclassing `PrimaryOperation`, but this is not recommended.
Attributes:
arity: a pair of number specifying the number of bit-vector operands
(at least one) and scalar operands, respectively.
is_symmetric: True if the operator is symmetric with respect to
its operands (a permutation of the inputs does not change the output).
Operators with scalar operands cannot be symmetric.
is_simple: True if the operator is *simple*, that is, all its
operands are bit-vector of the same width. Simple operators allow
*Automatic Constant Conversion*, that is, instead of passing
all arguments as bit-vector types, it is possible to pass
arguments as plain integers.
::
>>> from cascada.bitvector.core import Constant
>>> (Constant(1, 8) + 1).vrepr()
'Constant(0b00000010, width=8)'
operand_types: a list specifying the types of the operands (optional
if all operands are bit-vectors)
alt_name: an alternative name used when printing (optional)
unary_symbol: a symbol used when printing (optional)
infix_symbol: a symbol used when printing (optional)
.. Implementation details:
New operations should be added in the header of test_operation.
"""
is_Atom = False
precedence = sympy_precedence.PRECEDENCE["Func"]
is_symmetric = False
is_simple = False
@_cacheit
@classmethod
def _simplify(self):
"""Simplify the bit-vector operation.
Return the simplified value and a boolean flag depending on
whether the expression was reduced.
"""
return self, False
def _binary_symmetric_simplification(self, compatible_terms):
"""Simplify a binary symmetric operation.
Replace pairs of *compatible connected terms* by their resulting value.
* Two terms are connected if they are arguments of the same operator
node when the bit-vector expression is flattened (e.g. ``z``
and ``t`` are connected in ``((x ^ y) + z) + t``.
* Two connected terms are compatible if they can be simplified.
For example, two constants are always compatible.
Note that this function assumed the arguments of the operation
are already simplified.
Args:
compatible_terms: a list of lambda functions specifying
the compatible terms for a particular operator.
"""
op = type(self)
assert isinstance(compatible_terms, collections.abc.Sequence)
# noinspection PyShadowingNames
# noinspection PyShadowingNames
x, y = self.args
modified = False # modified
if isinstance(x, core.Constant) and isinstance(y, op):
new_expr, modified = replace_constant(x, expr=y)
elif isinstance(y, core.Constant) and isinstance(x, op):
new_expr, modified = replace_constant(y, expr=x)
if not modified and not isinstance(x, core.Constant) and isinstance(y, op):
new_expr, modified = replace_term(x, [f(x) for f in compatible_terms], y)
if not modified and not isinstance(y, core.Constant) and isinstance(x, op):
new_expr, modified = replace_term(y, [f(y) for f in compatible_terms], x)
if not modified and isinstance(x, op) and isinstance(y, op):
x1, x2 = x.args
if op(x1, y, evaluate=False) != op(x1, y):
new_expr = op(x2, op(x1, y))
modified = True
if not modified and op(x2, y, evaluate=False) != op(x2, y):
new_expr = op(x1, op(x2, y))
modified = True
if not modified:
new_expr, modified = op(x1, y)._simplify()
new_expr = op(x2, new_expr)
if not modified:
new_expr, modified = op(x2, y)._simplify()
new_expr = op(x1, new_expr)
if modified:
# noinspection PyUnboundLocalVariable
return new_expr, True
else:
return self, False
@classmethod
def condition(cls, *args):
"""Check if the operands verify the restrictions of the operator."""
return True
def output_width(*args):
"""Return the bit-width of the resulting bit-vector."""
raise NotImplementedError("subclasses need to override this method")
@classmethod
def eval(cls, *args):
"""Evaluate the operator with given operands.
This is an internal method that assumes the list ``args`` has been parsed.
To evaluate a bit-vector operation, instantiate a new object with
the operands as the object arguments (i.e., use the Python operator ``()``).
"""
raise NotImplementedError("subclasses need to override this method")
def formula_size(self):
"""The formula size of the operation."""
size = 1 + bin_enc(self.width)
for arg in self.args:
if isinstance(arg, int):
size += bin_enc(arg)
else:
size += arg.formula_size()
return size
def memoization_table(self, id_prefix="x", decompose_sec_ops=True):
"""Return a decomposition of the current operation into simple assignments.
Given an `Operation` object :math:`F(a_1, a_2, \dots)`, this method
decomposes it into a list of *simple* assignments
:math:`x_{i+1} \leftarrow f_i(x_i)` such that the last output variable
:math:`x_{n}` represents the output of :math:`F(a_1, a_2, \dots)`.
The i-th assignment is given by:
- the output `Variable` :math:`x_{i+1}` that represents
the output of :math:`f_i(x_i)`,
- the `Operation` object :math:`f_i(x_i)` where the input
:math:`x_i` is a previous output `Variable`,
an input `Variable` (of :math:`F(a_1, a_2, \dots)`),
a `Constant`, a scalar, o a list of them
but not an `Operation` object.
The list of assignments is given as a `MemoizationTable`,
and it is obtained by re-evaluating the current operation
under the `Memoization` context.
The argument ``id_prefix`` is the string prefix used to name
intermediate variables and the argument ``decompose_sec_ops``
determines whether to use the context `SecondaryOperationEvaluation`.
In other words, if ``decompose_sec_ops`` is ``True``,
`SecondaryOperation` objects are not allowed in the list of
assignments and they are replaced by their
decomposition into `PrimaryOperation` objects.
>>> from cascada.bitvector.core import Variable
>>> from cascada.bitvector.secondaryop import BvMaj
>>> expr = 1 + BvMaj(2, Variable("a_1", 8), 3 | Variable("a_2", 8))
>>> expr.memoization_table() # doctest: +NORMALIZE_WHITESPACE
MemoizationTable([(x0, a_2 | 0x03), (x1, 0x02 & a_1), (x2, 0x02 & x0),
(x3, x1 | x2), (x4, a_1 & x0), (x5, x3 | x4), (x6, x5 + 0x01), (x7, Id(x6))])
>>> expr.memoization_table(decompose_sec_ops=False)
MemoizationTable([(x0, a_2 | 0x03), (x1, BvMaj(0x02, a_1, x0)), (x2, x1 + 0x01), (x3, Id(x2))])
.. Implementation details:
To use this method not only for decomposing secondary operations
but also for printing complex bit-vector expressions:
- This method is added to Operation and not to SecondaryOperation,
to support (1 + BvMaj()).decompose() and printing any complex operation.
- SSA is not returned, just the table (input/output vars not needed)
- Constant and scalar inputs are supported (for printing)
"""
table = context.MemoizationTable(id_prefix=id_prefix)
with context.Memoization(table):
output = self.doit(eval_sec_ops=decompose_sec_ops)
last_assignment = BvIdentity(output, evaluate=False)
assert not table.contain_op(last_assignment)
table.add_op(last_assignment)
return table
class PrimaryOperation(Operation):
"""Represent the primary bit-vector operations.
The primary bit-vector operations are those *basic*
operations that are included in the bit-vector theory of the
`SMT_LIBv2 <http://smtlib.cs.uiowa.edu/theories-FixedSizeBitVectors.shtml>`_
format.
The primary operations are `BvAnd`, `BvOr`, `BvXor`, `BvComp`, `BvUlt`,
`BvUle`, `BvUgt`, `BvUge`, `BvShl`, `BvLshr`, `RotateLeft`, `RotateRight`,
`Concat`, `BvAdd`, `BvSub`, `BvMul`, `BvUdiv`, `BvUrem`, `BvNeg`, `BvNot`,
`Extract`, `Ite` and `BvIdentity`.
This class is not meant to be instantiated but to provide a base
class to define primary operators.
"""
@classmethod
class SecondaryOperation(Operation):
"""Represent secondary bit-vector operations.
Secondary bit-vector operations are those bit-vector operations
that are not primary operations (see `PrimaryOperation`).
Secondary operations must be defined in terms of primary operations.
By default, secondary operations are fully evaluated (`Operation.eval`
is used) if all the operands are scalar or `Constant` objects (see also
`context.SecondaryOperationEvaluation`). On the other hand, `pre_eval`
is always called in the evaluation (even with symbolic inputs).
This class is not meant to be instantiated but to provide a base
class to define secondary operators.
"""
@classmethod
def pre_eval(cls, *args):
"""Evaluate the operator before `Operation.eval`.
This is an internal method that assumes the list ``args`` has been parsed.
"""
return None
@classmethod
class PartialOperation(object):
"""Represent bit-vector operations with fixed operands.
Given a base operator :math:`(x, y) \mapsto f(x, y)`,
a partial operator is a function obtained by fixing some
of the inputs to constants, e.g., :math:`x \mapsto f(x, y=0)`.
This class is not meant to be instantiated but to provide a base
class for bit-vector operations with fixed operands generated
through `make_partial_operation`.
`PartialOperation` subclasses generated by `make_partial_operation`
are also subclasses of `PrimaryOperation` or `SecondaryOperation`
depending on the type of the base operator.
Attributes:
base_op: a subclass of `Operation` denoting the base operator.
fixed_args: a `tuple` with the same length as the number of operands
of the base function containing ``None``, scalar or `Constant` elements.
If ``fixed_args[i]`` is ``None``, the i-th operand is not fixed;
otherwise, the i-th operand is replaced with ``fixed_args[i]``.
"""
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@functools.lru_cache(maxsize=None) # temporary hack to create singletons
def make_partial_operation(base_op, fixed_args):
"""Return a new `PartialOperation` subclass with the given base operator and fixed arguments.
The argument ``fixed_args`` is a `tuple`, with the same length as
the number of operands of the base operator, containing ``None``,
scalar or `Constant` elements. If ``fixed_args[i]`` is ``None``,
the i-th operand is not fixed; otherwise, the i-th operand is
replaced with ``fixed_args[i]``.
The resulting class is also a subclass of `PrimaryOperation` or
`SecondaryOperation`, depending on the type of the base operator.
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.operation import BvAdd, Extract, make_partial_operation
>>> from cascada.bitvector.secondaryop import BvMaj
>>> BvAddCte = make_partial_operation(BvAdd, tuple([None, Constant(1, 4)]))
>>> BvAddCte.__name__
'BvAdd_{·, 0x1}'
>>> expr = BvAddCte(Variable("a", 4))
>>> expr
a + 0x1
>>> expr.vrepr()
"make_partial_operation(BvAdd, (None, Constant(0b0001, width=4)))(Variable('a', width=4))"
>>> ExtractLSB = make_partial_operation(Extract, tuple([None, 0, 0]))
>>> ExtractLSB.__name__
'Extract_{·, 0, 0}'
>>> ExtractLSB(Extract(Variable("a", 4), 2, 0)) # result is simplified
a[0]
>>> BvCteMaj = make_partial_operation(BvMaj, tuple([None, None, Constant(1, 4)]))
>>> BvCteMaj.__name__
'BvMaj_{·, ·, 0x1}'
>>> BvCteMaj(Variable("a", 4), Variable("a", 4))
a
>>> expr = BvCteMaj(Variable("a", 4), Variable("b", 4))
>>> expr
BvMaj_{·, ·, 0x1}(a, b)
>>> expr.doit()
(a & b) | (a & 0x1) | (b & 0x1)
>>> expr.memoization_table()
MemoizationTable([(x0, a & b), (x1, a & 0x1), (x2, x0 | x1), (x3, b & 0x1), (x4, x2 | x3), (x5, Id(x4))])
>>> BvCteMaj_v2 = make_partial_operation(BvCteMaj, tuple([Constant(2, 4), None]))
>>> BvCteMaj_v2.__name__
'BvMaj_{0x2, ·, 0x1}'
>>> BvCteMaj_v2(Variable("a", 4))
BvMaj_{0x2, ·, 0x1}(a)
"""
assert issubclass(base_op, Operation)
assert isinstance(fixed_args, tuple)
assert len(fixed_args) == sum(base_op.arity)
assert all(arg is None or isinstance(arg, (int, core.Constant)) for arg in fixed_args)
# at least one None and one non-None in fixed_args
assert None in fixed_args
assert any(arg is not None for arg in fixed_args)
if issubclass(base_op, PartialOperation):
assert len(fixed_args) == len([a for a in base_op.fixed_args if a is None])
combined_fixed_args = list(base_op.fixed_args)
counter_None = 0
for i in range(len(combined_fixed_args)):
if combined_fixed_args[i] is None:
combined_fixed_args[i] = fixed_args[counter_None]
counter_None += 1
assert counter_None == len(fixed_args)
return make_partial_operation(base_op.base_op, tuple(combined_fixed_args))
if hasattr(base_op, "operand_types"):
operand_types = base_op.operand_types
else:
operand_types = [core.Term for _ in range(len(fixed_args))]
num_terms_fixed = 0
num_scalars_fixed = 0
free_operand_types = []
fixed_args_str = []
for arg, type_arg in zip(fixed_args, operand_types):
if arg is None:
free_operand_types.append(type_arg)
fixed_args_str.append("·")
continue
assert isinstance(arg, type_arg)
if type_arg == int:
num_scalars_fixed += 1
elif isinstance(arg, core.Term):
num_terms_fixed += 1
else:
assert False
fixed_args_str.append(str(arg))
if issubclass(base_op, PrimaryOperation):
parent_class = PrimaryOperation
else:
assert issubclass(base_op, SecondaryOperation)
parent_class = SecondaryOperation
_base_op = base_op
_fixed_args = fixed_args
_arity = [base_op.arity[0] - num_terms_fixed, base_op.arity[1] - num_scalars_fixed]
assert _arity[0] >= 1
_is_symmetric = base_op.is_symmetric
_is_simple = base_op.is_simple and sum(_arity) > 1
_operand_types = free_operand_types
# avoid subclassing base_op (may introduce side effects)
if hasattr(base_op, "alt_name"):
MyPartialOperation.alt_name = f"{base_op.alt_name}_{{{', '.join(fixed_args_str)}}}"
MyPartialOperation.__name__ = f"{base_op.__name__}_{{{', '.join(fixed_args_str)}}}"
assert issubclass(parent_class, PrimaryOperation) == \
issubclass(MyPartialOperation, PrimaryOperation)
assert issubclass(parent_class, SecondaryOperation) == \
issubclass(MyPartialOperation, SecondaryOperation)
return MyPartialOperation
# Bitwise operators
class BvNot(PrimaryOperation):
"""Bitwise negation operation.
It overrides the operator ~. See `PrimaryOperation` for more information.
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.operation import BvNot
>>> BvNot(Constant(0b1010101, 7))
0b0101010
>>> ~Constant(0b1010101, 7)
0b0101010
>>> ~Variable("x", 8)
~x
"""
arity = [1, 0]
is_symmetric = False
unary_symbol = "~"
@classmethod
@classmethod
class BvAnd(PrimaryOperation):
    """Bitwise AND (logical conjunction) operation.
    It overrides the operator & and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvAnd
    >>> BvAnd(Constant(5, 8), Constant(3, 8))
    0x01
    >>> BvAnd(Constant(5, 8), 3)
    0x01
    >>> Constant(5, 8) & 3
    0x01
    >>> Variable("x", 8) & Variable("y", 8)
    x & y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "&"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvOr(PrimaryOperation):
    """Bitwise OR (logical disjunction) operation.
    It overrides the operator | and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvOr
    >>> BvOr(Constant(5, 8), Constant(3, 8))
    0x07
    >>> BvOr(Constant(5, 8), 3)
    0x07
    >>> Constant(5, 8) | 3
    0x07
    >>> Variable("x", 8) | Variable("y", 8)
    x | y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "|"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvXor(PrimaryOperation):
    """Bitwise XOR (exclusive-or) operation.
    It overrides the operator ^ and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvXor
    >>> BvXor(Constant(5, 8), Constant(3, 8))
    0x06
    >>> BvXor(Constant(5, 8), 3)
    0x06
    >>> Constant(5, 8) ^ 3
    0x06
    >>> Variable("x", 8) ^ Variable("y", 8)
    x ^ y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "^"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
# Relational operators
class BvComp(PrimaryOperation):
    """Equality operator.
    Provides Automatic Constant Conversion. See `PrimaryOperation` for more
    information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvComp
    >>> BvComp(Constant(1, 8), Constant(2, 8))
    0b0
    >>> BvComp(Constant(1, 8), 2)
    0b0
    >>> x, y = Variable("x", 8), Variable("y", 8)
    >>> BvComp(Constant(1, 8), Variable("y", 8))
    0x01 == y
    >>> bool(BvComp(x + y, y + x))
    True
    The operator == is used for exact structural equality testing and
    it returns either True or False. On the other hand, BvComp
    performs symbolic equality testing and it leaves the relation unevaluated
    if it cannot prove the objects are equal (or unequal).
    >>> x == y
    False
    >>> BvComp(x, y)  # symbolic equality
    x == y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "=="
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUlt(PrimaryOperation):
    """Unsigned less than operator.
    It overrides < and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUlt
    >>> BvUlt(Constant(1, 8), Constant(2, 8))
    0b1
    >>> BvUlt(Constant(1, 8), 2)
    0b1
    >>> Constant(1, 8) < 2
    0b1
    >>> Constant(1, 8) < Variable("y", 8)
    0x01 < y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "<"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUle(PrimaryOperation):
    """Unsigned less than or equal operator.
    It overrides <= and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUle
    >>> BvUle(Constant(2, 8), Constant(2, 8))
    0b1
    >>> BvUle(Constant(2, 8), 2)
    0b1
    >>> Constant(2, 8) <= 2
    0b1
    >>> Constant(2, 8) <= Variable("y", 8)
    0x02 <= y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "<="
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUgt(PrimaryOperation):
    """Unsigned greater than operator.
    It overrides > and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUgt
    >>> BvUgt(Constant(1, 8), Constant(2, 8))
    0b0
    >>> BvUgt(Constant(1, 8), 2)
    0b0
    >>> Constant(1, 8) > 2
    0b0
    >>> Constant(1, 8) > Variable("y", 8)
    0x01 > y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = ">"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUge(PrimaryOperation):
    """Unsigned greater than or equal operator.
    It overrides >= and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUge
    >>> BvUge(Constant(2, 8), Constant(2, 8))
    0b1
    >>> BvUge(Constant(2, 8), 2)
    0b1
    >>> Constant(2, 8) >= 2
    0b1
    >>> Constant(2, 8) >= Variable("y", 8)
    0x02 >= y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = ">="
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
# Shifts operators
class BvShl(PrimaryOperation):
    """Shift left operation.
    It overrides << and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvShl
    >>> BvShl(Constant(0b10001, 5), Constant(1, 5))
    0b00010
    >>> BvShl(Constant(0b10001, 5), 1)
    0b00010
    >>> Constant(0b10001, 5) << 1
    0b00010
    >>> Variable("x", 8) << Variable("y", 8)
    x << y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "<<"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvLshr(PrimaryOperation):
    """Logical right shift operation.
    It overrides >> and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvLshr
    >>> BvLshr(Constant(0b10001, 5), Constant(1, 5))
    0b01000
    >>> BvLshr(Constant(0b10001, 5), 1)
    0b01000
    >>> Constant(0b10001, 5) >> 1
    0b01000
    >>> Variable("x", 8) >> Variable("y", 8)
    x >> y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = ">>"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class RotateLeft(PrimaryOperation):
    """Circular left rotation operation.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import RotateLeft
    >>> RotateLeft(Constant(150, 8), 2)
    0x5a
    >>> RotateLeft(Variable("x", 8), 2)
    x <<< 2
    """
    arity = [1, 1]  # one bit-vector operand and one int rotation amount
    is_symmetric = False
    infix_symbol = "<<<"
    operand_types = [core.Term, int]
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class RotateRight(PrimaryOperation):
    """Circular right rotation operation.
    It provides Automatic Constant Conversion. See `PrimaryOperation` for more
    information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import RotateRight
    >>> RotateRight(Constant(150, 8), 3)
    0xd2
    >>> RotateRight(Variable("x", 8), 3)
    x >>> 3
    """
    arity = [1, 1]  # one bit-vector operand and one int rotation amount
    is_symmetric = False
    infix_symbol = ">>>"
    operand_types = [core.Term, int]
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
# Others
class Ite(PrimaryOperation):
    """If-then-else operator.
    ``Ite(b, x, y)`` returns ``x`` if ``b == 0b1`` and ``y`` otherwise.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import Ite
    >>> Ite(Constant(0, 1), Constant(0b11, 2), Constant(0b00, 2))
    0b00
    >>> Ite(Constant(1, 1), Constant(0x1, 4), Constant(0x0, 4))
    0x1
    """
    arity = [3, 0]  # condition, then-branch, else-branch (all Terms)
    is_symmetric = False
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class Extract(PrimaryOperation):
    """Extraction of bits.
    ``Extract(t, i, j)`` extracts the bits from position ``i`` down to
    position ``j`` (end points included, position 0 corresponding
    to the least significant bit).
    It overrides the operation [], that is, ``Extract(t, i, j)``
    is equivalent to ``t[i:j]``.
    Note that the indices can be omitted when they point the most
    significant bit or the least significant bit.
    For example, if ``t`` is a bit-vector of length ``n``,
    then ``t[n-1:j] = t[:j]`` and ``t[i:0] = t[i:]``
    Warning:
    In python, given a list ``l``, ``l[i:j]`` denotes the elements
    from position ``i`` up to (but not included) position ``j``.
    Note that with bit-vectors, the order of the arguments is
    swapped and both end points are included.
    For example, for a given list ``l`` and bit-vector ``t``,
    ``l[0:1] == l[0]`` and ``t[1:0] == (t[0], t[1])``.
    ::
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import Extract
    >>> Extract(Constant(0b11100, 5), 4, 2,)
    0b111
    >>> Constant(0b11100, 5)[4:2]
    0b111
    >>> Variable("x", 8)[4:2]
    x[4:2]
    >>> Variable("x", 8)[7:0]
    x
    """
    arity = [1, 2]  # one bit-vector operand and two int positions (i, j)
    is_symmetric = False
    operand_types = [core.Term, int, int]
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class Concat(PrimaryOperation):
    """Concatenation operation.
    Given the bit-vectors :math:`(x_{n-1}, \dots, x_0)` and
    :math:`(y_{m-1}, \dots, y_0)`, ``Concat(x, y)`` returns the bit-vector
    :math:`(x_{n-1}, \dots, x_0, y_{m-1}, \dots, y_0)`.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import Concat
    >>> Concat(Constant(0x12, 8), Constant(0x345, 12))
    0x12345
    >>> Concat(Variable("x", 8), Variable("y", 8))
    x :: y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    infix_symbol = "::"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
# Arithmetic operators
class BvNeg(PrimaryOperation):
    """Unary minus operation.
    It overrides the unary operator -. See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvNeg
    >>> BvNeg(Constant(1, 8))
    0xff
    >>> -Constant(1, 8)
    0xff
    >>> BvNeg(Variable("x", 8))
    -x
    """
    arity = [1, 0]  # [term operands, scalar operands]
    is_symmetric = False
    unary_symbol = "-"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
class BvAdd(PrimaryOperation):
    """Modular addition operation.
    It overrides the operator + and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvAdd
    >>> BvAdd(Constant(1, 8), Constant(2, 8))
    0x03
    >>> BvAdd(Constant(1, 8), 2)
    0x03
    >>> Constant(1, 8) + 2
    0x03
    >>> Variable("x", 8) + Variable("y", 8)
    x + y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "+"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvSub(PrimaryOperation):
    """Modular subtraction operation.
    It overrides the operator - and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvSub
    >>> BvSub(Constant(1, 8), Constant(2, 8))
    0xff
    >>> BvSub(Constant(1, 8), 2)
    0xff
    >>> Constant(1, 8) - 2
    0xff
    >>> Variable("x", 8) - Variable("y", 8)
    x - y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "-"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvMul(PrimaryOperation):
    """Modular multiplication operation.
    It overrides the operator * and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvMul
    >>> BvMul(Constant(4, 8), Constant(3, 8))
    0x0c
    >>> BvMul(Constant(4, 8), 3)
    0x0c
    >>> Constant(4, 8) * 3
    0x0c
    >>> Variable("x", 8) * Variable("y", 8)
    x * y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = True
    is_simple = True
    infix_symbol = "*"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUdiv(PrimaryOperation):
    """Unsigned and truncated division operation.
    It overrides the operator / and provides Automatic Constant Conversion.
    See `PrimaryOperation` for more information.
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUdiv
    >>> BvUdiv(Constant(0x0c, 8), Constant(3, 8))
    0x04
    >>> BvUdiv(Constant(0x0c, 8), 3)
    0x04
    >>> Constant(0x0c, 8) / 3
    0x04
    >>> Variable("x", 8) / Variable("y", 8)
    x / y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "/"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvUrem(PrimaryOperation):
    """Unsigned remainder (modulus) operation.
    Usage:
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvUrem
    >>> BvUrem(Constant(0x0d, 8), Constant(3, 8))
    0x01
    >>> BvUrem(Constant(0x0d, 8), 3)
    0x01
    >>> Constant(0x0d, 8) % 3
    0x01
    >>> Variable("x", 8) % Variable("y", 8)
    x % y
    """
    arity = [2, 0]  # [term operands, scalar operands]
    is_symmetric = False
    is_simple = True
    infix_symbol = "%"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
    @classmethod
class BvIdentity(PrimaryOperation):
    """The identity operation.
    Return the same value when the input is constant and
    a `BvIdentity` object when the input is symbolic:
    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import BvIdentity
    >>> BvIdentity(Constant(0x1, 4))
    0x1
    >>> BvIdentity(Variable("x", 8))
    Id(x)
    """
    # 'Identity' is already taken by SymPy
    arity = [1, 0]  # [term operands, scalar operands]
    is_symmetric = False
    alt_name = "Id"
    # NOTE(review): the decorated classmethod bodies are missing from this
    # excerpt (bare @classmethod lines); confirm against the full file.
    @classmethod
    @classmethod
# Shortcuts
def zero_extend(x, i):
    """Extend with zeroes preserving the unsigned value.

    Prepends ``i`` zero bits (as new most significant bits) to the
    bit-vector ``x``, so the unsigned value is unchanged.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import zero_extend
    >>> zero_extend(Constant(0x12, 8), 4)
    0x012
    >>> zero_extend(Variable("x", 8), 4)
    0x0 :: x
    """
    assert isinstance(x, core.Term)
    assert isinstance(i, int) and i >= 0
    if i == 0:
        # Nothing to extend; return the term unchanged.
        output = x
    else:
        output = Concat(core.Constant(0, i), x)
    # Sanity-check the resulting width.  (The previous ``assert x.width + i``
    # was a truthiness no-op and never checked anything.)
    assert output.width == x.width + i
    return output
def repeat(x, i):
    """Concatenate a bit-vector with itself a given number of times.

    >>> from cascada.bitvector.core import Constant, Variable
    >>> from cascada.bitvector.operation import repeat
    >>> repeat(Constant(0x1, 4), 4)
    0x1111
    >>> repeat(Variable("x", 8), 4)
    x :: x :: x :: x
    """
    assert isinstance(x, core.Term)
    assert isinstance(i, int) and i >= 1
    # Left-fold Concat over i copies of x, i.e. Concat(Concat(x, x), x)...
    result = x
    for _ in range(i - 1):
        result = Concat(result, x)
    assert result.width == i * x.width
    return result
| [
37811,
15946,
485,
262,
2219,
1643,
12,
31364,
12879,
526,
15931,
198,
11748,
17268,
198,
11748,
1257,
310,
10141,
198,
11748,
340,
861,
10141,
198,
11748,
10688,
198,
198,
6738,
10558,
88,
13,
7295,
1330,
12940,
198,
6738,
10558,
88,
1... | 2.435396 | 14,612 |
from json import dump
from operator import itemgetter
from os import listdir, mkdir, path, remove
from time import sleep
import fitz
from docx2pdf import convert
# from PDFNetPython3 import * ----> python version 3.5-3.8 with working
# from PDFNetPython3.PDFNetPython import Element, Image, PDFDoc, Point
from PyPDF2 import PdfFileReader, generic
from tabula import read_pdf
# site.addsitedir("../../../PDFNetC/Lib")
# Resim yolları resim çıkarma dosyasına yollanır
# def ImageExtractPath(file):
# doc = PDFDoc(file)
# doc.InitSecurityHandler()
# counter = 0
# cos_doc = doc.GetSDFDoc()
# num_objs = cos_doc.XRefSize()
# i = 1
# while i < num_objs:
# obj = cos_doc.GetObj(i)
# if obj is not None and not obj.IsFree() and obj.IsStream():
# # Process only images
# itr = obj.Find("Type")
# if not itr.HasNext() or not itr.Value().GetName() == "XObject":
# i = i + 1
# continue
# itr = obj.Find("Subtype")
# if not itr.HasNext() or not itr.Value().GetName() == "Image":
# i = i + 1
# continue
# image = Image(obj)
# counter += 1
# print("--> Image: " + str(counter))
# print(" Width: " + str(image.GetImageWidth()))
# print(" Height: " + str(image.GetImageHeight()))
# print(" BPC: " + str(image.GetBitsPerComponent()))
# fname = "image_extract_" + str(counter)
# _files = listdir(".")
# if not path.isdir("images"):
# mkdir("images")
# _path = path.abspath("images") + "\\" + fname
# image.Export(_path)
# i = i + 1
# doc.Close()
# print("Done.")
# print("resimler başarılı bir şekilde çekildi")
# print("----------------------------------")
# counter = 0
# # resimleri buradan çıkarmaya çalışır
# def ImageExtract(reader):
# element = reader.Next()
# while element is not None:
# if (element.GetType() == Element.e_image or
# element.GetType() == Element.e_inline_image):
# global counter
# counter += 1
# print("--> Image: " + str(counter))
# print(" Width: " + str(element.GetImageWidth()))
# print(" Height: " + str(element.GetImageHeight()))
# print(" BPC: " + str(element.GetBitsPerComponent()))
# ctm = element.GetCTM()
# x2 = 1
# y2 = 1
# pt = Point(x2, y2)
# point = ctm.Mult(pt)
# print(" Coords: x1=%.2f, y1=%.2f, x2=%.2f, y2=%.2f" %
# (ctm.m_h, ctm.m_v, point.x, point.y))
# if element.GetType() == Element.e_image:
# image = Image(element.GetXObject())
# fname = "image_" + str(counter)
# _path = path.abspath("images") + "\\" + fname
# image.Export(_path)
# elif element.GetType() == Element.e_form:
# reader.FormBegin()
# ImageExtract(reader)
# reader.End()
# element = reader.Next()
# paragrafları Empty(),Punctuation() ve Quotation() fonksiyonuna yollar hataları listeler
# Start-up banner of the thesis-checking tool ("Tez Control Sistemine").
# This is a runtime string shown to the user (Turkish text included), so its
# contents must not be altered or translated.  The last line presumably maps
# keyboard answers [E]/[H] to evet(yes)/hayir(no) -- confirm with the tool's
# input handling.
page = """
 ************* ********** ***********
 *** ** **
 *** ***** **
 *** ** **
 *** ********** ***********
 Tez Control Sistemine Hosteled
 Sürüm 12.08.2020645198
 Software Engineer Ümit KOÇ
Not:Tez Sistemine başlatabilmemiz için lütfen word dosyanızı belirli bir klasöre yerleştiriniz.
[E]=e, [H]=h
"""
# Script entry point; `main` is defined elsewhere in this file (not visible
# in this excerpt).
if __name__ == "__main__":
    main()
| [
6738,
33918,
1330,
10285,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
6738,
28686,
1330,
1351,
15908,
11,
33480,
15908,
11,
3108,
11,
4781,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
4197,
89,
198,
6738,
2205,
87,
17,
12315,
1330,
... | 1.863955 | 2,139 |
"""Verify the structure of YAML-formatted configuration files.
The yamlfig package provides developers with a framework for defining
rules that test and verify a config file's structure. Those rules are
captured in a parser object which can be applied to YAML-based config
files to validate them.
In particular, this module enables a developer to:
- define which fields are required, optional, or will be assigned
default values if omitted;
- declare types for those fields (e.g., str, int, date, dict, list,
etc.);
- run arbitrary functions to test the values in each field (e.g.,
regular expressions matches or file-path-existence checks).
After a config file is parsed, validated, and accepted, the returned
object can be used to access the field values with some confidence
that they exist, are of the expected type, and have whatever other
properties the rules established. If a config file is rejected, an
error explaining the violated rule is raised.
This package was inspired by the similar capability that argparse
brought to command-line argument parsing.
"""
from __future__ import absolute_import
from .__version__ import __version__
from .base import *
from . import test
| [
37811,
13414,
1958,
262,
4645,
286,
575,
2390,
43,
12,
687,
16898,
8398,
3696,
13,
198,
198,
464,
331,
321,
1652,
328,
5301,
3769,
6505,
351,
257,
9355,
329,
16215,
198,
38785,
326,
1332,
290,
11767,
257,
4566,
2393,
338,
4645,
13,
... | 4.094276 | 297 |
import numpy as np
import SBCcode as sbc
import os
import re
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as wb
# import ipdb
# Reconstruction-output categories (file-name prefixes) to merge.
modules = [
    'AcousticAnalysis_',
    'DytranAnalysis_',
    'EventAnalysis_',
    'HistoryAnalysis_',
    'ImageAnalysis_',
    'TimingAnalysis_',
    'PMTfastDAQalignment_']

# recondir = '/bluearc/storage/recon/devel/SBC-17/output'
recondir = '/pnfs/coupp/persistent/grid_output/SBC-17/output'
merge_dir = '/bluearc/storage/recon/devel/SBC-17/output'

# Runs are sub-directories named '<yyyymmdd>_<seq>' that are non-empty.
# NOTE: this must be a materialized list, not a lazy `filter` object -- in
# Python 3 a filter iterator would be exhausted after the first module of the
# per-module loop below, and every later module would silently merge 0 runs.
runlist = [fn for fn in os.listdir(recondir)
           if re.search(r'^\d+_\d+$', fn) is not None
           and os.path.isdir(os.path.join(recondir, fn))
           and len(os.listdir(os.path.join(recondir, fn))) > 0]
print(runlist)

# Merge one module (category) at a time to limit peak memory use.
# (The historical per-module bad-run lists and the one-piezo run list that
# used to live here as commented-out code have been removed; re-populate
# `bad_list` below if specific runs must be excluded.)
for module in modules:
    bad_list = []  # runs to exclude for this module
    bad_runs = set(bad_list)
    print("Loading " + module)
    merge_out = []
    shapes0 = []
    for runname in runlist:
        if runname in bad_runs:
            print(runname + ' is in bad_list')
            continue
        # Build a sortable run serial number yyyymmddsss from 'yyyymmdd_s'.
        # Plain Python ints are used on purpose: np.int32 overflows at ~2.1e9
        # and 20170619 * 1000 already exceeds that.
        runid = [int(part) for part in runname.split('_')]
        runsn = runid[0] * 1000 + runid[1]
        if (runsn >= 20170619003) and (runsn < 20170901000):
            fpath = os.path.join(recondir, runname, module + runname + '.bin')
            if not os.path.exists(fpath):
                print("nonexis file: " + fpath)
                continue
            if os.stat(fpath).st_size == 0:
                print("zero size file: " + fpath)
                continue
            data = sbc.read_bin(fpath)
            # Runs recorded with a single piezo get zero-padded so every run
            # shares the same array shapes before merging.
            if module == 'AcousticAnalysis_' and len(data['piezo_list'].shape) == 1:
                size = [data['piezo_list'].shape[0], 2]
                tmp = data['piezo_list']
                data['piezo_list'] = np.zeros(size, dtype=np.int32)
                data['piezo_list'][:, 0] = tmp
                tmp = data['bubble_t0']
                data['bubble_t0'] = np.zeros(size, dtype=np.float64)
                data['bubble_t0'][:, 0] = tmp
                tmp = data['peak_t0']
                data['peak_t0'] = np.zeros(size, dtype=np.float64)
                data['peak_t0'][:, 0] = tmp
                size = list(data['piezoE'].shape)
                size[1] += 1
                tmp = data['piezoE']
                data['piezoE'] = np.zeros(size, dtype=np.float64)
                data['piezoE'][:, 0, :, :] = tmp[:, 0, :, :]
            # Same idea for the timing module: pad a second (zeroed) channel.
            if module == 'TimingAnalysis_' and len(data['PMTmatch_t0'].shape) == 1:
                var_names = ['CAMstate', 'PMTmatch_area', 'PMTmatch_area_nobs',
                             'PMTmatch_baseline', 'PMTmatch_baserms',
                             'PMTmatch_coinc', 'PMTmatch_ix', 'PMTmatch_lag',
                             'PMTmatch_max', 'PMTmatch_min',
                             'PMTmatch_pulse_area', 'PMTmatch_pulse_height',
                             'PMTmatch_pulse_t10', 'PMTmatch_pulse_t90',
                             'PMTmatch_pulse_tend', 'PMTmatch_pulse_tpeak',
                             'PMTmatch_pulse_tstart', 'PMTmatch_t0',
                             'nPMThits_fastdaq', 'nVetohits_fastdaq',
                             't_nearestPMThit', 't_nearestVetohit']
                for var_name in var_names:
                    if len(data[var_name].shape) == 1:
                        data[var_name] = np.stack(
                            (data[var_name],
                             np.zeros(data[var_name].shape,
                                      data[var_name].dtype)),
                            axis=1)
                    elif len(data[var_name].shape) > 1:
                        data[var_name] = np.concatenate(
                            (data[var_name],
                             np.zeros(data[var_name].shape,
                                      data[var_name].dtype)),
                            axis=1)
            if module == 'TimingAnalysis_':
                # Fix the int32/int64 inconsistency between runs.
                data['PMTmatch_ix'] = np.int64(data['PMTmatch_ix'])
            shapes = [(x, data[x].dtype, data[x].shape) for x in data.keys()]
            if len(shapes0) < 1:
                shapes0 = shapes
            print(runname + "\t" + str(shapes))
            merge_out.append(data)
    merge_name = 'all'
    # Row definition passed to the binary writer; a few modules use
    # non-default values.
    rowdef = 1
    if module in {'PMTpulseAnalysis_', 'PMTpheAnalysis_'}:
        rowdef = 7
    if module in {'HumanGetBub_'}:
        rowdef = 8
    print("Writing " + module)
    wb(os.path.join(merge_dir, module + merge_name + '.bin'), merge_out,
       rowdef=rowdef, initialkeys=['runid', 'ev'], drop_first_dim=True)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
311,
2749,
8189,
355,
264,
15630,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
311,
2749,
8189,
13,
6601,
12885,
1359,
13,
16594,
33,
3219,
1330,
19430,
33,
3219,
45,
83,
29291,
8979,
355,
... | 1.743671 | 4,108 |
"""
Demonstration of Python 3 string operations.

The standard sequence types in Python 3 are:
str (text), list, tuple, range, and the binary
sequences bytes and bytearray.
"""
string1 = "he's "
string2 = "probably "
string3 = "pining "
string4 = "for the "
string5 = "fjords"
print(string1, string2, string3, string4, string5)
print(string1 * 5)  # string repetition: "he's " five times
print(string1 * (5+4))  # parentheses force the addition before repetition
# membership test: prints True because "day" is a substring of "friday"
print("day" in "friday")
| [
37811,
198,
37906,
18,
468,
1115,
8379,
2099,
198,
10100,
198,
4868,
198,
83,
29291,
198,
9521,
198,
26327,
290,
18022,
7177,
198,
37811,
198,
198,
8841,
16,
796,
366,
258,
338,
366,
198,
8841,
17,
796,
366,
26949,
366,
198,
8841,
1... | 2.883721 | 129 |
from flask import jsonify, make_response, request
from app.V1.views import bluprint
from app.V1.models.appmodels import PoliticalOfficeModel
# NOTE(review): the view functions these route decorators wrap are missing
# from this excerpt (bare decorators are not valid on their own); confirm
# against the full file.  "bluprint" (without the 'e') is the name exported
# by app.V1.views, so the spelling must stay consistent with that module.
@bluprint.route("/offices", methods= ["GET"] )
@bluprint.route("/offices", methods= ["POST"])
@bluprint.route("/offices/<int:office_id>", methods=["GET"])
@bluprint.route("/offices/<int:office_id>", methods=["GET"]) | [
6738,
42903,
1330,
33918,
1958,
11,
787,
62,
26209,
11,
2581,
198,
6738,
598,
13,
53,
16,
13,
33571,
1330,
698,
929,
22272,
198,
6738,
598,
13,
53,
16,
13,
27530,
13,
1324,
27530,
1330,
14611,
27743,
17633,
628,
198,
31,
2436,
929,
... | 2.912621 | 103 |
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Look up the libcloud driver class for the Rackspace provider and
# instantiate it with the account credentials and the target region.
rackspace_driver_cls = get_driver(Provider.RACKSPACE)
driver = rackspace_driver_cls("username", "api key", region="iad")
| [
6738,
9195,
17721,
13,
5589,
1133,
13,
19199,
1330,
32549,
198,
6738,
9195,
17721,
13,
5589,
1133,
13,
15234,
4157,
1330,
651,
62,
26230,
198,
198,
565,
82,
796,
651,
62,
26230,
7,
29495,
13,
49,
8120,
4303,
11598,
8,
198,
26230,
79... | 3.137931 | 58 |
from typing import Dict, Tuple, Optional, List
import os
from abc import ABCMeta, abstractmethod
import numpy as np
import json
from scipy.sparse import csr_matrix, dok_matrix
import panqec
from panqec.bpauli import bcommute, get_effective_error
from panqec import bsparse
os.environ['PANQEC_ROOT_DIR'] = os.path.dirname(panqec.__file__)
Operator = Dict[Tuple, str] # Coordinate to pauli ('X', 'Y' or 'Z')
class StabilizerCode(metaclass=ABCMeta):
"""Abstract class for generic stabilizer codes (CSS or not)
Any subclass should override the following four methods:
- get_qubit_coordinates() to define all the coordinates in the lattice
that contain qubits
- get_stabilizer_coordinates() to define all the coordinates in the lattice
that contain stabilizers
- qubit_axis(location) to return the axis of a qubit at a given location
(when qubit have an orientation in space, for instance when they are
edges)
Using only those methods, a StabilizerCode will then automatically create
the corresponding parity-check matrix (in self.stabilizers) and can be used
to make a visualization in the GUI or calculate thresholds.
"""
X_AXIS = 0
Y_AXIS = 1
Z_AXIS = 2
    def __init__(
        self, L_x: int,
        L_y: Optional[int] = None,
        L_z: Optional[int] = None,
        deformed_axis: Optional[str] = None
    ):
        """Constructor for the StabilizerCode class
        Parameters
        ----------
        L_x : int
            Dimension of the lattice in the x direction (or in all directions
            if L_y and L_z are not given)
        L_y: int, optional
            Dimension of the lattice in the y direction
        L_z: int, optional
            Dimension of the lattice in the z direction
        deformed_axis: str, optional
            If given, will determine whether to apply a Clifford deformation on
            this axis.
            The axis is a string in ['x', 'y', 'z'].
            Can be used to easily create codes such as the XZZX surface code
            (arXiv: 2009.07851)
        """
        # Omitted lattice dimensions default to L_x (square/cubic lattice).
        if L_y is None:
            L_y = L_x
        if L_z is None:
            L_z = L_x
        self._deformed_axis = deformed_axis
        self._size: Tuple
        # `dimension` is an abstract property supplied by the subclass.
        if self.dimension == 2:
            self._size = (L_x, L_y)
        else:
            self._size = (L_x, L_y, L_z)
        # All attributes below are lazy caches, filled on first access of the
        # corresponding property (empty list/dict/sparse-row or None means
        # "not computed yet").
        self._qubit_coordinates: List = []
        self._stabilizer_coordinates: List[Tuple] = []
        self._qubit_index: Dict[Tuple, int] = {}
        self._stabilizer_index: Dict[Tuple, int] = {}
        self._stabilizer_matrix = bsparse.empty_row(2*self.n)
        self._Hx = bsparse.empty_row(self.n)
        self._Hz = bsparse.empty_row(self.n)
        self._logicals_x: Optional[np.ndarray] = None
        self._logicals_z: Optional[np.ndarray] = None
        self._is_css: Optional[bool] = None
        self._x_indices: Optional[np.ndarray] = None
        self._z_indices: Optional[np.ndarray] = None
        self._d: Optional[int] = None
        self._stabilizer_types: Optional[List[str]] = None
        # Hex color strings ('0x......') used for the GUI visualization
        # mentioned in the class docstring.
        self.colormap = {'red': '0xFF4B3E',
                         'blue': '0x48BEFF',
                         'green': '0x058C42',
                         'pink': '0xffbcbc',
                         'white': '0xf2f2fc',
                         'gold': '0xf1c232',
                         'coral': '0xFA824C',
                         'light-yellow': '0xFAFAC6',
                         'salmon': '0xe79e90',
                         'light-orange': '0xFA824C',
                         'orange': '0xfa7921'}
    # Abstract: subclasses must define the spatial dimension; __init__ uses
    # it to decide between a 2-tuple and a 3-tuple lattice size.
    @property
    @abstractmethod
    def dimension(self) -> int:
        """Dimension of the code (usually 2 or 3)"""
    # Abstract: subclasses return a human-readable identifier that includes
    # the lattice dimensions (see the docstring example).
    @property
    @abstractmethod
    def label(self) -> str:
        """Label uniquely identifying a code, including its lattice dimensions
        Example: 'Toric 3D {Lx}x{Ly}x{Lz}'
        """
@property
def id(self) -> str:
"""Returns a string identifying the class (usually the code name)"""
return self.__class__.__name__
@property
def n(self) -> int:
"""Number of physical qubits"""
return len(self.qubit_coordinates)
@property
def k(self) -> int:
"""Number of logical qubits"""
return self.logicals_x.shape[0]
    @property
    def d(self) -> int:
        """Distance of the code"""
        # Computed once and cached in self._d.
        if self._d is None:
            # Weight of a logical operator = number of qubits on which it acts
            # non-trivially, i.e. where either the X half (columns [0, n)) or
            # the Z half (columns [n, 2n)) of its symplectic vector is set.
            weights_z = np.sum(
                np.logical_or(
                    self.logicals_z[:, :self.n],
                    self.logicals_z[:, self.n:]
                ),
                axis=1
            )
            weights_x = np.sum(
                np.logical_or(
                    self.logicals_x[:, :self.n],
                    self.logicals_x[:, self.n:]
                ), axis=1
            )
            # NOTE(review): this is the minimum weight over the *stored*
            # logical representatives, not over all equivalent logicals --
            # confirm that subclasses provide minimum-weight representatives.
            self._d = min(np.min(weights_x), np.min(weights_z))
        return self._d
@property
def qubit_coordinates(self) -> List[Tuple]:
"""List of all the coordinates that contain a qubit"""
if len(self._qubit_coordinates) == 0:
self._qubit_coordinates = self.get_qubit_coordinates()
return self._qubit_coordinates
@property
def stabilizer_coordinates(self) -> List[Tuple]:
"""List of all the coordinates that contain a stabilizer"""
if len(self._stabilizer_coordinates) == 0:
self._stabilizer_coordinates = self.get_stabilizer_coordinates()
return self._stabilizer_coordinates
@property
def qubit_index(self) -> Dict[Tuple, int]:
"""Dictionary that assigns an index to a given qubit location"""
if len(self._qubit_index) == 0:
self._qubit_index = {
loc: i for i, loc in enumerate(self.qubit_coordinates)
}
return self._qubit_index
@property
def stabilizer_index(self) -> Dict[Tuple, int]:
"""Dictionary that assigns an index to a given stabilizer location"""
if len(self._stabilizer_index) == 0:
self._stabilizer_index = {
loc: i for i, loc in enumerate(self.stabilizer_coordinates)
}
return self._stabilizer_index
@property
def n_stabilizers(self) -> int:
"""Number of stabilizer generators"""
return len(self.stabilizer_index)
@property
def logicals_x(self) -> np.ndarray:
"""Logical X operator, as a k x 2n sparse matrix in the binary
symplectic format, where k is the number of logical X operators,
and n the number of qubits.
"""
if self._logicals_x is None:
logical_ops = self.get_logicals_x()
k = len(logical_ops)
self._logicals_x = np.zeros((k, 2*self.n), dtype='uint8')
for i, logical_op in enumerate(logical_ops):
self._logicals_x[i] = self.to_bsf(logical_op)
return self._logicals_x
@property
def logicals_z(self) -> np.ndarray:
"""Logical Z operators in the binary symplectic format.
It is a sparse matrix of dimension k x 2n, where k is the number
of Z logicals and n the number of qubits.
"""
if self._logicals_z is None:
logical_ops = self.get_logicals_z()
k = len(logical_ops)
self._logicals_z = np.zeros((k, 2*self.n), dtype='uint8')
for i, logical_op in enumerate(logical_ops):
self._logicals_z[i] = self.to_bsf(logical_op)
return self._logicals_z
@property
def is_css(self) -> bool:
"""Determines if a code is CSS, i.e. if it has separate X
and Z stabilizers
"""
if self._is_css is None:
self._is_css = not np.any(
np.logical_and(self.x_indices, self.z_indices)
)
return self._is_css
    @property
    def stabilizer_matrix(self) -> csr_matrix:
        """Parity-check matrix of the code in the binary symplectic format.
        It is a sparse matrix of dimension k x 2n, where k is the total number
        of stabilizers and n the number of qubits
        """
        # Built once; an empty sparse row marks "not computed yet".
        if bsparse.is_empty(self._stabilizer_matrix):
            # Accumulate entries in a plain dict first and bulk-update the
            # dok_matrix at the end, instead of writing dok entries one by one.
            sparse_dict: Dict = dict()
            self._stabilizer_matrix = dok_matrix(
                (self.n_stabilizers, 2*self.n),
                dtype='uint8'
            )
            for i_stab, stabilizer_location in enumerate(
                self.stabilizer_coordinates
            ):
                stabilizer_op = self.get_stabilizer(
                    stabilizer_location, deformed_axis=self._deformed_axis
                )
                # X support goes in columns [0, n), Z support in [n, 2n);
                # a Pauli Y contributes to both halves.
                for qubit_location in stabilizer_op.keys():
                    if stabilizer_op[qubit_location] in ['X', 'Y']:
                        i_qubit = self.qubit_index[qubit_location]
                        if (i_stab, i_qubit) in sparse_dict.keys():
                            sparse_dict[(i_stab, i_qubit)] += 1
                        else:
                            sparse_dict[(i_stab, i_qubit)] = 1
                    if stabilizer_op[qubit_location] in ['Y', 'Z']:
                        i_qubit = self.n + self.qubit_index[qubit_location]
                        if (i_stab, i_qubit) in sparse_dict.keys():
                            sparse_dict[(i_stab, i_qubit)] += 1
                        else:
                            sparse_dict[(i_stab, i_qubit)] = 1
            # NOTE(review): dok_matrix._update is a private scipy API and may
            # break on scipy upgrades -- confirm the supported scipy versions.
            self._stabilizer_matrix._update(sparse_dict)
            self._stabilizer_matrix = self._stabilizer_matrix.tocsr()
            # Entries were counted; reduce modulo 2 to get GF(2) coefficients.
            self._stabilizer_matrix.data %= 2
        return self._stabilizer_matrix
@property
def size(self) -> Tuple:
"""Dimensions of the lattice."""
return self._size
@property
def Hx(self) -> csr_matrix:
"""Parity-check matrix corresponding to the X stabilizers of the code.
It is a sparse matrix of dimension k x n, where k is the number of
X stabilizers and n the number of qubits.
Works only for CSS codes.
"""
if not self.is_css:
raise ValueError("Impossible to extract Hz: the code is not CSS")
if self._Hx.shape[0] == 0:
H = self.stabilizer_matrix[:, :self.n]
self._Hx = H[self.x_indices]
return self._Hx
@property
def Hz(self) -> csr_matrix:
"""Parity-check matrix corresponding to the Z stabilizers of the code.
It is a sparse matrix of dimension k x n, where k is the number of
Z stabilizers and n the number of qubits.
Works only for CSS codes.
"""
if not self.is_css:
raise ValueError("Impossible to extract Hz: the code is not CSS")
if self._Hz.shape[0] == 0:
H = self.stabilizer_matrix[:, self.n:]
self._Hz = H[self.z_indices]
return self._Hz
@property
def x_indices(self) -> np.ndarray:
"""Indices of the X stabilizers in the parity-check matrix,
as a boolean array s.t. x_indices[i] is True if stabilizer H[i]
only contain X operators and False otherwise"""
if self._x_indices is None:
Hx = self.stabilizer_matrix[:, :self.n]
self._x_indices = (Hx.getnnz(1) > 0)
return self._x_indices
@property
def z_indices(self) -> np.ndarray:
"""Indices of the Z stabilizers in the parity-check matrix,
as a boolean array s.t. z_indices[i] is True if stabilizer H[i]
only contain Z operators and False otherwise"""
if self._z_indices is None:
Hz = self.stabilizer_matrix[:, self.n:]
self._z_indices = (Hz.getnnz(1) > 0)
return self._z_indices
def in_codespace(self, error: np.ndarray) -> bool:
"""Check whether or not a given error is in the codespace,
i.e. whether it has a zero syndrome or not.
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
bool
Whether or not the error is in the codespace
"""
return bool(np.all(bcommute(self.stabilizer_matrix, error) == 0))
def logical_errors(self, error: np.ndarray) -> np.ndarray:
"""Return the logical errors, as an array of size 2k
(where k is the number of logicals), such that each component is
1 if and only if it anticommutes with the corresponding logical.
By convention, the first k indices correspond to the X logicals
and the last k to the the Z logicals
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
logical_errors: np.ndarray
Array of size 2k (where k is the number of logicals)
indicating whether the error commute with each X and Z logical.
"""
return get_effective_error(
error, self.logicals_x, self.logicals_z
)
def is_logical_error(self, error) -> bool:
"""Check whether or not a given error is in the codespace,
i.e. whether it has a zero syndrome or not.
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
bool
Whether or not the error is in the codespace
"""
return bool(np.any(self.logical_errors(error) != 0))
def extract_x_syndrome(self, syndrome: np.ndarray) -> np.ndarray:
"""For CSS codes only. Returns the part of the syndrome that
corresponds to X stabilizers.
Parameters
----------
syndrome: np.ndarray
Syndrome as a sparse row of dimension 1xm, where m is the number
of stabilizers.
Returns
-------
x_syndrome: np.ndarray
Syndrome reduced to X stabilizers
"""
return syndrome[self.x_indices]
def extract_z_syndrome(self, syndrome: np.ndarray) -> np.ndarray:
"""For CSS codes only. Returns the part of the syndrome that
corresponds to Z stabilizers.
Parameters
----------
syndrome: np.ndarray
Syndrome as a sparse row of dimension 1xm, where m is the number
of stabilizers.
Returns
-------
z_syndrome: np.ndarray
Syndrome reduced to X stabilizers
"""
return syndrome[self.z_indices]
def to_bsf(self, operator: Operator) -> np.ndarray:
"""Convert an operator (given as a dictionary qubit_location -> pauli)
to an array in the binary symplectic format.
Parameters
----------
operator: Dict[Tuple, str]
Operator given as a dictionary that assigns a Pauli operator
('X', 'Y' or 'Z') to each qubit location in its support
Returns
-------
bsf_operator: np.ndarray
Array of dimension 2n in the binary symplectic format
(where n is the number of qubits)
"""
bsf_operator = np.zeros(2*self.n, dtype=np.uint)
for qubit_location in operator.keys():
if operator[qubit_location] in ['X', 'Y']:
bsf_operator[self.qubit_index[qubit_location]] += 1
if operator[qubit_location] in ['Y', 'Z']:
bsf_operator[self.n + self.qubit_index[qubit_location]] += 1
return bsf_operator
def from_bsf(self, bsf_operator: np.ndarray) -> Operator:
"""Convert an operator given as a sparse row in the binary
symplectic format to a dictionary qubit_location -> pauli.
Parameters
----------
bsf_operator: np.ndarray
Array of dimension (1, 2n) in the binary symplectic format
(where n is the number of qubits)
Returns
-------
operator: Dict[Tuple, str]
Operator given as a dictionary that assigns a Pauli operator
('X', 'Y' or 'Z') to each qubit location in its support
"""
assert (
bsf_operator.shape[0] == 1 or len(bsf_operator.shape) == 1
), "Can only take one operator at a time."
operator = dict()
if len(bsf_operator.shape) == 1:
cols = bsf_operator.nonzero()[0]
else:
rows, cols = bsf_operator.nonzero()
for col in cols:
if col < self.n:
location = self.qubit_coordinates[col]
operator[location] = 'X'
else:
location = self.qubit_coordinates[col - self.n]
if location in operator.keys():
operator[location] = 'Y'
else:
operator[location] = 'Z'
return operator
def measure_syndrome(self, error: np.ndarray) -> np.ndarray:
"""Noiseless syndrome corresponding to a given Pauli error.
Parameters
----------
error: np.ndarray
Error given as an array of dimension 2n in the binary
symplectic format.
Returns
-------
syndrome: np.ndarray
Syndrome, as an array of dimension m (where m is the number
of stabilizers)
"""
return bcommute(self.stabilizer_matrix, error)
def is_stabilizer(self, location: Tuple, stab_type: str = None):
"""Returns whether a given location in the coordinate system
corresponds to a stabilizer or not
"""
_is_stabilizer = (
(location in self.stabilizer_index) and
(stab_type is None or self.stabilizer_type(location) == stab_type)
)
return _is_stabilizer
def is_qubit(self, location: Tuple):
"""Returns whether a given location in the coordinate system
corresponds to a qubit or not. It is done by checking that the input
location is a key in the dictionary `self.qubit_index`.
Parameters
----------
location : Tuple
Location as a tuple of coordinates
Returns
-------
Bool
Whether the location is a qubit in the coordinate system.
"""
return location in self.qubit_index
    @abstractmethod
    def get_qubit_coordinates(self) -> List[Tuple]:
        """Give the list of all the qubit coordinates, in a coordinate system
        that should contain both the qubits and the stabilizers.

        This function is used to set the attributes `self.qubit_coordinates`
        and `self.qubit_index`.

        Returns
        -------
        qubit_coordinates: List[Tuple]
            List of coordinates, one tuple per qubit
        """
    @abstractmethod
    def get_stabilizer_coordinates(self) -> List[Tuple]:
        """Create list of stabilizer coordinates, in a coordinate system
        that should contain both the qubits and the stabilizers.

        This function is used to set the attributes
        `self.stabilizer_coordinates` and `self.stabilizer_index`.

        Returns
        -------
        stabilizer_coordinates: List[Tuple]
            List of coordinates, one tuple per stabilizer generator
        """
    @abstractmethod
    def qubit_axis(self, location: Tuple) -> str:
        """ Return the orientation of a qubit sitting at given location
        (as a string representing the axis 'x', 'y' or 'z').

        Useful when qubits have an orientation in space, for instance when
        they are edges, to help establish the visual representation of the
        code in the GUI, to simplify the construction of stabilizers,
        and to create Clifford deformations.

        Parameters
        ----------
        location: Tuple
            Location of the qubit in the coordinate system.

        Returns
        -------
        axis: str
            Either 'x', 'y' or 'z', depending on the orientation axis of the
            qubit.
        """
    @abstractmethod
    def stabilizer_type(self, location: Tuple) -> str:
        """ Returns the type of a stabilizer sitting at a given location.
        E.g. 'vertex' or 'face' in toric codes

        Parameters
        ----------
        location: Tuple
            Location of the stabilizer in the coordinate system.

        Returns
        -------
        str
            Code-specific type label (e.g. 'vertex' or 'face').
        """
    @abstractmethod
    def get_stabilizer(
        self, location: Tuple, deformed_axis: str = None
    ) -> Operator:
        """ Returns a stabilizer, formatted as dictionary that assigns a Pauli
        operator ('X', 'Y' or 'Z') to each qubit location in the support of
        the stabilizer.

        For example, for a vertex stabilizer in the 2D toric code, we could
        have
        `get_stabilizer((1,1)) -> {(1,0): 'X', (0, 1): 'X', (2, 1): 'X',
        (1, 2): 'X'}`

        Parameters
        ----------
        location: Tuple
            Location of the stabilizer in the coordinate system
        deformed_axis: str, optional
            If given, represents an axis ('x', 'y' or 'z') that we want to
            Clifford-deform, by applying a Clifford transformation to all the
            qubits oriented along the given axis
            (e.g. `deformed_axis='x'` in the 2D toric code could give an
            XZZX surface code, where the transformation Pauli X <-> Z
            has been applied to all the vertical qubits of the code)

        Returns
        -------
        stabilizer: Dict[Tuple, str]
            Dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
            qubit location in the support of the stabilizer
        """
    @abstractmethod
    def get_logicals_x(self) -> List[Operator]:
        """Returns the list of logical X operators, where each operator is a
        dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
        qubit location in its support.

        Returns
        -------
        logicals: List[Dict[Tuple, str]]
            List of dictionaries, where each dictionary assigns a Pauli
            operator ('X', 'Y' or 'Z') to each qubit location in the support
            of the logical operator.
        """
    @abstractmethod
    def get_logicals_z(self) -> List[Operator]:
        """Returns the list of logical Z operators, where each operator is a
        dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
        qubit location in its support.

        Returns
        -------
        logicals: List[Dict[Tuple, str]]
            List of dictionaries, where each dictionary assigns a Pauli
            operator ('X', 'Y' or 'Z') to each qubit location in the support
            of the logical operator.
        """
def stabilizer_representation(self,
location: Tuple,
rotated_picture=False,
json_file=None) -> Dict:
"""Returns a dictionary of visualization parameters for the input
stabilizer, that can be used by the web visualizer.
It should contain 4 keys:
- 'type': the type of stabilizer, e.g. 'vertex'
- 'location': [x, y, z],
- 'object': the type of object to use for visualization, e.g. 'sphere'
- 'params': a dictionary of parameters for the chosen object
Parameters
----------
location: Tuple
Coordinates of the stabilizer
rotated_picture: bool
For codes that have a rotated picture, can be used to differentiate
the two types visualizations
json_file: str
File with the initial configuration for the code
Returns
-------
representation: Dict
Dictionary to send to the GUI
"""
if json_file is None:
json_file = os.path.join(
os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json'
)
stab_type = self.stabilizer_type(location)
with open(json_file, 'r') as f:
data = json.load(f)
code_name = self.id
picture = 'rotated' if rotated_picture else 'kitaev'
representation = data[code_name]['stabilizers'][picture][stab_type]
representation['type'] = stab_type
representation['location'] = location
for activation in ['activated', 'deactivated']:
color_name = representation['color'][activation]
representation['color'][activation] = self.colormap[color_name]
return representation
def qubit_representation(self,
location: Tuple,
rotated_picture=False,
json_file=None) -> Dict:
"""Returns a dictionary of visualization parameters for the input
qubit, that can be used by the web visualizer.
- 'location': [x, y, z],
- 'object': the type of object to use for visualization, e.g. 'sphere'
- 'params': a dictionary of parameters for the chosen object
Parameters
----------
location: Tuple
Coordinates of the qubit
rotated_picture: bool
For codes that have a rotated picture, can be used to differentiate
the two types visualizations
json_file: str
File with the initial configuration for the code
Returns
-------
representation: Dict
Dictionary to send to the GUI
"""
if json_file is None:
json_file = os.path.join(
os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json'
)
with open(json_file, 'r') as f:
data = json.load(f)
code_name = self.id
# if self.id == 'MyToric3DCode':
# print(data)
# print()
# print()
# print(data[code_name])
# print()
# print(data[code_name]['qubits'])
# print(data[code_name]['qubits'][picture])
picture = 'rotated' if rotated_picture else 'kitaev'
representation = data[code_name]['qubits'][picture]
representation['params']['axis'] = self.qubit_axis(location)
representation['location'] = location
for pauli in ['I', 'X', 'Y', 'Z']:
color_name = representation['color'][pauli]
representation['color'][pauli] = self.colormap[color_name]
return representation
def type_index(self, stab_type: str) -> Dict[Tuple, int]:
"""Dictionary of locations and indices for given stabilizer type.
Parameters
----------
stab_type: str
Stabilizer type ot index.
Returns
-------
index: Dict[Tuple, int]
Dictionary of qubit indices for each stabilizer location that
matches the given type.
"""
return {
location: index
for index, location in enumerate(self.stabilizer_coordinates)
if self.stabilizer_type(location) == stab_type
}
@property
def site(self, operator: Operator, pauli: str, location: Tuple) -> None:
"""Apply a Pauli on operator at site location.
Note that the operator is a (mutable) dict.
Parameters
----------
operator: Operator
Operator in dictionary representation.
pauli: str
Pauli to apply.
"""
product_map = {
('X', 'Y'): 'Z',
('X', 'Z'): 'Y',
('Y', 'X'): 'Z',
('Y', 'Z'): 'X',
('Z', 'X'): 'Y',
('Z', 'Y'): 'X',
}
if location in operator:
if operator[location] == pauli:
operator.pop(location)
else:
operator[location] = product_map[(operator[location], pauli)]
else:
operator[location] = pauli
| [
6738,
19720,
1330,
360,
713,
11,
309,
29291,
11,
32233,
11,
7343,
198,
11748,
28686,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
6738,
629,
541,
88,
13,
82,
2957... | 2.198134 | 12,648 |
from typing import Any
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# Shared declarative base class for all ORM models defined in this package.
Base: Any = declarative_base()
| [
6738,
19720,
1330,
4377,
198,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
198,
14881,
25,
4377,
796,
2377,
283,
876... | 3.478261 | 46 |
import time
import shutil
import torch
from big_sleep import Imagine
# NOTE(review): `terminate` is never read anywhere in this excerpt --
# presumably a leftover control flag; confirm before removing.
terminate = False

# Run several independent dream attempts and keep each attempt's best frame.
num_attempts = 4
for attempt in range(num_attempts):
    dream = Imagine(
        text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
        text_min = "blur\\zoom",
        lr = 7e-2,
        image_size = 512,
        gradient_accumulate_every = 1,
        save_every = 50,
        epochs = 5,
        iterations = 50,
        save_progress = False,
        bilinear = False,
        open_folder = False,
        seed = None,
        torch_deterministic = False,
        max_classes = 20,
        class_temperature = 2.,
        save_date_time = False,
        save_best = True,
        experimental_resample = True,
        ema_decay = 0.99
    )
    dream()
    # Keep the best frame of this attempt under a per-attempt file name.
    shutil.copy(dream.textpath + ".best.png", f"{attempt}.png")
    # Best-effort teardown between attempts: drop the model and release
    # cached GPU memory; if any step fails, still try to free the cache.
    try:
        time.sleep(2)
        del dream
        time.sleep(2)
        torch.cuda.empty_cache()
    except Exception:
        torch.cuda.empty_cache()
11748,
640,
198,
11748,
4423,
346,
198,
11748,
28034,
198,
6738,
1263,
62,
42832,
1330,
18450,
198,
198,
23705,
378,
796,
10352,
198,
198,
22510,
62,
1078,
1791,
82,
796,
604,
198,
1640,
2230,
287,
2837,
7,
22510,
62,
1078,
1791,
82,
... | 2.119748 | 476 |
import pickle
import sys
import pika
import gevent
from gevent.event import Event
POLLS = {}
CACHE_SIZE = 20
| [
198,
11748,
2298,
293,
198,
11748,
25064,
198,
11748,
279,
9232,
198,
11748,
4903,
1151,
198,
6738,
4903,
1151,
13,
15596,
1330,
8558,
198,
198,
16402,
3069,
50,
796,
23884,
198,
34,
2246,
13909,
62,
33489,
796,
1160,
628,
628
] | 2.85 | 40 |
# coding = utf-8
import jieba
import logging
import numpy as np
from transformers import BertModel, BertTokenizer
# Silence jieba's verbose startup logging.
jieba.setLogLevel(logging.INFO)

# Path to the pretrained Chinese whole-word-masking BERT checkpoint.
bert_path = "../chinese_wwm_ext_pytorch"
bert = BertModel.from_pretrained(bert_path)
token = BertTokenizer.from_pretrained(bert_path)

# Generate BERT embeddings for the input text file.
# char=False produces word-level vectors; char=True would produce
# character-level vectors.
# NOTE(review): `get_bert_embed` is not defined in this excerpt --
# confirm it is defined or imported elsewhere in the original file.
get_bert_embed("text.txt", char=False)
print("Generate Finished!!!")
| [
2,
19617,
796,
3384,
69,
12,
23,
201,
198,
11748,
474,
494,
7012,
201,
198,
11748,
18931,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
6121,
364,
1330,
22108,
17633,
11,
22108,
30642,
7509,
201,
198,
201,
198,
73,
494,
7... | 2.067633 | 207 |
"""
This example uses a simple bag-of-words (BoW) approach. A sentence is mapped
to a sparse vector with e.g. 25,000 dimensions. Optionally, you can also use tf-idf.
To make the model trainable, we add multiple dense layers to create a Deep Averaging Network (DAN).
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import *
from sentence_transformers.models.tokenizer.WordTokenizer import ENGLISH_STOP_WORDS
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout

# Read the dataset
batch_size = 32
model_save_path = 'output/training_tf-idf_word_embeddings-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

# Check if the dataset exists. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
    util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)

logging.info("Read STSbenchmark train dataset")

train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
    reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        score = float(row['score']) / 5.0  # Normalize score to range 0 ... 1
        inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)

        if row['split'] == 'dev':
            dev_samples.append(inp_example)
        elif row['split'] == 'test':
            test_samples.append(inp_example)
        else:
            train_samples.append(inp_example)

##### Construction of the SentenceTransformer Model #####

# Wikipedia document frequency for words
wiki_doc_freq = 'wikipedia_doc_frequencies.txt'
if not os.path.exists(wiki_doc_freq):
    util.http_get('https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/wikipedia_doc_frequencies.txt', wiki_doc_freq)

# Create the vocab for the BoW model
stop_words = ENGLISH_STOP_WORDS
max_vocab_size = 25000  # This is also the size of the BoW sentence vector.

# Read the most common max_vocab_size words. Skip stop-words
vocab = set()
weights = {}
# Fix: read from the wiki_doc_freq variable instead of a hard-coded
# duplicate of the same file name.
lines = open(wiki_doc_freq, encoding='utf8').readlines()
num_docs = int(lines[0])
for line in lines[1:]:
    word, freq = line.lower().strip().split("\t")
    if word in stop_words:
        continue
    vocab.add(word)
    # IDF weight: log(total docs / doc frequency).
    weights[word] = math.log(num_docs/int(freq))
    if len(vocab) >= max_vocab_size:
        break

##### Construction of the SentenceTransformer Model #####
# Create the BoW model. Because we set word_weights to the IDF values and cumulative_term_frequency=True, we
# get tf-idf vectors. Set word_weights to an empty dict and cumulative_term_frequency=False to get a 1-hot sentence encoding
bow = models.BoW(vocab=vocab, word_weights=weights, cumulative_term_frequency=True)

# Add two trainable feed-forward networks (DAN) with max_vocab_size -> 768 -> 512 dimensions.
sent_embeddings_dimension = max_vocab_size
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=768)
dan2 = models.Dense(in_features=768, out_features=512)

model = SentenceTransformer(modules=[bow, dan1, dan2])

# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)

logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')

# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)  # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))

# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
          evaluator=evaluator,
          epochs=num_epochs,
          warmup_steps=warmup_steps,
          output_path=model_save_path
          )

##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################

model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
# Fix: evaluate the reloaded model with the *test* evaluator; previously the
# dev evaluator was passed here, so test_evaluator was built but never used.
model.evaluate(test_evaluator)
37811,
198,
1212,
1672,
3544,
257,
2829,
6131,
12,
1659,
12,
10879,
357,
16635,
54,
8,
3164,
13,
317,
6827,
318,
27661,
198,
1462,
257,
29877,
15879,
351,
304,
13,
70,
13,
1679,
11,
830,
15225,
13,
16018,
453,
11,
345,
460,
635,
7... | 2.801153 | 1,735 |
# Generated by Django 2.2.2 on 2019-07-30 13:22
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
17,
319,
13130,
12,
2998,
12,
1270,
1511,
25,
1828,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import textwrap
from test_utils import get_rendered_file
| [
11748,
2420,
37150,
198,
198,
6738,
1332,
62,
26791,
1330,
651,
62,
26238,
62,
7753,
628
] | 3.6875 | 16 |
from .dynedge import DynEdge, DynEdge_V2, DynEdge_V3
from .convnet import ConvNet
| [
6738,
764,
67,
2047,
14907,
1330,
39530,
37021,
11,
39530,
37021,
62,
53,
17,
11,
39530,
37021,
62,
53,
18,
198,
6738,
764,
42946,
3262,
1330,
34872,
7934,
198
] | 2.827586 | 29 |
from keras.models import load_model
import numpy as np
import cv2
"""
tensorflow_version --> '2.0.0'
keras_version -------> '2.3.1'
"""
'''
para usar:
1. instanciar modelo
face_detector = f_face_detector.detector_face_occlusion()
2. ingresar alguna imagen con un rostro y predecir
boxes_face = face_detector.detect_face(img)
Nota: devuleve los bounding_box donde encontro rostros
'''
| [
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
269,
85,
17,
198,
37811,
198,
83,
22854,
11125,
62,
9641,
14610,
705,
17,
13,
15,
13,
15,
6,
198,
6122,
292,
62,
9641,
40103,
... | 2.506329 | 158 |
import os
from git import Commit, Remote
import logging
| [
11748,
28686,
198,
198,
6738,
17606,
1330,
35910,
11,
21520,
198,
11748,
18931,
628,
628,
628,
628,
628,
198
] | 3.526316 | 19 |
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from dm_control import suite
from x_mushroom_rl.environments import Environment, MDPInfo
from x_mushroom_rl.utils.spaces import *
from x_mushroom_rl.utils.viewer import ImageViewer
class DMControl(Environment):
"""
Interface for dm_control suite Mujoco environments. It makes it possible to
use every dm_control suite Mujoco environment just providing the necessary
information.
"""
def __init__(self, domain_name, task_name, horizon, gamma, task_kwargs=None,
dt=.01, width_screen=480, height_screen=480, camera_id=0):
"""
Constructor.
Args:
domain_name (str): name of the environment;
task_name (str): name of the task of the environment;
horizon (int): the horizon;
gamma (float): the discount factor;
task_kwargs (dict, None): parameters of the task;
dt (float, .01): duration of a control step;
width_screen (int, 480): width of the screen;
height_screen (int, 480): height of the screen;
camera_id (int, 0): position of camera to render the environment;
"""
# MDP creation
if task_kwargs is None:
task_kwargs = dict()
task_kwargs['time_limit'] = np.inf # Hack to ignore dm_control time limit.
self.env = suite.load(domain_name, task_name, task_kwargs=task_kwargs)
# MDP properties
action_space = self._convert_action_space(self.env.action_spec())
observation_space = self._convert_observation_space(self.env.observation_spec())
mdp_info = MDPInfo(observation_space, action_space, gamma, horizon)
self._viewer = ImageViewer((width_screen, height_screen), dt)
self._camera_id = camera_id
super().__init__(mdp_info)
@staticmethod
@staticmethod
@staticmethod
| [
11748,
14601,
198,
198,
4480,
14601,
13,
40198,
62,
40539,
654,
33529,
198,
220,
220,
220,
14601,
13,
24455,
40539,
654,
7203,
46430,
1600,
6536,
28,
12156,
8344,
341,
20361,
8,
198,
220,
220,
220,
422,
288,
76,
62,
13716,
1330,
18389... | 2.488778 | 802 |
from django.conf.urls import url
from . import views
app_name = 'home'

# NOTE(review): several route *names* below end with a stray '$'
# (e.g. name='hbase_region_servers$'); this looks like a regex anchor
# accidentally pasted into the reverse()-lookup name. Not changed here
# because {% url %} / reverse() callers may already depend on the exact
# strings -- confirm before renaming.
# NOTE(review): the '^$' route reuses the view and a near-duplicate name of
# the '^hbase/region_servers$' route; verify this duplication is intended.
urlpatterns = [
    # ex: /home/
    url(r'^$', views.hbase_region_servers, name='hbase_region_servers$'),
    url(r'^opentsdb/split$', views.opentsdb_split, name='opentsdb_split'),
    url(r'^opentsdb/regions/(?P<encoded_name>[a-f0-9]+)$', views.opentsdb_regions_infos, name='opentsdb_regions_infos$'),
    url(r'^opentsdb/regions$', views.opentsdb_regions, name='opentsdb_regions'),
    url(r'^opentsdb/metric$', views.opentsdb_metric, name='opentsdb_metric'),
    url(r'^hbase/region_servers$', views.hbase_region_servers, name='hbase_region_servers$'),
    url(r'^hbase/tables$', views.hbase_tables, name='hbase_tables$'),
    url(r'^hbase/tables/(?P<table_name>.+)$', views.hbase_tables_infos, name='hbase_tables_infos$'),
    url(r'^hbase/regions$', views.hbase_regions, name='hbase_regions$'),
    url(r'^hbase/regions/(?P<encoded_name>[a-f0-9]+)$', views.hbase_regions_infos, name='hbase_regions_infos$')
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
201,
198,
201,
198,
6738,
764,
1330,
5009,
201,
198,
201,
198,
1324,
62,
3672,
796,
705,
11195,
6,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
220,
1303,
409,
... | 2.168103 | 464 |
# https://pypi.org/project/jax-data/
import jax
import jax.numpy as jnp
import numpy as np | [
2,
3740,
1378,
79,
4464,
72,
13,
2398,
14,
16302,
14,
73,
897,
12,
7890,
14,
198,
198,
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
198,
11748,
299,
32152,
355,
45941
] | 2.421053 | 38 |
from core.app import App, AppParameter, AppFile
import unittest
import shutil
import pdb
class TestAppParameter(unittest.TestCase):
    """
    Unit tests for the ``AppParameter`` class.

    NOTE(review): no test methods are visible in this excerpt, so the
    case currently passes vacuously -- confirm tests exist or add them.
    """
class TestAppFile(unittest.TestCase):
    """
    Unit tests for the ``AppFile`` class.

    NOTE(review): placeholder docstring replaced; no test methods are
    visible in this excerpt -- confirm tests exist or add them.
    """
class TestApp(unittest.TestCase):
    """
    Unit tests for the ``App`` class.

    NOTE(review): placeholder docstring replaced; no test methods are
    visible in this excerpt -- confirm tests exist or add them.
    """
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
6738,
4755,
13,
1324,
1330,
2034,
11,
2034,
36301,
11,
2034,
8979,
198,
11748,
555,
715,
395,
198,
11748,
4423,
346,
198,
11748,
279,
9945,
198,
198,
4871,
6208,
4677,
36301,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
... | 2.823077 | 130 |
"""
run.py is written to train and evaluate the model
"""
import os
import logging
import glob
from Regression.configuration import BaseConfig
from Regression.methods import RegressionModel, save_model, load_trained
from Regression.dataloader import DataframeLoader
from Regression.train import TrainingEvaluation
# Module metadata.
__author__ = "Amir Mousavi"
__license__ = "Public Domain"
__maintainer__ = "Amir Mousavi"
__email__ = "azmusavi19@gmail.com"
__status__ = "Production"

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class ModelRrunner:
    """
    Runs training and evaluation of the regression model.

    NOTE(review): this class reads ``self.args``, ``self.dataloader_obj``
    and ``self.train_eval_obj`` but defines no ``__init__`` here; these
    attributes must be attached before ``run`` is called -- confirm
    against the construction site. (The class name keeps its original
    spelling to stay compatible with existing callers.)
    """
    def run(self):
        """
        Train and/or evaluate the model according to ``self.args``.

        :return:
            None
        """
        logger.info("Training/evaluation parameters %s", self.args)

        x_train, y_train = self.dataloader_obj.get_train_examples()  # split train examples
        x_test, y_test = self.dataloader_obj.get_test_examples()  # split test examples

        if self.args.do_train:
            model = self.training(x_train, y_train)  # train on the train set
            self.evaluation(model, x_test, y_test)  # evaluate on the test set
            save_model(model, self.args.path_model)  # persist the trained model

        if self.args.do_eval and not self.args.do_train:  # evaluation-only mode
            # Collect every checkpoint previously saved under the model dir.
            checkpoints = [
                os.path.join(self.args.dir_model, c)
                for c in os.listdir(self.args.dir_model)
            ]
            logger.info(
                "Evaluate the following checkpoints: %s", checkpoints
            )
            if checkpoints:  # at least one saved model exists
                for checkpoint in checkpoints:  # load and evaluate each model
                    trained_model = load_trained(checkpoint)
                    score = self.train_eval_obj.evaluate(
                        trained_model, x_test, y_test
                    )
                    logger.info(
                        " predictions on %s was successful", checkpoint
                    )
                    logger.info(" Test score: %s ", 100 * score)
            else:
                # No trained model available: train first, then evaluate
                # and save the freshly trained model.
                model = self.training(x_train, y_train)
                self.evaluation(model, x_test, y_test)
                save_model(model, self.args.path_model)

    def training(self, x_train, y_train):
        """
        Fit the model on the training split.

        :param x_train: list
        :param y_train: list
        :return: model
        """
        model_ = self.train_eval_obj.train(x_train, y_train)
        return model_

    def evaluation(self, model, x_test, y_test):
        """
        Evaluate the model on the test split and log the score.
        (Fixed docstring: it previously said "training method".)

        :param model: model
        :param x_test: list
        :param y_test: list
        :return:
            None
        """
        score = self.train_eval_obj.evaluate(model, x_test, y_test)
        logger.info(" %s predictions was successful", len(x_test))
        logger.info(" Test score: %s ", 100 * score)
if __name__ == '__main__':
    # Script entry point: build the runner and execute the full pipeline.
    # NOTE(review): ModelRrunner defines no __init__ attaching args /
    # dataloader_obj / train_eval_obj; confirm run() can succeed when the
    # object is constructed bare like this.
    run_model_obj = ModelRrunner()
    run_model_obj.run()
| [
37811,
201,
198,
5143,
13,
9078,
318,
3194,
284,
4512,
290,
13446,
262,
2746,
201,
198,
37811,
201,
198,
11748,
28686,
201,
198,
11748,
18931,
201,
198,
11748,
15095,
201,
198,
6738,
3310,
2234,
13,
11250,
3924,
1330,
7308,
16934,
201,
... | 2.075964 | 1,685 |
# -*- coding:utf-8 -*-
from essen.settings import *
# NOTE(review): presumably the Celery broker transport setting, selecting
# Amazon SQS -- confirm which library consumes this value.
BROKER_TRANSPORT = 'sqs'
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
6738,
3209,
268,
13,
33692,
1330,
1635,
198,
198,
11473,
11380,
1137,
62,
5446,
1565,
4303,
9863,
796,
705,
31166,
82,
6,
198
] | 2.166667 | 36 |
from typing import List, Any
import pytorch_lightning.core.lightning as pl
import torch
import torch.nn.functional as F
import numpy as np
from allennlp.modules import ConditionalRandomField
from allennlp.modules.conditional_random_field import allowed_transitions
from torch import nn
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup, AutoModel
from log import logger
from utils.metric import SpanF1
from utils.reader_utils import extract_spans, get_tags
| [
6738,
19720,
1330,
7343,
11,
4377,
198,
198,
11748,
12972,
13165,
354,
62,
2971,
768,
13,
7295,
13,
2971,
768,
355,
458,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152,
355,
45941,
19... | 3.559441 | 143 |
from easydict import EasyDict as edict
from torchvision import transforms as trans
import torch.nn as nn
from label_smoothing import LabelSmoothing, LabelSmoothingLoss
| [
6738,
2562,
11600,
1330,
16789,
35,
713,
355,
1225,
713,
198,
6738,
28034,
10178,
1330,
31408,
355,
1007,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
6167,
62,
5796,
1025,
722,
1330,
36052,
7556,
1025,
722,
11,
36052,
7556,
... | 3.652174 | 46 |
from pooch import os_cache as _os_cache
from pooch import retrieve as _retrieve
from pooch import HTTPDownloader as _HTTPDownloader
from pooch import Unzip as _Unzip
import pandas as _pd
#import geopandas as _gpd
import os as _os
#from gprm import ReconstructionModel as _ReconstructionModel
# TODO
# Add: Domeier and Torsvik
# Shephard?? (or a version of Seton with Static Polygons)
def fetch_CaoToyRodinia(load=True, model_case='NNR'):
    '''
    Load Toy Billion-year reconstructions from Cao et al (2020), Tectonics
    doi: 10.1029/2020GC009244

    Parameters
    ----------
    load : bool, optional
        Kept for signature consistency with the other fetch_* functions;
        the archive is always downloaded (or read from the pooch cache).
    model_case : str, optional
        Which alternative absolute-rotation file to attach; one of
        'NNR' [default], 'OV', 'SSL'.

    Returns
    -------
    ReconstructionModel

    Raises
    ------
    ValueError
        If model_case is not one of the recognised options.
    '''
    # Validate up front so a bad argument fails before any download happens.
    # (The original code constructed the ValueError in the final else-branch
    # but never raised it, silently returning a model with no NLR rotations.)
    if model_case not in ('NNR', 'OV', 'SSL'):
        raise ValueError('Unrecognised model name {}'.format(model_case))

    fnames = _retrieve(
        url="https://zenodo.org/record/3854549/files/1000Myr_synthetic_tectonic_reconstructions.zip?download=1",
        known_hash="md5:b7ea40c77826ef5d5e3b99affa3e9d66",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='CaoToyRodinia'),
    )

    dirname = _os.path.split(fnames[0])[0]

    from gprm import ReconstructionModel as _ReconstructionModel
    reconstruction_model = _ReconstructionModel()
    reconstruction_model.add_rotation_model('{:s}/Global_EB_250-0Ma_GK07_2017_ASM.rot'.format(dirname))
    reconstruction_model.add_rotation_model('{:s}/triple_junction_superoceanic_plates.rot'.format(dirname))
    reconstruction_model.add_rotation_model('{:s}/410-250_toy_introversion_simplified.rot'.format(dirname))
    reconstruction_model.add_rotation_model('{:s}/1000-410_toy_introversion_simplified.rot'.format(dirname))

    reconstruction_model.add_continent_polygons('{:s}/COBfile_1000_0_Toy_introversion.gpml'.format(dirname))
    reconstruction_model.add_coastlines('{:s}/coastline_file_1000_250_new_valid_time.gpml'.format(dirname))
    reconstruction_model.add_coastlines('{:s}/coastline_file_250_0_new_valid_time.gpml'.format(dirname))

    reconstruction_model.add_dynamic_polygons('{:s}/Global_EarthByte_Mesozoic-Cenozoic_plate_boundaries_2016_v5.gpml'.format(dirname))
    reconstruction_model.add_dynamic_polygons('{:s}/TopologyBuildingBlocks_AREPS.gpml'.format(dirname))
    reconstruction_model.add_dynamic_polygons('{:s}/Toy_introversion_plate_boundaries_410_250_new_valid_time.gpml'.format(dirname))
    reconstruction_model.add_dynamic_polygons('{:s}/Toy_introversion_plate_boundaries_1000_410_new_valid_time.gpml'.format(dirname))

    # All three model cases load a file following the same naming pattern.
    reconstruction_model.add_rotation_model(
        '{:s}/NLR_SLOW_CONTINENT_0Ma_1000Ma_{:s}.rot'.format(dirname, model_case))

    return reconstruction_model
def fetch_Li2008(load=True):
    '''
    Load the Rodinia reconstruction of Li et al (2008),
    doi: 10.1016/j.precamres.2007.04.021,
    as updated in Li et al (2013), Sedimentary Geology,
    doi: 10.1016/j.sedgeo.2013.05.016
    '''
    # Download the archive (or reuse the cached copy) and unzip it.
    archive_files = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Li_etal_2008_RodiniaModel.zip",
        known_hash="sha256:e659371df79acfd7e599d0a358be0b154705b84d92388c042e6382ef78a3f4f6",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Li2008'),
    )
    base_dir = _os.path.split(archive_files[0])[0]

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Li++2008')
    model.add_rotation_model(f'{base_dir}/Li_etal_2008_RodiniaModel/RodiniaModel_CompleteRotationFile.rot')
    model.add_static_polygons(f'{base_dir}/Li_etal_2008_RodiniaModel/RodiniaBlocks_WithPlateIDColumnAndIDs.shp')

    return model
def fetch_Matthews2016(load=True):
    '''
    Load 0-410 Ma reconstruction from Matthews et al (2016)

    Returns
    -------
    ReconstructionModel
        Model with rotations, static/continental polygons, coastlines and
        dynamic (topological) polygons attached.
    '''
    fnames = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Matthews_etal_2016_Global_Plate_Model_GPC.zip",
        known_hash="sha256:c88acba32f7e5a00734d14d8c512a20392dc8e62d75fd1777d351eb7e6ada28f",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Matthews2016'),
    )

    # Locate the archive root via its License.txt entry. Fall back to the
    # directory of the first extracted file so that `dirname` can never be
    # left unbound (the original loop raised a NameError if License.txt was
    # missing from the archive listing).
    dirname = _os.path.split(fnames[0])[0]
    for fname in fnames:
        if _os.path.split(fname)[1] == 'License.txt':
            dirname = _os.path.split(fname)[0]

    from gprm import ReconstructionModel as _ReconstructionModel
    reconstruction_model = _ReconstructionModel('Matthews++2016')
    reconstruction_model.add_rotation_model('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/Global_EB_250-0Ma_GK07_Matthews_etal.rot'.format(dirname))
    reconstruction_model.add_rotation_model('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/Global_EB_410-250Ma_GK07_Matthews_etal.rot'.format(dirname))
    reconstruction_model.add_static_polygons('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/StaticGeometries/StaticPolygons/PresentDay_StaticPlatePolygons_Matthews++.shp'.format(dirname))
    reconstruction_model.add_continent_polygons('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/StaticGeometries/ContinentalPolygons/PresentDay_ContinentalPolygons_Matthews++.shp'.format(dirname))
    reconstruction_model.add_coastlines('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/StaticGeometries/Coastlines/Global_coastlines_2015_v1_low_res.shp'.format(dirname))
    reconstruction_model.add_dynamic_polygons('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/Global_EarthByte_Paleozoic_plate_boundaries_Matthews_etal.gpml'.format(dirname))
    reconstruction_model.add_dynamic_polygons('{:s}/Matthews_etal_2016_Global_Plate_Model_GPC/Global_EarthByte_Mesozoic-Cenozoic_plate_boundaries_Matthews_etal.gpml'.format(dirname))
    return reconstruction_model
def fetch_Merdith2021(load=True):
    '''
    Load Billion-year reconstruction from Merdith et al (2021),
    Earth Science Reviews
    doi: https://doi.org/10.1016/j.earscirev.2020.103477
    '''
    archive_files = _retrieve(
        url="https://zenodo.org/record/4320873/files/SM2-Merdith_et_al_1_Ga_reconstruction.zip?download=1",
        known_hash="md5:1786d68e949c4242de1801388c68cb8c",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Merdith2021'),
    )
    # Root of the unzipped archive within the pooch cache.
    base_dir = f"{archive_files[0].split('Merdith2021')[0]}/Merdith2021/"

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Merdith++2021')
    model.add_rotation_model(f'{base_dir}/SM2/1000_0_rotfile_Merdith_et_al.rot')
    model.add_static_polygons(f'{base_dir}/SM2/shapes_static_polygons_Merdith_et_al.gpml')
    model.add_continent_polygons(f'{base_dir}/SM2/shapes_continents_Merdith_et_al.gpml')

    # Topological feature collections, added in the published load order.
    for topology_file in ('410-250_plate_boundaries_Merdith_et_al.gpml',
                          '250-0_plate_boundaries_Merdith_et_al.gpml',
                          'TopologyBuildingBlocks_Merdith_et_al.gpml',
                          '1000-410-Transforms_Merdith_et_al.gpml',
                          '1000-410-Convergence_Merdith_et_al.gpml',
                          '1000-410-Divergence_Merdith_et_al.gpml',
                          '1000-410-Topologies_Merdith_et_al.gpml'):
        model.add_dynamic_polygons(f'{base_dir}/SM2/{topology_file}')

    return model
def fetch_Muller2016(load=True):
    '''
    Load Pangea breakup reconstruction from Muller et al (2016),
    Annual Review of Earth and Planetary Sciences
    doi: 10.1146/annurev-earth-060115-012211
    '''
    archive_files = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Muller_etal_2016_AREPS/Muller_etal_2016_AREPS_Supplement/Muller_etal_2016_AREPS_Supplement_v1.17.zip",
        known_hash="sha256:a671d6f2318b329e6f633065771fe37d29b6932e805e619039c4405dcb0fb91a",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Muller2016'),
    )
    # Root of the unzipped archive, then the versioned supplement folder.
    base_dir = f"{archive_files[0].split('Muller2016')[0]}/Muller2016/"
    supplement_dir = f"{base_dir}/Muller_etal_2016_AREPS_Supplement_v1.17"

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Muller++2016')
    model.add_rotation_model(f'{supplement_dir}/Global_EarthByte_230-0Ma_GK07_AREPS.rot')
    model.add_static_polygons(f'{supplement_dir}/Shapefiles/StaticPolygons/Global_EarthByte_GPlates_PresentDay_StaticPlatePolygons_2015_v1.shp')
    model.add_coastlines(f'{supplement_dir}/Global_EarthByte_230-0Ma_GK07_AREPS_Coastlines.gpml')
    model.add_dynamic_polygons(f'{supplement_dir}/Global_EarthByte_230-0Ma_GK07_AREPS_PlateBoundaries.gpml')
    model.add_dynamic_polygons(f'{supplement_dir}/Global_EarthByte_230-0Ma_GK07_AREPS_Topology_BuildingBlocks.gpml')

    return model
def fetch_Muller2019(load=True):
    '''
    Load Pangea breakup reconstruction from Muller et al, (2019) Tectonics
    doi: https://doi.org/10.1029/2018TC005462
    '''
    archive_files = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Muller_etal_2019_Tectonics/Muller_etal_2019_PlateMotionModel/Muller_etal_2019_PlateMotionModel_v2.0_Tectonics.zip",
        known_hash="sha256:32c30c80cd165fe0d28b3fda44a8b7d42e660a2a95baf508bdf7d1666977be9d",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Muller2019'),
    )
    base_dir = '{:s}/Muller2019/Muller_etal_2019_PlateMotionModel_v2.0_Tectonics/'.format(
        archive_files[0].split('Muller2019')[0])

    # On a fresh download, delete the DeformingMeshPoints directory, which is
    # not referenced by anything loaded below.
    if _os.path.isdir('{:s}/DeformingMeshPoints'.format(base_dir)):
        import shutil
        shutil.rmtree('{:s}/DeformingMeshPoints'.format(base_dir))

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Muller++2019')

    # Every rotation file in this archive follows the '<stem>_2019_v2.rot'
    # naming pattern, so the file list is kept as data.
    for rotation_stem in ('Alps_Mesh_Rotations',
                          'Andes_Flat_Slabs_Rotations',
                          'Andes_Rotations',
                          'Australia_Antarctica_Mesh_Rotations',
                          'Australia_North_Zealandia_Rotations',
                          'Eurasia_Arabia_Mesh_Rotations',
                          'Global_250-0Ma_Rotations',
                          'North_America_Flat_Slabs_Rotations',
                          'North_America_Mesh_Rotations',
                          'North_China_Mesh_Rotations',
                          'South_Atlantic_Rotations',
                          'Southeast_Asia_Rotations'):
        model.add_rotation_model('{:s}/{:s}_2019_v2.rot'.format(base_dir, rotation_stem))

    model.add_continent_polygons('{:s}/StaticGeometries/ContinentalPolygons/Global_EarthByte_GPlates_PresentDay_ContinentalPolygons_2019_v1.shp'.format(base_dir))
    model.add_static_polygons('{:s}/StaticGeometries/StaticPolygons/Global_EarthByte_GPlates_PresentDay_StaticPlatePolygons_2019_v1.shp'.format(base_dir))
    model.add_coastlines('{:s}/StaticGeometries/Coastlines/Global_coastlines_2019_v1_low_res.shp'.format(base_dir))

    # Deforming meshes and topologies, all named '<stem>_2019_v2.gpml'.
    # ('Inactive_Meshes_and_Topologies' was commented out in the original
    # load list and is intentionally not included here.)
    topology_stems = (
        'Alps_Deforming_Mesh',
        'Alps_Mesh_Topologies',
        'America_Anyui_Deforming_Mesh',
        'America_Anyui_Mesh_Topologies',
        'Andes_Deforming_Mesh',
        'Andes_Flat_Slabs_Topologies',
        'Andes_Mesh_Topologies',
        'Arctic_Eurasia_Deforming_Mesh',
        'Australia_Antarctica_Deforming_Mesh',
        'Australia_Antarctica_Mesh_Topologies',
        'Australia_North_Zealandia_Deforming_Mesh',
        'Australia_North_Zealandia_Mesh_Topologies',
        'Baja_Deforming_Mesh',
        'Coral_Sea_Deforming_Mesh',
        'Coral_Sea_Topologies',
        'East_African_Rift_Deforming_Mesh_and_Topologies',
        'East-West_Gondwana_Deforming_Mesh_and_Topologies',
        'Ellesmere__Deforming_Mesh',
        'Eurasia_Arabia_Deforming_Mesh_and_Topologies',
        'Global_Mesozoic-Cenozoic_PlateBoundaries',
        'Greater_India_Deforming_Mesh',
        'Greater_India_Mesh_Topologies',
        'North_America_Mesh_Topologies',
        'North_Atlantic_Deforming_Mesh',
        'North_Atlantic_Mesh_Topologies',
        'North_China_Deforming_Mesh',
        'North_China_Mesh_Topologies',
        'Northern_Andes_Deforming_Mesh',
        'Northern_Andes_Mesh_Topologies',
        'Papua_New_Guinea_Deforming_Meshes',
        'Papua_New_Guinea_Mesh_Topologies',
        'Scotia_Deforming_Mesh_and_Topologies',
        'Siberia_Eurasia_Deforming_Mesh',
        'Siberia_Eurasia_Mesh_Topologies',
        'South_Atlantic_Deforming_Mesh',
        'South_Atlantic_Mesh_Topologies',
        'South_China_Mesh_Topologies',
        'South_China_Sea_Deforming_Mesh',
        'South_Zealandia_Deforming_Mesh',
        'South_Zealandia_Mesh_Topologies',
        'Southeast_Asia_Deforming_Mesh',
        'Southeast_Asia_Mesh_Topologies',
        'West_Antarctic_Zealandia_Deforming_Mesh',
        'West_Antarctica_Zealandia_Mesh_Topologies',
        'Western_North_America_Deforming_Mesh',
        'Western_Tethys_Deforming_Mesh',
        'Western_Tethys_Tectonic_Boundary_Topologies',
    )
    for topology_stem in topology_stems:
        model.add_dynamic_polygons('{:s}/{:s}_2019_v2.gpml'.format(base_dir, topology_stem))

    return model
def fetch_Pehrsson2015(load=True):
    '''
    Load Nuna reconstruction from Pehrsson et al, (2015),
    Geological Society of London Special Publications
    doi: http://doi.org/10.1144/SP424.5
    '''
    archive_files = _retrieve(
        url="https://www.geolsoc.org.uk/~/media/Files/GSL/shared/Sup_pubs/2015/18822_7.zip",
        known_hash="sha256:12e7ed7f1f736b0421a60c60151fed7b46ce028b3348f8bf39ba6d7916651b6f",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Pehrsson2015'),
    )
    base_dir = f"{archive_files[0].split('Pehrsson2015')[0]}/Pehrsson2015/"

    # Re-sort the supplied fixed-width rotation file by moving plate and time
    # to enable continuous reconstruction, writing a space-delimited copy.
    raw_rotations = _pd.read_fwf(
        f'{base_dir}/T_Rot_Model_Abs_25Ma_20131004.rot',
        colspecs=[(0, 7), (7, 17), (17, 28), (28, 40), (40, 51), (51, 56), (56, 81)],
        names=['MovingPlate', 'Time', 'Lat', 'Long', 'Angle', 'FixedPlate', 'Comment'])
    sorted_rotations = raw_rotations.sort_values(['MovingPlate', 'Time'])
    sorted_rotations.to_csv(f'{base_dir}/T_Rot_Model_Abs_25Ma_20131004_sort.rot',
                            sep=' ',
                            header=False,
                            index=False)

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Pehrsson++2015')
    model.add_rotation_model(f'{base_dir}/T_Rot_Model_Abs_25Ma_20131004_sort.rot')
    model.add_static_polygons(f'{base_dir}/PlatePolygons.shp')

    return model
def fetch_Seton2012(load=True):
    '''
    Load Pangea breakup reconstruction from Seton et al (2012)
    doi:10.1016/j.earscirev.2012.03.002

    Parameters
    ----------
    load : bool, optional
        Unused here; kept for signature consistency with the other
        fetch_* functions in this module.

    Returns
    -------
    ReconstructionModel
        Model with rotations, coastlines and dynamic polygons attached.
    '''
    # Download the archive (or reuse the pooch-cached copy) and unzip it.
    fnames = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp_data/Data_Collections/Seton_etal_2012_ESR.zip",
        known_hash="sha256:b117354f93296dc1035d6709c7d475bf9ad517dc3f882b1621ef68db712c603e",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Seton2012'),
    )
    # Root directory of the extracted archive inside the pooch cache.
    dirname = '{:s}/Seton2012/'.format(fnames[0].split('Seton2012')[0])

    # gprm is imported lazily inside each fetch function in this module.
    from gprm import ReconstructionModel as _ReconstructionModel
    reconstruction_model = _ReconstructionModel('Seton++2012')
    reconstruction_model.add_rotation_model('{:s}/Seton_etal_2012_ESR/Rotations/Seton_etal_ESR2012_2012.1.rot'.format(dirname))
    reconstruction_model.add_coastlines('{:s}/Seton_etal_2012_ESR/Coastlines/Seton_etal_ESR2012_Coastline_2012.1.gpml'.format(dirname))
    # NOTE(review): the dynamic polygons are loaded from the *coastline* file
    # (same path as the add_coastlines call above), unlike every other fetch_*
    # function in this module, which loads plate-boundary/topology files here.
    # This looks like a copy/paste slip -- confirm against the archive contents
    # before changing it.
    reconstruction_model.add_dynamic_polygons('{:s}/Seton_etal_2012_ESR/Coastlines/Seton_etal_ESR2012_Coastline_2012.1.gpml'.format(dirname))
    return reconstruction_model
def fetch_TorsvikCocks2017(load=True):
    '''
    Load Phanerozoic reconstruction from the book 'Earth History and
    Paleogeography' by Torsvik and Cocks (2017)
    doi: https://doi.org/10.1017/9781316225523
    '''
    archive_files = _retrieve(
        url="http://www.earthdynamics.org/earthhistory/bookdata/CEED6.zip",
        known_hash="sha256:9b6d6f8a9a6299a269fd16f07aeb48dc0b4d591743d6691b86fde7b550d1ce7b",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='TorsvikCocks2017'),
    )
    base_dir = f"{archive_files[0].split('TorsvikCocks2017')[0]}/TorsvikCocks2017/"

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Torsvik+Cocks2017')
    model.add_rotation_model(f'{base_dir}/Torsvik_Cocks_HybridRotationFile.rot')
    # Static polygons come in three parts: terranes, microcontinents, land.
    for polygon_file in ('CEED6_TERRANES.shp',
                         'CEED6_MICROCONTINENTS.shp',
                         'CEED6_LAND.gpml'):
        model.add_static_polygons(f'{base_dir}/{polygon_file}')
    # The land outline file doubles as the coastline geometry.
    model.add_coastlines(f'{base_dir}/CEED6_LAND.gpml')

    return model
def fetch_vanHinsbergen(load=True):
    '''
    Download global reconstructions compiled from Douwe van Hinsbergen's work

    NB CURRENTLY INCOMPLETE - the set of polygons/rotation files to load from
    the archive has not yet been established. The archive is still downloaded
    (and cached) so the data is available locally for manual inspection.

    Raises
    ------
    NotImplementedError
        Always, after the archive has been downloaded.
    '''
    _retrieve(
        url="http://www.geologist.nl/wp-content/uploads/2019/09/vanHinsbergen_GPlates_reconstructions.zip",
        known_hash="sha256:7ed6319f11b4f4626c8211359cfeb8b454cb4381a81ee368fa11effbf06c1eeb",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='vanHinsbergen'),
    )
    # The previous implementation passed placeholder (empty) file names such
    # as '<dirname>/' and '<dirname>/.shp' to the model object, which cannot
    # load; fail explicitly instead of returning a broken ReconstructionModel.
    raise NotImplementedError(
        'fetch_vanHinsbergen is incomplete: the set of polygon and rotation '
        'files to load from this archive has not been established yet')
def fetch_Young2019(load=True):
    '''
    Load 0-410 Ma reconstruction from Young et al (2019), Geoscience Frontiers
    doi: https://doi.org/10.1016/j.gsf.2018.05.011
    '''
    archive_files = _retrieve(
        url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Young_etal_2018_GeoscienceFrontiers/Young_etal_2018_GeoscienceFrontiers_GPlatesPlateMotionModel.zip",
        known_hash="sha256:3cffdd988b802ad8961aad65901a95890a7b0058a3de3c353cf46986cca9f1f1",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Young2019'),
    )
    # Archive root, then the folder holding the plate motion model itself.
    base_dir = f"{archive_files[0].split('Young2019')[0]}/Young2019/"
    model_dir = f'{base_dir}/Young_etal_2018_GeoscienceFrontiers_GPlatesPlateMotionModel'

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Young++2019')
    model.add_rotation_model(f'{model_dir}/Global_410-250Ma_Young_et_al.rot')
    model.add_rotation_model(f'{model_dir}/Global_250-0Ma_Young_et_al.rot')
    model.add_static_polygons(f'{model_dir}/StaticPolygons/Global_GPlates_PresentDay_StaticPlatePolygons_Young_et_al.shp')
    model.add_continent_polygons(f'{model_dir}/ContinentalPolygons/PresentDay_ContinentalPolygons_Young_et_al.shp')
    model.add_coastlines(f'{model_dir}/Coastlines/Global_coastlines_Young_et_al_low_res.shp')
    for topology_file in ('Global_Mesozoic-Cenozoic_plate_boundaries_Young_et_al.gpml',
                          'Global_Paleozoic_plate_boundaries_Young_et_al.gpml',
                          'TopologyBuildingBlocks_Young_et_al.gpml'):
        model.add_dynamic_polygons(f'{model_dir}/{topology_file}')

    return model
def fetch_Scotese(load=True):
    '''
    Load the Scotese (2008) rotation model and present-day continental
    polygons, distributed in the supplementary archive extracted to
    'Cao2018_SM' (shared with fetch_Golonka).
    '''
    archive_files = _retrieve(
        url="https://static.cambridge.org/content/id/urn:cambridge.org:id:article:S0016756818000110/resource/name/S0016756818000110sup001.zip",
        known_hash="sha256:e01b19cee7c65a011ca4c42f187aba0ec24c1a87b842e2061eab9d22dc52ca80",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Cao2018_SM'),
    )
    base_dir = '{:s}/'.format(_os.path.split(archive_files[0])[0])

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Scotese2008')
    model.add_rotation_model(f'{base_dir}/Rotation_models/Scotese_2008_Rotation.rot')
    # The same shapefile serves as both continent and static polygons.
    polygon_file = f'{base_dir}/Rotation_models/Scotese_2008_PresentDay_ContinentalPolygons.shp'
    model.add_continent_polygons(polygon_file)
    model.add_static_polygons(polygon_file)
    return model
def fetch_Golonka(load=True):
    '''
    Load reconstruction of Golonka, 2007, spanning the time range 0-5XX Ma,
    distributed in the same supplementary archive as fetch_Scotese
    (extracted to 'Cao2018_SM').
    '''
    archive_files = _retrieve(
        url="https://static.cambridge.org/content/id/urn:cambridge.org:id:article:S0016756818000110/resource/name/S0016756818000110sup001.zip",
        known_hash="sha256:e01b19cee7c65a011ca4c42f187aba0ec24c1a87b842e2061eab9d22dc52ca80",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Cao2018_SM'),
    )
    base_dir = '{:s}/'.format(_os.path.split(archive_files[0])[0])

    from gprm import ReconstructionModel as _ReconstructionModel
    model = _ReconstructionModel('Golonka2007')
    model.add_rotation_model(f'{base_dir}/Rotation_models/Golonka_2007_Rotation.rot')
    # The same shapefile serves as both continent and static polygons.
    polygon_file = f'{base_dir}/Rotation_models/Golonka_2007_PresentDay_ContinentalPolygons.shp'
    model.add_continent_polygons(polygon_file)
    model.add_static_polygons(polygon_file)
    return model
def fetch_Clennett(load=True, model_case='M2019'):
"""
Load reconstruction files associated with the study of Clennett et al (2020),
Geochemistry, Geophysics, Geosystems
doi: https://doi.org/10.1029/2020GC009117
model case must be either:
- 'M2019' (default, version based on deforming model of Muller et al 2019), or
- 'S2013' (rigid topological model based on Shephard et al, 2013)
"""
if model_case=='M2019':
fnames = _retrieve(
url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Clennett_etal_2020_G3/Clennett_etal_2020_M2019.zip",
known_hash="sha256:1ad6a29ceb396b581930734b1f6e8409e52dc4e2ae9658156ac2dd732cb82ab8",
downloader=_HTTPDownloader(progressbar=True),
path=_os_cache('gprm'),
processor=_Unzip(extract_dir='Clennett2020_M2019'),
)
#dirname = _os.path.split(fnames[0])[0]
#dirname = '{:s}/Clennett_etal_2020_M2019/'.format(_os.path.split(fnames[0])[0])
dirname = '{:s}/Clennett2020_M2019/Clennett_etal_2020_M2019/'.format(fnames[0].split('Clennett2020_M2019')[0])
# if downloading for first time, remove the unwanted MeshPoint files
if _os.path.isdir('{:s}/DeformingMeshPoints'.format(dirname)):
import shutil
shutil.rmtree('{:s}/DeformingMeshPoints'.format(dirname))
from gprm import ReconstructionModel as _ReconstructionModel
reconstruction_model = _ReconstructionModel('Clennett++M2019')
reconstruction_model.add_rotation_model('{:s}/Global_250-0Ma_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/Clennett_etal_2020_Rotations.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Alps_Mesh_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Andes_Flat_Slabs_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Andes_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Australia_Antarctica_Mesh_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Australia_North_Zealandia_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Eurasia_Arabia_Mesh_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/North_America_Flat_Slabs_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/North_America_Mesh_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/North_China_Mesh_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/South_Atlantic_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_rotation_model('{:s}/DeformingMeshes/Southeast_Asia_Rotations_2019_v2.rot'.format(dirname))
reconstruction_model.add_continent_polygons('{:s}/Clennett_etal_2020_Terranes.gpml'.format(dirname))
#reconstruction_model.add_static_polygons('{:s}/StaticGeometries/StaticPolygons/Global_EarthByte_GPlates_PresentDay_StaticPlatePolygons_2019_v1.shp'.format(dirname))
reconstruction_model.add_coastlines('{:s}/Clennett_etal_2020_Coastlines.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/Clennett__etal_2020_NAm_boundaries.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/Clennett_etal_2020_Plates.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Alps_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Alps_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/America_Anyui_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/America_Anyui_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Andes_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Andes_Flat_Slabs_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Andes_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Arctic_Eurasia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Australia_Antarctica_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Australia_Antarctica_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Australia_North_Zealandia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Australia_North_Zealandia_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Baja_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Coral_Sea_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Coral_Sea_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/East_African_Rift_Deforming_Mesh_and_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/East-West_Gondwana_Deforming_Mesh_and_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Ellesmere__Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Eurasia_Arabia_Deforming_Mesh_and_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Greater_India_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Greater_India_Mesh_Topologies_2019_v2.gpml'.format(dirname))
#reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Inactive_Meshes_and_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/North_America_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/North_Atlantic_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/North_Atlantic_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/North_China_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/North_China_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Northern_Andes_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Northern_Andes_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Papua_New_Guinea_Deforming_Meshes_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Papua_New_Guinea_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Scotia_Deforming_Mesh_and_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Siberia_Eurasia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Siberia_Eurasia_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_Atlantic_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_Atlantic_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_China_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_China_Sea_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_Zealandia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/South_Zealandia_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Southeast_Asia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Southeast_Asia_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/West_Antarctic_Zealandia_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/West_Antarctica_Zealandia_Mesh_Topologies_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Western_North_America_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Western_Tethys_Deforming_Mesh_2019_v2.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/DeformingMeshes/Western_Tethys_Tectonic_Boundary_Topologies_2019_v2.gpml'.format(dirname))
return reconstruction_model
elif model_case=='S2013':
fnames = _retrieve(
url="https://www.earthbyte.org/webdav/ftp/Data_Collections/Clennett_etal_2020_G3/Clennett_etal_2020_S2013.zip",
known_hash="sha256:7749aac19c2d07c80a2cd77ba6a9038c8911fa8804c1d4adb3c9da7cb635b691",
downloader=_HTTPDownloader(progressbar=True),
path=_os_cache('gprm'),
processor=_Unzip(extract_dir='Clennett2020_S2013'),
)
#dirname = '{:s}/Clennett_etal_2020_S2013/'.format(_os.path.split(fnames[0])[0])
dirname = '{:s}/Clennett2020_S2013/Clennett_etal_2020_S2013/'.format(fnames[0].split('Clennett2020_S2013')[0])
from gprm import ReconstructionModel as _ReconstructionModel
reconstruction_model = _ReconstructionModel('Clennett++S2013')
reconstruction_model.add_rotation_model('{:s}/Clennett_etal_2020_Rotations.rot'.format(dirname))
reconstruction_model.add_continent_polygons('{:s}/Clennett_etal_2020_Terranes.gpml'.format(dirname))
reconstruction_model.add_coastlines('{:s}/Clennett_etal_2020_Coastlines.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/Clennett_etal_2020_NAm_boundaries.gpml'.format(dirname))
reconstruction_model.add_dynamic_polygons('{:s}/Clennett_etal_2020_Plates.gpml'.format(dirname))
return reconstruction_model
else:
ValueError('Unrecognised model name {}'.format(model_case))
| [
6738,
745,
5374,
1330,
28686,
62,
23870,
355,
4808,
418,
62,
23870,
198,
6738,
745,
5374,
1330,
19818,
355,
4808,
1186,
30227,
198,
6738,
745,
5374,
1330,
7154,
51,
5760,
593,
29356,
355,
4808,
6535,
51,
5760,
593,
29356,
198,
6738,
7... | 2.39438 | 16,725 |
from pathlib import Path
def read_commands(path: Path) -> list[tuple[str, int]]:
    """Parse a submarine command file into (command, amount) pairs.

    Each line is expected to hold two whitespace-separated fields: a
    command word and an integer amount, e.g. ``forward 5``.
    """
    with open(path, "r") as handle:
        pairs = [
            (word, int(value))
            for word, value in (line.split() for line in handle)
        ]
    return pairs
if __name__ == "__main__":
    # ``main`` is not defined in this excerpt -- presumably defined
    # elsewhere in the original file; confirm before running standalone.
    main()
| [
6738,
3108,
8019,
1330,
10644,
628,
628,
198,
4299,
1100,
62,
9503,
1746,
7,
6978,
25,
10644,
8,
4613,
1351,
58,
83,
29291,
58,
2536,
11,
493,
60,
5974,
198,
220,
220,
220,
37227,
5569,
850,
9729,
422,
257,
2393,
526,
15931,
198,
... | 2.513514 | 148 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The flow collector daemon
"""
import argparse
import logging
import socketserver
import sys
from flowproc import process
from flowproc import __version__
__author__ = "Tobias Frei"
__copyright__ = "Tobias Frei"
__license__ = "mit"
logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse command line parameters

    Args:
        args ([str]): command line parameters as list of strings

    Returns:
        :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Flow collector daemon command line interface",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        dest="socket",
        choices=["udp", "tcp"],
        # bug fix: a positional argument is mandatory unless nargs="?" is
        # given, so the declared default="udp" could never take effect
        nargs="?",
        default="udp",
        help="select server socket type (future should bring sctp)",
    )
    parser.add_argument(
        "--host",
        default="0.0.0.0",
        help="set address to listen on",
        action="store",
        metavar="ipaddr",
    )
    parser.add_argument(
        "-p",
        "--port",
        default=2055,
        help="set port to listen on",
        type=int,
        action="store",
        metavar="int",
    )
    parser.add_argument(
        "--logfile",
        default="stderr",
        help="set file to log to",
        action="store",
        metavar="path",
    )
    parser.add_argument(
        "-v",
        dest="loglevel",
        help="set loglevel INFO",
        action="store_const",
        const=logging.INFO,
    )
    parser.add_argument(
        "-vv",
        dest="loglevel",
        help="set loglevel DEBUG",
        action="store_const",
        const=logging.DEBUG,
    )
    parser.add_argument(
        "-V",
        "--version",
        help="print version and exit",
        action="version",
        version="flowproc {ver}".format(ver=__version__),
    )
    # TODO add options to select output processing
    return parser.parse_args(args)
def start_listener(socket_type, addr):
    """Start a blocking socketserver loop for the collector.

    Args:
        socket_type `str`   for the time being just UDP
        addr        `str`,`int` tuple (host, port)
    """
    # NOTE(review): _NetFlowUDPHandler is not defined or imported in the
    # visible part of this module -- presumably provided elsewhere (e.g.
    # via ``flowproc.process``); confirm before refactoring.
    if socket_type.upper() == "UDP":
        s = socketserver.UDPServer(addr, _NetFlowUDPHandler)
        s.serve_forever()  # blocks until interrupted
    else:
        # tcp/sctp are accepted by the CLI but not implemented yet
        logger.error("There's no TCP without IPFIX support, exiting...")
def setup_logging(loglevel):
    """Configure the root logger to emit to stdout.

    Args:
        loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)-9s %(name)s: %(message)s",
        datefmt="%b %d %Y %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls

    Args:
        args ([str]): command line parameter list
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    logger.info("Starting version {}".format(__version__,))
    logger.info("Args {}".format(vars(args)))
    try:
        # blocks inside serve_forever() until interrupted
        start_listener(args.socket, (args.host, args.port))
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the daemon
        logger.info("Shutting down...")
def run():
    """Entry point for console_scripts

    Strips the program name from ``sys.argv`` and delegates to :func:`main`.
    """
    main(sys.argv[1:])
if __name__ == "__main__":
run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
464,
5202,
22967,
33386,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
37037,
1849... | 2.28933 | 1,462 |
from functools import wraps
from abc import ABCMeta, abstractmethod
def dispatch_loader(scraper, loader_name):
    """
    Decorator that enforces one time loading for scrapers. The one time loading is applied
    to partial loaders, e.g. only parse and load the home team roster once. This is not
    meant to be used directly.

    :param scraper: property name (string) containing an object of type :py:class:`scrapr.ReportLoader`
    :param loader_name: name of method that does the scraping/parsing
    :returns: function wrapper
    """
    # dotted lookup key, e.g. "_rep_reader.parse_matchup"
    l = '.'.join([scraper, loader_name])
    # NOTE(review): ``wrapper`` is not defined in this excerpt -- the inner
    # wrapper function appears to have been elided; restore it before use.
    return wrapper
class RepScrWrap(object):
    """
    Lazy matchup reader base. Reports can be read in pieces. Only need to read matchup on read of first part. Serves
    as the base class for all wrappers of report scrapers.

    :param game_key: :py:class:`.GameKey` of the game being loaded
    :param rep_reader: object of type :py:class:`nhlscrapi.scrapr.ReportLoader`
    """
    # NOTE(review): the constructor documented above is not visible in this
    # excerpt -- it presumably assigns ``self._rep_reader``; confirm.
    @property
    @dispatch_loader('_rep_reader', 'parse_matchup')
    def matchup(self):
        """
        Return the game meta information displayed in report banners including team names,
        final score, game date, location, and attendance. Data format is

        .. code:: python
            {
                'home': home,
                'away': away,
                'final': final,
                'attendance': att,
                'date': date,
                'location': loc
            }
        :returns: matchup banner info
        :rtype: dict
        """
        # dispatch_loader ensures the underlying report is parsed only once
        return self._rep_reader.matchup
    def load_all(self):
        """
        Loads all parts of the report.

        :returns: ``self`` or ``None`` if load fails
        """
        # returning self allows fluent chaining on success
        if self._rep_reader.parse():
            return self
        else:
            return None
| [
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
198,
4299,
27965,
62,
29356,
7,
1416,
38545,
11,
40213,
62,
3672,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4280,
273,
... | 2.314389 | 827 |
import datetime
import importlib
import logging
import os
import platform
import socket
import sys
import pikka_bird_collector
from .config import Config
# collector plugin module names living under pikka_bird_collector.collectors
COLLECTORS = [
    'system', # keep first; sort rest
    'mongodb',
    'mysql',
    'postgresql',
    'rabbitmq',
    'redis']
# module path prefix used both for importing and for sys.modules lookups
COLLECTORS_MODULE_P = 'pikka_bird_collector.collectors.'
# import every collector module up front so Collector.collect() can find
# them in sys.modules by name
for c in COLLECTORS:
    importlib.import_module(COLLECTORS_MODULE_P + c)
class Collector():
    """
    Main collector, which calls individual service Collectors and merges the
    results. The environment, containing such things as PID, hostname, and
    kernel version, are passed to each collector.
    """
    def __init__(self, config=None, logger=None):
        """
        PARAMETERS:
            config : string
                filename of config to parse
            logger : logger
                logger
        """
        self.config = Config(config)
        self.logger = logger or logging.getLogger()
        # NOTE(review): __set_environment is not defined in this excerpt --
        # presumably a private method further down the file that populates
        # self.environment (PID, hostname, kernel, ...); confirm.
        self.__set_environment()
    def collect(self):
        """
        Collect metrics for all invididual service Collectors, returning the
        reports in a format suitable for sending to the Server, complete
        with dates converted to strings. All times are in UTC, always.

        RETURN:
            : dict
                collected reports, ready for sending to the Server
        """
        reports = {}
        collecting_at = datetime.datetime.utcnow()
        self.logger.info("COLLECTING")
        for c in COLLECTORS:
            # collector classes are named after their module, title-cased
            # (e.g. module "mysql" -> class "Mysql")
            klass = getattr(sys.modules[COLLECTORS_MODULE_P + c], c.title())
            service = klass.service()
            collector = klass(self.environment, self.config.settings(service))
            if collector.enabled():
                self.logger.info("COLLECTING %s" % service)
                reports[service] = collector.collect()
                self.logger.debug("METRICS %s %s" % (service, reports[service]))
            else:
                # disabled collectors are skipped silently at info level
                self.logger.debug("SKIPPED %s" % service)
        collected_at = datetime.datetime.utcnow()
        self.logger.info("COLLECTED (%d s)" % (collected_at - collecting_at).seconds)
        return {
            'collecting_at': collecting_at.isoformat(),
            'collected_at': collected_at.isoformat(),
            'environment': self.environment,
            'reports': reports}
| [
11748,
4818,
8079,
198,
11748,
1330,
8019,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
17802,
198,
11748,
25064,
198,
198,
11748,
279,
1134,
4914,
62,
16944,
62,
33327,
273,
198,
6738,
764,
11250,
1330,
17056,
628,... | 2.109555 | 1,214 |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.ConstructieElement import ConstructieElement
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.KlDamwandMateriaal import KlDamwandMateriaal
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
from OTLMOW.GeometrieArtefact.LijnGeometrie import LijnGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Damwand(ConstructieElement, LijnGeometrie):
    """Een grond- en/of waterkerende constructie, die bestaat uit een verticaal in de grond geplaatste wand."""
    # NOTE(review): this OTLClassCreator-generated excerpt looks truncated --
    # each ``@<name>.setter`` decorator is immediately followed by the next
    # ``@property`` with no setter function in between (and the final
    # ``@totaleLengte.setter`` dangles). Restore the generated setter bodies
    # before using this class.
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Damwand'
    """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
    @property
    def isWaterdicht(self):
        """Geeft aan of de damwand al dan niet waterdicht is."""
        # backing attribute presumably initialized in the (elided) __init__
        return self._isWaterdicht.get_waarde()
    @isWaterdicht.setter
    @property
    def materiaal(self):
        """Het materiaal waaruit de damwand bestaat."""
        return self._materiaal.get_waarde()
    @materiaal.setter
    @property
    def oppervlakte(self):
        """De totale oppervlakte van de damwandconstructie in vierkante meter."""
        return self._oppervlakte.get_waarde()
    @oppervlakte.setter
    @property
    def profiellengte(self):
        """De lengte van één damwandprofiel."""
        return self._profiellengte.get_waarde()
    @profiellengte.setter
    @property
    def totaleLengte(self):
        """De totale lengte van de damwandconstructie in lopende meter."""
        return self._totaleLengte.get_waarde()
    @totaleLengte.setter
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
14881,
9487,
274,
13,
2394,
43,
8086,
822,
84,
315,
1330,
440,
14990,
8086,
822,
84,
315,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,... | 2.395225 | 754 |
import os
import sys
import time
import click
import logging
import pkg_resources
from livereload import Server, shell
from . import Mambo
from .mambo import PAGE_FORMAT
from .__about__ import *
logging.basicConfig(filename='./error.log', level=logging.ERROR, format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
CWD = os.getcwd()
TPL_HEADER = """
---
title: Page Title
description: Page Description
---
"""
TPL_BODY = {
# HTML
"html": """
<div>
<h1>{{ page.title }}</h1>
</div>
""",
# MD
"md": """
# My markdown Mambo!
"""
}
def copy_resource(src, dest):
    """
    Recursively copy packaged skeleton data to a destination directory.

    :param src: resource path inside the ``mambo`` package (e.g. "skel/")
    :param dest: filesystem directory to copy into
    """
    package_name = "mambo"
    dest = (dest + "/" + os.path.basename(src)).rstrip("/")
    if pkg_resources.resource_isdir(package_name, src):
        if not os.path.isdir(dest):
            os.makedirs(dest)
        # bug fix: the original passed __name__ (the CLI module, e.g.
        # "mambo.cli") here and below instead of the package name used by
        # resource_isdir; use package_name consistently for all lookups
        for res in pkg_resources.resource_listdir(package_name, src):
            copy_resource(src + "/" + res, dest)
    else:
        # copy plain files, skipping compiled bytecode and existing files
        if not os.path.isfile(dest) \
                and os.path.splitext(src)[1] not in [".pyc"]:
            with open(dest, "wb") as f:
                f.write(pkg_resources.resource_string(package_name, src))
        else:
            print("File exists: %s " % dest)
@click.group()
def cli():
    """
    Mambo: An elegant static site generator
    """
    # group entry point only; subcommands are registered via @cli.command
    pass
@cli.command("version")
def version():
    """Print the version of Mambo"""
    print(__version__)
    # NOTE(review): footer() is not defined in this excerpt -- presumably a
    # shared CLI output helper defined elsewhere in the module; confirm.
    footer()
@cli.command("setup")
@click.argument("sitename")
def create_site(sitename):
    """Create a new site directory and init Mambo"""
    # NOTE(review): title/info/error_exit/done/stamp_mambo_current_version
    # are not defined in this excerpt -- presumably shared CLI helpers
    # elsewhere in the module; confirm they exist.
    title('Create new site')
    mambo_conf = os.path.join(CWD, Mambo.config_yml)
    # refuse to nest a new site inside an existing mambo site
    if os.path.isfile(mambo_conf):
        error_exit("Can't create new site in a directory that contain 'mambo.yml'")
    sitepath = os.path.join(CWD, sitename)
    if os.path.isdir(sitepath):
        error_exit("Site directory '%s' exists already!" % sitename)
    else:
        info("Creating site: %s..." % sitename)
        os.makedirs(sitepath)
        # copy the bundled skeleton and record the creating Mambo version
        copy_resource("skel/", sitepath)
        stamp_mambo_current_version(sitepath)
        info("Site created successfully!")
        info("CD into '%s' and run 'mambo serve' to view the site" % sitename)
        done()
@cli.command("init")
def init():
    """Initialize Mambo in the current directory """
    title("Init Mambo...")
    mambo_conf = os.path.join(CWD, Mambo.config_yml)
    # refuse to clobber an already-initialized site
    if os.path.isfile(mambo_conf):
        error_exit("Mambo is already initialized in '%s'. Or delete 'mambo.yml' if it's a mistake " % CWD)
    else:
        # same skeleton copy as `mambo setup`, but into the current dir
        copy_resource("skel/", CWD)
        stamp_mambo_current_version(CWD)
        info("Mambo init successfully!")
        info("Run 'mambo serve' to view the site")
        done()
@cli.command("create")
@click.argument("pagenames", nargs=-1)
def create_page(pagenames):
    """Create new pages"""
    M = Mambo(CWD)
    defaultExt = "html"
    pages = []
    title("Creating new pages...")
    # Prepare and check the files
    # first pass: validate every requested page before writing anything
    for pagename in pagenames:
        page = pagename.lstrip("/").rstrip("/")
        _, _ext = os.path.splitext(pagename)
        # If the file doesn't have an extension, we'll just create one
        if not _ext or _ext == "":
            page += ".%s" % defaultExt
        # Destination file
        dest_file = os.path.join(M.pages_dir, page)
        if not page.endswith(PAGE_FORMAT):
            error_exit("Invalid file format: '%s'. Only '%s'" %
                       (page, " ".join(PAGE_FORMAT)))
        elif os.path.isfile(dest_file):
            error_exit("File exists already: '%s'" % dest_file)
        else:
            pages.append((page, dest_file))
    # second pass: write each validated page from the templates
    for page, dest_file in pages:
        # Making sure dir is created
        dest_dir = os.path.dirname(dest_file)
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        # markup extension selects the body template (TPL_BODY key)
        markup = os.path.splitext(page)[1].strip('.')
        content = TPL_HEADER
        content += TPL_BODY[markup]
        with open(dest_file, "w") as f:
            f.write(content)
        log("- %s" % page)
    done()
@cli.command("build")
@click.option("-i", "--info", is_flag=True)
@click.option("--env", default=None)
def build(info, env):
    """Build the site"""
    # NOTE(review): the --info flag binds a local ``info`` here, shadowing
    # the info() output helper used by the other commands; harmless in this
    # body (only log() is used) but worth renaming.
    title("Building site...")
    M = Mambo(CWD, {"env": env, "build": "build"})
    log('Name: %s' % M.site_config.get('name'))
    log('Env: %s' % M.site_env)
    log('Base Url: %s' % M.base_url)
    log('Static Url: %s' % M.static_url)
    log('Timezone: %s' % M.GLOBAL_TIMEZONE)
    log('Sitemap: %s ' % ('Yes' if M.build_config.get('generate_sitemap') else 'No'))
    log('')
    M.build(print_info=info)
    done()
@cli.command('serve')
@click.option("-p", "--port", default=None)
@click.option("--no-livereload", default=None)
@click.option("--open-url", default=None)
@click.option("--env", default=None)
def serve(port, no_livereload, open_url, env):
    """Serve the site """
    M = Mambo(CWD, {"env": env, "build": "serve"})
    # CLI flags win; otherwise fall back to the site config defaults
    if not port:
        port = M.config.get("serve.port", 8000)
    if no_livereload is None:
        no_livereload = True if M.config.get(
            "serve.livereload") is False else False
    if open_url is None:
        open_url = False if M.config.get(
            "serve.open_url") is False else True
    title('Serving on port %s' % port)
    log('Env: %s' % M.site_env)
    log('Base Url: %s' % M.base_url)
    log('Static Url: %s' % M.static_url)
    log("Livereload: %s" % ("OFF" if no_livereload else "ON"))
    M.build()
    server = Server()
    # NOTE(review): build_static and build_pages watch callbacks are not
    # defined in this excerpt -- presumably module-level rebuild helpers;
    # confirm they exist.
    if no_livereload is False:
        server.watch(M.static_dir + "/", build_static)
        for c in [M.pages_dir, M.templates_dir, M.content_dir, M.data_dir]:
            server.watch(c + "/", build_pages)
    server.serve(open_url_delay=open_url, port=str(port), root=M.build_dir)
@cli.command("clean")
def clean():
    """Clean the build dir """
    title("Cleaning build dir...")
    # removes the generated output only; sources are untouched
    Mambo(CWD).clean_build_dir()
    done()
| [
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
3904,
198,
11748,
18931,
198,
11748,
279,
10025,
62,
37540,
198,
6738,
2107,
260,
2220,
1330,
9652,
11,
7582,
198,
6738,
764,
1330,
337,
22651,
198,
6738,
764,
76,
22651... | 2.245255 | 2,687 |
import os
from cloudshell.cm.ansible.domain.cancellation_sampler import CancellationSampler
from cloudshell.cm.ansible.domain.http_request_service import HttpRequestService
from file_system_service import FileSystemService
from logging import Logger
from models import HttpAuth
| [
11748,
28686,
198,
198,
6738,
6279,
29149,
13,
11215,
13,
504,
856,
13,
27830,
13,
66,
590,
297,
341,
62,
37687,
20053,
1330,
43780,
297,
341,
16305,
20053,
198,
6738,
6279,
29149,
13,
11215,
13,
504,
856,
13,
27830,
13,
4023,
62,
2... | 3.733333 | 75 |
"""
Cross validation functions supporting both MultichannelPipeline and
scikit-learn predictors.
"""
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import balanced_accuracy_score, explained_variance_score
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
import pipecaster.utils as utils
import pipecaster.config as config
import pipecaster.parallel as parallel
__all__ = ['cross_val_score', 'cross_val_predict']
def _fit_predict_split(predictor, Xs, y, train_indices, test_indices,
                       predict_method='predict', transform_method=None,
                       score_method=None, fit_params=None):
    """
    Clone, fit, and predict with a single channel or multichannel
    predictor on one cross validation split.

    Parameters
    ----------
    predictor : estimator/predictor instance
        Classifier or regressor implementing the scikit-learn interfaces.
    Xs : list or matrix
        Feature matrices (a list with None spaceholders for multichannel
        predictors, a single matrix otherwise).
    y : list/array
        Targets.
    train_indices, test_indices : arrays
        Sample indices defining the split.
    predict_method, transform_method, score_method : str or None
        Prediction method name for each slot, 'auto' to resolve one from
        the precedence lists in pipecaster.config, or None to skip the slot.
    fit_params : dict, default=None
        Auxiliary parameters passed to fit.

    Returns
    -------
    (dict, array)
        Mapping of each requested slot ('predict'/'transform'/'score') to
        {'method': resolved_name, 'y_pred': predictions}, plus the test
        indices of the split.
    """
    is_classifier = utils.is_classifier(predictor)
    model = utils.get_clone(predictor)
    fit_params = {} if fit_params is None else fit_params

    if utils.is_multichannel(model):
        X_trains = [X[train_indices] if X is not None else None
                    for X in Xs]
        model.fit(X_trains, y[train_indices], **fit_params)
    else:
        model.fit(Xs[train_indices], y[train_indices], **fit_params)

    def resolve(method, precedence, label):
        # Resolve a possibly-'auto' method name and make the predictions.
        # For the 'prediction' slot, 'auto' on a regressor short-circuits
        # to plain predict; the transform/score precedence lists already
        # end in 'predict', so they serve both model types.
        if method == 'auto':
            if label == 'prediction' and not is_classifier:
                return 'predict', _predict(model, Xs, test_indices,
                                           'predict')
            for m in precedence:
                try:
                    y_pred = _predict(model, Xs, test_indices, m)
                except Exception:
                    # method not supported by this model; try the next one
                    continue
                if y_pred is not None:
                    return m, y_pred
            raise AttributeError('failed to auto-detect {} method'
                                 .format(label))
        return method, _predict(model, Xs, test_indices, method)

    # Note: the original had unreachable
    # `elif <x>_method == 'auto' and is_classifier == False` branches for
    # transform and score (the preceding `if <x>_method == 'auto'` already
    # covered them) plus bare `except: pass` clauses; both fixed by the
    # shared resolver above.
    split_predictions = {}
    if predict_method is not None:
        method, y_pred = resolve(predict_method,
                                 config.predict_method_precedence,
                                 'prediction')
        split_predictions['predict'] = {'method': method, 'y_pred': y_pred}
    if transform_method is not None:
        method, y_pred = resolve(transform_method,
                                 config.transform_method_precedence,
                                 'transform')
        split_predictions['transform'] = {'method': method, 'y_pred': y_pred}
    if score_method is not None:
        method, y_pred = resolve(score_method,
                                 config.score_method_precedence,
                                 'score')
        split_predictions['score'] = {'method': method, 'y_pred': y_pred}

    return split_predictions, test_indices
def cross_val_predict(predictor, Xs, y=None, groups=None,
                      predict_method='predict', transform_method=None,
                      score_method=None, cv=None,
                      combine_splits=True, n_processes=1, fit_params=None):
    """
    Analog of the scikit-learn cross_val_predict function that supports both
    single and multichannel cross validation.

    Parameters
    ----------
    predictor : estimator/predictor instance
        Classifier or regressor that implements the scikit-learn estimator and
        predictor interfaces.
    Xs : list
        List of feature matrices and None spaceholders.
    y : list/array, default=None
        Optional targets for supervised ML.
    groups: list/array, default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used if cv parameter is set to GroupKFold.
    predict_method : str, default='predict'
        - Name of the method used for predicting.
        - If 'auto' :
            - If classifier : method picked using
              config.predict_method_precedence order (default:
              predict->predict_proba->predict_log_proba->decision_function).
            - If regressor : 'predict'
    transform_method : str, default=None
        - Name of the prediction method to call when transforming (e.g. when
          outputting meta-features).
        - If 'auto' :
            - If classifier : method picked using
              config.transform_method_precedence order (default:
              predict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'
    score_method : str, default=None
        - Name of prediction method used when scoring predictor performance.
        - If 'auto' :
            - If classifier : method picked using
              config.score_method_precedence order (default:
              ppredict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'
    cv : int, or callable, default=5
        - Set the cross validation method:
        - If int > 1: Use StratifiedKfold(n_splits=internal_cv) for
          classifiers or Kfold(n_splits=internal_cv) for regressors.
        - If None or 5: Use 5 splits with the default split generator.
        - If callable: Assumes interface like Kfold scikit-learn.
    combine_splits : bool, default=True
        - If True: Concatenate results for splits into a single array.
        - If False: Return results for separate splits.
    n_processes : int or 'max', default=1
        - If 1: Run all split computations in a single process.
        - If 'max': Run splits in multiple processes, using all
          available CPUs.
        - If int > 1: Run splits in multiple processes, using up to
          n_processes number of CPUs.
    fit_params : dict, default={}
        Auxiliary parameters sent to pipe fit_transform and fit methods.

    Returns
    -------
    dict
        - If combine_splits is True :
          {'predict':y_pred, 'transform':y_pred, 'score':y_pred)}
          Where y_pred = np.array(n_samples) or None if the type of
          prediction was not requested.  There will not be dict entries for
          prediction method parameters set to None (e.g. no 'transform' key
          when transform_method=None).
        - If combine_splits is False :
          {'predict':[], 'transform':[],
           'score':[], 'indices':[])}
          Where empty brackets indicate identically ordered lists with one
          list item per split.  List items are either prediction arrays or
          sample indices for the splits.  There will not be dict entries for
          prediction method parameters set to None (e.g. no 'transform' key
          when transform_method=None).

    Examples
    --------
    ::

        import pipecaster as pc
        from sklearn.ensemble import GradientBoostingClassifier
        from sklearn.svm import SVC

        Xs, y, X_types = pc.make_multi_input_classification(n_informative_Xs=3,
                                                            n_random_Xs=7)
        clf = pc.MultichannelPipeline(n_channels=10)
        clf.add_layer(pc.ChannelEnsemble(GradientBoostingClassifier(), SVC()))

        predictions = pc.cross_val_predict(clf, Xs, y)
        predictions['predict']['y_pred']
        # output: [1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, ...]
        y
        # output: [1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, ...]
    """
    is_classifier = utils.is_classifier(predictor)
    # encode labels to 0..n-1 for the estimator; decoded again at the end
    if is_classifier and y is not None:
        classes_, y = np.unique(y, return_inverse=True)
    cv = int(5) if cv is None else cv
    # an int cv selects a splitter appropriate to the task
    if type(cv) == int:
        if groups is not None:
            cv = GroupKFold(n_splits=cv)
        else:
            if utils.is_classifier(predictor):
                cv = StratifiedKFold(n_splits=cv)
            else:
                cv = KFold(n_splits=cv)
    is_multichannel = utils.is_multichannel(predictor)
    # multichannel: split on the first live (non-None) matrix
    if is_multichannel:
        live_Xs = [X for X in Xs if X is not None]
        splits = list(cv.split(live_Xs[0], y, groups))
    else:
        splits = list(cv.split(Xs, y, groups))
    args_list = [(predictor, Xs, y, train_indices, test_indices,
                  predict_method, transform_method, score_method, fit_params)
                 for train_indices, test_indices in splits]
    n_jobs = len(args_list)
    n_processes = 1 if n_processes is None else n_processes
    if (type(n_processes) == int and n_jobs < n_processes):
        n_processes = n_jobs
    if n_processes == 'max' or n_processes > 1:
        try:
            shared_mem_objects = [Xs, y, fit_params]
            job_results = parallel.starmap_jobs(
                            _fit_predict_split, args_list,
                            n_cpus=n_processes,
                            shared_mem_objects=shared_mem_objects)
        except Exception as e:
            print('parallel processing request failed with message {}'
                  .format(e))
            print('defaulting to single processor')
            # fall through to the serial path below
            n_processes = 1
    if n_processes == 1:
        # print('running a single process with {} jobs'.format(len(args_list)))
        job_results = [_fit_predict_split(*args) for args in args_list]
    split_predictions, split_indices = zip(*job_results)
    # reorganize so splits are in lists
    results = {k:{'y_pred':[sp[k]['y_pred'] for sp in split_predictions],
                  'method':split_predictions[0][k]['method']}
               for k in split_predictions[0]}
    # decode classes where necessary
    if is_classifier:
        for predict_method in results:
            if results[predict_method]['method'] == 'predict':
                results[predict_method]['y_pred'] = [classes_[p]
                        for p in results[predict_method]['y_pred']]
    if combine_splits is True:
        # NOTE(review): y_concat[sample_indices] reorders with the forward
        # index permutation; the usual idiom for restoring original sample
        # order is out[sample_indices] = y_concat -- confirm this is the
        # intended ordering.
        sample_indices = np.concatenate(split_indices)
        for predict_method in results:
            y_concat = np.concatenate(results[predict_method]['y_pred'])
            results[predict_method]['y_pred'] = y_concat[sample_indices]
    else:
        results['indices'] = split_indices
    return results
def score_predictions(y_true, y_pred, score_method, scorer,
                      is_classification, is_binary):
    """
    Compute a scalar performance score for a set of predictions, resolving
    scorer='auto' to a default metric based on the model type and the
    prediction method that produced y_pred.

    Parameters
    ----------
    y_true, y_pred : array-like
        Ground-truth targets and predictions.
    score_method : str
        Name of the prediction method that produced y_pred.
    scorer : callable or 'auto'
        Metric with signature scorer(y_true, y_pred), or 'auto' to select
        balanced accuracy / ROC AUC for classifiers and explained variance
        for regressors.
    is_classification : bool
        Whether the predictor is a classifier.
    is_binary : bool
        Whether the classification task has exactly two classes.

    Returns
    -------
    Scalar figure-of-merit score.
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)

    if type(scorer) is str and scorer == 'auto':
        probability_methods = ('predict_proba', 'predict_log_proba',
                               'decision_function')
        if not is_classification:
            scorer = explained_variance_score
        elif score_method == 'predict':
            scorer = balanced_accuracy_score
        elif score_method in probability_methods:
            scorer = roc_auc_score
        else:
            scorer = explained_variance_score

    # sklearn's binary-classification scorers expect only the positive-class
    # column, so drop the redundant negative-class column of 2D outputs
    if is_classification and is_binary and y_pred.ndim == 2:
        y_pred = y_pred[:, 1]

    return scorer(y_true, y_pred)
def cross_val_score(predictor, Xs, y=None, groups=None,
                    score_method='predict', scorer='auto',
                    cv=3, n_processes=1, **fit_params):
    """
    Analog of the scikit-learn cross_val_score function that supports both
    single and multichannel cross validation.

    Parameters
    ----------
    predictor : estimator/predictor instance
        Classifier or regressor that implements the scikit-learn estimator and
        predictor interfaces.
    Xs : list
        List of feature matrices and None spaceholders.
    y : list/array, default=None
        Optional targets for supervised ML.
    groups: list/array, default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used if cv parameter is set to GroupKFold.
    score_method : str, default='predict'
        Name of method called to make predictions for performance scoring. If
        'auto', methods are attempted in the order defined in
        config.score_method_precedence.
        Default: predict_proba->predict_log_proba->decision_function->predict.
    scorer : {callable, 'auto'}, default='auto'
        - Function calculating performance scores.
        - If 'auto':
            - explained_variance_score for regressors with predict()
            - roc_auc_score for classifiers with {predict_proba,
              predict_log_proba, decision_function}
            - balanced_accuracy_score for classifiers with only predict()
        - If callable: A scorer that returns a scalar figure of merit score
          with signature: score = scorer(y_true, y_pred).
    cv : int, or callable, default=3
        - Set the cross validation method:
        - If int > 1: Use StratifiedKfold(n_splits=internal_cv) for
          classifiers or Kfold(n_splits=internal_cv) for regressors.
        - If callable: Assumes interface like Kfold scikit-learn.
    n_processes : int or 'max', default=1
        - If 1: Run all split computations in a single process.
        - If 'max': Run splits in multiple processes, using all
          available CPUs.
        - If int > 1: Run splits in multiple processes, using up to
          n_processes number of CPUs.
    fit_params : keyword arguments, default={}
        Auxiliary parameters forwarded to pipe fit_transform and fit methods.

    Returns
    -------
    List of scalar figure of merit scores, one for each split.

    Examples
    --------
    ::

        import pipecaster as pc
        from sklearn.ensemble import GradientBoostingClassifier
        from sklearn.svm import SVC

        Xs, y, X_types = pc.make_multi_input_classification(n_informative_Xs=3,
                                                            n_random_Xs=7)
        clf = pc.MultichannelPipeline(n_channels=10)
        clf.add_layer(pc.ChannelEnsemble(GradientBoostingClassifier(), SVC()))

        pc.cross_val_score(clf, Xs, y)
        # output: [0.7647058823529411, 0.8455882352941176, 0.8180147058823529]
    """
    is_classifier = utils.is_classifier(predictor)
    is_binary = False
    if is_classifier and y is not None:
        # encode labels so binary tasks can be detected for scoring
        classes_, y = np.unique(y, return_inverse=True)
        if len(classes_) == 2:
            is_binary = True
    # bug fix: fit_params was previously expanded with **fit_params into
    # cross_val_predict(), which has no **kwargs -- any real fit parameter
    # raised TypeError; pass it through as the fit_params dict instead.
    split_results = cross_val_predict(predictor, Xs, y, groups,
                                      predict_method=None,
                                      transform_method=None,
                                      score_method=score_method,
                                      cv=cv, combine_splits=False,
                                      n_processes=n_processes,
                                      fit_params=fit_params)
    # score the predictions of each test split against its true targets
    scores = [score_predictions(y[idx], yp, score_method, scorer,
                                is_classifier, is_binary)
              for yp, idx in zip(split_results['score']['y_pred'],
                                 split_results['indices'])]
    return scores
| [
37811,
198,
21544,
21201,
5499,
6493,
1111,
7854,
488,
4276,
47,
541,
4470,
290,
198,
36216,
15813,
12,
35720,
4331,
669,
13,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
198,... | 2.162031 | 8,054 |
# Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
from json import dumps # noqa (forwarded)
import sys
if sys.version_info.major >= 3:
# py2
else:
def loads(json_text):
"""Avoids returning unicodes in py2.
https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json
"""
return _byteify(json.loads(json_text, object_hook=_byteify))
| [
2,
15069,
25767,
669,
284,
262,
47753,
1628,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2... | 3.231511 | 311 |
# HTK Imports
from htk.apps.cpq.utils.accounting import get_invoice_years
from htk.apps.cpq.utils.accounting import get_receivables_by_year
from htk.apps.cpq.utils.crypto import compute_cpq_code
from htk.apps.cpq.utils.crypto import compute_cpq_code_check_hash
from htk.apps.cpq.utils.crypto import is_valid_cpq_code_check_hash
from htk.apps.cpq.utils.crypto import resolve_cpq_code
| [
2,
7154,
42,
1846,
3742,
198,
6738,
289,
30488,
13,
18211,
13,
13155,
80,
13,
26791,
13,
23317,
278,
1330,
651,
62,
16340,
2942,
62,
19002,
198,
6738,
289,
30488,
13,
18211,
13,
13155,
80,
13,
26791,
13,
23317,
278,
1330,
651,
62,
... | 2.623288 | 146 |
import os
from dotenv import load_dotenv
load_dotenv()
# Application
MAXIMUM_CONFIRMATION_CHECKS = 20
# thenewboston
BANK_IP = '54.183.16.194'
BANK_PROTOCOL = 'http'
BOT_ACCOUNT_NUMBER = '598428d3a9df5423aab3e593b5d1b5f056b9fa353607fccb1aa76385cf233851'
# Discord
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
# Mongo
MONGO_DB_NAME = 'discord-db'
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
| [
11748,
28686,
198,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
2,
15678,
198,
22921,
3955,
5883,
62,
10943,
39776,
44,
6234,
62,
50084,
50,
796,
1160,
198,
198,
2,
788,
413,... | 2.12973 | 185 |
"""
CS241 Team Activity 4
Written by Chad Macbeth
"""
| [
37811,
198,
7902,
28872,
4816,
24641,
604,
220,
220,
198,
25354,
416,
19800,
4100,
65,
2788,
198,
37811,
628,
220,
220,
220,
628
] | 2.695652 | 23 |
"""Constants for user privileges in channels.
Privilege levels
================
Historically, there were two user privileges in channels:
* :data:`OP`: channel operator, or chanop, set and unset by ``+o`` and ``-o``
* :data:`VOICE`: the privilege to send messages to a channel with the
``+m`` mode, set and unset by ``+v`` and ``-v``
Since then, other privileges have been adopted by IRC servers and clients:
* :data:`HALFOP`: intermediate level between Voiced and OP, set and unset by
``+h`` and ``-h``
* :data:`ADMIN`: channel admin, above OP and below OWNER, set and unset by
``+a`` and ``-a``
* :data:`OWNER`: channel owner, above ADMIN and OP, set and unset by ``+q`` and
``-q``
.. important::
Not all IRC networks support these added privilege modes. If you are
writing a plugin for public distribution, ensure your code behaves sensibly
if only +v (voice) and +o (op) modes exist.
Compare privileges
==================
This module represents privileges as powers of two, with higher values assigned
to higher-level privileges::
>>> from sopel.privileges import VOICE, HALFOP, OP, ADMIN, OWNER
>>> VOICE < HALFOP < OP < ADMIN < OWNER
True
Then a user's privileges are represented as a sum of privilege levels::
>>> VOICE
1
>>> OP
4
>>> priv = VOICE | OP
>>> priv
5
This allows to use comparators and bitwise operators to compare privileges::
>>> priv >= OP
True
>>> bool(priv & HALFOP)
False
In that case, ``priv`` contains both VOICE and OP privileges, but not HALFOP.
"""
from __future__ import generator_stop
VOICE = 1
"""Privilege level for the +v channel permission
.. versionadded:: 4.1
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
"""
HALFOP = 2
"""Privilege level for the +h channel permission
.. versionadded:: 4.1
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
.. important::
Not all IRC networks support this privilege mode. If you are writing a
plugin for public distribution, ensure your code behaves sensibly if only
``+v`` (voice) and ``+o`` (op) modes exist.
"""
OP = 4
"""Privilege level for the +o channel permission
.. versionadded:: 4.1
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
"""
ADMIN = 8
"""Privilege level for the +a channel permission
.. versionadded:: 4.1
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
.. important::
Not all IRC networks support this privilege mode. If you are writing a
plugin for public distribution, ensure your code behaves sensibly if only
``+v`` (voice) and ``+o`` (op) modes exist.
"""
OWNER = 16
"""Privilege level for the +q channel permission
.. versionadded:: 4.1
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
.. important::
Not all IRC networks support this privilege mode. If you are writing a
plugin for public distribution, ensure your code behaves sensibly if only
``+v`` (voice) and ``+o`` (op) modes exist.
"""
OPER = 32
"""Privilege level for the +y/+Y channel permissions
Note: Except for these (non-standard) channel modes, Sopel does not monitor or
store any user's OPER status.
.. versionadded:: 7.0
.. versionchanged:: 8.0
Moved into :mod:`sopel.privileges`.
.. important::
Not all IRC networks support this privilege mode. If you are writing a
plugin for public distribution, ensure your code behaves sensibly if only
``+v`` (voice) and ``+o`` (op) modes exist.
"""
| [
37811,
34184,
1187,
329,
2836,
18850,
287,
9619,
13,
198,
198,
20184,
41866,
2974,
198,
4770,
198,
198,
13749,
26847,
11,
612,
547,
734,
2836,
18850,
287,
9619,
25,
198,
198,
9,
1058,
7890,
25,
63,
3185,
63,
25,
6518,
10088,
11,
393... | 3.132498 | 1,117 |
import json
import requests
class Optimizely:
""" A Python wrapper around the Optimizely REST API
Attributes
_token (str): The Optimizely API token passe in the class constructor.
_api_base_url (str): The Base URL for the Optimizely API. Hardcoded as
it is unlikely to change, but if the API reaches a 'v2' designation,
it could be appropriate to add support for the v1 and v2 APIs within
a single class.
"""
def __init__(self, token):
""" Create an Optimizely object.
The init method just sets a token so that later calls can re-use the
same credentials without passing in the authentication token
explicitly.
Args:
token (str): A valid token for the Optimizely REST API. If you
don't already have a token, you can generate one using the
documentation provided by Optimizely.
References:
Optimizely REST API documentation:
https://developers.optimizely.com/rest/introduction/index.html
"""
self._api_base_url = 'https://www.optimizelyapis.com/experiment/v1{endpoint}'
self._token = token
def _call(self, method, endpoint, data=None):
""" Generic method for calling the Optimizely API.
Abstracts any required plumbing for the optimizely API (in thie case,
assembling the complete request URI and setting the authentication
token in the header) to ensure that behavior is consistent across
HTTP verbs.
The _call method should not be invoked directly. Use the convenience
methods .get, .post, .put, and .delete for a more semantic interface.
Args:
method (callable): A http method from the `requests` library.
endpoint (sting): The Optimizely REST API endpoint to make the
request to. Combined with the method parameter, this will
be a unique operation, meaning that you can wrap this method,
or one of its decendendants in a `partial` to encourage simple
re-use of a single endpoint with multiple payloads.
Keyword Args:
data: (dict|None) The body of the request (not applicable for GET
and DELETE requests).
Returns:
requests.response
"""
headers = {
'Token': self._token
}
if data:
# specify that body is being sent as JSON
headers['content-type'] = 'application/json'
data = json.dumps(data)
uri = self._api_base_url.format(endpoint=endpoint)
response = method(uri, headers=headers, data=data)
return response
def get(self, endpoint):
""" A semantic wrapper for GET requests.
Args:
endpoint: (string) The endpoint to make a request to, not including
the Base URI. For example: '/experiments/123'
Returns:
requests.response
"""
return self._call(requests.get, endpoint)
def post(self, endpoint, data):
""" Semantic wrapper for POST requests to the Optimizely REST API.
Args:
endpoint: (string) The endpoint to make a request to, not including
the Base URI. For example: '/experiments/123'
data: (dict) A json-serializeable object to be encoded in the request
body.
Returns:
requests.Response The response object.
"""
return self._call(requests.post, endpoint, data=data)
def put(self, endpoint, data):
""" Semantic wrapper for PUT requests to the Optimizely REST API.
Args:
endpoint: (string) The endpoint to make a request to, not including
the Base URI. For example: '/experiments/123'
data: (dict) A json-serializeable object to be encoded in the request
body.
Returns:
requests.Response The response object.
"""
return self._call(requests.put, endpoint, data=data)
def delete(self, endpoint):
""" Semantic wrapper for DELETE requests to the Optimizely REST API.
Args:
endpoint: (string) The endpoint to make a request to, not including
the Base URI. For example: '/experiments/123'
Returns:
requests.Response The response object.
"""
return self._call(requests.delete, endpoint)
| [
11748,
33918,
198,
11748,
7007,
628,
198,
4871,
30011,
1096,
306,
25,
198,
220,
220,
220,
37227,
317,
11361,
29908,
1088,
262,
30011,
1096,
306,
30617,
7824,
628,
220,
220,
220,
49213,
198,
220,
220,
220,
220,
220,
220,
220,
4808,
300... | 2.530934 | 1,778 |
import application.models
from application.controller.auth import validate_username
from flask_login import logout_user
| [
198,
11748,
3586,
13,
27530,
198,
6738,
3586,
13,
36500,
13,
18439,
1330,
26571,
62,
29460,
198,
6738,
42903,
62,
38235,
1330,
2604,
448,
62,
7220,
198
] | 4.481481 | 27 |
try:
import numpy as np
except ImportError:
raise Exception('This plugin requires the numpy library, which failed to import. \
Install the python3 version of numpy.')
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.colorchooser as tkcc
import plugin
| [
28311,
25,
198,
197,
11748,
299,
32152,
355,
45941,
198,
16341,
17267,
12331,
25,
198,
197,
40225,
35528,
10786,
1212,
13877,
4433,
262,
299,
32152,
5888,
11,
543,
4054,
284,
1330,
13,
3467,
198,
197,
15798,
262,
21015,
18,
2196,
286,
... | 3.25 | 84 |
from django_tgbot.decorators import processor
from django_tgbot.state_manager import message_types, update_types
from django_tgbot.types.update import Update
from Bot.bot import state_manager, TelegramBot
from Bot.models import TelegramState
from Bot.BotSetting import ReportChannel
from Loan.models import Loan
from ..BotDialog import go_home
from .Dialog import fail_loan, go_buy_loan_Value, go_conf, go_sell_loan_value, go_seller_list_month, \
go_my_request, go_buyer_list_month
from Loan.LoanRequest import conf_loan, cancel_loan, get_loan
@processor(
state_manager,
from_states='/Loan',
update_types=update_types.Message,
message_types=message_types.Text,
)
@processor(
state_manager,
from_states='/Loan',
update_types=update_types.CallbackQuery,
message_types=message_types.Text,
)
| [
6738,
42625,
14208,
62,
25297,
13645,
13,
12501,
273,
2024,
1330,
12649,
198,
6738,
42625,
14208,
62,
25297,
13645,
13,
5219,
62,
37153,
1330,
3275,
62,
19199,
11,
4296,
62,
19199,
198,
6738,
42625,
14208,
62,
25297,
13645,
13,
19199,
1... | 2.922535 | 284 |
import numpy as np
import matplotlib.pyplot as plt
def plot_grad_field(field, occupancy_grid):
"""Plots the gradient field of the np.array of pixels given by field"""
eps = 1e-6
N, M = field.shape
# matrix for the x and y coordinates in every point:
x, y = np.zeros((N, M)), np.zeros((N, M))
for i in range(N):
for j in range(M):
x[i, j] = field[i, j].grad[0] + eps
y[i, j] = field[i, j].grad[1] + eps
# plotting:
f, ax = plt.subplots(1, 1, figsize=(16, M / N * 16))
ax.quiver(x.T, -y.T, scale=1, scale_units='xy')
ax.matshow(occupancy_grid.T)
plt.show()
def array_is_in_list(array, l):
"""Checks if an np.array 'array' is in the list 'l'."""
for item in l:
if (array == item).all():
return True
return False | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4299,
7110,
62,
9744,
62,
3245,
7,
3245,
11,
42498,
62,
25928,
2599,
198,
220,
220,
220,
37227,
3646,
1747,
262,
31312,
2214,
286,
... | 2.122449 | 392 |