index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,400 | 66b05fcc12f1a35173c8529051a0fbffed291a4e | from operator import itemgetter #Needed funcion to sort all scheduled trips
input_file = open('c_no_hurry.in')  # Problem input file to read rides from
first_line = input_file.readline()
# First line of the input: grid size, fleet size, ride count, per-ride bonus
# and the total number of simulation steps.
n_rows, n_columns, n_vehicles, n_rides, bonus, max_steps = tuple(map(int, first_line.split(' ')))
def greatest_distance():
    """Upper bound on any travel distance: the full grid area (rows * columns)."""
    return n_rows * n_columns
def travel_distance(start, finish):
    """Manhattan distance between two (row, col) grid points."""
    row_delta = start[0] - finish[0]
    col_delta = start[1] - finish[1]
    return abs(row_delta) + abs(col_delta)
def make_trip_list():
    """Read the n_rides ride lines from input_file into a list of trips.

    Each trip becomes [trip_id, start_row, start_col, end_row, end_col,
    earliest_start, latest_finish]; the six field values (indices 1..6)
    are converted to int, the id is prepended by this function.
    """
    trips = list()
    for i in range(n_rides):
        # Prepend the ride's index so each trip keeps its id after sorting.
        trips.append([i] + input_file.readline().rstrip().split())
    for i in range(n_rides):
        trips[i][1:7] = map(int, trips[i][1:7])
    return trips
def availableCar():
    """Return the number of the first idle car, or the string "false" if none is free."""
    for vehicle in cars_list:
        if not vehicle[2]:  # index 2 is the busy flag
            return vehicle[0]
    return "false"
def AssignNextTrip(carNumber, tripNumber, start, finish):
    """Mark car `carNumber` busy with trip `tripNumber` from `start` to `finish`.

    Sets the car's busy flag, its remaining-step counter (distance of the
    ride itself) and its end position, then records the trip number on that
    car's row of the output buffer.
    NOTE(review): only the start->finish distance is charged; the distance
    from the car's current position to `start` is ignored -- confirm intent.
    """
    for car in cars_list:  # Find the car being assigned
        if(car[0] == carNumber):
            car[2] = True;
            car[1] = travel_distance(start, finish)
            car[3] = finish
            for line in output_file:  # write the tripNumber into the output buffer
                if (line[0] == carNumber):
                    line.append(tripNumber)
                    return
    return
def NextTrip(trips):
    """Pop and return the first scheduled trip, or False when none remain."""
    if not trips:
        return False
    return trips.pop(0)
def UpdateSteps():
    """Advance one time step: each busy car uses one step; at zero it goes idle."""
    for vehicle in cars_list:
        if not vehicle[2]:  # idle cars do not move
            continue
        vehicle[1] -= 1
        if vehicle[1] <= 0:  # ride finished -> car is available again
            vehicle[2] = False
# --- Main script: read trips, simulate car assignment, write the output ---
trip_list = make_trip_list()
fleet = []  # NOTE(review): never used afterwards
cars_list = []
output_file = []
# One entry per car: [car number, steps until available, busy flag, position]
for car in range(n_vehicles):
    cars_list.append([car, 0, False, (0,0)])
# Output buffer: one row per car, starting with the car number.
for car in range(n_vehicles):
    output_file.append([car])
trip_list.sort(key=itemgetter(5))  # Sort trips by earliest start step
# Unpack each trip's fields (values beyond `begin`/`end` are currently unused).
for r in range(n_rides):
    current_step = 0
    trip_id = trip_list[r][0]
    begin_row = trip_list[r][1]
    begin_col = trip_list[r][2]
    end_row = trip_list[r][3]
    end_col = trip_list[r][4]
    earliest_start = trip_list[r][5]
    latest_finish = trip_list[r][6]
    begin = (int(begin_row), int(begin_col))
    end = (int(end_row), int(end_col))
# Simulation loop: one iteration per time step.
for i in range(0, max_steps):
    # NOTE(review): NextTrip() mutates trip_list, so calling it both in the
    # while-condition and again in the body consumes two trips per iteration
    # (every other trip is silently dropped, and nextTrip can be False when
    # the condition call took the last trip) -- confirm this is unintended.
    while (availableCar() != "false" and NextTrip(trip_list) != False):  # Verifies there is a car available
        car = availableCar()
        nextTrip = NextTrip(trip_list)
        tripNumber = nextTrip[0]
        start = (nextTrip[1],nextTrip[2])
        finish = (nextTrip[3],nextTrip[4])
        AssignNextTrip(car, tripNumber, start, finish)  # Adds trip, alters values inside the car
    UpdateSteps()
# Write the output: per car, the trip count followed by its trip ids.
output=open(".out", "w+")  # NOTE(review): handle is never closed/flushed explicitly
length = len(output_file)
for i in range(length):
    del output_file[i][0]  # Drop the car-number prefix before writing
print(output_file)
for i in range(length):
    list_length = len(output_file[i])
    output.write(str(list_length))
    output.write(' ' + ' '.join(str(x) for x in output_file[i]) + "\n")  # Creates file according to instructions
|
13,401 | 069ae919ec3ace8b76fc919b8afd465e5762307b | from django.db import models
from datetime import datetime, date
class Article(models.Model):
    """A dated article with a title and a short description."""
    title = models.CharField(max_length=500)
    # NOTE(review): this field name shadows `date` imported from datetime above.
    date = models.DateField(auto_now_add=False, auto_now=False)
    description = models.CharField(max_length=2000)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.title
|
13,402 | ec0336135f8464f0e17b6eec00293cebf55a9fb9 | from django.urls import path
from .views import ProductListView, ProductDetailView
# Product catalogue routes: the list page and a slug-addressed detail page.
urlpatterns = [
    path('', ProductListView.as_view(), name='products-list'),
    path('details/<str:slug>', ProductDetailView.as_view(), name='products-details'),
]
|
13,403 | d30600e5e49ef563d721b34e9164a512d7061c03 | class Dependence:
    def __init__(self, x, p):
        """A dependence of the integer `x` on the collection of items `p`."""
        self.x = x  # integer
        self.p = set(p)  # stored as a set regardless of the input collection type
def __repr__(self):
s = str(x) + " <-";
for item in p:
s += " " + str(item)
return s
def __eq__(self, obj):
if (obj is None):
return False
return self.x == obj.x and self.p.symmetric_difference(set(obj.p)) == set()
def __hash__(self):
myHash = 0
for item in p:
myHash += item * item
return myHash + self.x * 7117
|
13,404 | 7f3d11976e29f89ab765e720e01a4396fbe3dd15 | import datetime
import os
import pickle
import h2o.automl
import pandas as pd
import xgboost as xg
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
class Variables(object):
    """Per-run state shared via the module-global ``Var``: an id tag and the
    preprocessing pipeline fitted by getPlantsPropulsionData."""
    def __init__(self, pipeline: Pipeline):
        # NOTE(review): str hashing is randomized per process, so modelNum is
        # not reproducible across runs for the same timestamp -- confirm intent.
        self.modelNum = str(abs(hash(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))))
        self.pipeline = pipeline
Var: Variables
def getPlantsPropulsionData(splitData=True, makePolynomialFeatures=False):
    """Load the plant-propulsion dataset and return preprocessed arrays.

    Reads "Data Source/data.txt" (space-separated, headerless), labels the
    columns, drops constant/unused columns, and uses the two decay-state
    coefficient columns as the targets. A StandardScaler pipeline (optionally
    preceded by degree-3 PolynomialFeatures) is fitted here and stored in the
    module-global ``Var`` so logSave can persist it later.

    Returns:
        (X_train, X_test, y_train, y_test) when splitData is True,
        otherwise the full (X, y).
    """
    global Var
    data = pd.read_csv(filepath_or_buffer="Data Source/data.txt", sep=" ", header=None, engine='python')
    col_heading = ['Lever_position', 'Ship_speed', 'Gas_Turbine_shaft_torque', 'Gas_Turbine_rate_of_revolutions',
                   'Gas_Generator_rate_of_revolutions', 'Starboard_Propeller_Torque', 'Port_Propeller_Torque', 'HP_Turbine_exit_temperature',
                   'GT_Compressor_inlet_air_temperature', 'GT_Compressor_outlet_air_temperature', 'HP_Turbine_exit_pressure',
                   'GT_Compressor_inlet_air_pressure', 'GT_Compressor_outlet_air_pressure', 'Gas_Turbine_exhaust_gas_pressure',
                   'Turbine_Injecton_Control', 'Fuel_flow', 'GT_Compressor_decay_state_coefficient', 'GT_Turbine_decay_state_coefficient']
    col_to_drop = ['Lever_position', 'Ship_speed', 'GT_Compressor_inlet_air_temperature', 'GT_Compressor_inlet_air_pressure']
    data.columns = list(col_heading)
    data = data.drop(col_to_drop, axis=1)
    X = data.drop(['GT_Compressor_decay_state_coefficient', 'GT_Turbine_decay_state_coefficient'], axis=1).values
    y = data[col_heading[-2:]].values
    steps = [('scaler', StandardScaler())]
    if makePolynomialFeatures:
        steps.insert(0, ('polynomialfeatures', PolynomialFeatures(degree=3, interaction_only=False, include_bias=False)))
    pipeline = Pipeline(steps=steps)
    Var = Variables(pipeline=pipeline)
    if splitData:
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, shuffle=True)
        # Fit the scaler on the training split only; the test split is only transformed.
        X_train = pipeline.fit_transform(X=X_train)
        X_test = pipeline.transform(X=X_test)
        return X_train, X_test, y_train, y_test
    else:
        X = pipeline.fit_transform(X=X)
        return X, y
def printMetrics(y_true, y_pred):
    """Print MAE, MSE and R^2 (averaged uniformly over outputs) for the predictions."""
    metric_fns = (
        ("mean_absolute_error", mean_absolute_error),
        ("mean_squared_error", mean_squared_error),
        ("r2_score", r2_score),
    )
    for label, fn in metric_fns:
        score = fn(y_true=y_true, y_pred=y_pred, multioutput='uniform_average')
        print(label + ":", score)
def getMetrics(y_true, y_pred):
    """Return (MAE, MSE, R^2), each averaged uniformly over outputs."""
    shared_kwargs = dict(y_true=y_true, y_pred=y_pred, multioutput='uniform_average')
    return (
        mean_absolute_error(**shared_kwargs),
        mean_squared_error(**shared_kwargs),
        r2_score(**shared_kwargs),
    )
def logSave(nameOfModel, reg, metrics, val_metrics):
    """Log metrics to SKlogs.log / SKMetrics.csv and persist the pipeline + model(s).

    Parameters:
        nameOfModel: label used in log lines and saved-file names.
        reg: a fitted model, a list of two models (one per target), or None
             (log/persist pipeline only).
        metrics / val_metrics: (mae, mse, r2) tuples for train and validation.

    Fixes over the original: file handles are managed with ``with`` so pickles
    are flushed and closed, and the XGBoost branch inspects the list elements
    (``type(reg) is xg.XGBRegressor`` could never be true when reg is a list).
    """
    mean_absolute_error_score, mean_squared_error_score, r2_score_error = metrics
    val_mean_absolute_error_score, val_mean_squared_error_score, val_r2_score_error = val_metrics
    msg = str(Var.modelNum) + "-" + nameOfModel + "\t\t" + "mae-" + str(mean_absolute_error_score) + "\tmse-" + str(
        mean_squared_error_score) + "\tr2-" + str(r2_score_error) + "\tval_mae-" + str(val_mean_absolute_error_score) + "\tval_mse-" + str(
        val_mean_squared_error_score) + "\tval_r2-" + str(val_r2_score_error) + "\n"
    with open("SKlogs.log", "a+") as f:
        f.write(msg)
    # Write the CSV header once, on first use.
    if not os.path.exists("SKMetrics.csv"):
        with open("SKMetrics.csv", "w") as f:
            f.write(",".join(
                ["Model No.", "Model Type", "mean_absolute_error", "mean_squared_error", "r2_score", "val_mean_absolute_error", "val_mean_squared_error",
                 "val_r2_score"]) + "\n")
    with open("SKMetrics.csv", "a+") as f:
        msg = ",".join(
            [Var.modelNum, nameOfModel, str(mean_absolute_error_score), str(mean_squared_error_score), str(r2_score_error),
             str(val_mean_absolute_error_score), str(val_mean_squared_error_score), str(val_r2_score_error)
             ])
        f.write(msg + "\n")
    # Persist the preprocessing pipeline alongside the model.
    if not os.path.exists("DataPreprocessingPipeline"):
        os.mkdir("DataPreprocessingPipeline")
    name_of_file = "_".join([Var.modelNum, nameOfModel, "DataPreprocessingPipeline"]) + ".pickle"
    with open(os.path.join("DataPreprocessingPipeline", name_of_file), "wb") as pickle_out:
        pickle.dump(Var.pipeline, pickle_out)
    if not os.path.exists("SKLearnModels"):
        os.mkdir("SKLearnModels")
    if not os.path.exists("H2OModels"):
        os.mkdir("H2OModels")
    if reg is None:
        return
    if isinstance(reg, list):
        if "H2O" in nameOfModel:
            name_of_file = "_".join([Var.modelNum, nameOfModel])
            h2o.save_model(reg[0].leader, path=os.path.join("H2OModels", name_of_file + "1"))
            h2o.save_model(reg[1].leader, path=os.path.join("H2OModels", name_of_file + "2"))
        # Bug fix: `reg` is a list in this branch, so the element type must be
        # checked, not the list itself.
        elif isinstance(reg[0], xg.XGBRegressor):
            name_of_file = "_".join([Var.modelNum, nameOfModel]) + ".bin"
            reg[0].save_model(os.path.join("SKLearnModels", name_of_file + "1"))
            reg[1].save_model(os.path.join("SKLearnModels", name_of_file + "2"))
        else:
            name_of_file = "_".join([Var.modelNum, nameOfModel]) + ".pickle"
            with open(os.path.join("SKLearnModels", name_of_file + "1"), "wb") as pickle_out:
                pickle.dump(reg[0], pickle_out)
            with open(os.path.join("SKLearnModels", name_of_file + "2"), "wb") as pickle_out:
                pickle.dump(reg[1], pickle_out)
    else:
        name_of_file = "_".join([Var.modelNum, nameOfModel]) + ".pickle"
        with open(os.path.join("SKLearnModels", name_of_file), "wb") as pickle_out:
            pickle.dump(reg, pickle_out)
def saveBestParams(nameOfModel, best_params):
    """Append the model's grid-search best parameters to GridSearchParams.txt."""
    line = Var.modelNum + "-" + nameOfModel + "\t" + str(best_params) + "\n"
    with open("GridSearchParams.txt", "a+") as params_file:
        params_file.write(line)
|
13,405 | 0fa8c506baf3973f362efe7a888323475d10e270 | from django.urls import re_path
from . import views
# Authentication routes.
urlpatterns = [
    re_path(r'^reg$',views.reg_view),
    # NOTE(review): ^login and ^logout lack a trailing $, so any path that
    # merely starts with "login"/"logout" matches these routes -- confirm intent.
    re_path(r'^login',views.login_view),
    re_path(r'^logout',views.logout_view),
    re_path(r'^register$',views.register_view),
] |
13,406 | f94b461d8932761cf0f50be8a13b47f9c9286055 | {
    # Odoo addon manifest (metadata only; no executable code).
    'name': 'Fleet asset',
    'version': '8.0.1.0.0',
    'license': 'AGPL-3',
    'category': 'Generic Modules/Fleet Asset',
    'author': 'Andrean Wijaya',
    'website': '-',
    # Modules that must be installed before this one.
    'depends': ['account','fleet','account_asset'],
    # View definitions loaded on install/update.
    'data': [
        'views/fleet_asset_view.xml',
    ],
    'installable': True,
}
|
13,407 | 2ab49eee147eb66a4d68953d6ae1dbbb15b67cc3 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class MusicItem(scrapy.Item):
    """Scraped metadata for one song."""
    song = Field()
    url = Field()
    singer = Field()
    source = Field()
    date = Field()
    image = Field()
    discuss = Field()
    album_id = Field()
class commentItem(scrapy.Item):
    """Scraped comment attached to an album/song.

    NOTE(review): the class name breaks PascalCase; left unchanged because
    spiders/pipelines elsewhere may reference it by this exact name.
    """
    album_id = Field()
    comment = Field()
    singer = Field()
    song = Field()
    tag = Field()
|
13,408 | 44bee7009a10419851132b42fcad2401b5f22a8b | num=input()
rev = num[::-1]
# A string that equals its own reverse is a palindrome.
print('yes' if num == rev else 'no')
|
13,409 | dcd321144436a1da130f16c05ac41b2c49c16cb5 | items=list(range(11,21))
# Demo of enumerate(): print each item of `items` alongside its 0-based index.
for index, item in enumerate(items):
    print(index, item)
|
13,410 | bac42a3e34f14106548df2dce672ca976eadd41e | from __future__ import annotations
import requests
import time
from typing import List, TypedDict, Generator
from dataclasses import dataclass
@dataclass
class RedditComment:
    """
    A basic reddit comment.

    This class excludes much of the data that comes with
    a reddit comment in favor of simplicity; field values are taken
    directly from the Reddit listing-API comment payload.
    """
    comment_id: str     # the API's "name" field (the comment's fullname)
    created_utc: int    # "created_utc" from the API (epoch seconds, presumably UTC)
    author: str         # "author" from the API payload
    body: str           # comment text ("body")
    link_id: str        # "link_id" from the API payload
    parent_id: str      # "parent_id" from the API payload
    subreddit_id: str   # "subreddit_id" from the API payload
class RedditApi:
    """A class to get data from Reddit."""

    def get_latest_comments(self, subreddit: str) -> List[RedditComment]:
        """Fetch the newest comments of `subreddit` via the public JSON listing.

        Returns an empty list on any non-200 response; no exception is raised.
        """
        r = requests.get(
            f"https://reddit.com/r/{subreddit}/comments.json",
            headers={"User-agent": "TestBot v0.1"},
        )
        comments = []
        if r.status_code == 200:
            data = r.json()
            for child in data["data"]["children"]:
                child = child["data"]
                comment = RedditComment(
                    comment_id=child["name"],
                    created_utc=child["created_utc"],
                    author=child["author"],
                    body=child["body"],
                    link_id=child["link_id"],
                    parent_id=child["parent_id"],
                    subreddit_id=child["subreddit_id"],
                )
                comments.append(comment)
        return comments

    def monitor_comments(
        self, subreddit: str, delay: float = 15.0
    ) -> Generator[List[RedditComment], float, RedditComment]:
        """Yield the latest comment batch roughly every `delay` seconds.

        A value sent into the generator (via .send()) replaces `delay`.
        Between fetches the loop polls in one-second sleeps.
        NOTE(review): the declared return type is never produced -- the loop
        is infinite -- confirm the Generator annotation.
        """
        next_time = 0.0
        while True:
            if time.time() > next_time:
                new_delay = yield self.get_latest_comments(subreddit)
                if new_delay:
                    delay = new_delay
                next_time = time.time() + delay
            else:
                time.sleep(1.0)
if __name__ == "__main__":
    # Bug fix: the prompt's answer was ignored and "news" was always
    # monitored; use the subreddit the user actually typed.
    subreddit = input("Enter subreddit name to monitor: ")
    api = RedditApi()
    for comments in api.monitor_comments(subreddit):
        for comment in comments:
            print(comment.body)
|
13,411 | 3a5f832d44c6a55004dc94ec1b25a485ceb5d8eb | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: ๅจๆฅ 11ๆ 29 16:49:48 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x14\x1c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x8b\x00\x00\x00\x15\x08\x06\x00\x00\x00\x76\x75\x1d\xcc\
\x00\x00\x13\xe3\x49\x44\x41\x54\x78\x5e\x6c\x56\x3d\x6b\x15\x51\
\x10\x3d\x73\x77\xdf\xfa\x92\x97\x90\xc6\x57\x29\x28\x48\x04\x49\
\x61\x61\x63\xa5\x68\xe1\x3f\xb0\x4a\x1b\xb0\x8a\xe9\x2c\x62\x11\
\x10\x24\x82\x60\x23\x81\x80\xfa\x2f\x6c\x55\x10\x0b\x7f\x82\x16\
\xfe\x04\x09\x51\x5e\x14\xe1\xed\x08\xc3\x21\x87\xbb\xb3\x03\xd9\
\xbd\xb3\x77\xee\x7c\x9c\x7b\xe6\x4d\x0c\x92\x24\x17\xf1\x64\x13\
\xc0\x43\x00\x77\x1d\xd8\x02\x70\x09\x70\x00\x06\xf0\xe9\x80\xd6\
\x7c\x73\x05\xe7\x3b\x56\xb4\x08\x0d\x92\xd0\xf4\x2d\xf9\xc4\x78\
\x0c\x7a\x34\xe9\xd9\x5f\x3a\x6d\x90\x64\xbf\x12\x1b\xd8\x58\x5a\
\xab\xb2\xfa\x9b\xa5\x3c\xab\x55\xaa\x3f\xc5\x4c\xf5\x64\x7f\x12\
\x4f\x9a\xe9\x6c\xc6\x47\xd8\x0c\xbc\x09\x4f\xd4\x35\xe4\x1c\xb3\
\x5c\x9e\x3d\xdd\x34\xd8\x81\x03\xdb\x11\xc0\x15\x34\x1e\x1e\xae\
\xa5\x87\x4d\x28\x4a\xd4\x4c\x35\xc0\x75\x38\x4c\xb9\xef\x2a\xb2\
\x66\xc2\x08\x81\xb8\x6f\xe6\xdc\x66\x51\x0c\x6d\x75\x75\x89\x56\
\x5c\xd1\xae\xa6\x34\x94\x2b\x65\x84\x82\x0c\x66\x4e\x4f\x66\x30\
\x6e\x2b\x46\xba\xa6\x8c\x9b\x91\x64\x1e\x56\xb9\xb1\x06\xcc\x70\
\x97\x17\x92\x34\x61\xe7\xcc\x0b\x89\xb4\xe3\x54\x1f\x67\x83\x4e\
\x23\x37\x02\xf3\x1e\xc8\xf5\xf9\xb3\x1d\x38\x8e\x0c\xe8\x9c\x44\
\xa0\x5f\x45\x71\x01\xe7\x04\x21\xf2\x77\x83\x24\xbe\xe9\xb4\x1b\
\x75\x91\x8b\x87\xc2\xa5\xf6\x29\x02\x83\x66\x84\x80\xc0\x12\x55\
\x92\x25\x03\xa2\xa4\x69\xeb\xf5\x9e\x6c\x95\x8b\xc5\x65\xd2\xc6\
\x13\x89\x73\x3f\xfb\x00\x75\x13\xcc\x0a\xc7\x14\x04\x9b\x62\xc8\
\x49\xd4\xcf\xcd\xaa\xb9\x32\x79\x99\x67\xa8\x55\xec\x51\x4c\x47\
\x3b\x4a\x77\x52\x95\xa8\x66\x64\x0d\x32\xcf\xbf\xcc\x37\xaf\xbc\
\xd8\x37\xf8\x73\xa6\x24\x9e\x86\xd3\xaa\x6b\xaa\x37\x12\x97\x29\
\x02\x1f\x50\x7f\x2b\xdd\x20\x48\xe2\xb4\x6c\x65\xad\xc1\x52\x11\
\xd4\xb4\xef\x35\x09\x94\x99\xee\x80\xc2\xd8\xc4\x4e\x22\x22\x8a\
\x63\xfa\x46\x31\x5d\x2c\x7d\xa4\xea\x84\x48\xc2\x8e\x22\x1f\x1a\
\x1c\x89\xee\xc4\x52\x8f\x3c\xd6\x55\x3b\x93\xd3\x77\x92\x12\xe7\
\x93\x41\x13\x80\xb8\x85\xce\x86\xaf\x1b\x31\x8f\xff\x01\x59\x6e\
\xdf\x78\xb5\x63\xf0\x37\xe2\x9d\xc1\xc8\xc8\xb1\x42\xc0\x3d\x79\
\x26\x54\xde\xc3\xd1\xd3\xaa\xc0\xac\x10\x97\x61\x02\x39\x25\x16\
\x9c\xc6\x44\x96\x3a\xbf\x2c\x79\x72\x67\xff\x8a\x20\x4b\x09\x3b\
\x14\x7d\x8c\xdc\x25\x58\x4d\xd4\x03\xd5\x9c\x90\xc9\xff\x9d\xe5\
\x9c\xb4\x16\x66\x6a\x95\x06\x66\xf9\x0c\x71\x0d\x02\x14\x37\x58\
\x69\x00\xd4\xb5\x48\x14\x5f\x2e\x14\x71\x5c\x4c\xcd\xe6\x22\x4d\
\x70\x9a\x31\x5a\x00\xb8\x7f\xeb\xf5\x26\x80\xa3\x54\x57\x15\xa8\
\x87\xfb\x92\xe3\x80\xa5\x19\x59\x6d\x25\xd8\xfa\xf1\x70\x0f\xa7\
\x8b\x55\x3c\xd8\x7f\x1c\xfa\xa4\xdb\x40\x5b\x3a\x58\x29\x63\xbf\
\xa6\xa1\x68\x84\x8d\xc7\x56\x79\xe7\x76\xca\x05\xd0\x08\xd1\x75\
\x46\x3e\xba\xef\xca\x47\x35\x89\x14\x33\xb4\x84\xe6\x87\xc3\x5d\
\xb0\x9e\xd8\x98\x4c\x36\xd0\xb6\x1d\x8a\x15\xb8\x6b\x3c\xb2\x77\
\xd0\x7b\x4f\x3f\x4e\x4c\x4a\xec\x47\xf3\xc4\x5f\x03\x43\x61\x90\
\xb8\x7c\xbc\xdd\x7b\x89\x93\xdf\x33\xec\x1e\x6f\x87\xbf\x49\xbb\
\x86\xa6\x4c\x45\x18\x67\x1e\x67\xc4\x15\x86\xee\xc2\x06\x9a\xa6\
\x83\x59\x11\x50\x99\x97\x52\x8c\xe3\x7a\x38\xae\xfa\x25\x7a\xf4\
\xa1\x1b\xf3\xcd\x03\x52\x10\x05\x59\x56\x66\xdd\x81\x01\x5d\x8a\
\xc9\x39\xfb\x7e\xf7\x51\x85\x77\xef\xc0\xc9\x62\x15\x5f\x7f\x5c\
\xc5\xf1\xa7\x7b\xf8\xf5\x77\x8e\x69\xb7\x0e\x9c\x02\x38\x73\x9c\
\xfe\xf9\x16\x64\x99\x4f\xb7\x30\x5d\x5d\x47\x53\x5a\x54\x92\x88\
\x43\x7c\xf5\x4e\x42\xc2\x47\x2e\x94\xc8\xe3\xe7\x62\x86\x2f\xdf\
\xaf\xe1\xdd\xe7\x3b\x58\xfc\x9b\x63\x32\x59\x43\xd3\xae\xa0\xb1\
\x46\x7d\x9c\xe2\x72\xa9\xf5\x68\xef\xff\xe7\xcb\xcc\x83\xe4\xaa\
\xae\x33\xfe\x7b\xfb\xd2\xfb\x36\xdd\xb3\x69\x19\x2d\x68\x47\x42\
\x28\x08\x63\x4c\x40\x48\x40\x30\x26\x38\x05\x66\x71\x8a\x00\x76\
\x30\x95\x2a\x9c\x32\x4e\x2a\x8b\xab\x1c\x9c\x94\x13\xe2\xa4\x62\
\xff\x63\x8c\x13\x28\xbb\x62\x8c\x09\x5e\x58\x65\xcc\x6e\x21\x81\
\x25\x40\x42\x12\x92\x46\xcb\x48\x33\xd2\x8c\x7a\x66\x7a\x7a\xba\
\xa7\x7b\xa6\xf7\x7e\x2f\x2f\xf7\x85\x69\x81\x48\x7a\xaa\x6b\xea\
\x75\x7d\x7d\xef\xf9\xbe\x73\xee\xb9\xe7\x9c\x16\x7c\xe6\x3c\x3e\
\x95\x41\xe1\xa4\x54\xcc\xe3\x63\x85\x50\x24\x15\xe0\x23\x45\xdf\
\xf7\x6e\xfd\x7b\x06\x52\xa3\xdc\xfd\xd8\x57\x38\x31\x91\x44\xd7\
\xc2\x7c\xf3\xc6\x27\xb8\x7c\xd9\x3e\x9e\x7f\x7f\x23\xff\xf4\xc2\
\x56\x34\xd5\x66\x6d\xff\x34\xdf\xbb\xfd\xbb\xbc\x7f\x7a\x80\xfb\
\x1f\xbf\x9d\x9f\xbf\xb1\x89\x6a\x43\xa5\xed\xe4\x44\xb6\xb0\x35\
\x13\xc3\x50\x90\x24\xa5\xe3\xe0\x19\xa0\x2a\xec\xe8\xe8\x6a\xfa\
\xba\xba\xe7\x97\x61\xff\x1f\xef\x8e\x8e\xf7\xdf\x4b\xa9\x6a\x73\
\xc3\xbf\x3d\x00\xb2\x84\xa1\x47\xd1\xfc\x00\xf4\xfd\x70\x7e\x2a\
\x42\xbd\xf9\xda\xc7\x96\x81\x74\x07\x9f\xf8\xea\x18\x5b\x6f\xa9\
\xfc\xf4\xbd\x05\x34\x9b\x55\x74\x55\xe2\xe2\x85\x65\xae\x5f\x74\
\x98\xe8\x15\xe3\xdc\xf7\xf3\xab\x88\x85\x17\x09\x1c\x35\x97\x5a\
\x63\x02\x24\x05\x45\x6d\x62\x9a\x0a\x92\xfc\x61\xe7\xa4\x88\x53\
\x29\x4e\xa0\xf7\x06\x90\xce\x3d\x81\x6e\xdb\xe7\xe5\x47\x0c\xe2\
\xcf\xcf\x5a\x1d\xbb\x4b\x50\x6f\xfa\xb6\x38\xed\x0a\x9b\x16\xce\
\x72\xd3\xd2\x03\x6c\x8c\x1f\xe7\x8b\x3f\xd9\x46\xd5\x5d\x42\xc4\
\xe8\xc3\xd0\x43\xc8\xb2\xea\xaf\xeb\x9f\x78\x5c\xbf\xce\x81\xce\
\xba\x9d\xfd\x10\x38\x81\x01\x7c\x5c\x71\x9e\x8f\x70\x9e\xac\x36\
\x31\x4c\x19\xc5\xcf\x94\xbe\x43\x7d\x62\xec\x3b\xb1\x8e\x01\x7d\
\x94\x15\xb1\x7d\xbc\x73\xa2\x97\x78\x78\x21\xeb\x23\x47\x85\x26\
\x1b\x62\x83\x4c\x14\x62\x84\xec\x0c\xeb\xd2\x39\xf1\xd9\xce\x43\
\xbd\x14\xca\x87\x79\x6a\x4f\x9c\x50\x20\x8d\x69\x35\x90\x65\x0d\
\xcb\x36\x30\x0d\x03\x17\x09\xfc\xab\x67\x5e\xd7\x6a\x7d\xc2\xc3\
\xf8\xba\x5a\xb6\x2a\xf0\x9d\x6b\x9f\x8f\x34\x12\xbe\x6d\x72\x87\
\xbf\xc8\xc6\xcc\xf3\xa7\x08\x6e\xdd\x25\x5f\xfa\x40\xe0\x32\xc9\
\x35\x98\x56\x10\x45\xd1\xf8\xbf\x5e\xaa\x1d\x30\x6e\x76\xdd\xf3\
\xae\xac\x73\x8b\x43\x3f\x58\xea\x1a\xdf\xda\x9e\x62\xae\x9a\x27\
\x1c\xea\x21\x1d\xb6\xd9\x75\xd7\x8b\x2c\x50\xe7\x28\xce\x9e\x42\
\x55\x25\x81\xa3\x0e\x92\xe4\x88\x00\x79\xfd\x0b\x0f\x51\x6e\xd8\
\xdc\xfe\xf2\xdf\x0a\x83\x6c\x2b\xce\xcf\xae\xbc\x97\x72\x33\xc0\
\x1d\x2f\x7f\x43\x90\xb2\xcc\x38\xd7\xf4\xbd\xcd\xb5\xbd\xbf\x26\
\x65\xe6\x98\xae\x87\x78\xee\xe4\xc5\xfc\xf2\xe4\x67\x30\x8d\x30\
\xba\x1e\x42\x55\x0d\x64\x59\x02\xd7\x27\x59\x6f\x68\x3c\xe8\xd9\
\x52\xa9\x15\x88\x86\x2e\xe0\x1f\xb6\x0c\x73\xcb\xea\x11\xbe\xb4\
\x7e\x37\x0f\xbd\xdd\x42\x37\x35\xec\xa0\xc5\x1f\x2e\x7a\x85\xab\
\x33\xaf\x10\x37\x0a\xe4\x6a\x61\x9e\x39\xf9\x7b\x3c\x7b\xea\x32\
\x0c\x23\x8c\xa5\x07\x51\x34\x83\x4b\x12\xbb\xb9\xae\xf7\x79\xfa\
\x03\xa3\xb4\x1c\x85\xa1\x99\x0c\xff\x39\x78\x05\x47\x66\x56\xce\
\xf3\x41\x72\xb8\xb8\xb7\xc4\x4f\x6e\xfe\x57\x46\xcb\x19\xfe\xfa\
\xed\xbb\x69\x10\xc4\x32\xa2\x68\x9a\x85\x8c\xcc\xa1\xdc\x46\xfe\
\xa8\xb4\x9d\x0d\xf1\x2c\x8f\xd4\x54\x2e\xea\xab\x11\xaa\x55\x98\
\x9c\x0e\xd1\x6b\x97\x09\xcb\x83\x94\x2a\x4d\xd6\x86\x46\xc5\xba\
\xbf\x3d\x16\xa6\x52\x9f\xe4\xe0\x57\x7e\x49\xa9\x69\xf1\xf9\xe7\
\xee\x47\x91\x15\xec\x80\xcc\x5d\x17\x3c\xc9\x67\x32\x3b\x70\x1c\
\x89\x97\x4f\x5f\x88\x6f\x87\x4b\xdb\xa9\x20\x2b\x36\x86\x01\x5f\
\x5a\xf3\x14\x97\xa7\xdf\x02\x1c\x0f\xb3\x9e\x9b\x96\xfc\x8e\x52\
\xc3\xe2\xd6\x17\xff\x02\x45\x31\x09\xd8\x09\xb6\x79\xba\x5e\x77\
\xae\xae\xa7\x7c\x5d\x5f\xf8\xec\x37\xa1\x04\x11\xb7\xca\x07\xf7\
\xfd\x8a\xfe\xef\x6c\x20\x12\x49\x11\xd7\xba\x30\x0c\x4b\xf8\xaa\
\x53\xb2\x77\x3a\x2f\xd5\xb2\xf4\x2b\xe8\xb4\xa6\x9d\xbb\xa7\x73\
\xa9\x0b\x72\x46\xbb\xc9\xdf\x5c\x32\x42\xbb\xdd\x20\x1a\x84\x8d\
\xa9\x22\xf5\xbc\xc2\xf7\xdf\x5d\x01\xb4\x71\xa8\x0b\x9c\xd4\x04\
\x45\x15\xd9\x42\x3c\xbb\x4d\xd7\x13\xec\xa0\x88\xf4\xfe\x1e\x9f\
\x38\x2d\x97\x89\xfc\x01\xf0\x30\x5f\x5e\x37\xc7\x5d\x5d\x4f\x93\
\x2d\x27\x78\xf6\xc4\x4a\x16\x07\xce\x72\x4f\xdf\xab\x14\xf3\x59\
\x9e\xc9\x7e\x8a\xae\xe4\x52\x4c\xbb\x07\x5d\x35\x10\x1c\x66\x80\
\x26\x28\x8a\x84\x6d\x85\xe8\xc9\xac\xe2\xf9\xf1\xcb\xb8\xb9\xe7\
\xdb\x5c\xd3\x55\xe4\xc1\x7a\x9e\xf2\xdc\x69\xee\x5e\xf9\x01\xb7\
\xc5\x5f\x60\xbc\x14\xe7\xe9\x89\x15\x6c\x4e\x0e\x73\x6f\xff\x4b\
\x94\xa7\xc7\x78\xd6\x5f\x97\xeb\x33\xc7\xb8\xab\xfb\x51\x2a\x4d\
\x83\x97\x06\x97\x51\x6b\xd4\x58\x1b\xcb\xa3\x57\x0f\x92\x9f\x91\
\xe7\xf9\xac\x4a\x56\x78\xf4\xca\x41\xc6\xb3\x41\xbe\xf6\xee\x56\
\x26\x66\x4f\x12\x0a\x74\x11\x08\x06\x31\x4c\x15\x55\x51\x39\xeb\
\xae\xa2\x3c\x15\xe0\xa2\xc0\x14\x8a\xb2\x90\x8d\xd1\x93\x82\xeb\
\x63\x1f\x6c\xe4\xaf\xd6\xbc\xc1\x95\xfd\x55\x9e\x1e\xae\xb0\x5a\
\x3b\xcd\xf8\x44\x84\x63\x79\xc9\xc3\xc9\xf3\x7c\x66\x66\x4f\x21\
\x4b\x0a\x77\x2d\xdf\xc3\xb5\xc1\x57\x3c\x7d\x62\xbc\x99\x4d\xb3\
\x31\x7e\x50\x60\x9c\x7a\x9b\x46\x6b\x56\x38\xf3\xce\xa5\x6f\x71\
\x5d\xe8\x4d\x26\x0b\x51\x76\x64\x33\x5c\x9c\xf8\x10\xd3\x62\x78\
\x6c\x17\xc1\x60\x8a\x3f\x5d\x50\xe5\xee\xae\x67\xc8\x96\x93\x1d\
\x5d\x7b\x3d\x5d\xa7\xb2\x3c\xb1\x6f\x35\xb7\x0d\x1c\xa2\xd6\x92\
\xf9\x8f\xfd\x36\x95\x5a\xde\x7b\x4f\x80\xbc\x0c\xd3\x8a\x21\x2b\
\x2a\x12\x1f\x1f\xf8\x80\x6a\xd9\xfa\xea\xce\xe7\xe7\x55\x99\x9d\
\x60\xa1\xc5\x97\xfb\xc7\x01\x80\x69\x81\x3f\x9a\x0d\x33\x94\x6b\
\x8b\x93\x1f\x0a\xc5\x04\x8e\xb6\x10\x41\x14\xb5\xfe\x33\x94\xe7\
\xc6\xc5\x7d\x2c\x49\xab\xe6\x83\xa5\x3c\x97\x15\x98\x1b\x02\xfb\
\x04\x6e\xff\x54\x82\x42\x71\x9a\xfc\xa4\xc2\x5a\x05\x6e\x4e\x9d\
\xe0\x91\x83\x31\x54\x4d\x26\x1c\x8e\x62\xda\x01\x91\x82\x85\x03\
\xdb\xa0\xaa\x0a\x9a\xa4\x91\x48\xa4\x09\xa6\x3c\x47\x17\x4d\x12\
\x52\x03\x59\x91\xa8\xd6\xa7\xd9\xaa\xef\x16\xd8\x07\x0e\x6c\x63\
\xef\x68\x96\x45\x76\x88\x5f\xff\xfe\x6e\x6e\xed\x1a\xe2\xd1\xc3\
\x49\x34\x5d\xe2\x0f\x16\x3f\x2b\x30\x5f\x3b\x72\x03\x6f\x0e\xe7\
\xc5\x81\xb1\xad\xf5\x18\x9a\x8e\x6e\x68\x82\x8f\xde\x6e\xf1\xa3\
\xcd\x83\xa8\x65\x87\x7b\xde\x5a\xc7\x44\x6d\x14\xcb\x0c\x13\x0a\
\x06\x3d\xce\x41\x4c\x43\x17\x76\xb9\xae\xc6\x60\x71\x3d\x9b\xb4\
\x5d\x2c\x4f\x38\x5c\x12\x98\x20\x97\x0b\xf0\xdc\x48\x17\xf7\xa5\
\x15\xb6\xa6\x5d\x86\xca\x0d\x02\xb3\x4d\x5e\xca\xf5\xd3\x72\x2a\
\x58\x96\x3d\xaf\xd1\x6c\x65\x42\x5c\x9b\x57\x19\xef\x09\x8e\x5f\
\x3f\x78\x15\x7b\x86\xc7\xb1\xd5\x04\xfb\xae\xdb\x05\x0d\x7f\x06\
\xa2\xa8\x0a\x57\x5b\xfb\xf1\xb9\x6d\x61\xcf\xc8\x38\x01\x2d\xc9\
\xde\x6b\x77\x42\xc3\xa5\x58\x1e\xc5\x95\x5a\x7c\xd6\x3e\x80\xaf\
\x6b\xfc\xa3\xba\x26\x4f\x70\xd5\xab\x1b\xb8\x2d\x01\xb5\x86\xcc\
\x83\x6f\x27\xd0\x34\x4d\xd8\x62\x07\x4c\x71\x0d\x2a\x8a\xf2\x89\
\xb3\x6c\xd5\x0a\x6a\xbd\x9d\x0e\xb4\xd3\x36\x7f\x64\x92\x54\x80\
\xb2\x63\xb1\x71\xe7\x3a\x8a\xa5\x09\xd2\xb1\x0c\xd7\x64\xea\xfc\
\xcb\x92\x7d\xfc\xf3\xd2\xe3\x5c\x5d\x58\x87\x6e\xa8\xbe\x23\x1d\
\x91\x59\x44\x6d\xe2\x3f\x23\xbc\x2b\x01\x9a\xe1\x7f\x86\x83\x00\
\x4a\x40\x62\x6e\x06\x24\xb8\x56\x3b\x06\x29\xc0\x05\xca\xd0\xeb\
\x34\xa8\x54\xf3\x22\xd0\x5c\xa9\xe6\x9f\x60\x55\x87\x82\x8f\xf1\
\xf7\x50\xb0\x4c\x9d\x8c\xdd\xc0\x2c\xd7\xc8\xb7\xc3\x68\x9a\x8a\
\x4b\x8b\xf8\x5c\x89\x1a\x3a\x47\xa7\x67\x71\x9c\x3a\xad\xe8\x5a\
\x6a\xc5\xbd\x64\xdc\xba\x78\x9e\x29\x9f\x25\x31\x9b\xa7\x8a\xc1\
\x7b\x93\x0d\x9a\xed\x2a\xfd\x3d\xab\xe9\x4e\x2f\xc7\x34\xed\xf9\
\xcc\x68\xd2\xc2\x04\x00\x96\x3a\x47\x19\xae\x2d\x20\x93\x5e\x4c\
\xba\xab\x8f\x70\x24\x82\xaa\x68\xf3\x63\x80\x13\xb5\xcd\x6c\x9a\
\xd9\xc5\x96\x64\x95\x0d\x72\x91\x97\xce\xf6\x33\x57\x9f\x61\xcf\
\x58\x82\x4b\x23\x05\x0e\xc5\x2b\x82\xff\x8e\xb3\x11\x24\x1c\x22\
\xe1\x04\x7e\xf0\xfb\x81\x20\xd1\x26\x21\xec\xd6\x38\x3c\x55\x13\
\x3c\x12\x99\xb5\xe0\xad\xa9\xb4\x14\x02\x81\xa8\x38\x24\xf1\xb9\
\x59\x1f\x93\xaf\xe1\xd0\x24\x96\x5e\x07\x33\x3b\x91\xdb\x0a\xa6\
\x19\x40\x96\x5c\x92\xe7\xea\x9a\x04\x24\x7c\x5d\xdd\x06\xd5\x5a\
\xd1\xdf\xb7\x25\x8b\x35\x35\xd5\xf0\xf8\x2c\x24\x16\x4b\x62\x9a\
\x26\xb2\xa8\x11\x3f\xd2\x45\x09\x8e\xaa\x6d\xe9\x9d\x91\xf1\x7c\
\x67\x72\xee\xe8\x1a\x18\x16\x60\x11\x81\xa1\x50\x9c\xee\xfe\x35\
\x4c\xc7\x52\x34\x0b\xfb\x49\xd3\x12\x99\xa4\x5e\x9b\x11\xc4\x71\
\x11\xcf\xb2\x2c\xe3\x8a\x8c\xd4\x44\x55\x24\x90\x20\xaa\xfa\x62\
\x01\x48\xb2\x2b\xae\x92\x5c\x29\x46\xb7\x9b\xe7\xab\x63\x6b\x78\
\x7c\xa8\x48\xc0\x8e\x10\x0a\xc6\xd1\x65\x89\x68\xc4\x25\x18\x0c\
\x63\xdb\x06\x96\xad\xa1\xa9\x9a\x6f\x8b\x2b\xb2\x97\x20\xa5\xeb\
\x12\x37\x96\x1e\x43\x9a\x83\xb7\xda\xcb\x85\xe8\x96\x65\x51\x28\
\x87\x49\xba\x45\x32\x72\x81\xaa\xa6\xb1\x3a\xae\x62\x96\x9a\x4c\
\xba\x11\x54\x4d\xc3\x71\x1b\xe4\x4b\x51\xd2\xee\x34\xcb\xd4\x49\
\xf6\xab\x2a\x89\x44\x86\x9e\xee\x85\xd8\x9a\x82\x23\x9b\x30\x04\
\x2d\x14\xfe\x6c\x78\x09\x0f\xf5\x1c\xe7\xdb\x0b\xa6\xd8\x7b\x24\
\x89\x24\xb7\xd1\x4c\x05\xcb\xd2\xd0\x54\x7d\x5e\xa7\x51\x6d\x33\
\xce\xb8\xc4\x17\xcd\x63\xe8\x25\x97\x37\xc6\x74\x6a\xf5\x19\x0e\
\xcc\xf6\xb1\x55\x99\xe4\x76\x6b\x8c\x6a\x51\xe6\xb5\x6c\x0b\x59\
\xd5\xc4\x7e\xf8\x3e\x15\x5a\x78\x9a\x51\x2e\xd9\x44\xdd\x59\xba\
\xe5\x02\x35\x4d\xa3\x2f\x11\x85\x22\x22\x78\x4d\xc3\x44\x53\x15\
\x1f\xc3\xec\x3c\xb7\x7e\x0f\xe3\xaf\xe3\x63\x14\x4d\xf1\x75\xc5\
\xd3\x75\xf4\x7c\x5d\x63\x51\x17\x8a\x47\x91\x1d\xb0\x4c\x5b\x04\
\x4b\x28\x1c\x22\x18\xb6\xd1\x35\x03\x49\x92\x3a\x3f\x33\xcc\x37\
\x17\xae\xb8\x86\x80\x4f\x6e\x21\x05\x0a\xa0\x08\xba\xdc\xe0\x2f\
\xa3\xc3\x38\xed\x36\xdd\xda\x3b\x6c\x28\x67\xd1\x5a\x0e\xaf\x4a\
\x17\x88\xe2\xd3\xa1\xe9\x93\x52\x24\x14\xd5\x0f\x96\xf1\xd9\x14\
\xdd\xad\x1c\xdf\x8a\x9d\xe0\x68\xdd\xe0\xce\xa9\xf7\xc1\x05\x5c\
\x97\x56\xab\x26\x3a\x96\xed\xca\xd5\xdc\x93\x7f\x92\xef\x04\x0f\
\x73\xf5\x40\x90\xb2\xe2\xb0\xc2\x9e\x26\x2a\x35\xf8\x73\xe3\x0b\
\x84\x43\x49\xa2\xb1\x18\x81\xa0\x89\xa2\xa8\xbe\x2d\x52\x83\xaf\
\x87\x87\x90\xdd\x36\x57\x4c\x1e\x64\x81\x33\x45\x56\x4d\xf2\xc3\
\x4a\x1f\xaa\x56\xa0\x2b\xd5\xcb\x6e\x7d\x0b\xd7\x4f\xfc\x82\x47\
\xc2\xef\xf3\x5b\x2d\xcc\x96\x19\x7f\xef\x17\xd4\x55\x22\xa0\x6c\
\x3b\xc0\x2b\xda\x36\xee\xc8\xfd\x8c\x7f\x0f\xbd\xcb\x8b\x4a\x02\
\xbb\x3c\xcd\xba\x63\x8f\x71\x20\x72\x09\xaf\xa6\x6f\x14\x7c\x6a\
\xb2\xce\xae\x5a\x17\x7f\x37\x54\xe6\xbb\x3d\x59\xfe\x31\x3d\xc9\
\x37\xe6\x72\x94\x67\xb3\x24\xe2\x09\x0c\xcb\xf2\xaf\x47\x17\x5c\
\xba\x18\x6b\xaf\xa0\x7f\xf6\x88\x10\xfb\x37\xe3\x6d\x1c\xa3\xc9\
\x71\x63\x0d\xcc\xec\x25\x43\x85\xd7\x2a\x61\xf2\xb3\x05\xd2\xa9\
\x30\xc9\x64\x1a\xce\x20\x1c\xa2\xaa\x7e\xf7\xb2\xc7\xb9\x88\x6d\
\xe5\x1d\xfc\x20\xfa\xbf\x76\x97\x0e\xf8\x6b\xbb\xae\xa8\x17\x0d\
\xc3\x64\x77\xfb\x22\xae\x99\xdd\xc1\x23\x91\x0f\xb9\x1d\xf4\x31\
\x78\x18\xc7\xc3\x28\x16\xdb\x65\x4f\xd7\x82\xa7\x6b\xe0\x1c\x5d\
\x2d\x4f\x57\xa5\xc1\x57\xb5\x5b\x28\x96\xf6\x12\x65\x8e\x07\x13\
\xc3\x7c\x7f\xb6\x1f\xc7\xa9\xa1\x28\x6d\x0c\x43\x46\x55\xd4\xce\
\x4c\xa6\x13\x0d\x22\x58\xce\x9b\x83\x75\x9e\x3a\xf3\x06\x5d\x6a\
\x71\x27\xa3\x00\x38\xb9\x2c\x39\x25\xc0\x4f\xd5\xb5\x3c\xdc\xec\
\x23\x60\x37\xc9\xa4\xfb\x61\x0c\x50\xc0\x34\x0c\x21\xe0\xe3\x5d\
\x7f\xcc\xed\x23\x4f\x72\x43\xed\x2c\x9b\x5d\x8d\xed\xe5\x25\xdc\
\xdd\x2e\xe2\xb8\x6d\x11\x2c\x9a\x96\xe0\x40\xea\xd3\x3c\x11\xee\
\x61\xe3\xe0\x7f\xb1\xd5\xcd\xa2\x34\xca\x8c\xb6\x82\xbc\x61\x2c\
\xa6\x29\x57\xc4\x29\x36\x74\x19\xcb\xd2\xfd\x60\x29\x81\xee\x0a\
\x5b\x84\x43\xf2\x45\x83\xe7\xf4\xe5\x3c\xec\x2e\x25\x57\x9f\x23\
\x9d\xea\x63\xc9\xe2\x55\x1c\xea\x5e\x82\x8e\xc1\x85\x27\x9f\xe7\
\x96\xd6\x38\x93\x45\x93\x87\xb5\x35\xfc\xb8\x15\xc6\xb6\xa1\xaf\
\x67\x80\xe3\x03\x9b\xf8\x85\x95\xe2\xa2\x63\x4f\x71\x43\x73\x12\
\xa7\x90\xe3\xb8\x9a\x60\x57\xa3\xc6\xa9\xba\x5f\x4b\xa1\x82\x65\
\x5a\xec\x70\x56\xf0\x6e\xdd\x62\x4b\xed\x24\x6f\xca\x67\xd8\x57\
\xe8\xf6\x38\xf7\x11\x8d\x46\xd1\x35\x6d\xbe\xbd\x3f\x1d\xbb\x8c\
\xfe\xd1\x23\x1c\x97\x23\x94\x24\x83\xb8\x1d\xc2\x49\xaf\xa0\x70\
\xa2\x8b\x58\x65\x92\xb7\x5b\x29\x54\x45\x21\x16\x4d\x92\x88\x27\
\xe1\x20\x48\x2a\x18\xa2\xf6\x51\x79\x31\x7d\x1d\x91\xb6\xc2\xba\
\xf1\x9d\x7c\xce\xc9\xf1\x8a\xb7\xcf\x4d\x94\x70\x69\xd3\x68\x55\
\x89\xe8\x09\x7e\x93\xb9\x9e\xe8\x59\x1f\x73\x43\x7b\x8a\x5f\x15\
\xfb\xb9\x53\x2a\x09\x5d\x2b\xd5\x12\xc1\x40\x98\xfd\xc9\x4f\xf1\
\x44\xe4\x1c\x5d\x9b\x9e\xae\x6d\x4f\x57\x7d\x31\x2d\xa9\xca\x8f\
\xf5\xab\xf8\x93\xfc\x6b\x7c\xae\x3d\xc9\x76\x37\xce\x74\x29\x4b\
\xad\xbe\x88\x48\x24\x82\xa1\xfb\x7c\xf8\xd8\xaf\xd5\xd2\x8f\x7e\
\xb8\xd7\x3d\x77\x1c\x0f\xe7\xf7\xd1\xcd\x56\x83\xb1\xb3\x43\xec\
\xdd\xf7\x3a\xb9\xdc\x18\x9a\x6e\xa0\x69\x86\x80\xa8\xaa\x46\x3c\
\xde\xcd\xe2\x45\x2b\x49\x25\x7b\x29\x14\x27\x39\x74\xe8\x77\xa2\
\x78\x5d\xbd\xf2\x12\x02\x81\x30\xc3\xa7\x8f\x72\xfa\xf4\x11\xea\
\x8d\x3a\xa6\x61\xd1\x76\xda\xcc\xcd\x95\x88\xc7\xd3\xac\x5b\x7b\
\x19\xc9\x78\x8f\xf8\xde\xc8\xc8\x20\xe3\x93\x23\xd4\x6a\x15\x11\
\x6c\xa2\xdb\xe9\x59\xcc\x92\x45\xab\x89\x46\xbb\x44\xe1\xd5\x6c\
\x36\x19\xcb\x9e\xf0\x6c\x79\x83\xdc\xd4\x98\xb0\x43\xd3\x74\x31\
\x1f\x88\x46\x92\x2c\xe8\x5b\x4e\x5f\xef\x52\x82\xc1\xa8\x87\xad\
\x93\x9d\x18\x61\x78\xf8\x30\x93\x53\xa3\xb4\x5a\x0d\x51\xf7\x74\
\x25\xfb\x18\x18\x58\xed\xfd\xef\x17\x76\x4e\x79\xeb\x0c\x7b\xf6\
\x8d\x4f\x9c\xa6\xd1\xa8\x89\xa0\x8c\x84\x93\xde\x3a\x4b\xc4\x69\
\x1f\x3a\x75\x50\x0c\xf9\x96\x2d\xbd\x10\x80\x91\x33\x47\x71\x9c\
\x36\x03\x8b\xd7\xd2\xdb\xb3\x04\xdb\x0e\xce\xd7\x2d\xed\x76\x8b\
\xe9\xc2\x04\x87\x07\xdf\xe1\xcc\xe8\x31\xa2\xe1\x24\x6b\x56\x5f\
\x4a\xd2\xd3\x66\x72\xf2\x0c\x1f\x1c\x7e\x8b\xf2\x6c\x91\x45\x0b\
\x56\xb2\x62\xf9\x46\x61\xe7\xd9\xf1\x53\xf8\x9a\x49\xac\x59\x75\
\x29\xb1\x58\xca\xb3\xe5\x0c\x43\x27\x0f\x88\xb5\x44\xfd\xe4\xba\
\xd4\x6a\x73\x24\x53\xbd\x6c\x5a\xbe\x9e\x70\xa4\x8b\x91\xfc\x38\
\x27\x86\xf6\x93\x9a\x18\xe4\x07\xb9\xd7\x19\x96\x6d\xae\xac\x2f\
\x24\x93\x59\xc8\x15\x97\x7f\xde\xb3\x7f\x80\x99\x99\x69\x86\x47\
\xfe\x87\xdb\x08\xf5\x7a\x45\xf0\xb5\xac\x10\x99\xf4\x42\xe2\xb1\
\x2e\x26\x73\xa3\x64\xb3\xa7\x08\x05\x63\x2c\x5f\xb6\xc1\xd3\x7b\
\x40\xf8\x08\x59\xf6\x63\xa2\x33\xfa\x45\xb5\x02\xba\x3f\xbe\xe7\
\xe3\xe3\xd3\xce\x2f\xa0\x86\xab\x78\x1b\x2f\xc4\x30\xb6\x52\xad\
\x94\x71\x5c\x57\x5c\x33\x62\xa4\xaf\x99\xa2\xae\xf0\x8a\x35\x74\
\xc3\x24\x14\x0c\x10\x0c\xd8\x20\xe1\x91\xee\xc2\xd0\x4d\x82\xe1\
\x00\xfd\x7d\x8b\xa8\x37\xaa\x28\xb2\x2a\xbe\xdb\x6a\x35\x31\x4d\
\x8b\x44\xbc\x1b\xcb\x0e\x78\xe4\x83\x24\x12\x71\x4a\xa5\x95\x1e\
\xae\x26\xd2\xae\xae\xfb\x5d\x56\xd8\x7b\xfb\xfd\xbf\x84\xd1\x56\
\xe9\x53\x16\x61\x18\xdb\xc4\x29\x02\x49\xfc\xe9\x9a\x8e\x1d\x0c\
\x09\x27\xdb\x56\x40\xb4\x7f\xae\x63\x61\xd9\x3a\x89\x64\x82\xd9\
\xd9\xa2\xd8\x53\x55\x35\xe1\xa0\x48\x38\x8e\x61\xd8\x00\xa2\x26\
\x4a\x24\x12\xcc\x94\x0a\x34\x9a\x35\x00\x2c\x33\xe0\x71\x89\x80\
\x24\x93\x4a\x75\xa1\xc8\xb2\x08\x46\x49\x56\x48\xa5\xd3\x34\x1b\
\x0d\xc2\xa1\xa8\xb8\xeb\x35\x4d\x9f\xff\xc5\xda\x75\x55\x54\x3d\
\x8d\x6e\x6e\x66\xe9\xd2\x15\xe8\xba\x45\x3c\xde\x85\xa9\xdb\xa2\
\xc6\x09\x86\x6c\x71\xf8\xc2\xc1\x18\x91\x48\x02\x55\x55\x59\xa0\
\x2d\x22\x10\xb4\x04\x8f\x78\x2c\x8d\x69\xd9\x04\x83\x01\x92\xc9\
\x24\x73\x95\x19\xdf\x2d\xc8\x22\x73\x98\xa6\xcd\xa7\x8f\xee\x64\
\xe3\x13\x0f\x70\x7c\xe1\x06\x66\x1a\x35\xd6\x9d\x39\x08\x4d\x78\
\x2d\xbd\x98\x80\x16\x10\xdd\xa3\xd7\xa9\x89\xac\xe7\x65\x0a\x62\
\x89\x18\xe5\xb2\xa7\x6b\xbd\x26\x06\x74\xba\x6e\x12\x0a\x44\x44\
\x87\xd9\xdb\xd3\xcf\xd2\x25\x2b\xc5\xe1\xf4\xf8\x09\xff\x29\x9a\
\xfe\xdf\x7d\x5a\x4d\x8b\x14\x31\x10\xad\x4c\xeb\xae\x9f\x88\xe2\
\xc7\xae\x30\x97\x45\xd0\xf5\x22\xa8\x77\xff\xa5\x57\xff\x81\x1e\
\xbc\x28\xf8\x0b\x3c\x28\xc8\xb2\xc8\x80\xac\xe7\x11\xbd\x8c\x7a\
\x58\x3b\xf5\x4c\xaa\xc8\x3c\x32\xd5\xf4\x6d\xa6\x52\xa9\xbc\xee\
\x54\xba\x92\xbc\x27\x09\x5b\xf9\x03\x25\x15\x6f\xdf\xac\xdc\xcc\
\x44\xe1\x0d\x2a\xd3\x46\xa0\x2a\x79\x1c\x25\x43\xdd\xd7\xf8\x1e\
\x18\x4f\x92\x16\xc9\xae\xc0\x53\x31\x65\x85\x68\x2e\xc8\x45\x6a\
\x62\x58\x26\xab\x6a\xb1\x8d\xce\x28\x39\xff\x50\x6d\x96\x34\xee\
\x63\x23\xb9\x9f\x66\x51\x68\x4b\x02\xdf\xf8\x55\x9f\x44\x72\xac\
\x8d\x51\xfd\x79\x0b\xec\x44\xd8\xb0\x18\x2c\x6e\xf3\x05\x60\x31\
\x73\x56\xc1\x16\xef\xa2\x25\xbb\x63\x81\xe3\x81\x8e\xf6\x3b\x59\
\x4c\xf1\x23\xb1\xe3\xb2\x58\xed\x76\xd3\x31\x42\x1c\xff\xd0\x69\
\x77\x90\x20\xe2\x63\x7a\x4c\x7b\x06\x5b\x20\x6e\xcb\xb9\x7a\xda\
\x3e\x2f\xd5\x85\x93\x44\xb2\xaa\x64\xb3\xc3\xf6\x0b\x36\x2e\x20\
\x63\x1e\x9d\x40\x54\x98\x3f\xe0\xc0\xee\x9c\x7e\x94\x07\xaf\x5f\
\xca\xad\x6f\x27\xa5\xbc\x9c\xcb\xba\xdc\xbc\xbe\x3b\x3c\x92\x57\
\x25\xd1\xa4\xf4\x7d\x7c\xfc\x4c\x9e\x3f\x7d\x51\xf6\x6e\xf7\xcb\
\xdf\xc1\x62\x2b\xb2\x27\x1d\xea\xe0\xc9\xc7\x4f\xc3\xf6\xfd\xf8\
\xc1\x64\xb0\x83\x03\x28\x43\xe9\x38\x82\xf4\xe1\xfd\x19\xa9\x49\
\xee\x82\x3b\xb2\x84\x65\x2a\x32\xc5\xec\xd7\x04\x4d\x34\xcf\xb0\
\xa1\x7d\xac\x66\xe9\xd9\xbd\x09\xed\x19\xb1\x34\xb3\xe3\x4d\x0c\
\x87\x9e\xf7\x9d\xd6\x7b\x4c\x68\xdd\x66\x19\x6c\xbe\x38\xf3\x09\
\xf8\xa3\x4a\x6b\x92\x8e\x22\x8e\xe9\xf8\x98\xd3\xc8\x71\x9a\x14\
\x2a\xbf\x37\x1b\x39\xfb\xfe\x55\x56\xab\x2f\xf2\xf3\xd7\xba\x7e\
\x35\xad\x1c\x1f\x1c\x2c\xe5\xd1\xc3\x27\xb2\x5c\x1e\xd5\xaf\x23\
\x13\x99\x78\x39\x0f\xfc\x1f\x86\x0c\x04\xa1\x9f\x86\xf6\xd9\x1a\
\x62\x46\xca\x3e\xc9\x94\x32\x81\xed\xf4\x0d\x0a\x11\xfa\x44\x6a\
\x9c\xf0\x01\xd2\x0d\xbb\xd9\x1a\x19\xb2\x49\xf1\x53\x67\xd9\x01\
\x1a\x5c\x19\x93\xa2\x02\x80\xf4\x3c\xb3\x58\xd0\xba\x77\x8f\xda\
\xcb\x14\x23\x7b\xcd\x18\x00\xe2\xfc\x87\x45\xec\x9f\x7e\xe6\x46\
\x03\x47\xe1\x53\xb5\xed\xed\x2d\x4a\xd9\x3f\x2e\x25\xb2\xd0\x1e\
\x7f\x36\xa2\x50\x2b\xc5\xd7\x6f\xdc\x2c\xe5\xbf\x96\xe2\x2b\x56\
\xee\x1d\x0e\x63\x33\x8e\x74\x9a\x9c\xa8\xe9\x8a\xef\x2d\x7d\xfe\
\xf4\x03\x6d\xb5\xd3\x9f\xe0\x43\x5b\x3f\x19\x0d\x0c\x85\x42\x00\
\xf3\x62\x56\xcb\x39\x3d\x6b\xe0\x7c\x72\x7c\x1a\xe9\x1b\x7b\xf7\
\x0b\x34\x8e\xc3\x39\xdc\xf5\x99\x5f\xfa\x6c\xa6\x21\x48\x3e\xd9\
\x1e\x3a\xc6\x19\x48\x01\x4b\x27\xc8\x8a\x67\x8d\x69\x09\x45\xf6\
\xf2\xee\xe5\x0e\x56\xda\x9c\x68\x1c\x58\x1a\x3b\xa0\x73\x10\x7b\
\x1d\xa7\x50\x39\x47\xc7\xff\x2e\x65\xbb\x18\x67\x1d\x42\x83\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\xf5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x07\xbc\x49\x44\x41\x54\x78\x5e\xc5\x97\x6b\x8c\x5c\x65\
\x19\xc7\x7f\xe7\x9c\xb9\xec\xee\xcc\xee\xce\xce\x74\x77\x67\x29\
\xbd\x41\x5b\x7a\xe3\x22\xb6\xd6\x86\xb4\x5c\x84\x46\x4b\x22\x12\
\x2d\x06\x88\x48\x24\xf8\x45\x25\x22\x31\x5e\x48\x50\x3e\x18\x43\
\x30\x7c\x50\x94\x98\x26\x5e\x30\x11\xac\xa6\x18\x5b\x08\x4a\xc4\
\x16\x28\x2d\x85\x2e\x6d\x97\xdd\xd2\xee\xfd\xd2\xd9\xed\x5c\x2f\
\x3b\x97\x73\xce\x7b\xde\xf7\x78\x76\x72\x92\xc6\x9d\x26\x1b\x22\
\xc6\xff\xe4\xc9\x9b\xc9\x64\xf2\xfb\xcd\xf3\x3c\x99\xbc\x47\x73\
\x5d\x97\xff\x67\x02\xf8\xd1\x34\xed\x61\x60\x37\x30\x08\x0c\x01\
\x13\xc0\xa0\x27\x58\xfd\x9f\x0a\xf8\xf0\xbb\xba\x57\x05\xbf\xfe\
\x9d\x17\x7a\xb6\xce\x8e\x88\x9b\xa7\x07\x45\x66\xf6\xbc\x28\x0e\
\x1e\xae\x67\x3c\xb1\x61\x7c\x19\x5f\x28\xc3\xc7\x18\x0d\xe8\x0c\
\x84\xb4\xdf\xaf\xdb\x16\xbe\xfd\x81\xa7\x13\x91\xd6\x50\x80\xba\
\xed\xe0\x87\x42\xca\x29\xa6\x27\x9d\xe2\xc4\x69\x91\xfd\xf0\xa8\
\x99\x99\x1b\xb5\xd3\xf8\x5d\xf2\x85\x26\xfe\x5b\x81\x87\xaf\xdc\
\x10\xfa\x76\x57\x9f\xb1\xe9\xbe\x9f\xc4\xb9\x76\x55\x92\x68\x4b\
\x80\x9a\x65\xe3\x28\x49\xd5\xb4\x29\x56\x2d\x2a\xa6\x83\x23\x15\
\xa2\xee\x8a\xd4\x79\x3b\x33\x35\x20\x32\xe7\x8e\x9b\x99\xd1\x7e\
\xab\x08\x97\x84\x80\x89\x8f\x32\x36\x0d\xf8\xf3\xb5\xb7\xb6\x7e\
\x09\xc0\x13\x60\xdb\x55\xeb\xf1\xba\x00\x9a\x02\x14\x9a\x26\x01\
\x05\xde\x69\x0a\xd1\x10\xaa\x2c\x92\xf2\xc6\x96\x59\x18\xdb\xc8\
\xbb\x66\x66\xe2\x7d\xbb\x38\x5f\x90\xc3\xbe\xcc\xdf\x3d\x99\x59\
\x7f\xcc\x5d\xc0\x36\x20\x09\x9c\x06\x06\x3c\x51\x15\x60\x51\x82\
\x7a\x3b\x4a\x01\xb8\xbe\xc0\xa5\xb3\xc5\x50\xb4\x46\x5d\xba\xa3\
\x12\xba\x25\x2e\x0b\x52\x36\xf3\x2b\xad\xee\xd2\x76\xb3\xbb\x72\
\x9f\xf0\xc4\xec\xc6\xd8\xa6\x87\x44\xe6\x85\x27\xf2\x41\x60\xdf\
\x02\x3c\x60\x04\x9f\x6f\x6f\x4d\x2e\xdf\xbd\xee\xf1\xc8\x68\xfe\
\xf0\xc5\xfe\xa9\xbf\xa4\x3d\xa9\x7b\x9a\x04\x5c\x15\x46\xba\x2e\
\x1a\x00\x6e\x03\xac\x6b\xa0\xe9\x1a\x06\x5e\x69\x5a\xe3\x3d\x48\
\x94\xeb\xa0\x07\x04\x2d\x11\xc1\xb2\xa8\x00\x24\x68\x92\x0b\x7d\
\xf3\xb1\x9e\x95\xe5\x98\x27\xb0\xdb\x83\xec\xf7\x6a\x5f\xa4\x25\
\x96\x8c\xb7\xaf\x5f\x71\xfd\x55\x9f\x8b\xea\x21\x47\x0e\x5c\x38\
\x94\xb5\x9c\x4a\xa0\x49\xc0\x91\x01\x0f\x0a\x1a\x34\x4e\x7d\x01\
\xa8\x1b\x84\x08\x50\xce\x3b\x4c\x4f\x57\xa9\xd4\x6c\xd0\x15\x2d\
\x61\x8d\xbe\xbe\x56\x7a\x92\x1d\xd8\xd2\x44\xe1\x00\x92\xbe\xce\
\x76\x4a\x55\x85\x9f\xbb\x7a\x63\x6b\x36\xc6\xa2\xc9\x4d\x7b\xd6\
\x3e\x49\x5f\xe2\x0a\xd2\xd5\x35\x1d\xa1\x60\x9b\xee\x09\x04\x2f\
\x23\xa0\xf9\x70\x30\x3c\x70\xc0\x30\xd0\x64\x90\xd3\xef\xd5\x98\
\x1c\x17\x18\x81\x30\x9a\x1e\x06\xad\xb1\x17\x4c\x8f\x09\x3a\xbb\
\x24\xd7\xdf\x18\x21\x14\x91\x48\x1c\x34\x14\x61\xa3\x0d\xa0\xb7\
\xab\xa3\xf7\x6b\xcb\xa2\x2b\xae\xd9\x7c\xd5\xfd\xec\xdc\x71\x13\
\xf9\x52\x19\x23\xe4\x28\x0d\x17\xc0\x6d\x16\x70\xfc\x5f\xae\xeb\
\x18\x9a\x4e\xc8\x83\x1d\x7d\xb5\x46\x39\x17\x21\x1c\x0c\xa1\x6c\
\x07\xe1\x98\x68\x86\x83\xeb\x86\x09\x04\x0c\x2a\x69\x78\xfb\xb5\
\x3a\x3b\xee\x68\x41\x0f\x0b\x14\x8a\xf4\xb4\x22\x18\x08\x5f\xbd\
\xba\xe7\xba\xc8\x8a\xde\x4f\x18\x77\x7d\xfa\x8b\x1c\x1f\x79\x8d\
\x2d\xab\x3f\x45\xb8\x15\xfc\x19\xa3\xb3\x28\x42\x3a\x5e\x49\x94\
\x54\xe8\xe8\x0c\x9e\xb0\x28\xcf\xc6\x69\x35\x12\x88\x9a\x46\x67\
\xdc\xe4\xd6\xcf\x87\xd8\xf3\xe5\x28\x7b\xf6\x46\xb8\x7a\xbd\x8e\
\x53\x0b\x62\xd8\x49\xce\x1c\x05\x57\x05\x99\x18\xae\xf2\xa7\xa7\
\x53\x6c\x5c\x71\x53\x67\x32\xb6\xb6\xf3\xde\xdb\x1f\xe1\x8d\x91\
\xfd\x1c\x3b\xf7\x07\x12\xdd\x11\x02\x6d\x28\x21\xeb\x0a\x70\x9a\
\x04\x6c\x69\xe3\x48\x81\x72\x25\x4a\x28\x46\xfa\x5b\x68\x6f\xeb\
\x46\x98\xd0\x99\x2c\xb0\x65\x17\x58\x46\x8d\xa2\x59\xa5\x6a\x99\
\x84\x5b\x5d\xae\xdb\x2e\x71\xcc\x00\xb5\x74\x2f\xb9\x94\xcb\xbb\
\x87\x53\xb4\xce\xdf\x40\x22\xb2\x3a\x72\xdb\x8e\xbb\x49\xf4\xc6\
\x39\x7e\xe6\x65\x92\x89\xe5\x44\x62\x06\x89\x65\xb1\xf0\xf2\x9e\
\xb5\xad\xc0\x8a\x66\x01\xc7\x44\x48\x0b\xd7\x15\x8c\x0f\xd6\x31\
\x54\x27\x9a\x0a\x52\xb7\x32\x6c\xbe\x59\x52\xac\x97\xa8\xd9\x35\
\x34\x1c\x4e\xbd\x2e\x39\xfa\xb7\x10\x42\xda\xc4\xfb\x0a\x44\x42\
\x09\xa6\x86\x0c\xd2\xa9\x0a\xa3\xa3\x93\x0c\x8d\xf4\xb3\x7c\x79\
\x1f\x9d\xcb\xc2\x3c\xf3\xf8\xf3\x7c\xf3\xc1\xc7\xd1\xc2\x26\x37\
\x6c\xd9\x9a\x7c\x78\xef\x13\x1b\x80\xdd\x4d\x02\xa6\x53\xc1\x96\
\x75\xa4\x6b\x52\xca\x39\x38\x02\x72\x99\x02\x1d\x57\x66\x28\x9b\
\x25\x2c\xa7\x86\xa1\x0b\xde\x3a\x50\x23\x33\xda\x4b\x50\x8f\x70\
\xf2\xcd\x12\x5d\xab\xf2\xe4\xb3\x25\x0a\x29\x83\x35\xd7\x46\x68\
\x89\xd7\x30\x65\xdd\xaa\xca\x54\x21\x5f\x1b\xab\x14\x0a\xd3\x98\
\x6e\x8e\xb6\x58\x00\x4b\x2b\xd4\x86\x67\xde\xcd\x01\xe7\x9b\x04\
\xea\xa2\x8c\x2d\x2b\x38\xaa\xca\x07\x27\xca\x54\xe6\xab\x64\xd2\
\x39\xda\x7b\x2b\x58\x72\x1e\x5d\x37\x39\xf9\xea\x3c\x33\x1f\xc4\
\x41\xe9\x14\x2b\xe3\x6c\xff\x42\x95\xd8\x95\x55\xc6\xc7\xa6\x18\
\x1b\x2a\xb3\x61\x7b\x98\x9d\x0f\xd9\x94\xc5\x70\xf6\xbb\x4f\xdd\
\xfb\xc6\x33\xcf\xfe\x68\x64\xef\xa3\x37\x71\xe8\xe0\x11\x3a\x62\
\x41\xa6\xd2\x1f\xe4\xfe\x78\xf0\x17\x53\xc0\x1b\x4d\x02\x96\x2c\
\x53\x77\x8a\x98\xb2\x44\xb1\x98\x27\x9b\xc9\x91\xc9\x64\xa9\x5b\
\x25\xd0\x2a\xcc\x9e\x9f\xe7\xed\xbf\x1a\x28\xe9\x92\x4e\x67\x48\
\x4d\x17\x98\x4b\x65\x11\xb2\xc8\x6c\x6a\x8e\xb9\xb9\x39\x84\x91\
\xe7\x9a\x9d\x06\x2e\xea\x9d\x62\x39\xff\xd0\xbf\x4e\xbe\x74\x21\
\x44\xc7\x68\x40\x0f\xd6\x6b\x32\x5f\xbf\x98\x9f\xa8\x68\x9a\x06\
\xa0\x35\x09\x48\x25\x10\xd2\x6a\xec\xc2\xca\xcd\x16\xb9\x5c\x8e\
\x6a\xb5\xca\xc4\x60\x8d\xb1\xfe\x1a\xbf\xfe\x5e\x05\xd0\x98\x9c\
\x1a\x67\x72\x72\x12\xd7\x69\xe3\xd0\x73\x92\xd1\xf7\x6b\x08\x21\
\x68\x89\xcd\x23\x94\x8d\x7f\xd1\x51\x40\xc1\x71\x4d\x53\x51\x9f\
\x49\x65\xcf\x4d\xbf\x79\xe2\x95\xe9\x93\x03\x6f\x65\x6d\x61\x29\
\x40\x36\xff\x15\x03\x0a\x17\xa1\x24\xad\x31\x97\x42\xa1\x40\x34\
\x1a\xa5\xff\x9f\x0e\x8f\xfd\xb2\x83\x9e\xd5\x39\x2e\x4c\x5c\xe0\
\x9e\x47\x03\x8c\x0f\x99\x9c\x3b\x16\x27\x1c\x8e\xf3\x9b\x27\x2f\
\x12\xef\x72\x89\xf6\x56\x90\x4a\xe1\x67\x41\x44\x2d\xfc\xe7\xdb\
\x14\x36\xbe\xf8\xfa\x93\x5b\x5f\x7c\x1d\x05\x0c\x2c\x94\x27\xd9\
\x24\xe0\x7f\x49\x21\xa4\x64\xe3\x8e\x10\x2f\xef\xcb\xa3\xd7\x74\
\x4c\xb3\x95\x03\xcf\x15\x78\xe4\x67\x09\x0e\x1f\xca\xb3\x7e\xa7\
\xc1\x96\x5b\x22\x28\x27\xcf\x87\xc7\x3b\x69\x8f\xc6\x29\x97\xcb\
\xdc\xbe\x0b\x5f\x80\xff\x90\xc0\xbf\xd0\xb0\x28\x3a\xcd\x41\xb9\
\x6e\x43\xc0\x31\x24\xdb\xef\x6c\x74\x01\x80\x63\xaf\xc0\xef\x7e\
\x9a\xe6\xc6\x5d\xed\x84\x8c\x00\xd2\xd2\x48\xf4\xd1\xf8\xbc\x54\
\x2a\x79\x02\x25\x72\x73\x36\x4b\x64\x69\x01\x8f\x8f\xa3\x14\x35\
\x61\x73\xeb\xfd\x6d\x24\xd7\x55\x1a\x10\xaf\x65\x9c\x3a\xe2\xf2\
\xfd\xbb\x53\x7c\xeb\x96\x49\xbe\xe1\xd5\x8b\x3f\x2f\x63\x9a\x26\
\xd9\x6c\xc6\x93\x28\x33\x32\x50\x5f\x7c\xdf\x58\xfa\x4e\x78\xf9\
\x2e\x28\x2c\xc7\xa1\x54\xaf\xf3\xc0\x8f\x63\x1c\xfc\x55\x89\x23\
\x2f\x95\x31\x0c\x63\x41\xc4\x2b\x50\x4a\x7a\x70\x8b\x35\x9b\x43\
\x7c\xf5\x07\x3d\xec\x7f\x76\x8e\xdb\x1e\xec\x40\xb8\x02\x3f\x11\
\x6f\xfe\xbd\x40\x05\xa8\xfb\xa3\x58\x5a\xe0\x52\x17\x24\x35\x01\
\x2a\x00\x77\x3f\xd6\xc1\xb6\xcf\xb6\x71\xe4\x40\x89\xf4\x8c\x0d\
\xae\x4b\x24\xa6\xf3\xc9\xcf\x24\xb8\xe1\x8e\x36\x4c\x47\xf0\x95\
\xa7\xe2\xde\xe9\xe0\x2a\x17\x3f\x57\x00\x3b\x81\x61\x60\xcc\x93\
\xa9\xb8\x5e\x9a\x04\x96\x92\xa8\x8b\xc6\x4e\xd0\xb5\xce\x60\xef\
\x0f\x63\x18\x9a\x0e\x80\x50\xb2\xd1\xa5\x7c\xad\xda\xd8\x9b\xc6\
\xcb\x67\xfb\xe9\x01\x76\x00\x12\xc8\x00\x55\xa0\x59\x60\x49\x09\
\x57\x21\xbd\x12\xd2\xc1\x6b\x69\xa3\x5c\x1f\xa8\xdc\x26\x28\x73\
\x23\x42\x4e\x9c\xb1\x4d\x60\x14\x48\x03\x65\x40\x2c\x82\x37\x09\
\x2c\x29\x22\x71\x69\xa2\x01\x13\xa7\x6d\xe1\x01\xad\xb1\x7e\xab\
\x9a\x3a\x6b\x97\xab\x65\x95\x02\xce\x01\x87\x81\x0f\x81\x8b\x40\
\xd9\x93\xbe\xac\xc0\x3f\x0a\xb3\x72\xd3\xc2\xb5\x9c\xa5\x83\x55\
\x51\xee\xf8\x69\x4b\x4c\x9e\x16\x66\x6a\x58\xd4\xce\xbf\x63\x16\
\x81\x49\x1f\x34\x00\x9c\x05\xe6\xf0\x17\x0f\xb0\x01\x71\x69\x01\
\x9b\x05\xf6\xcf\x8d\x89\x3b\xdb\x13\xfa\x2a\x20\x02\xcd\xed\x9c\
\x1b\x15\x62\xdc\x03\x8e\xbe\x67\x95\x73\x33\xa2\x08\x8c\xf9\xa0\
\x61\xe0\x24\x50\xf2\x81\xa6\x0f\x94\x3e\x70\xc9\x04\x3c\xb3\x92\
\x37\xd3\xdf\x66\x67\xe4\x72\x60\xeb\xec\xb0\xc8\x7a\xd0\xfa\xf0\
\x09\x4b\x9e\x3f\x66\x0a\xaf\x9d\x19\x60\x1a\x18\x02\x4e\xf9\xf0\
\xb2\xbf\x50\x96\x5f\x3e\xf0\xa3\x47\xf3\x04\x16\x3f\x9c\x9e\xf5\
\x67\x96\x06\x2e\x00\x05\x60\xde\x07\x9a\x80\xf0\x81\x2e\x1f\x43\
\x9a\x1e\xcf\x7d\x19\xdd\x1f\x8f\x06\xa8\xa6\x96\x7e\x8c\xf9\x37\
\x48\x49\x93\x4e\x5e\xab\x2a\x08\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x00\x92\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x01\x00\x00\x00\x15\x08\x03\x00\x00\x00\xb0\x8c\x74\x36\
\x00\x00\x00\x3f\x50\x4c\x54\x45\x13\x00\x76\x1d\x0a\x7c\x26\x14\
\x81\x2f\x1f\x87\x39\x29\x8d\x42\x34\x93\x4c\x3e\x99\x55\x48\x9e\
\x5f\x52\xa5\x68\x5d\xab\x71\x67\xb0\x7b\x71\xb7\x84\x7b\xbd\x8d\
\x85\xc2\x97\x8f\xc8\xa0\x9a\xce\xa9\xa4\xd3\xb3\xaf\xda\xbc\xba\
\xdf\xc6\xc3\xe5\xce\xce\xeb\x3a\xf5\x91\x9d\x00\x00\x00\x0e\x49\
\x44\x41\x54\x08\x1d\x63\x61\x60\x61\x24\x0e\x02\x00\x08\xf2\x00\
\x69\x3c\x62\x5d\x93\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x0a\x90\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x49\x00\x00\x00\x15\x08\x06\x00\x00\x00\x0a\x3f\xc3\x50\
\x00\x00\x0a\x57\x49\x44\x41\x54\x78\x5e\x85\x98\x7b\x6c\x9b\x57\
\xf9\xc7\x3f\xc7\xaf\xed\xc4\x8e\x9d\xfb\xc5\x69\x9c\xa6\x69\xbb\
\x34\x69\xda\x6e\x4d\xb7\x5e\x36\xa9\x15\x65\x65\x6d\xc7\xba\x8e\
\x15\x7e\x05\x15\x86\xc6\x06\x42\x68\x42\xe2\x0f\xc4\xb4\x0d\x04\
\x4c\xa0\x0d\x10\xe3\x22\xa4\x41\xcb\x26\x2e\xe2\x07\x48\x1b\x12\
\x83\x69\xac\x4b\x3a\xba\x6e\xeb\xbd\x6b\xd3\x26\x6d\x73\x6b\xae\
\x6d\x9c\x34\x76\xdd\xd8\xf1\x25\xef\x43\x64\x39\x3e\x7a\x5f\x63\
\x71\xfe\xf1\x79\x8f\xcf\xf9\x3e\xcf\x79\x2e\xdf\xf3\x9c\xa3\xaa\
\xf9\xa6\x08\x00\xa0\x00\x00\xc1\xda\x14\x20\x96\xbe\x64\xfb\x2a\
\x37\x5f\x21\xd9\x5f\xa5\xd7\x17\xc0\x00\xd1\xf3\xac\xf8\x85\xd7\
\x59\xd6\x82\xa0\xac\x3a\x6b\x1c\xdb\x97\xd2\x3a\xe7\xd6\x08\x82\
\x1a\x53\x70\x51\xe0\x5d\xe0\xaf\xd3\xbc\x78\x95\x02\x4d\x35\x96\
\x3c\x23\x7a\xeb\x0a\xa5\xb1\x6c\x96\xb1\x6a\x2c\x99\xbe\xa0\x94\
\x9e\x8c\x58\x37\x8e\x02\xd1\x78\x28\xc9\x49\x01\xad\x38\xa8\xcc\
\x18\x5a\x86\x05\xc5\x66\x06\x34\x98\x16\x60\x9d\xa7\xf4\xb8\x90\
\x13\x01\x4a\x90\xc5\xdd\x66\xe6\x80\xd2\x42\xff\x88\x52\xdf\x1d\
\x99\x7d\x3e\xcf\x58\xaa\xa5\xe6\x7b\x62\x8f\x10\x50\x28\x11\xc4\
\x22\x3b\x2b\x49\x00\x9b\x61\xb4\x11\xb3\x5e\x12\x65\x0b\x4d\x21\
\xdf\xd8\xd9\x0f\xa5\xf1\xb3\x1e\xd6\x63\x68\x43\xa0\xc5\xe4\x7b\
\x4b\x69\xbd\xb5\x4e\xe8\x39\xba\x8b\x52\x3a\x16\xb5\x8f\x14\xa2\
\x04\x50\x49\xe0\x6b\x57\x42\xdf\x3e\x68\x31\xd2\x5d\x4d\x3f\x14\
\x11\xdb\x62\x8b\x1f\x15\xa2\x37\xa9\xc7\x2c\x29\x23\x18\x7b\x7b\
\x99\x3f\x5b\x47\x3a\x26\xb8\x42\x95\x08\x56\x03\x2b\x25\x96\xe0\
\xc7\x2a\x25\x8b\xa7\xc7\x76\x16\xb9\xb8\x98\x9c\x27\x96\x4e\x31\
\x6d\x38\xec\x09\xad\xd3\x4d\x40\x94\x15\x4f\x23\x92\x9b\x6f\xae\
\x6c\x86\x1b\x53\xa4\x12\x49\xdc\x89\x44\x36\xd8\x14\xa0\xb3\x41\
\x9b\x9a\x67\xce\x5d\xfb\xd6\x0f\x72\x88\x5b\xda\x7e\x62\x0d\x6e\
\x1d\xaa\x36\xf3\x0b\xe4\x07\x3f\xdb\x76\xac\x64\xd3\x57\x1d\x9c\
\x0f\x1c\xa6\x7b\x72\x88\x53\xe7\x2f\x12\xea\x4d\xd1\xfa\xb7\xc7\
\x71\xc5\xfd\xda\xb3\x48\xbe\xe3\x01\xac\xcc\x91\xc1\x7b\x7c\xc3\
\x1a\x6a\xce\xcc\x10\xba\x3a\xcc\x85\x33\xdd\x5c\x89\x8c\xf1\x76\
\xb0\x99\xb8\xd3\xbd\x88\x82\x2a\xcc\x09\x3a\xa5\x33\xfa\xad\xa0\
\x7d\xef\x66\x4e\x46\xdc\xf4\xf5\x86\x39\x77\xa2\x97\x5b\xc3\xc3\
\xb4\xc6\x67\x71\x09\x50\x90\x41\x79\xf2\x83\x9e\x6f\x1c\x04\x50\
\xdb\xef\xfe\x85\xe4\xed\x5c\x0a\x10\xa8\xd6\x8b\x96\xd6\x5a\x0e\
\x3c\x71\x37\xeb\xb7\x96\x33\x2d\x57\x19\x50\x3d\xbc\x2e\xc7\xb8\
\x38\xde\xcb\xd5\xc1\x41\x5c\x6f\xb7\xb0\xf4\xcd\x4f\xe5\x6c\x8c\
\x58\xd7\xdb\xb3\x6a\x65\x6b\x0d\x9f\x7f\xe2\x1e\x36\xaf\x5b\x46\
\xfa\xaa\xe0\xea\x49\x21\xc7\x3e\x62\xbc\xb7\x9f\xc1\xc1\x3e\xde\
\x15\xe1\x48\x7d\x55\xce\x61\x05\x02\x52\xeb\xd7\x56\xc3\x81\x05\
\xbc\xb6\x7b\x97\x71\x25\x05\x3d\x21\x78\xff\x7d\xa1\xa7\x67\x9c\
\xa1\x81\x41\x9c\x63\x23\xac\xb8\x3d\xa3\xe9\x03\xb1\xea\x25\x24\
\x05\xd6\x74\x9e\x7e\xea\xaa\x7a\x70\xeb\xcb\x62\x57\x5c\xb0\xd3\
\x82\x1e\xf3\xf9\x8a\xd8\xff\xc5\xf5\x3c\xf8\x68\x2b\x23\x9c\x67\
\x94\x4b\x8c\x4a\x88\x2b\x8c\x32\x3e\x1f\x22\x14\xbb\x09\x26\x9c\
\x38\x75\x82\x95\x07\xbf\x8c\xff\xc6\x32\x8b\xf2\x82\xd5\xf1\xbe\
\xd2\x22\x3e\xfb\xd8\x7a\x3e\xf9\xf0\x1a\xd4\x79\x13\x2e\xa5\x91\
\xd0\x2d\x18\x9d\x64\x3e\x14\x26\x76\x73\x06\x13\xe1\xd4\x89\x53\
\xfc\x7f\x53\x15\x21\x9f\xa7\x10\x95\x23\x80\xcf\x9f\xc1\x63\xd7\
\xa3\x6b\x38\x7f\x0b\x2e\xde\x82\xd0\x14\x8c\x8c\xc0\xd4\x54\x9a\
\x99\x99\x18\x60\x72\x62\x01\xaf\x2d\x34\x4a\xb9\x99\xce\xa7\x57\
\xcd\xfd\x7f\xfc\xc7\xd1\xaf\x1c\x50\x9f\xde\xf9\x8a\xa0\x5b\x41\
\x05\x00\x76\xef\x6d\x63\xdf\xe7\xee\x24\x5a\x32\x44\x84\x01\x2e\
\xcb\x35\xce\xd3\xc7\x94\x44\x98\x55\x09\xdc\xa6\x03\x9f\xe1\x21\
\x85\xc9\xb9\xc1\x0b\x0c\xfc\x6d\x8e\x4d\x87\xbf\x8e\xbd\xd9\xf1\
\xbc\x63\x2e\x52\x03\x0a\xe7\xb5\x30\xf4\x4d\x20\x91\x28\x2a\x91\
\xc2\x74\x18\x18\x9e\x62\x30\xd3\x0c\x5e\xe8\xe1\x8d\xc1\x1e\x8e\
\xad\x6f\xfd\x9f\x78\xa3\x0e\x37\xfd\x69\x18\x1a\x85\xbe\x01\x88\
\x46\x4d\xe2\x71\x07\x0e\x87\x89\xd7\xeb\xc0\x34\xa1\xbb\x7b\x90\
\xe1\xe3\x1f\x72\x9f\x2b\x65\x4d\xd9\xfc\xd6\xe2\xf4\x96\xb8\x41\
\x6c\x35\x8c\xc2\x72\x9c\x6f\xd8\xd8\xc0\x9e\x7d\x6b\x28\x6b\x8e\
\xd3\x2f\x5d\x5c\x67\x84\x3e\x99\xe0\xb2\x39\x4c\x94\x18\x1e\x29\
\x22\x60\x54\x52\xef\xa8\xa4\x4c\x7c\x4c\xa8\x10\xa7\x92\x69\xd2\
\x45\x09\x54\xb1\x13\x8f\x43\x21\xa8\x2c\xae\xd0\xb1\x31\x98\xc1\
\x6b\x2c\xaa\x80\xf7\xe6\x61\x24\x86\x73\x62\x0a\x73\xf8\x3a\xc4\
\x12\x48\x91\x1b\xa3\xd2\x8f\xa3\xb2\x0c\xf1\x15\xa3\x42\x51\x92\
\xe9\x14\x6e\x52\x94\x3a\x21\x55\xe4\xca\x12\xb6\x02\xa0\xe3\x9e\
\x06\x1e\x5e\xc0\x73\x36\x54\xf0\xef\x28\x8c\x46\x60\x7c\x0c\x86\
\x47\xe6\x89\xc5\xc0\xed\x82\xaa\x2a\xa8\xac\x74\xe0\x2b\xc9\x44\
\x16\xe9\x74\x12\x94\x42\x15\xbb\x32\xfa\x91\x97\xbd\x82\x00\x28\
\xf5\x69\xa7\xc7\xeb\x46\x34\x75\xa3\xd0\xf5\xc5\x7c\xfd\x34\x1b\
\x3f\xe3\xe3\x40\xc7\x82\x77\xe8\xe6\x22\x43\x7c\x44\x3f\x7d\x8c\
\x11\x37\xe3\xcc\x26\xe7\x68\x71\x2f\xa5\xda\x28\xa7\x55\x35\xd2\
\x44\x80\x39\x49\x71\x36\x65\x12\x9e\x88\x50\x36\xb9\x9c\x4a\x7f\
\x31\x02\x28\x11\x6a\xe7\x15\x7b\x37\xb4\xb1\xee\x4b\x6d\xa8\x6e\
\x81\xa1\x24\xd2\x3f\x01\x63\x21\xcc\x78\x8a\xe4\x5c\x0c\xf7\xd2\
\x00\x46\xb9\x0f\xd5\x58\x47\x3c\x50\x8c\x33\x25\x60\x9a\x4c\x44\
\x42\x4c\x95\x2a\x9c\x15\x25\x38\xb3\xb4\x9c\x2c\xf6\xb0\xf6\x81\
\x3b\x79\xec\xe1\x25\x5c\x88\xc2\xd0\x14\xf4\xf7\xc3\xf8\x04\xc4\
\x62\xf3\xcc\xcd\x25\x69\x5a\xea\xa6\xac\xcc\xa0\xb1\x11\x02\xb5\
\x90\x4c\x82\x69\xa6\x88\x4c\x4d\x50\x96\x8e\x53\xe9\x2f\xcd\x95\
\x19\xa2\xb4\x0d\x24\xcb\x55\x4a\x64\x9b\xd3\xe3\x71\x81\xb2\x33\
\x91\x60\x3a\xe6\xf9\xe0\x91\x83\xdc\xb7\xee\x51\xae\xc8\x3b\x9c\
\x67\x80\x61\x26\x09\x11\xe6\x46\x7c\x8a\x26\x4f\x3d\xab\x3c\x4d\
\xac\x65\x39\x4b\xa9\xc5\x8b\x0f\xb7\x78\x30\xd5\x0c\xa7\xe5\x32\
\x35\xdd\x9b\xb9\x6b\x72\x1f\x78\x05\x00\x87\x09\xbb\x8e\x8f\xd2\
\xbe\x6e\x13\xe6\x3b\x82\x63\xe0\x06\x4c\xce\x40\xf8\x36\xf1\xa9\
\x30\x9e\xfa\x5a\x3c\x4d\xb5\xb0\xbc\x9e\xb9\x5a\x2f\xf3\x3e\x10\
\x8f\xa0\x66\x14\x8e\xcb\xa3\xf4\xd6\xf8\x98\x59\xdb\x8e\x57\x04\
\x50\x98\x08\xef\x3b\xeb\x58\xd9\x50\xcb\xe1\x08\x0c\x0c\xc2\xe4\
\x24\x84\xc3\xc2\xd4\xd4\x1c\x4b\xea\x3d\x2c\x6b\xf2\xd0\xdc\x0c\
\x75\x75\x50\xe2\x02\x8f\x13\xc2\x02\x97\xaf\x08\x4b\x12\x31\x3a\
\x56\x55\xeb\xda\x55\x15\xbc\x73\xb4\x2f\xa4\x5b\x11\x92\x5f\xa2\
\x71\xbd\xbc\x17\x5f\xb1\x9b\x31\x63\x9a\x6b\xdc\xe0\xa6\x8a\x32\
\x2b\x09\xaa\x95\x9f\x3b\x3d\xab\x68\x25\xc8\x1d\x04\xf1\xe1\xa3\
\x18\x2f\x4a\x0c\x00\xfe\x72\xa6\x93\x57\x8f\x74\xb1\x35\x74\x00\
\x8f\xd7\xb5\xe8\x17\xfc\xd7\x23\x14\x7b\xbc\x18\xd3\xb7\xe0\x46\
\x04\x15\x9d\x45\x12\x49\x94\xdf\x87\x67\x55\x23\x04\x03\xa4\x82\
\xe5\xa4\x7c\x30\xef\x05\x65\x08\x00\x97\x3a\xdf\xe6\xc8\x6b\x2f\
\x11\xd9\xf2\x31\xbc\x5e\x17\x92\x65\xd4\xf1\x39\xf0\xfa\xdc\x4c\
\x4f\x3b\xb9\x7e\x03\x6e\xdf\x86\x44\x02\xfc\x7e\xc5\xaa\x16\x0f\
\x8d\x41\x68\x08\x82\xbf\x18\xbc\x2e\xc8\x66\x14\x9d\xaf\x9f\xe1\
\xf5\x17\x8e\xb0\xed\x8e\x3a\x3c\xb5\xd5\xb9\x5d\xe7\x5d\x2a\xf4\
\x78\x83\x7a\xfe\xd9\xc3\x92\x63\x76\x5d\xe0\x72\xc9\xff\x1e\x3d\
\x3b\xfe\xcc\xea\x96\xd5\x38\x5c\x8a\x52\xa3\x84\x06\x47\x6d\xc6\
\x38\x4b\x09\xd0\x48\x10\x20\x67\x9c\xce\xc1\x93\xbc\xd0\xf5\x27\
\x22\x43\x45\x6c\x77\x7c\x8a\x66\x69\xb3\xf8\xa3\xe4\xf2\x08\xdb\
\xba\xa3\xb4\xb4\xaf\xc2\xa5\x0c\x8c\x12\x2f\x8e\xda\x32\x08\xd6\
\x41\xa0\x9c\x68\x50\x01\xda\x38\x93\x27\x2f\xd1\x75\xe8\xe7\x0c\
\xcf\x84\x71\x6f\xdd\x81\xa3\x69\x85\x25\xd6\x2f\x85\xe6\x38\xa3\
\xea\x69\x5b\xc0\x33\x0c\x27\x5e\xaf\x93\xba\x5a\x45\x30\xc8\xc2\
\x2f\x34\xd6\x80\x08\x18\xd9\x05\xa7\xde\x1b\xe0\xd0\x8f\xba\x88\
\x0c\x87\xd9\xbe\xa1\x99\xe6\x86\x8a\xbc\x13\x4a\x44\x27\x15\xe8\
\xea\x20\xc3\x49\xf6\x7a\x4c\x29\xb8\xee\xef\x25\x3a\x17\x63\x36\
\x79\x9b\xaa\xa2\x2a\x96\x39\x02\x6c\x66\x35\x2b\x09\x82\xb8\x50\
\x18\x00\x0c\x86\xc7\xf9\x6e\xd7\x21\x8e\x9f\x1b\x66\x87\x6b\x1f\
\x1b\x7d\x1f\x23\xd7\xf4\xd5\x03\xef\xe4\x34\x73\xf1\x39\x92\xb7\
\xe3\x14\x55\x95\xe3\x08\x54\xc1\xea\xa5\x44\x82\x6e\x0c\x97\xe4\
\x8c\x93\x1c\x0f\x73\xe4\x37\xbf\xe4\xa3\x0f\x8f\xe2\xd9\xbe\x93\
\x8a\x8e\x4d\xd6\xba\x36\x0b\x3b\x11\x8b\x12\x77\xc4\x99\x9d\x4d\
\x50\x55\xe5\x26\x10\x50\xb4\xb7\x43\x43\x3d\xb8\x04\x1c\xd9\xc9\
\xe3\xd7\x66\x38\xf8\xe3\x4e\x8e\xbf\xd9\xcb\x8e\x7b\x57\xb2\xf1\
\xff\x5a\xd1\x38\xe8\x56\xe8\xba\x88\xc2\xe9\xf5\xb8\xff\x6b\x4d\
\xb6\x27\xfd\x24\x2f\xdd\x3c\x8a\xa1\x0c\xaa\x0c\x7f\x86\x94\xab\
\xa4\x02\x83\x92\x45\xd6\xe2\x3b\x5d\x87\x78\xb9\xeb\x0d\x76\x7b\
\xf7\xf3\xfd\xaa\x67\x51\x4a\xa1\x0a\x94\x0f\xb2\x6b\x13\x37\x5f\
\x7a\x1b\x65\x38\x30\xfc\xfe\x4c\xf4\xa4\x2a\x5c\x18\x25\x02\x80\
\x98\x70\xf2\xd0\x2b\x74\x1d\xfa\x15\xa5\x0f\x3c\x48\xfd\x73\xcf\
\x67\xf0\x0a\x1d\xcc\x0f\xad\x0b\xf0\xe2\xf1\x19\x0c\x43\x51\x5a\
\x6a\x10\xa8\x83\x72\x3f\x94\xa8\xc5\x6c\x90\x4c\xe4\x1c\x5a\x30\
\xd0\xee\xad\xab\xf8\xfe\x53\x3b\xb2\x78\xb6\x02\xd9\xf6\x2a\x91\
\xcf\xce\xa0\x5e\xfd\xf5\x19\x01\x9d\x66\x4a\xf7\x39\xea\xf9\x0b\
\x47\x9a\x7e\xc7\xc6\xd5\x1d\xdc\x5f\xb1\x85\x4a\xb3\x94\x3b\x1c\
\x4b\xf8\xe7\xd9\xd3\xfc\xb4\xf3\x35\x56\xa4\x37\xb0\xb7\xec\x73\
\x54\xbb\x02\xd9\x93\xc0\x5a\x91\x0a\xd6\xf0\x75\xbf\xdb\x4b\xb0\
\x6b\x90\xd5\x1d\x77\x51\xb1\x65\x2d\x66\xa9\x87\xf9\x25\x95\x74\
\x9f\x7e\x87\xce\xdf\xfe\x0c\xb3\x79\x19\x55\x0f\xed\xc1\x55\x5d\
\x6d\x3b\x88\x35\xa6\x82\x5c\xa7\xb3\x2f\xca\xe1\x31\x45\x47\x47\
\x3b\x5b\x36\x57\xe0\xf7\x9b\x34\x34\x38\x38\xf5\xe6\x59\x5e\x59\
\x30\xce\x8a\xea\x52\xf6\x7e\xbc\x9d\x9a\xca\x92\x2c\x9c\x2d\xc3\
\x04\xc8\xe1\xd9\xfe\xd4\x17\x5a\xd4\x9f\xff\xd0\x2d\x64\xf9\x48\
\x34\xd5\xe7\x50\x9e\x4e\xef\x47\xca\x63\x98\xee\x24\x65\x9e\x32\
\xa2\x83\xe0\x19\x6b\xe0\xd1\x9a\x2f\xb0\xc6\xb7\x1e\xc4\x7a\xc5\
\xd6\x42\xb4\x44\x7d\x93\x10\xe2\xcf\xfd\x9e\x8a\xb4\x1b\x97\x69\
\xe2\x29\xf3\x31\x18\xef\x63\xbc\x28\x4e\xe0\x91\xbd\xf8\xda\xdb\
\xad\x6b\x95\xf6\xb7\xe5\xa2\xac\x65\xf0\xf4\x5f\x7b\x48\x3b\xbd\
\xcc\x1b\x0e\xca\x4a\xbd\xc4\xaf\x0d\x50\x7c\x2b\xc2\xbe\x07\xd6\
\xb2\xa6\x25\x60\x4d\x0f\xdb\xb3\x0d\xe8\x3d\x03\xfa\xa2\x6b\x2b\
\xc1\xd5\xdf\x5f\xbb\x2c\x96\xdc\xb4\x3d\x53\x4d\xcf\xdf\x20\x61\
\xc6\xb9\x90\x3c\xc5\x5b\xe1\xd7\xd9\xe5\xdd\xcf\x27\x2a\xf6\xe4\
\xa4\x4a\x06\x48\x2c\xa4\xaf\x94\x8e\x02\x32\xdf\xfa\xbd\x6a\x7e\
\x2a\x92\x39\xd5\xe6\x2e\xf6\x33\xf3\xaf\x63\x94\xec\xbc\x87\xca\
\xfb\xb7\x5a\xd3\x4a\xef\x48\x1b\x24\x9f\x65\x33\xb8\x53\xb7\x12\
\x24\x92\x29\xce\x0f\x85\xf9\xd7\x07\x03\xec\xbe\xbb\x81\x1d\x5b\
\x96\xdb\xd9\x38\xef\xc1\x0f\x11\x84\xac\xd1\x51\xd6\x94\xb3\xda\
\x08\x75\xf8\xad\xa1\xc5\xe9\x58\x9b\xe4\x65\x2d\x62\xea\xbc\xce\
\x85\x9f\xf6\xb6\x2d\xb3\x0b\xbc\x3f\xea\x5f\xc1\x04\x14\x3a\xc5\
\x95\xad\x64\xd3\x39\x6b\x7f\x93\xca\xc7\xcb\x4e\x71\x80\x92\x7c\
\x66\x44\x69\x2c\x84\xc2\x7b\x05\x5b\x1f\x9c\x9e\x12\x57\xde\x22\
\xa5\x14\x82\x64\x84\x69\x7d\x44\xbf\xc1\xd8\x73\x56\xc7\x95\x5d\
\x46\x96\xab\xd0\x26\x53\x9a\x69\x10\xcb\x53\x80\x5e\xa6\x49\xd2\
\xa6\x97\xae\x8c\x75\xcd\x82\x8d\xfc\x0a\xef\x5f\x9f\xde\x36\x89\
\x62\xf5\x83\x9e\xbf\x78\xba\x79\xdd\xf9\x8f\x79\xca\x4a\x0b\x3a\
\x1d\x75\xc9\xae\x34\xbe\xa6\x22\x2d\xc4\x32\x07\xbd\x3c\x5f\x7f\
\x4b\x67\x31\x9a\x2c\xda\xda\x21\x0a\x9f\x42\x68\x3d\x0a\x26\x86\
\x56\xd0\xe6\x4c\x6d\x78\xcb\xf9\x23\xf0\x1f\x11\xff\x03\xae\x1c\
\x08\xf0\x33\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
# Qt resource tables -- presumably generated by pyrcc; regenerate rather than
# editing by hand. qt_resource_name appears to hold resource path components
# as (length, hash, UTF-16BE chars): "res", "image", then each .png file name.
qt_resource_name = "\
\x00\x03\
\x00\x00\x78\xc3\
\x00\x72\
\x00\x65\x00\x73\
\x00\x05\
\x00\x70\x37\xd5\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\
\x00\x10\
\x0e\xb8\x9b\xc7\
\x00\x74\
\x00\x69\x00\x74\x00\x6c\x00\x65\x00\x62\x00\x61\x00\x72\x00\x4c\x00\x65\x00\x66\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x06\
\x07\x87\x57\x47\
\x00\x71\
\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x05\x15\x55\x27\
\x00\x74\
\x00\x69\x00\x74\x00\x6c\x00\x65\x00\x62\x00\x61\x00\x72\x00\x43\x00\x65\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x11\
\x0b\x0b\x8a\x07\
\x00\x74\
\x00\x69\x00\x74\x00\x6c\x00\x65\x00\x62\x00\x61\x00\x72\x00\x52\x00\x69\x00\x67\x00\x68\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
"
# qt_resource_struct is the directory index consumed by Qt's resource system:
# tree nodes plus per-file byte offsets into qt_resource_data.
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x19\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x14\x20\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x1c\xaf\
\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    """Register the embedded resource tables with Qt's resource system."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource tables from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register the resources as an import-time side effect (standard pyrcc layout).
qInitResources()
|
13,412 | a398975505e8363b3fcf339a2a23af33ec555463 | import numpy as np
from SenselUse import sensel
import threading
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import os
enter_pressed = False
plt.figure(figsize=(15, 7))
COUNT = 1
# my_cmap = plt.cm.RdBu(np.arange(plt.cm.RdBu.N))
# my_cmap[:,0:3] *= 0.5
# my_cmap = ListedColormap(my_cmap)
def waitForEnter():
    """Block until the user presses Enter, then flag the scan loop to stop."""
    global enter_pressed
    input("Press Enter to exit...")
    enter_pressed = True
def openSensel():
    """Open the first detected Sensel device and return its handle, or None
    when no device is attached."""
    (error, device_list) = sensel.getDeviceList()
    if device_list.num_devices == 0:
        return None
    (error, opened) = sensel.openDeviceByID(device_list.devices[0].idx)
    return opened
def initFrame():
    """Select pressure frames, allocate a frame buffer and start scanning.

    Returns the allocated frame object; error codes from the sensel API are
    ignored, matching the rest of this script.
    """
    sensel.setFrameContent(handle, sensel.FRAME_CONTENT_PRESSURE_MASK)
    (error, frame) = sensel.allocateFrameData(handle)
    sensel.startScanning(handle)
    return frame
def scanFrames(frame, info):
    """Drain every frame currently buffered by the sensor, reporting each."""
    sensel.readSensor(handle)
    (error, num_frames) = sensel.getNumAvailableFrames(handle)
    for _ in range(num_frames):
        sensel.getFrame(handle, frame)
        printFrame(frame, info)
def printFrame(frame, info):
    """Bump and report the global frame counter.

    ``frame`` and ``info`` are accepted for interface parity with the caller
    but are not used here.
    """
    global COUNT
    COUNT += 1
    print("COUNT", COUNT)
def closeSensel(frame):
    """Release the frame buffer, stop scanning and close the device."""
    sensel.freeFrameData(handle, frame)
    sensel.stopScanning(handle)
    sensel.close(handle)
if __name__ == "__main__":
    # Open the device, then scan frames in a loop until the watcher thread
    # sees the user press Enter.
    handle = openSensel()
    if handle is not None:
        (error, info) = sensel.getSensorInfo(handle)
        frame = initFrame()
        threading.Thread(target=waitForEnter).start()
        while not enter_pressed:
            scanFrames(frame, info)
        closeSensel(frame)
|
13,413 | e7591b2c55992ba48026d012d75a9febac41ac39 | #DEPENDENCIES
#Allow Path Access to the Prelude's Directory
import sys
if not(".." in sys.path):
sys.path.append("..")
#Utilities Dependencies
from Py_Preludes import *
#Typing Dependencies
from typing import List
#Enum Dependencies
from enum import Enum,auto,unique
#Context Extension Dependencies
from SimpleTwoWayCtx_ctx import *
#CarrierSet Types Declarations
#Translation of Context: SimpleTwoWayCtxExt
#This context extends the following context: SimpleTwoWayCtx_class
class SimpleTwoWayCtxExt_class():
    """Generated translation of the Event-B context ``SimpleTwoWayCtxExt``.

    Extends ``SimpleTwoWayCtx_class`` with the constant ``OTHERDIR`` (a
    relation on DIRECTION) and with axiom-check methods that validate any
    candidate value assigned to it during ``checkedInit``.
    """
    def __init__(self) -> None:
        #Context Utils
        # True once checkedInit() has run; guards against double initialization.
        self.__Initialized_Context = False
        # While True the property setters accept writes; cleared afterwards so
        # the context behaves as immutable once built.
        self.__Attributes_SetFlag : bool = True
        #Context Extended Dependency Object
        self.__SimpleTwoWayCtx : SimpleTwoWayCtx_class
        #CarrierSets
        #EndCarrierSets
        self.__Attributes_SetFlag = False
        #Constants
        # Annotation only -- no value is assigned until checkedInit().
        self.OTHERDIR : PyRel[DIRECTION_CS,DIRECTION_CS]
        #EndConstants
    #Context Extended Dependency Object Get Method
    def SimpleTwoWayCtx_get(self) -> SimpleTwoWayCtx_class:
        """Return the extended SimpleTwoWayCtx context object."""
        return self.__SimpleTwoWayCtx
    #Initialized_Context Flag Attribute Get Method
    def Initialized_ContextGetMethod(self) -> bool:
        """Return True once checkedInit() has completed its guard."""
        return self.__Initialized_Context
    #CarrierSets Get/Set Methods
    #End CarrierSets Get/Set Methods
    #Constants Get/Set Methods
    @property
    def OTHERDIR(self) -> PyRel[DIRECTION_CS,DIRECTION_CS]:
        return self.__OTHERDIR
    @OTHERDIR.setter
    def OTHERDIR(self, OTHERDIR_userIn : PyRel[DIRECTION_CS,DIRECTION_CS]) -> None:
        # Writes are only legal while __Attributes_SetFlag is raised, i.e.
        # during checkedInit(); any later assignment raises.
        if self.__Attributes_SetFlag == False: raise Exception("Changing the state of this Context is disabled.")
        self.__OTHERDIR : PyRel[DIRECTION_CS,DIRECTION_CS] = OTHERDIR_userIn
    #End Constants Get Methods
    #Axiom Check Methods
    def ax2_axiomCheck(self) -> bool:
        # ax2: OTHERDIR is a total function DIRECTION --> DIRECTION.
        return PyFamilies(PyFamilyTypes.TotalFunctions, self.SimpleTwoWayCtx_get().DIRECTION, self.SimpleTwoWayCtx_get().DIRECTION).PyContains(self.OTHERDIR)
    def ax3_axiomCheck(self) -> bool:
        # ax3: OTHERDIR(NorthSouth) = EastWest.
        return self.OTHERDIR(self.SimpleTwoWayCtx_get().NorthSouth) == self.SimpleTwoWayCtx_get().EastWest
    def ax4_axiomCheck(self) -> bool:
        # ax4: OTHERDIR(EastWest) = NorthSouth.
        return self.OTHERDIR(self.SimpleTwoWayCtx_get().EastWest) == self.SimpleTwoWayCtx_get().NorthSouth
    def ax5_axiomCheck(self) -> bool:
        # ax5: forall d in DIRECTION . OTHERDIR(OTHERDIR(d)) = d  (involution).
        return P.QuantifiedForAll( (lambda boundIdentifiers : PyPrelude.LogicImplication(self.SimpleTwoWayCtx_get().DIRECTION.PyContains(boundIdentifiers[0]), self.OTHERDIR(self.OTHERDIR(boundIdentifiers[0])) == boundIdentifiers[0])) , [ (0,"DIRECTION_CS") ] )
    def ax6_axiomCheck(self) -> bool:
        # ax6: OTHERDIR composed with itself is a subset of the identity.
        return self.OTHERDIR.PyComposition(self.OTHERDIR).PyIsSubset(P.ID())
    #End Axiom Check Methods
    #Check ALL Axioms
    def checkAllAxioms(self) -> bool:
        """Return True iff every axN_axiomCheck method returns True."""
        checkedAns_local : bool = True
        allAxioms_local : List[str] = [ "ax2" , "ax3" , "ax4" , "ax5" , "ax6" ]
        for Axiom_local in allAxioms_local:
            # Dispatch by name to each axN_axiomCheck method above.
            AxiomMethod_local = getattr(self,Axiom_local + "_axiomCheck")
            checkedAns_local = checkedAns_local and AxiomMethod_local()
        return checkedAns_local
    #End Check ALL Axioms
    #Checked Initialization Method
    # NOTE(review): the default SimpleTwoWayCtx_class() instance below is
    # evaluated once at definition time and therefore shared across all calls
    # that omit the argument -- confirm this is intended by the generator.
    def checkedInit(self , SimpleTwoWayCtx_userIn : SimpleTwoWayCtx_class = SimpleTwoWayCtx_class(), OTHERDIR_userIn : PyRel[DIRECTION_CS,DIRECTION_CS] = P.NoParam() ) -> None:
        """One-shot initialization: wires up the extended context, assigns
        OTHERDIR (randomly generated when omitted/None) and, under
        design-by-contract, re-samples until all axioms hold or the attempt
        limit P.HIGHMAXGENATTEMPTS() is reached.
        """
        if OTHERDIR_userIn is None:
            OTHERDIR_userIn = P.PyRandValGen("PyRel[DIRECTION_CS,DIRECTION_CS]")
        if self.__Initialized_Context: raise Exception("Context already initialized!")
        self.__Initialized_Context = True
        #Enable Attributes Set Method
        self.__Attributes_SetFlag = True
        #Assign Parameter to Context Extended Dependency Object
        self.__SimpleTwoWayCtx = SimpleTwoWayCtx_userIn
        if not(self.__SimpleTwoWayCtx.Initialized_ContextGetMethod()):
            self.__SimpleTwoWayCtx.checkedInit()
        self.OTHERDIR = OTHERDIR_userIn
        if P.DESIGN_BY_CONTRACT_ENABLED():
            attempt_Count : int = 0
            while not(self.checkAllAxioms()):
                # Keep re-sampling OTHERDIR until the axioms are satisfied.
                self.OTHERDIR = P.PyRandValGen("PyRel[DIRECTION_CS,DIRECTION_CS]")
                if attempt_Count == P.HIGHMAXGENATTEMPTS():
                    raise Exception("Initialization could not satisfy the Axioms!")
                attempt_Count += 1
        #Disable Attributes Set Method
        self.__Attributes_SetFlag = False
    #User/Debugging Functions
    def __str__(self) -> str:
        tmp_values : List[str] = list()
        #Print Constants
        tmp_values.append("###")
        tmp_values.append("SimpleTwoWayCtxExt Constants")
        tmp_values.append("OTHERDIR ==> " + str(self.OTHERDIR))
        #Print Extended Context Constants
        tmp_values.append(self.__SimpleTwoWayCtx.__str__())
        return "\n".join(tmp_values)
    def __repr__(self) -> str:
        return self.__str__()
|
13,414 | be72e722f1cdb71fe1987559eab0826dfac5c8c5 | a=int(input("Enter limit:"))
# Print the first `a` Fibonacci numbers (the two seed 1s are always printed,
# matching the original script's behavior even for limits below 2).
b, c = 1, 1
print(b)
print(c)
for _ in range(1, a - 1):
    b, c = c, b + c
    print(c)
13,415 | ede30aa8afbc9bdbbaa47b3c9729615df1d5e802 | from sys import stdin
def busquedaBinaria(n, item):
    """Binary search over the sorted list *n*.

    Returns ("esta", index) when *item* is found, otherwise
    ("no esta", insertion_point) where insertion_point is where the item
    would be inserted to keep the list sorted.
    """
    lo, hi = 0, len(n) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if n[mid] == item:
            return "esta", mid
        if item < n[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return "no esta", lo
def main():
    """Read a neighbours-in-sorted-order problem from stdin and answer it.

    Input: N, then N ints, then Q, then Q query ints. For each query,
    print its predecessor and successor in the de-duplicated sorted list
    ("X" marks a missing neighbour on either side).
    """
    N=int(stdin.readline().strip())
    w=[int(i) for i in stdin.readline().strip().split()]
    Q=int(stdin.readline().strip())
    q=[int(i) for i in stdin.readline().strip().split()]
    # De-duplicate and sort once before the binary searches.
    n=list(set(w))
    n.sort()
    for i in q:
        con,ind=busquedaBinaria(n,i)
        if con=="esta":
            # Found: neighbours are the adjacent elements (if any).
            # NOTE(review): a single-element list satisfies both ind==0 and
            # ind==len(n)-1, so the first elif would raise IndexError.
            if ind!=0 and ind!=len(n)-1:
                print(n[ind-1],n[ind+1])
            elif ind==0:
                print("X",n[ind+1])
            elif ind==len(n)-1:
                print(n[ind-1],"X")
        else:
            # Not found: ind is the insertion point, so the neighbours are
            # n[ind-1] and n[ind].
            if ind!=0 and ind!=len(n):
                print(n[ind-1],n[ind])
            elif ind==0:
                print("X",n[ind])
            elif ind==len(n):
                print(n[ind-1],"X")
main()
|
13,416 | 79fab049a6737b93da1d48b9880e8ff6944f0c5f | import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
def speed_up(a):
    """Return the speed-up of each run relative to the first (serial) run.

    Args:
        a: numpy array of run times; a[0] is the reference run time.

    Returns:
        Element-wise ratio a[0] / a.
    """
    # Renamed the local so it no longer shadows the function name.
    ratio = a[0] / a
    return ratio
# Processor counts for the strip decompositions and the square decomposition.
P = np.array([1,4,16,24,36])
P_square = np.array([1,4,16,25,36])
# Measured wall-clock run times (seconds) per decomposition.
run_time_square = np.array([1143.29, 298.748,93.9222, 84.9656, 46.224])
run_time_vert = np.array([1140.44,300.632,95.7239,91.0576,48.556])
run_time_horiz = np.array([1028.21,270.011,90.9404,88.1855,46.192])
# Run times with I/O excluded (no single-processor measurement, hence 4 entries).
run_time_no_io_square =np.array([294.812,85.6856,76.6362,37.9617])
run_time_no_io_vert =np.array([285.008,87.3275,81.764,39.3733])
run_time_no_io_horiz =np.array([254.679,82.0836,78.5395,36.826])
# Fraction of total run time spent in I/O, for the multi-processor runs.
io_frac_square = (run_time_square[1:5]-run_time_no_io_square)/run_time_square[1:5]
io_frac_vert = (run_time_vert[1:5]-run_time_no_io_vert)/run_time_vert[1:5]
io_frac_horiz = (run_time_horiz[1:5]-run_time_no_io_horiz)/run_time_horiz[1:5]
speed_up_square = speed_up(run_time_square)
speed_up_horiz = speed_up(run_time_horiz)
speed_up_vert = speed_up(run_time_vert)
# Parallel efficiency = speed-up / processor count.
parallel_efficiency_square = speed_up_square/P_square
parallel_efficiency_vert = speed_up_vert/P
parallel_efficiency_horiz = speed_up_horiz/P
# Figure 1: speed-up vs processor count.
fig1,ax1 = plt.subplots(1,1)
ax1.plot(P,speed_up_vert,marker = 'x', label = 'Vertical strips')
ax1.plot(P_square,speed_up_square,marker = 'x', label = 'Squares')
ax1.plot(P,speed_up_horiz,marker = 'x', label = 'Horizontal strips')
ax1.set_xlabel('Number of processors')
ax1.set_ylabel('Speed up')
ax1.legend(loc='upper left')
ax1.set_title('Speed-up')
# Figure 2: raw run time.
fig2,ax2 = plt.subplots(1,1)
ax2.plot(P,run_time_vert,marker = 'x', label = 'Vertical strips')
ax2.plot(P_square,run_time_square,marker = 'x', label = 'Squares')
ax2.plot(P,run_time_horiz,marker = 'x', label = 'Horizontal strips')
ax2.set_xlabel('Number of processors')
ax2.set_ylabel('Run time')
ax2.legend(loc = 'upper right')
ax2.set_title('Run time')
# Figure 3: parallel efficiency.
fig3,ax3 = plt.subplots(1,1)
ax3.plot(P,parallel_efficiency_vert,marker = 'x', label = 'Vertical strips')
ax3.plot(P_square,parallel_efficiency_square,marker = 'x', label = 'Squares')
ax3.plot(P,parallel_efficiency_horiz,marker = 'x', label = 'Horizontal strips')
ax3.set_xlabel('Number of processors')
ax3.set_ylabel('Parallel efficiency')
ax3.legend(loc = 'upper right')
ax3.set_title('Parallel efficiency')
# Figure 4: I/O fraction (multi-processor runs only).
fig4,ax4 = plt.subplots(1,1)
ax4.plot(P[1:5],io_frac_vert,marker = 'x', label = 'Vertical strips')
ax4.plot(P_square[1:5],io_frac_square,marker = 'x', label = 'Squares')
ax4.plot(P[1:5],io_frac_horiz,marker = 'x', label = 'Horizontal strips')
ax4.set_xlabel('Number of processors')
ax4.set_ylabel('Fraction of time I/O takes')
ax4.legend(loc = 'upper left')
ax4.set_title('Fraction of time used for I/O')
# NOTE(review): no plt.show()/savefig here -- presumably run interactively; confirm.
|
13,417 | b331efb2a21f4ea7b57e24c40a7faf8aaea786f6 | #
# a1pr1.py - Assignment 1, Problem 1
#
# Indexing and slicing puzzles
#
# This is an individual-only problem that you must complete on your own.
#
#
# List puzzles
#
# Source lists for the list puzzles.
pi = [3, 1, 4, 1, 5, 9]
e = [2, 7, 1]

# Puzzle 0 (example): build [2, 5, 9].
answer0 = [e[0]] + pi[4:]
print(answer0)

# Puzzle 1: build [2, 7].
answer1 = e[0:2]
print(answer1)

# Puzzle 2: build [5, 4, 3].
answer2 = pi[4::-2]
print(answer2)

# Puzzle 3: build [3, 5, 7].
answer3 = [pi[0], pi[4], e[1]]
print(answer3)

# Puzzle 4: build [1, 2, 3, 4, 5].
answer4 = [e[2], e[0]] + pi[0::2]
print(answer4)

# Source strings for the string puzzles.
b = 'boston'
u = 'university'
t = 'terriers'

# Puzzle 5 (example): build 'bossy'.
answer5 = b[0:3] + t[7] + u[9]
print(answer5)

# Puzzle 6: build 'universe'.
answer6 = u[0:7] + t[1]
print(answer6)

# Puzzle 7: build 'roster'.
answer7 = t[2] + b[1:4] + t[5:7]
print(answer7)

# Puzzle 8: build 'boisterous'.
answer8 = b[0:2] + t[4] + t[7] + t[0:3] + b[1] + u[0] + u[6]
print(answer8)

# Puzzle 9: build 'yesyesyes'.
answer9 = 3 * (u[9] + t[5] + t[7])
print(answer9)

# Puzzle 10: build 'trist'.
answer10 = t[0] + t[2] + t[4] + b[2:4]
print(answer10)
|
13,418 | e705c35aaa083db2245f815310a9874ddd42f7b8 | from typing import Tuple, Any
from dataset import Dataset
from relevance_engines.criage_engine import CriageEngine
from link_prediction.models.model import Model
from explanation_builders.explanation_builder import NecessaryExplanationBuilder
class CriageNecessaryExplanationBuilder(NecessaryExplanationBuilder):
    """
    The CriageNecessaryExplanationBuilder object guides the search for necessary facts to remove for Criage
    """

    def __init__(self, model: Model,
                 dataset: Dataset,
                 hyperparameters: dict,
                 sample_to_explain: Tuple[Any, Any, Any],
                 perspective: str):
        """
        CriageNecessaryExplanationBuilder object constructor.
        :param model: the model to explain
        :param dataset: the dataset used to train the model
        :param hyperparameters: the hyperparameters of the model and of its optimization process
        :param sample_to_explain: the (head, relation, tail) sample whose prediction is explained
        :param perspective: the perspective ("head" or "tail") of the explanation
        """
        # Criage explanations always have length 1 (the trailing argument).
        super().__init__(model, dataset, sample_to_explain, perspective, 1)

        self.engine = CriageEngine(model=model,
                                   dataset=dataset,
                                   hyperparameters=hyperparameters)

    def build_explanations(self,
                           samples_to_remove: list,
                           top_k: int =10):
        """Score every candidate fact removal with the Criage engine, append
        each result to output_details_1.csv, and return the top_k rules
        sorted by relevance."""
        rule_2_relevance = {}

        (head_to_explain, _, tail_to_explain) = self.sample_to_explain

        for i, sample_to_remove in enumerate(samples_to_remove):
            print("\n\tComputing relevance for sample " + str(i) + " on " + str(len(samples_to_remove)) + ": " +
                  self.dataset.printable_sample(sample_to_remove))
            # Criage only handles removals whose tail coincides with either
            # entity of the sample being explained.
            tail_to_remove = sample_to_remove[2]
            if tail_to_remove == head_to_explain:
                perspective = "head"
            elif tail_to_remove == tail_to_explain:
                perspective = "tail"
            else:
                raise ValueError

            relevance = self.engine.removal_relevance(sample_to_explain=self.sample_to_explain,
                                                      perspective=perspective,
                                                      samples_to_remove=[sample_to_remove])

            rule_2_relevance[tuple([sample_to_remove])] = relevance

            # NOTE(review): self.triple_to_explain is not set in this class --
            # presumably inherited from NecessaryExplanationBuilder; confirm.
            cur_line = ";".join(self.triple_to_explain) + ";" + \
                       ";".join(self.dataset.sample_to_fact(sample_to_remove)) + ";" \
                       + str(relevance)
            with open("output_details_1.csv", "a") as output_file:
                output_file.writelines([cur_line + "\n"])

        return sorted(rule_2_relevance.items(), key=lambda x: x[1])[:top_k]
|
13,419 | 3b99a4d2366b5717708af53c885b60b489799f84 | # Generated by Django 2.2.1 on 2019-05-16 00:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes CustomUser.profile_image optional and gives it
    a default placeholder image."""

    dependencies = [
        ('myApp', '0002_auto_20190516_0021'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='profile_image',
            field=models.ImageField(blank=True, default='default-profile.png', upload_to='profile_image'),
        ),
    ]
|
13,420 | 9c3dce4d6e8fd58197b83dc188ad2b1a474bfb7a | import sqlite3
conn = sqlite3.connect("SnackBar.db")
def initiate_leiding_table(conn):
    """Create the `leiding` (leaders) table: first/last name plus debt owed."""
    cur = conn.cursor()
    cur.execute("""CREATE TABLE leiding (
first text,
last text,
schuld float
)""")
def initiate_snackbar_table(conn):
    """Create the `snackbar` table (just the bar's name)."""
    cur = conn.cursor()
    cur.execute("""CREATE TABLE snackbar (
naam text
)""")
def initiate_snack_table(conn):
    """Create the `snack` table: snack name, owning snackbar, and price."""
    cur = conn.cursor()
    cur.execute("""CREATE TABLE snack (
naam text,
snackbar text,
prijs float
)""")
def generate_insert_leiding_statement(leider, connection):
    """Insert one leader row (first, last, schuld) and commit.

    Uses a parameterized query instead of str.format so names containing
    quotes (e.g. O'Neil) no longer break the SQL and injection is impossible.
    """
    cursor = connection.cursor()
    cursor.execute("INSERT INTO leiding VALUES (?, ?, ?)",
                   (leider[0], leider[1], leider[2]))
    connection.commit()
def fill_leiding_table():
    """Insert every leader from the CSV fixture (whitespace-separated
    first/last/debt fields) using the module-level `conn`.

    Fix: the fixture file handle was previously never closed.
    """
    with open("src/python_parts/database_setup/leiding.csv", "r") as source:
        for line in source:
            leider = line[:-1].split()
            print(leider)
            generate_insert_leiding_statement(leider, conn)
def addbar(bar, connection):
    """Insert a snackbar row and commit; returns 0 (kept for existing callers).

    Fix: parameterized query instead of str.format, so bar names with
    quotes cannot break the SQL (injection-safe).
    """
    cursor = connection.cursor()
    cursor.execute("INSERT INTO snackbar VALUES (?)", (bar,))
    connection.commit()
    return 0
def add_snack(bar, connection, snack):
    """Insert a snack (snack[0] = name, snack[1] = price) for *bar* and commit.

    Fix: parameterized query instead of str.format (injection/quoting
    safety); the debug print of the raw statement was dropped.
    """
    cursor = connection.cursor()
    cursor.execute("INSERT INTO snack VALUES (?, ?, ?)",
                   (snack[0], bar, snack[1]))
    connection.commit()
def fill_test_bar(con):
    """Insert every snackbar named in the test fixture (one name per line).

    Fix: the fixture file handle was previously never closed.
    """
    with open("src/python_parts/database_setup/test_snackbars.csv", "r") as source:
        for line in source:
            # Strip the trailing newline; the rest of the line is the name.
            addbar(line[:-1], con)
def fill_test_snacks_for_njam_njam(conn, bar):
    """Load snacks from the njam-njam fixture (tab-separated name/price)
    and insert them for *bar*.

    Fixes: the `bar` parameter was ignored in favour of a hard-coded
    "njam njam" (the sole visible caller passes that same value, so
    behavior is unchanged for it), and the file handle was never closed.
    """
    with open("src/python_parts/database_setup/njam_njam_snacks.csv", "r") as source:
        for line in source:
            snack = line[:-1].split("\t")
            add_snack(bar, conn, snack)
# Bootstrap: create the schema, then load leader and snackbar fixture data.
# NOTE(review): `curs` is created but never used.
curs = conn.cursor()
initiate_leiding_table(conn)
fill_leiding_table()
initiate_snack_table(conn)
initiate_snackbar_table(conn)
fill_test_bar(conn)
fill_test_snacks_for_njam_njam(conn, "njam njam")
|
13,421 | 744e67a418647dd88fcec020f9d38546ba4723dc | # -*- coding: utf-8 -*-
'''
้กต้ข่งฃๆๅจ
'''
__author__ = 'Evan Hung'
import urlparse
import re
from bs4 import BeautifulSoup
class HtmlParser(object):
    """Parses a fetched Baike page: extracts follow-up item links and the
    page's title/summary data. (Python 2 code -- uses urlparse.)"""

    def parse(self, page_url, html_cont):
        """Return (new_urls, new_data) for the page, or None on empty input."""
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Collect all absolute URLs whose href matches /item/<word>."""
        new_urls = set()
        # Find every anchor tag whose href looks like /item/***
        links = soup.find_all('a', href=re.compile(r'/item/\w+'))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)  # resolve to a full URL
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Extract {'url', 'title', 'summary'} from the page."""
        res_data = {}

        # url
        res_data['url'] = page_url

        # Title lives in <dd class="lemmaWgt-lemmaTitle-title"><h1> title </h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        if title_node:
            res_data['title'] = title_node.get_text()
        else:
            res_data['title'] = ''

        # Summary lives in <div class="lemma-summary"> summary content </div>
        summary_node = soup.find('div', class_='lemma-summary')
        if summary_node:
            res_data['summary'] = summary_node.get_text()
        else:
            res_data['summary'] = ''

        return res_data
|
13,422 | 2a2bd3714d4b2805a43416861951e615c5e1eb07 | import pyttsx3
# List every text-to-speech voice available to the pyttsx3 engine.
engine = pyttsx3.init()
ssound = engine.getProperty('voices')
for sound in ssound:
    # Prints the literal header 'voice' before each entry's id and gender.
    print('voice')
    print('id %s' %sound.id)
    print('gender %s' %sound.gender)
    print('**************************')
|
13,423 | 83f8c193a287a07e096a190df7736b4c103aeaa4 | #!/usr/bin/env python
## Create a schema for the table and then create the table.
from google.cloud import bigquery
client = bigquery.Client()
table_id = "innate-entry-286804.rns_sample_dataset.rns_db_4"
schema=[
bigquery.SchemaField('itemid','STRING',mode='REQUIRED'),
bigquery.SchemaField('quantity','STRING',mode='REQUIRED'),
bigquery.SchemaField('userid','STRING',mode='REQUIRED'),
bigquery.SchemaField('Metadata','RECORD',mode='REPEATED',fields=[
bigquery.SchemaField('geography','STRING',mode='REQUIRED'),
bigquery.SchemaField('location','STRING',mode='REQUIRED'),
bigquery.SchemaField('hourofday','INT64',mode='REQUIRED')
]
)
]
table = bigquery.Table(table_id,schema=schema)
## Creating an ingestion-time partitioned table
table.time_partitioning = bigquery.TimePartitioning("HOUR") ## Defaults to DAY
table = client.create_table(table)
print("Created table {}.{}.{}".format(table.project, table.dataset_id, table.table_id))
|
13,424 | 0aa1e216b5f136bd30251a4d3b1b1b24f6c8466f | # Used by the network to perform actions and getting new states
from Grab_screen import grab_screen
class Game_state:
    """Bridges the RL agent and the Dino game: performs an action and
    returns the resulting (screen image, reward, game_over) observation."""

    def __init__(self, agent, game):
        self._agent = agent
        self._game = game

    # get_state(): Accepts an array of actions and performs the action on the Dino
    # Returns the new state, the reward, and if the game ended
    def get_state(self, actions):
        """Execute `actions` (actions[1] == 1 means jump) and observe.

        Returns:
            (image, reward, is_over): the captured screen, a score-scaled
            reward (negative on crash), and whether the game ended.
        """
        score = self._game.get_score()
        reward = 0.1 * score / 10
        is_over = False
        if actions[1] == 1:
            self._agent.jump()
            # Jumping earns a slightly smaller reward than running.
            reward = 0.1 * score / 11
        image = grab_screen()
        if self._agent.is_crashed():
            self._game.restart()
            # Fix: guard against ZeroDivisionError when crashing at score 0.
            reward = -11 / max(score, 1)
            is_over = True
        return image, reward, is_over
13,425 | 43d0988b7f6e79345bae3f48040486a00b7a49d5 | from rest_framework import serializers
from .models import *
class ForecastSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of a Forecast record."""
    class Meta():
        model = Forecast
        fields = ('place_name','cyclone_id','cyclone_name','image_link','time_of_last_forecast','created_at')
13,426 | de81d6098282e7e405bcacc9f5d518ceb8f3a881 | import sys
# Profile-most-probable k-mer: find the k-mer of `dna` with the highest
# probability under the 4 x k profile matrix (rows in A, C, G, T order).
infile = sys.argv[1]
with open(infile) as inputf:
    lines = inputf.readlines()
dna = lines[0].strip()
k = int(lines[1].strip())
matrix = []
for line in lines[2:]:
    # Each remaining line is one row of the profile matrix.
    matrix.append([float(val) for val in line.strip().split()])
trans = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
mostPr = float('-inf')
mostKmer = ''
# BUG FIX: range(len(dna) - k) skipped the final k-mer; +1 includes it.
for i in range(len(dna) - k + 1):
    kmer = dna[i:i+k]
    Pr = 1
    for jj, w in enumerate(kmer):
        Pr *= matrix[trans[w]][jj]
    if Pr > mostPr:
        mostPr = Pr
        mostKmer = kmer
print(mostKmer)
13,427 | 595297e304abd3ecd44084e0584a2387176bc2cb | import sys
from PySide6 import QtWidgets
from productiveware.widgets.main_window import MainWidget
if __name__ == '__main__':
    # Start the Qt application and enter its event loop.
    app = QtWidgets.QApplication()
    main_window = MainWidget()
    # NOTE(review): main_window is never .show()n here -- presumably
    # MainWidget shows itself in its constructor; confirm.
    sys.exit(app.exec())
|
13,428 | 1e11380d8b13bd2a60fcd53e1116dba06d51bc38 | #!/usr/bin/python3
# coding = utf-8
"""
@author:m1n9yu3
@file:main.py
@time:2021/01/12
"""
import threading
from tmp.get_data import *
from tmp.keyword_get import ask_url, search_key
'''
target : ็ฎๆ
http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={ๅธๅญid}&page_no={้กตๆฐ}
ๅธๅญid ไพๆฌก้ๅข
'''
def section_multi_thread(url, start_id, step):
    """Download `step` posts starting at `start_id`, one thread per post.

    BUG FIX: the only visible caller invokes this as
    section_multi_thread(url, i, thread_num); the old (start_id, step)
    signature raised TypeError. `url` is accepted to match that caller but
    is unused -- download_json_image derives its own URL from the post id.
    """
    threads = [
        threading.Thread(target=download_json_image, args=(start_id + i,))
        for i in range(step)
    ]
    for worker in threads:
        worker.start()
    for worker in threads:
        worker.join()
def section_get():
url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
# ๆถ้ๅๅงๅๆฐๆฎ
section = input("่ฏท่พๅ
ฅๅบ้ด: start-end (start >= 1, end > start)")
start = int(section.split('-')[0])
end = int(section.split('-')[1])
thread_num = int(input("่ฏท่พๅ
ฅ็บฟ็จๆฐ้:"))
# ๅผๅง็ฌๅ
step = 1000 # ่ฎพ็ฝฎ็บฟ็จๆฐ้
for i in range(start, end, thread_num):
# parse_json(url, i)
# ไธไธไธช็ฎๆ ๏ผๅฐ่ฏๅค็บฟ็จไผๅ
section_multi_thread(url, i, thread_num)
# ็ฌๅ่ฎฐๅฝ ๏ผ 2021.1.13, 8:00 ็ฌๅๅฐ 24000 post_id
def get_leg():
"""่ทๅ็พ่
ฟๅพ็"""
path = input("่ฏท่พๅ
ฅ็ฌๅ่ทฏๅพ๏ผไป
ๆฏๆๅทฒๅญๅจ็็ฎๅฝ๏ผๆ่
ๅ็บง็ฎๅฝ:")
try:
page_num = int(input("่ฏท่พๅ
ฅ้กตๆฐ,้กตๆฐ่ถๅคง๏ผ็ฌ็่ถๆ
ข:"))
except ValueError:
page_num = 5
url = "http://floor.huluxia.com/post/list/ANDROID/2.1?platform=2&market_id=tool_baidu&start={}&count=20&cat_id=56&tag_id=0&sort_by=0"
if path[-1] != '/':
path += '/'
ask_url(url, path, page_num)
def get_post_id():
post_id = int(input("่ฏท่พๅ
ฅ post id๏ผ"))
path = input("่ฏท่พๅ
ฅ็ฎๅฝ,่พๅ
ฅq ,ๅไฟๅญๅฐ้ป่ฎค็ฎๅฝ๏ผ")
if path == 'q':
download_json_image(post_id, './img/')
else:
download_json_image(post_id, './{}/'.format(path))
def main():
# ๆธ
้คๆฅๅฟ ็ฌๅ่ฟ็จไธญๅบ็ฐ็้่ฏฏ
remove_("../log.txt")
"""ไธปๆจกๅ่ๅ๏ผๅฐๆๆๅ่ฝ้ๅๆไธไธช่ๅ"""
while True:
print("------่ๅ-------")
print("1. ๅบ้ด็ฌๅ")
print("2. ็ฌๅ็พ่
ฟๅพ็")
print("3. ๅ
ณ้ฎๅญ็ฌๅ")
print("4. ็ฌๅ post_id ๅฏนๅบ็ๅธๅญ")
print("5. ่ฎพ็ฝฎไปฃ็")
print("q. ้ๅบ่ๅ")
set_proxy(None)
flag = input("่ฏท่พๅ
ฅไฝ ็้้กน:")
if flag == '1':
section_get()
elif flag == '2':
get_leg()
elif flag == '3':
keyword = input("่ฏท่พๅ
ฅๅ
ณ้ฎๅญ:")
search_key(keyword)
elif flag == '4':
get_post_id()
elif flag == '5':
http_ip = input("่ฏท่พๅ
ฅ: ไปฃ็ipๅฐๅ:็ซฏๅฃ ")
set_proxy({"http": http_ip})
elif flag == 'q':
break
if __name__ == '__main__':
main()
# get_leg()
|
13,429 | 6efb43fc22c94ece22322f6a841a9e07df8fe06a | from help import *
import math
import re
import sys
def settings(str):
    """Parse a help text into an {option-name: default-value} dict.

    Each recognized line looks like "  -x  --name  description = value".
    """
    pattern = "\n[\s]+[-][\S]+[\s]+[-][-]([\S]+)[^\n]+= ([\S]+)"
    return dict(re.findall(pattern, str))
def coerce(s1):
    """Convert the string *s1* to its natural Python value.

    "true"/"false" become booleans, digit strings become ints, strings
    made of digits and a '.' become floats; anything else is returned
    unchanged.
    """
    if s1 == "true":
        return True
    if s1 == "false":
        return False
    if s1.isnumeric():
        return int(s1)
    if '.' in s1 and s1.replace('.', '').isnumeric():
        return float(s1)
    return s1
def cli(t):
    """Override the defaults in *t* from sys.argv flags, in place.

    For each slot, "-x" (first letter) or "--slot" flips a boolean
    default, or takes the following argv token as the new value.
    Returns the (mutated) dict.
    """
    for slot, default in t.items():
        value = str(default)
        for pos, arg in enumerate(sys.argv):
            if arg == "-" + slot[0] or arg == "--" + slot:
                if value == "false":
                    value = "true"
                elif value == "true":
                    value = "false"
                else:
                    value = sys.argv[pos + 1]
        t[slot] = coerce(value)
    return t
def eg(key, str, fun):
    """Register the example/action *fun* under *key* and extend the help text.

    NOTE(review): `egs` and `help` are expected to come from the star-import
    of the `help` module at the top of the file -- confirm they exist there.
    """
    egs[key] = fun
    global help
    help = help + ' -g '+ key + '\t' + str + '\n'
|
13,430 | e638123ed947787fc611d5580f5908e93fea8afc | ''' ะคะพัะผะฐัะธัะพะฒะฐะฝะธะต ัััะพะบ
'''
name = 'John'
age = 34
# print('My name is ' + name + '. I\'m ' + str(age ))
# print('My name is % (name)s. I\'m %(age)d' %{'name': name, 'age': age}) #ะฝะต ัะฐะฑะพัะฐะตั!!!
#print('My name is %s. I\'m %d' % ('David', age))
print('Title: %s, Price: %f' %('Sony', 40)) #Title: Sony, Price: 40.000000
print('Title: %s, Price: %.2f' %('Sony', 40)) #Title: Sony, Price: 40.00
# format
# print ('My name is {}. I\'m {}'.format(name, age))
# print ('My name is {0}. I\'m {1}'.format(name, age))
# print ('My {1} name is {0}. I\'m {1}'.format(name, age))
# f-strings
# print (f'My name is {name}. I\'m {age}')
print (f'My name is {name}. I\'m {age + 5}')
print('5 + 2 = {}'. format (5 + 2))
print(f'5 + 2 - {5 + 2}')
|
13,431 | e924c622706ed88627ff31dba68fb4a620a65a6b | import predictor
import pandas as pd
# [driver name, team] pairs for the current F1 grid.
active_drivers = [['Daniel Ricciardo','McLaren'],
                  ['Mick Schumacher','Haas F1 Team'],
                  ['Carlos Sainz','Ferrari'],
                  ['Valtteri Bottas','Mercedes'],
                  ['Lance Stroll','Aston Martin'],
                  ['George Russell','Williams'],
                  ['Lando Norris','McLaren'],
                  ['Sebastian Vettel','Aston Martin'],
                  ['Kimi Rรคikkรถnen','Alfa Romeo'],
                  ['Charles Leclerc','Ferrari'],
                  ['Lewis Hamilton','Mercedes'],
                  ['Yuki Tsunoda','AlphaTauri'],
                  ['Max Verstappen','Red Bull'],
                  ['Pierre Gasly','AlphaTauri'],
                  ['Fernando Alonso','Alpine F1'],
                  ['Sergio Pรฉrez','Red Bull'],
                  ['Esteban Ocon','Alpine F1'],
                  ['Antonio Giovinazzi','Alfa Romeo'],
                  ['Nikita Mazepin','Haas F1 Team'],
                  ['Nicholas Latifi','Williams']]

# Collect each driver's qualifying data for the chosen circuit.
qualif = []
for row in active_drivers:
    #for elem in row:
    circuit = "Silverstone Circuit"
    #circuit = "Yas Marina Circuit"
    driver = row[0]
    q = predictor.getQualifData(circuit, driver)
    qualif.append(q)
print(qualif)
|
13,432 | dda07b23dc1fa4266a687b5cbab4d6e19f710ffc | # Copyright (c) 2020, Vladimir Efimov
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import modules.text_processor_normalize as tpn
from modules.term_scoring import get_term_score
def count_term_entries(sentences, tp, term_count, term_document_count, topics_words):
    """Accumulate term statistics for one document, in place.

    Updates:
      term_count          -- total occurrences of each word
      term_document_count -- number of documents containing each word
      topics_words        -- per-topic word occurrence counts

    `sentences` is a list of (sentence, topic) pairs; `tp` provides
    sentence_to_words() tokenization. An empty topic means "unlabeled".
    """
    seen_in_document = set()
    for sentence, topic in sentences:
        tokens = tp.sentence_to_words(sentence)
        if topic != "" and topic not in topics_words:
            topics_words[topic] = {}
        for token in tokens:
            if token in term_count:
                term_count[token] += 1
                # Count each word at most once per document.
                if token not in seen_in_document:
                    term_document_count[token] += 1
                    seen_in_document.add(token)
            else:
                term_count[token] = 1
                term_document_count[token] = 1
                seen_in_document.add(token)
            if topic == "":
                continue
            topics_words[topic][token] = topics_words[topic].get(token, 0) + 1
if __name__ == "__main__":
    term_count = {}
    term_document_count = {}
    term_score = {}
    topics_words = {}
    num_documents = 0
    text_processor = tpn.TextProcessorNormalize("stop_words.txt")
    stop_words = text_processor.get_stop_words()
    # empty word could be in word list represented text as specific of text processing
    stop_words.add("")
    if len(sys.argv) == 1:
        print("Usage: python3 " + sys.argv[0] + " <file with labeled text>")
        print("")
        print("\tProgram counts terms in labeled text and evaluates term's score as for not labeled text")
        print("\tAdditionally program calculates words distribution across topics")
        exit()
    label_filename = sys.argv[1]
    # Input is tab-separated: filename, sentence, optional topic label.
    # Consecutive lines with the same filename form one document.
    with open(label_filename) as f:
        f.readline() # skip header
        line = f.readline().strip()
        old_filename = ""
        sentences = []
        while line:
            line_parts = line.split("\t")
            if len(line_parts) < 2:
                break
            filename = line_parts[0]
            sentence = line_parts[1]
            topic = "" if len(line_parts) == 2 else line_parts[2]
            if filename == old_filename or old_filename == "":
                sentences.append((sentence, topic))
            else:
                # Filename changed: flush the finished document's statistics.
                count_term_entries(sentences, text_processor, term_count, term_document_count, topics_words)
                sentences.clear()
                sentences.append((sentence, topic))
                num_documents += 1
            old_filename = filename
            line = f.readline().strip()
    # Flush the final document.
    if len(sentences) > 0:
        num_documents += 1
        count_term_entries(sentences, text_processor, term_count, term_document_count, topics_words)
    for (term, count) in term_count.items():
        term_score[term] = get_term_score(count, term_document_count[term], num_documents)
    # Sort by score descending, ties broken alphabetically descending.
    sorted_items = sorted(term_score.items(), reverse=True, key=lambda key_value: (key_value[1], key_value[0]))
    topic_list = topics_words.keys()
    header = ["Term", "Score", "Count", "Document count", "Labeled ratio"]
    header.extend(topic_list)
    print("\t".join(header))
    # One output row per term: stats plus its distribution across topics.
    for (term, score) in sorted_items:
        output_line = ""
        labeled_count = 0
        for topic in topics_words:
            if term in topics_words[topic]:
                labeled_count += topics_words[topic][term]
        labeled_ratio = float(labeled_count) / float(term_count[term])
        for topic in topics_words:
            if term in topics_words[topic]:
                topic_word_ratio = float(topics_words[topic][term]) / float(labeled_count)
                output_line += "\t{:f}".format(topic_word_ratio)
            else:
                output_line += "\t0.0"
        output_line = "{}\t{:f}\t{}\t{}\t{:f}".format(
            term, score, term_count[term], term_document_count[term], labeled_ratio) + output_line
        print(output_line)
|
13,433 | 96adad4aa658cd8f0a06f33ef3294b1d9f15eb35 | import streamlit as st
import pandas as pd
import os
from PIL import Image
from datetime import datetime
import streamlit.components.v1 as stc
import base64
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
import sqlite3
conn = sqlite3.connect('data.db')
c = conn.cursor()
metadata_wiki = """
"""
HTML_BANNER = """
<div style="background-color:#364e5f;padding:10px;border-radius:10px">
<h1 style="color:white;text-align:center;">Power BI Presentation
</h1>
<h2 style="color:white;text-align:center;">Lecturer: Iman Eftekhari </h2>
</div>
"""
HTML_BANNER2 = """
<div style="background-color:#364e5f;padding:10px;border-radius:10px">
<h2 style="color:white;text-align:center;">Jun 15, 2021 07:00 PM Canberra, Melbourne, Sydney</h2>
</div>
"""
@st.cache
def load_image(image_file):
    """Open an image file with PIL (cached by Streamlit across reruns)."""
    img = Image.open(image_file)
    return img
def get_readable_time(mytime):
    """Format a POSIX timestamp as 'YYYY-MM-DD-HH:MM' in local time."""
    stamp = datetime.fromtimestamp(mytime)
    return stamp.strftime('%Y-%m-%d-%H:%M')
def create_uploaded_filetable():
    """Create the registration table once (no-op if it already exists).

    Relies on the module-level cursor `c`.
    """
    c.execute('CREATE TABLE IF NOT EXISTS filestable(name TEXT,family TEXT, occupation TEXT, interest TEXT, other TEXT)')
def add_file_details(name,family, occupation,interest,other):
    """Insert one registration row and commit.

    Already parameterized, so the user-supplied values are injection-safe.
    Relies on the module-level `c` cursor and `conn` connection.
    """
    row = (name,family, occupation,interest, other)
    c.execute('INSERT INTO filestable(name,family, occupation,interest,other) VALUES (?,?,?,?,?) ' , row)
    conn.commit()
def view_all_data():
    """Return every registration row from filestable."""
    cursor = c.execute('SELECT * FROM filestable ')
    return cursor.fetchall()
def main():
    """Streamlit app entry point: banner, sidebar menu, and the Home /
    Register pages (registration rows are stored in sqlite)."""
    st.image(load_image("pbi2.png"))
    stc.html(HTML_BANNER)
    stc.html(HTML_BANNER2)
    menu = ["Home","Register"]
    choice = st.sidebar.selectbox("Menu",menu)
    # Ensure the registrations table exists before any page uses it.
    create_uploaded_filetable()
    if choice == "Home":
        #st.subheader("Home")
        st.image(load_image("pbi.png"))
        st.write(metadata_wiki)
    elif choice == "Register":
        #st.subheader("Insert your information here:")
        with st.beta_expander("Insert your information here:"):
            input1 = st.empty()
            input2 = st.empty()
            input3 = st.empty()
            other=" "
            name = input1.text_input("* Name:", value = "")
            family = input2.text_input("* Family:", value = "")
            occupation = input3.text_input(" Occupation:", value = "")
            #m = st.multiselect("Interested in :",['Data Scientist','Data Analyst','BI Specialist', 'Business Analyst', 'Power BI Developer', 'other'])
            interest = st.radio("Interested in :",['Data Scientist','Data Analyst','BI Specialist', 'Business Analyst', 'Power BI Developer', 'Other'])
            if interest == "Other":
                input4 = st.empty()
                other = input4.text_area("Others")
            if st.button("Submit"):
                add_file_details(name, family,occupation,interest,other)
                # Re-render the inputs blank-ish after a successful submit.
                name = input1.text_input("* Name:", value = " ")
                family = input2.text_input("* Family:", value = " ")
                occupation = input3.text_input(" Occupation:", value = " ")
                if interest == "Other":
                    other = input4.text_area("Others", value = " ")
                st.success("Registered successfully")
                st.write('''Here is the link of the presentation:\n
Join Zoom Meeting
https://us05web.zoom.us/j/82574739442?pwd=WWRqSmdpU0dvUW4yOEwwRkFpSmJwdz09
Meeting ID: 825 7473 9442
''')
                st.write("Passcode: An9fr4")
    #st.write(view_all_data())
|
13,434 | 269db89ad962d2707c4dd8bc6d8fec63a37851e1 | from .base import BaseResourceTest
from test.factories import DatasetGenerationJobFactory
from src.master.resources.dataset_generation_job import DatasetGenerationJobResource, DatasetGenerationJobListResource
import os
from src.master.resources.datasets import load_dataset_as_csv
from src.models import Dataset, DatasetGenerationJob
import pandas as pd
from src.db import db
class DatasetGenerationJobTest(BaseResourceTest):
    """API tests for the dataset-generation-job list/detail resources:
    listing, fetching, result upload (once only), and job creation."""

    def test_returns_all_dataset_generation_jobs(self):
        # Given
        job = DatasetGenerationJobFactory()
        job2 = DatasetGenerationJobFactory()

        # When
        result = self.get(self.url_for(DatasetGenerationJobListResource))

        # Then
        assert len(result) == 2
        assert result[0]['id'] == job.id
        assert result[1]['id'] == job2.id

    def test_returns_my_dataset_generation_job(self):
        # Given
        job = DatasetGenerationJobFactory()

        # When
        result = self.get(self.url_for(DatasetGenerationJobResource, job_id=job.id))

        # Then
        assert result['id'] == job.id

    def test_put_upload_dataset(self):
        """Uploading a CSV stores it as a Dataset and closes the job."""
        # Given
        job = DatasetGenerationJobFactory()
        dirname = os.path.dirname(__file__)
        fixture = os.path.join(dirname, '../../../fixtures/generated_dataset.csv')
        data = dict(
            file=(open(fixture, 'rb'), "generated.csv"),
        )

        # When
        response = self.test_client.put(
            self.url_for(DatasetGenerationJobResource, job_id=job.id),
            content_type='multipart/form-data',
            data=data
        )

        # Then
        assert response.status_code == 200
        assert response.json["id"] is not None

        # The stored dataset must round-trip to the uploaded CSV.
        ds = self.db.session.query(Dataset).first()
        result_buffer = load_dataset_as_csv(self.db.session, ds)
        result_dataframe = pd.read_csv(result_buffer)
        expected_dataframe = pd.read_csv(fixture)
        result_dataframe.index = expected_dataframe.index
        pd.testing.assert_frame_equal(expected_dataframe, result_dataframe)

        updated_job: DatasetGenerationJob = DatasetGenerationJob.query.get(job.id)
        assert updated_job.dataset == ds
        assert updated_job.end_time is not None

    def test_abort_after_second_upload_for_same_id(self):
        """A second upload for an already-completed job is rejected (400)."""
        # Given
        job = DatasetGenerationJobFactory()
        dirname = os.path.dirname(__file__)
        fixture = os.path.join(dirname, '../../../fixtures/generated_dataset.csv')
        firstData = dict(
            file=(open(fixture, 'rb'), "generated.csv"),
        )
        secondData = dict(
            file=(open(fixture, 'rb'), "generated.csv"),
        )

        # When
        self.test_client.put(
            self.url_for(DatasetGenerationJobResource, job_id=job.id),
            content_type='multipart/form-data',
            data=firstData
        )
        response = self.test_client.put(
            self.url_for(DatasetGenerationJobResource, job_id=job.id),
            content_type='multipart/form-data',
            data=secondData
        )

        # Then
        assert response.status_code == 400

    def test_create_dataset_generation_job(self):
        # Given
        data = dict()
        data['parameters'] = "{'nodes': 10, 'samples':1000}"
        data['generator_type'] = 'MPCI'
        data['datasetName'] = 'creation_test_dataset'
        data['kubernetesNode'] = 'test_k8s_node'

        # When
        self.post(self.url_for(DatasetGenerationJobListResource), json=data)
        job: DatasetGenerationJob = db.session.query(DatasetGenerationJob).first()

        # Then
        assert job.dataset_id is None
        assert job.datasetName == data['datasetName']
        assert job.generator_type == data['generator_type']
        assert job.node_hostname == data['kubernetesNode']
        assert job.parameters == data['parameters']

    def test_create_dataset_generation_job_without_kubernetes_node(self):
        """The kubernetesNode field is optional; node_hostname stays None."""
        # Given
        data = dict()
        data['parameters'] = "{'nodes': 10, 'samples':1000}"
        data['generator_type'] = 'MPCI'
        data['datasetName'] = 'creation_test_dataset'

        # When
        self.post(self.url_for(DatasetGenerationJobListResource), json=data)
        job: DatasetGenerationJob = db.session.query(DatasetGenerationJob).first()

        # Then
        assert job.dataset_id is None
        assert job.datasetName == data['datasetName']
        assert job.generator_type == data['generator_type']
        assert job.parameters == data['parameters']
        assert job.node_hostname is None
|
13,435 | 9066aa06aef0e1f77c7f902aa3d4822923b06092 | #doing linear searches
names = ["Bill", "Charlie", "Fred", "Alien"]
if "Aunty" in names:
print("Found")
else:
print("Not Found")
|
13,436 | cfb3de6ff3c83ed3cef2527064a1a1d9151c1ef8 |
import pandas.tools.plotting as pdplt
import matplotlib.pylab as plt
import seaborn as sns
import subprocess
import pandas as pd
import numpy as np
import serial
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from Tkinter import *
import Tkinter as Tk
class EnterInterface:
def __init__(self, master):
self.master = master
master.title("Farmer-Scientist Tool")
master.minsize(width=1000, height=550)
f1 = Frame(master, height=100, width=175)
f1.pack_propagate(0) # don't shrink
f1.pack()
f1.place(x=200, y=60)
self.greet_button = Button(f1, text="Scientist", command=self.graph_menu)
self.greet_button.config(activebackground='Green', relief='raised')
self.greet_button.pack(fill=BOTH, expand=1)
f = Frame(master, height=100, width=175)
f.pack_propagate(0) # don't shrink
f.pack()
f.place(x=650, y=60)
self.greet_button = Button(f, text="Farmer", command=greet_1)
self.greet_button.config(activebackground='Green')
self.greet_button.pack(fill=BOTH, expand=1)
f3 = Frame(master, height=50, width=175)
f3.pack_propagate(0) # don't shrink
f3.pack()
f3.place(x=425, y=200)
self.close_button = Button(f3, text="Close", command=master.quit)
self.close_button.config(activebackground='Red')
self.close_button.pack(fill=BOTH, expand=1)
def graph_menu(self):
menubar = Menu(root)
menubar.add_command(label="Andrews Graph", activebackground='Light Green', command=self.display_andrews_graph)
menubar.add_command(label="Regression Graph", activebackground='Light Green', command=self.regression_graph)
menubar.add_command(label="Temperature Gradient", activebackground='Light Green', command=self.temp)
menubar.add_command(label="FaceGrid", activebackground='Light Green', command=self.face)
menubar.add_command(label="Humidity Gradient", activebackground='Light Green', command=self.humidity)
menubar.add_command(label="Quit", activebackground='Light Green', command=root.quit)
# display the menu
root.config(menu=menubar)
def display_andrews_graph(self):
pdplt.andrews_curves(df, "output", ax=None)
plt.show()
def regression_graph(self):
df = pd.read_csv("test_file2.csv", names=['humidity', 'temp', 'moisture', 'LDR', 'output'])
sns.jointplot("moisture", "humidity", df, kind='reg')
def temp(self):
df = pd.read_csv("test_file2.csv", names=['humidity', 'temp', 'moisture', 'LDR', 'output'])
g = sns.FacetGrid(df, col="output")
g.map(sns.distplot, "temp")
plt.show()
def humidity(self):
    """Show per-class humidity distributions as a FacetGrid of histograms."""
    df = pd.read_csv("test_file2.csv", names=['humidity', 'temp', 'moisture', 'LDR', 'output'])
    g = sns.FacetGrid(df, col="output")
    g.map(sns.distplot, "humidity")
    plt.show()
def face(self):
    """Plot humidity-vs-temperature regression panels, one per output class."""
    data = pd.read_csv("test_file2.csv", names=['humidity', 'temp', 'moisture', 'LDR', 'output'])
    grid = sns.FacetGrid(data, col="output")
    grid.map(sns.regplot, "humidity", "temp")
    # Fixed axis ranges so the per-class panels are directly comparable.
    plt.xlim(0, 100)
    plt.ylim(0, 35)
    plt.show()
class greet_1:
    """Farmer-mode workflow: trains a decision tree on the sensor CSV, reads
    one live sample from the Arduino over serial, and prints a crop-condition
    classification.  (Python 2 syntax: uses print statements.)
    """
    def __init__(self):
        # Encode the free-text target column into integer labels.
        def encode_target(df, target_column):
            df_mod = df.copy()
            targets = df_mod[target_column].unique()
            map_to_int = {name: n for n, name in enumerate(targets)}
            df_mod["Target"] = df_mod[target_column].replace(map_to_int)
            return df_mod, targets
        # Train a decision tree on the module-level dataframe `df`.
        df2, targets = encode_target(df, "output")
        features = list(df2.columns[:4])
        y = df2["Target"]
        X = df2[features]
        dt = DecisionTreeClassifier(min_samples_split=20, random_state=99)
        dt.fit(X, y)
        # plt.figure()
        # plt.show()
        arduino_data = []  # NOTE(review): unused; getValues() binds its own local
        # dt_test = pd.read_csv("test_this.csv", names=['humidity', 'temp', 'moisture', 'LDR']
        # Open the Arduino serial port (device path is hard-coded).
        ser = serial.Serial('/dev/ttyACM0', baudrate=9600, timeout=1)
        def getValues():
            # Request one reading ('g') and return the raw line as ASCII text.
            ser.write(b'g')
            arduino_data = ser.readline().decode('ascii')
            return arduino_data
        f5 = Frame(root, height=100, width=175)
        f5.pack_propagate(0) # don't shrink
        f5.pack()
        f5.place(x=10, y=80)
        L1 = Label(f5, text="Enter 0 to proceed :")
        L1.pack()
        E1 = Entry(f5, bd=5)
        E1.pack()
        # NOTE(review): E1.get() is read immediately after creating the Entry,
        # so `answer` is always the empty string here -- confirm intent.
        answer = E1.get()
        print(answer)
        # NOTE(review): Entry.get() returns a string; comparing to the int 0
        # is always False, so this branch can never run as written.
        if answer == 0:
            test_data = []
            test_data = getValues()
            myFile = open("test_data_1.csv", 'w+')
            myFile.write(test_data)
            myFile.close()
            print ("File Written")
            dt_test = pd.read_csv("test_data_1.csv", names=['humidity', 'temp', 'moisture', 'LDR'])
            final_data = dt_test.head(1)
            # NOTE(review): `type` shadows the builtin, and dt.predict returns
            # an ndarray -- comparing it to the string '[0]' is always False.
            type = dt.predict(final_data)
            # print dt.predict(x_test)
            if type == '[0]':
                print 'dry'
            elif type == '[1]':
                print 'Healthy'
            else:
                print "Unfavorable"
# reading the training data and storing it in pandas dataframe
df = pd.read_csv("test_file2.csv", names=['humidity', 'temp', 'moisture', 'LDR', 'output'])
# print df['output'].unique()
# sns.pairplot(df, hue="output", size=2)
# plt.show()
# Build the Tk root window with a full-window background image and start
# the main menu GUI (EnterInterface).
root = Tk.Tk()
background_image = Tk.PhotoImage(file="/home/kapoor1/Desktop/hello.png")
background_label = Tk.Label(root, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
my_gui = EnterInterface(root)
# root["bg"] = 'white'
root.mainloop()
'''
def visualize_tree(tree,feature_names):
with open("dt.dot", 'w') as f:
export_graphviz(tree, out_file=f, feature_names=feature_names)
command = ["dot", "-Tpng", "dt.dot", "-o", "dt.png"]
try:
subprocess.check_call(command)
except:
exit("Could not run dot, ie graphviz, to "
"produce visualization")
visualize_tree(dt, features)
'''
|
13,437 | ed6645a367407c554fd8aad9dc14b038d3cb4626 | #!/usr/bin/env python
# coding: utf-8
import os
import re
import time
import json
# mysqldump flags for a data-only, replication-aware dump.
# Fix: the implicit string concatenation was missing a space, producing the
# invalid option "--no-create-info--master-data".
defaults = "--single-transaction --skip-lock-tables --compact --skip-opt --quick --no-create-info " \
           "--master-data --skip-extended-insert"
# ignore_tables = ["soccerda.ndb_apply_status"]
def dump_mysql(conn, schemas, misc = defaults, ignore_tables = None):
    """Run mysqldump for `schemas` and return the path of the dump file.

    :param conn: dict with 'host', 'port', 'user', 'passwd'
    :param schemas: schema name(s) passed to --databases
    :param misc: extra mysqldump flags (defaults to the module `defaults`)
    :param ignore_tables: optional list of "schema.table" names to skip
    :return: path of the generated SQL file under tmp/
    """
    # Fix: the original returned a hard-coded "tmp/data_1458743695.sql" on
    # its first line (a leftover debug stub) which made everything below
    # unreachable.
    if ignore_tables is None:  # avoid a mutable default argument
        ignore_tables = []
    opts = "--host=%s --port=%s --user=%s --password=%s" % \
           (conn["host"], conn["port"], conn["user"], conn["passwd"])
    for table in ignore_tables:
        opts = "%s --ignore-table=%s" % (opts, table)
    opts = "%s %s" % (misc, opts)
    tfile = "tmp/data_%d.sql" % time.time()
    cmdstr = "mysqldump %s --databases %s >%s" % (opts, schemas, tfile)
    os.system(cmdstr)
    print(cmdstr)
    return tfile
# for table name
# pat_table = re.compile(r"CREATE TABLE `(.*)` \(\n")
def get_tables(conn, schemas):
    """Build a nested dict describing every table's columns and types.

    Dumps the schemas structure-only ("-d") via dump_mysql, then scans the
    resulting SQL text for CREATE TABLE blocks.

    :return: {table_name: {column_name: {"type": sql_type, "val": ""}}}

    NOTE(review): uses the Python 2-only builtin ``file()``; under Python 3
    this would need ``open()``.
    """
    pat_key = re.compile(r" `(.*)` ")        # table name between backticks
    pat_item = re.compile(r" `(.*)` (\w+)")  # `column` TYPE pairs
    tables = {}
    st = ""      # tiny parser state: "", "table_begin", "table_end"
    name = ""    # name of the table currently being parsed
    fsql = dump_mysql(conn, schemas, "-d")
    for line in file(fsql).readlines():
        if line[:12] == "CREATE TABLE":
            st = "table_begin"
            name = pat_key.search(line).groups(0)[0]
            tables [name] = {}
            #print name
        elif st == "table_begin":
            # NOTE(review): comparing the 3-char slice line[:3] against what
            # appears here as a 2-char literal can never be True -- the
            # literal was likely two spaces + backtick; confirm against the
            # original file (whitespace may have been lost in extraction).
            if line[:3] == " `":
                res = pat_item.search(line).groups(0)
                item= res[0]; itype = res[1]
                tables[name][item] = {}
                tables[name][item]["type"] = itype
                tables[name][item]["val"] = ""
                #print item, itype
            else:
                st = "table_end"
    #print json.dumps(tables)
    return tables
if __name__ == "__main__":
    # Smoke test: load DB connection settings from sync.ini, then parse
    # the configured schemas into table descriptions.
    from sync import Config
    conf = Config("./sync.ini")
    conn = conf.parse_db_conn()
    schemas = conf.parse_schemas()
    #dump_mysql(conn, schemas)
    get_tables(conn, schemas)
|
13,438 | 5dd88a0800664d6e8d42caef784f2751ed44b2f1 | from models.detector import face_detector
import numpy as np
from models.parser import face_parser
import cv2, os
# Palette used to colorize parsing-map class indices (indexed 1..num_of_class).
# NOTE(review): images are written with cv2.imwrite, which interprets
# triples in BGR order -- confirm the intended colors.
part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
               [255, 0, 85], [255, 0, 170],
               [0, 255, 0], [85, 255, 0], [170, 255, 0],
               [0, 255, 85], [0, 255, 170],
               [0, 0, 255], [85, 0, 255], [170, 0, 255],
               [0, 85, 255], [0, 170, 255],
               [255, 255, 0], [255, 255, 85], [255, 255, 170],
               [255, 0, 255], [255, 85, 255], [255, 170, 255],
               [0, 255, 255], [85, 255, 255], [170, 255, 255]]
def show_face_bbox(img_path):
    """Detect the first face in an image and save a copy with its box drawn.

    :param img_path: path of the input image
    :return: None; writes ./result/test_bbox.jpg
    """
    if not os.path.exists("./result"):
        print("make dir!")
        os.mkdir("./result")
    im = cv2.imread(img_path)
    fd = face_detector.FaceAlignmentDetector()
    bboxes = fd.detect_face(im, with_landmarks=False)
    # First four values are the box coordinates; given the rectangle call
    # below they appear ordered (y0, x0, y1, x1) -- confirm with the
    # detector's documentation.  (Removed the unused `score` local that
    # read the trailing confidence value.)
    ret = bboxes[0][0:4]
    print(ret)
    cv2.rectangle(im, (int(ret[1]), int(ret[0])), (int(ret[3]), int(ret[2])), (0, 255, 0), 2)
    cv2.imwrite("./result/test_bbox.jpg", im)
def show_face_parser(img_path, save_img=True):
    """Segment a face image and split it into foreground/background by mask.

    Parsing-map classes 1, 10 and 14 are merged into the binary mask
    (NOTE(review): their semantic meaning depends on the parser's label
    set -- confirm against face_parser documentation).

    :param img_path: path of the image to parse
    :param save_img: when True, also writes colorized/debug images to ./result
    :return: (masked foreground, masked background, blurred binary mask)
    """
    im = cv2.imread(img_path)
    print(im.shape)
    h, w = im.shape[0:2]
    fp = face_parser.FaceParser()
    # fp.set_detector(fd) # fd = face_detector.FaceAlignmentDetector()
    parsing_map = fp.parse_face(im, bounding_box=None, with_detection=False)
    # NOTE(review): `map` shadows the builtin.
    map = parsing_map[0].reshape(h, w, 1)
    mask1 = map == 10
    mask2 = map == 1
    mask3 = map == 14
    mask = (mask1 + mask2 + mask3).astype(np.uint8)
    # mask = cv2.GaussianBlur(mask, (5, 5), 0)
    # Foreground is extracted with the *unblurred* mask...
    img_mask_fg = cv2.bitwise_and(im, im, mask=mask)
    mask_inv = cv2.bitwise_not(mask * 255)
    # mask_inv = cv2.GaussianBlur(mask_inv, (5, 5), 0)
    # ...and only the returned/saved mask is blurred afterwards.
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    img_mask_bg = cv2.bitwise_and(im, im, mask=mask_inv)
    num_of_class = 17
    if save_img:
        map = cv2.cvtColor(map, cv2.COLOR_GRAY2BGR)
        map_color = np.zeros_like(map)
        # Colorize each class index with its palette entry.
        for pi in range(1, num_of_class + 1):
            # print(pi, part_colors[pi])
            index = np.where(map == pi)
            map_color[index[0], index[1], :] = part_colors[pi]
        cv2.imwrite("./result/test_seg.jpg", map_color)
        cv2.imwrite("./result/test_mask.jpg", mask * 255)
        cv2.imwrite("./result/img_mask_fg.jpg", img_mask_fg)
        cv2.imwrite("./result/img_mask_bg.jpg", img_mask_bg)
        print("Mask saved!")
    return img_mask_fg, img_mask_bg, mask
def fast_guideFilter(I, p, winSize, eps, s):
    """Fast guided filter (He et al.): filter image p guided by image I.

    :param I: guide image, float values
    :param p: input image to be filtered
    :param winSize: (w, h) box-filter window at full resolution
    :param eps: regularization; larger values smooth more
    :param s: subsampling factor for the fast approximation
    :return: filtered image q, same size as I
    """
    h, w = I.shape[:2]
    size = (int(round(w * s)), int(round(h * s)))
    small_I = cv2.resize(I, size, interpolation=cv2.INTER_CUBIC)
    # BUG FIX: the original resized I again here instead of p, so the filter
    # input was silently replaced by the guide.  (The call site in this file
    # passes p identical to I, so its output is unchanged, but the function
    # was wrong for any p != I.)
    small_p = cv2.resize(p, size, interpolation=cv2.INTER_CUBIC)
    X = winSize[0]
    small_winSize = (int(round(X * s)), int(round(X * s)))
    # Local means of guide, input and their products.
    mean_small_I = cv2.blur(small_I, small_winSize)
    mean_small_p = cv2.blur(small_p, small_winSize)
    mean_small_II = cv2.blur(small_I * small_I, small_winSize)
    mean_small_Ip = cv2.blur(small_I * small_p, small_winSize)
    # Local variance of I and covariance of (I, p).
    var_small_I = mean_small_II - mean_small_I * mean_small_I
    cov_small_Ip = mean_small_Ip - mean_small_I * mean_small_p
    # print(var_small_I.mean())
    # print(cov_small_Ip.mean())
    # if var_small_I.mean() >= 0.009:
    #     eps = 0.01
    # Per-pixel linear coefficients q = a*I + b.
    small_a = cov_small_Ip / (var_small_I + eps)
    small_b = mean_small_p - small_a * mean_small_I
    mean_small_a = cv2.blur(small_a, small_winSize)
    mean_small_b = cv2.blur(small_b, small_winSize)
    # Upsample the coefficients back to full resolution and apply them.
    size1 = (w, h)
    mean_a = cv2.resize(mean_small_a, size1, interpolation=cv2.INTER_LINEAR)
    mean_b = cv2.resize(mean_small_b, size1, interpolation=cv2.INTER_LINEAR)
    q = mean_a * I + mean_b
    return q
def guideFilter(img):
    """Self-guided filtering of `img` at three radius/eps settings.

    :return: three filtered images for (32, 2000), (64, 1000), (32, 1000)
    """
    guide = img
    settings = ((32, 2000), (64, 1000), (32, 1000))
    results = [
        cv2.ximgproc.guidedFilter(guide=guide, src=img, radius=r, eps=e, dDepth=-1)
        for r, e in settings
    ]
    return results[0], results[1], results[2]
if __name__ == '__main__':
    img_path = "./1.jpeg"
    # Split the image into face foreground, background and mask.
    fg, bg, mask_fg = show_face_parser(img_path, True)
    ## guided filter
    # dst1, dst2, dst3 = guideFilter(fg)
    #
    # dst1 = cv2.add(dst1, bg)
    # dst2 = cv2.add(dst2, bg)
    # dst3 = cv2.add(dst3, bg)
    #
    #
    # cv2.imwrite("./result/image_eps50.jpg", dst1)
    # cv2.imwrite("./result/image_eps500.jpg", dst2)
    # cv2.imwrite("./result/image_eps1000.jpg", dst3)
    ## Fast guilded filter
    # Choose eps from the foreground's intensity spread (std-dev under mask).
    gray = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    var = cv2.meanStdDev(gray, mask=mask_fg)
    print(var)
    eps = 0.001 if var[1] < 40 else 0.01
    print(eps)
    winSize = (16, 16) # convolution kernel
    # image = cv2.resize(fg, None, fx=0.8, fy=0.8, interpolation=cv2.INTER_CUBIC)
    # Normalize to [0, 1]; p == I means the filter acts as edge-aware smoothing.
    I = fg / 255.0 #
    p = I
    s = 3 # step length
    guideFilter_img = fast_guideFilter(I, p, winSize, eps, s)
    guideFilter_img = guideFilter_img * 255 # (0,1)->(0,255)
    guideFilter_img[guideFilter_img > 255] = 255
    guideFilter_img = np.round(guideFilter_img)
    guideFilter_img = guideFilter_img.astype(np.uint8)
    # Re-compose the smoothed foreground with the untouched background.
    guideFilter_img = cv2.add(guideFilter_img, bg)
    # Soften the seam: blur only along the foreground's contour.
    img_zero = np.zeros_like(fg)
    ret, binary = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img_zero, contours, -1, (255, 255, 255), 3)
    blurred_img = guideFilter_img
    output = np.where(img_zero == np.array([255, 255, 255]), cv2.GaussianBlur(blurred_img, (5, 5), 0), blurred_img)
    cv2.imwrite("./result/mask.jpg", img_zero)
    cv2.imwrite("./result/post.jpg", output)
    cv2.imwrite("./result/winSize_16.jpg", guideFilter_img)
|
13,439 | 0a45ac8d436f16359163b89b9ad21a855c5d4b3f | class LogUtil:
def __init__(self):
"""
Initiate Log Util.
This Class contains utilities for helping with Logging
"""
self.META_AE_IP = 'HTTP_X_APPENGINE_USER_IP'
self.FORWARDED_FOR = 'HTTP_X_FORWARDED_FOR'
print('{} - Initialized'.format(__name__))
def get_ip(self, request):
"""
Get Ip from request
:param request: Http Request Object
:return: Ip of the player connected
"""
return request.META.get(self.META_AE_IP) \
if self.META_AE_IP in request.META else request.META.get(self.FORWARDED_FOR)
|
13,440 | 7edd2d284ec6ee1c2f70fb5aac9fadee7fcec5b1 | import cv2
import numpy as np
kernel = np.ones((5,5),np.uint8)  # 5x5 structuring element for dilate/erode
print(kernel)
path = './archivos/futbol.jpg'
img = cv2.imread(path)
img = cv2.resize(img,(0,0),fx=0.3,fy=0.3)  # shrink to 30% for display
# Edge pipeline: gray -> Gaussian blur -> Canny -> dilate -> erode.
imGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imGray,(7,7),0)
imgCanny = cv2.Canny(imgBlur,100,200)
imgDilation = cv2.dilate(imgCanny,kernel,iterations=10)
imgEroded = cv2.erode(imgDilation,kernel,iterations=2)
# Show every intermediate stage in its own window.
cv2.imshow('lena',img)
cv2.imshow('GrayScale',imGray)
cv2.imshow('Img Blur',imgBlur)
cv2.imshow('Img Canny',imgCanny)
cv2.imshow('Img Dilation', imgDilation)
cv2.imshow('Img Erosion', imgEroded)
cv2.waitKey(0)  # block until a key press so the windows stay open
#cv2.destroyAllWindows |
13,441 | 8e18c7fed9d67ef518950b5490ef7703c537b947 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
np.set_printoptions(linewidth=500)
np.set_printoptions(precision=8)
import random
# In[ ]:
#ๅคงใใใ่ฟใ
def norm(r):
    """Return the Euclidean norm of the (possibly complex) vector r."""
    return np.sqrt(np.real(np.vdot(r, r)))
# In[ ]:
#ในใใณ๏ผใในใใณ๏ผใฎในใใใซใไฝใ๏ผ่ฆๆ ผๅ่พผใฟ๏ผ
def spin1(s1, s0, sm1):
    """Pack three components into a unit-normalized spin-1 spinor."""
    vec = np.array([s1, s0, sm1])
    return vec / norm(vec)
def spin2(r2, r1, r0, rm1, rm2):
    """Pack five components into a unit-normalized spin-2 spinor."""
    vec = np.array([r2, r1, r0, rm1, rm2])
    return vec / norm(vec)
def rands1():
    """Return a random normalized spin-1 spinor; each complex component has
    real/imag parts drawn uniformly from [-1, 1]."""
    a=[random.uniform(-1,1) for i in range(6)]
    return(spin1(complex(a[0],a[1]),complex(a[2],a[3]),complex(a[4],a[5])))
def rands2():
    """Return a random normalized spin-2 spinor; each complex component has
    real/imag parts drawn uniformly from [-1, 1]."""
    a=[random.uniform(-1,1) for i in range(10)]
    return(spin2(complex(a[0],a[1]),complex(a[2],a[3]),complex(a[4],a[5]),complex(a[6],a[7]),complex(a[8],a[9])))
# In[ ]:
# Spin operators: matrix representations of the angular-momentum components
# F_x, F_y, F_z in the F=1 (3x3) and F=2 (5x5) bases.
rt2=np.sqrt(2.0)
F3x=np.array([[0.0,rt2,0.0],
              [rt2,0.0,rt2],
              [0.0,rt2,0.0]
              ])/2.0
F3y=np.array([[0.0,rt2,0.0],
              [-rt2,0.0,rt2],
              [0.0,-rt2,0.0]
              ])/2.0j
F3z=np.array([[1.0,0.0,0.0],
              [0.0,0.0,0.0],
              [0.0,0.0,-1.0]
              ])
rt6=np.sqrt(6.0)
F5x=(np.array([[0.0,2.0,0.0,0.0,0.0],
               [2.0,0.0,rt6,0.0,0.0],
               [0.0,rt6,0.0,rt6,0.0],
               [0.0,0.0,rt6,0.0,2.0],
               [0.0,0.0,0.0,2.0,0.0]
               ]))/2.0
F5y=(np.array([[0.0,2.0,0.0,0.0,0.0],
               [-2.0,0.0,rt6,0.0,0.0],
               [0.0,-rt6,0.0,rt6,0.0],
               [0.0,0.0,-rt6,0.0,2.0],
               [0.0,0.0,0.0,-2.0,0.0]
               ]))/2.0j
F5z=np.array([[2.0,0.0,0.0,0.0,0.0],
              [0.0,1.0,0.0,0.0,0.0],
              [0.0,0.0,0.0,0.0,0.0],
              [0.0,0.0,0.0,-1.0,0.0],
              [0.0,0.0,0.0,0.0,-2.0]
              ])
# In[ ]:
#A_0^2 -- matrix used by A0() below to form the pair amplitude r . P02 . r.
P02=np.array([[0.0,0.0,0.0,0.0,1.0],
              [0.0,0.0,0.0,-1.0,0.0],
              [0.0,0.0,1.0,0.0,0.0],
              [0.0,-1.0,0.0,0.0,0.0],
              [1.0,0.0,0.0,0.0,0.0]
              ])/np.sqrt(5.0)
def A0(r):
    """Return |r . P02 . r|^2, the squared magnitude of the A_0 pair
    amplitude of the spin-2 spinor r (a real scalar)."""
    F=np.dot(r,np.dot(P02,r))
    Fans=np.dot(F.conjugate(),F).real
    return(Fans)
# In[ ]:
#F_(m)^(f)
def F1(s):
    """Return the real 3-vector of spin expectations <s|F|s> for F=1."""
    Fx=np.dot(s.conjugate(),np.dot(F3x,s))
    Fy=np.dot(s.conjugate(),np.dot(F3y,s))
    Fz=np.dot(s.conjugate(),np.dot(F3z,s))
    return(np.array([Fx,Fy,Fz]).real)
def F2(r):
    """Return the real 3-vector of spin expectations <r|F|r> for F=2."""
    Fx=np.dot(r.conjugate(),np.dot(F5x,r))
    Fy=np.dot(r.conjugate(),np.dot(F5y,r))
    Fz=np.dot(r.conjugate(),np.dot(F5z,r))
    return(np.array([Fx,Fy,Fz]).real)
def F11(s):
    """Return |<F>_1|^2, the squared spin expectation of the F=1 spinor."""
    F=np.dot(F1(s),F1(s))
    return(F)
def F22(r):
    """Return |<F>_2|^2, the squared spin expectation of the F=2 spinor."""
    F=np.dot(F2(r),F2(r))
    return(F)
def F12(s,r):
    """Return <F>_1 . <F>_2, the inter-species spin coupling term."""
    F=np.dot(F1(s),F2(r))
    return(F)
# In[ ]:
# P_1^12 coefficients (Clebsch-Gordan-style factors taken from the paper
# referenced by the original author).
r01=np.sqrt(1.0/10.0)
r04=np.sqrt(2.0/5.0)
r03=np.sqrt(3.0/10.0)
r06=np.sqrt(3.0/5.0)
def P112beta(s,r):
    """Return the real P_1^12 amplitude: sum of |A_1m|^2 for m = +1, 0, -1,
    built from the F=1 spinor s and the F=2 spinor r."""
    A11=(s[0]*r[2]*r01-s[1]*r[1]*r03+s[2]*r[0]*r06)
    A10=(s[0]*r[3]*r03-s[1]*r[2]*r04+s[2]*r[1]*r03)
    A1m1=(s[0]*r[4]*r06-s[1]*r[3]*r03+s[2]*r[2]*r01)
    ans=np.dot(A11.conjugate(),A11)+np.dot(A10.conjugate(),A10)+np.dot(A1m1.conjugate(),A1m1)
    return(ans.real)
# In[ ]:
#ใจใใซใฎใผใ่จ็ฎ
def Espin(c11,c12,c22,c112,c212,s,r):
    """Spin-interaction energy for coupling constants c11..c212 and the
    normalized spinors s (F=1) and r (F=2)."""
    ans=(c11*F11(s)+c12*F22(r)+c22*A0(r))/2.0+c112*F12(s,r)+c212*P112beta(s,r)
    return(np.real(ans))
# In[ ]:
#้ฉๅฝใช็งปๅ
def Move(s1, move):
    """Displace spinor s1 by `move` and renormalize to unit length."""
    shifted = s1 + move
    return shifted / norm(shifted)
# In[ ]:
#maxstepๅๅใใชใใชใใพใงๅปใฟhใงใจใใซใฎใผไฝใๆนใซ็งปๅ
def Eoptimize(c11,c12,c22,c112,c212,s1,s2,maxstep,h):
    """Stochastic descent: propose random perturbations of width h and keep
    only energy-lowering moves; stop after `maxstep` consecutive rejections.

    :return: (final energy, spinor s1, spinor s2)
    """
    Einit=10000.   # NOTE(review): unused
    ikeep=0        # consecutive rejected proposals
    while True:
        E=Espin(c11,c12,c22,c112,c212,s1,s2)
        test1=Move(s1,rands1()*h)
        test2=Move(s2,rands2()*h)
        Etest=Espin(c11,c12,c22,c112,c212,test1,test2)
        if E>Etest:
            # Accept: the proposed pair lowers the energy.
            s1=test1
            s2=test2
            E=Etest
            ikeep=0
        else:
            ikeep+=1
        if ikeep==maxstep:
            return(E,s1,s2)
# In[ ]:
# Step-width schedule: re-run the optimization with progressively smaller
# steps, moving toward lower energy.
def Eopt(c11,c12,c22,c112,c212):
    """Optimize from one random start, annealing the step width 1 -> 0.001."""
    s1test=rands1()
    s2test=rands2()
    Etest,s1test,s2test=Eoptimize(c11,c12,c22,c112,c212,s1test,s2test,100,1)
    Etest,s1test,s2test=Eoptimize(c11,c12,c22,c112,c212,s1test,s2test,100,.1)
    Etest,s1test,s2test=Eoptimize(c11,c12,c22,c112,c212,s1test,s2test,100,.01)
    Etest,s1test,s2test=Eoptimize(c11,c12,c22,c112,c212,s1test,s2test,100,.001)
    return(Etest,s1test,s2test)
# In[ ]:
#ๅๆๅคๅคใใฆไฝๅบฆใ่ฉฆใ
def Emin(c11,c12,c22,c112,c212):
    """Global search: restart Eopt until 50 consecutive restarts fail to
    improve on the best energy found so far.

    NOTE(review): if no restart ever beats the initial E=10000, `sans1` and
    `sans2` would be unbound at return -- confirm energies are always lower.
    """
    E=10000
    tall=50   # allowed consecutive non-improving restarts
    keep=0
    while True:
        Etest,s1test,s2test=Eopt(c11,c12,c22,c112,c212)
        if E>Etest:
            E=Etest
            sans1=s1test
            sans2=s2test
            keep=0
        else:
            keep+=1
        if keep==tall:
            return(E,sans1,sans2)
# In[ ]:
# Write the optimized states out to a data file.
# Sweep c22 over [-1, 1] at fixed c11 = c12 = 1, minimizing the energy for
# each coupling set and appending the result to a per-(c11, c22) text file.
RUN = 50
c11= RUN/50.
c12= RUN/50.
c112, c212=0.5, .0
for c22 in np.arange(-1.,1.02,.02):
    E,s1,s2=Emin(c11,c12,c22,c112,c212)
    ans=[c11,c12,c22,c112,c212,s1,s2]
    a_str=[str(a) for a in ans ]  # NOTE(review): unused
    with open("data2_{:+06.3f}_{:+06.3f}.txt".format(c11,c22), mode="a") as f:
        # One line per c22: the five couplings, then real/imag parts of
        # every spinor component.
        s = "{c[0]:6.3f} {c[1]:6.3f} {c[2]:6.3f} {c[3]:6.3f} {c[4]:6.3f}" " {s1[0].real:15.8E} {s1[0].imag:15.8E}" " {s1[1].real:15.8E} {s1[1].imag:15.8E}" " {s1[2].real:15.8E} {s1[2].imag:15.8E}" " {s2[0].real:15.8E} {s2[0].imag:15.8E}" " {s2[1].real:15.8E} {s2[1].imag:15.8E}" " {s2[2].real:15.8E} {s2[2].imag:15.8E}" " {s2[3].real:15.8E} {s2[3].imag:15.8E}" " {s2[4].real:15.8E} {s2[4].imag:15.8E}\n"
        f.write(s.format(c=[c11,c12,c22,c112,c212],s1=s1,s2=s2))
|
13,442 | 064432b66e4882c5b9ab0e544ce2971aba4bd16d | #1.ํจ์
print('#################### 1.ํจ์ ###################')
def add(num1, num2):
    """Return the sum of the two arguments."""
    result = num1 + num2
    return result
print(add(1,2))
def add_mul(num1, num2):
    """Return both the sum and the product as a tuple (sum, product)."""
    return (num1 + num2, num1 * num2)
print(add_mul(1,2))
my_add , my_mul = add_mul(1,2)  # tuple unpacking of the two return values
print(my_add)
print(my_mul)
#2. Modules
print('#################### 2.๋ชจ๋ ###################')
# from <package_name> import <module_name>
# -> Declare it in this form and then use the module's functions.
# Import the local module ch6_ and call its functions.
import ch6_
ch6_.animal1()
ch6_.animal2()
|
13,443 | 9a5d8aff68f1cc9b436294114d5b632eb83cbbd6 | # Bill Karr's Code for Assignment 3, Problem 1
from __future__ import division
import numpy as np
def qr_iteration(A, tol):
    """Compute the eigenvalues of A by shifted QR iteration with deflation.

    :param A: square matrix (not modified; a float copy is taken)
    :param tol: convergence tolerance on the off-diagonal norm
    :return: 1-D array of eigenvalues read off the converged diagonal

    Fix: the original tested ``A[i-1, :i-1]`` -- an off-by-one that skips
    the off-diagonal entries of row i entirely (for a 2x2 matrix the loop
    never iterates and the raw diagonal is returned).  The deflation test
    must look at row i left of the diagonal, ``A[i, :i]``.
    """
    A = np.asarray(A, dtype=float)
    n = len(A)
    for i in range(n-1, 0, -1):
        # Iterate until the entries left of A[i, i] vanish, which deflates
        # the i-th eigenvalue onto the diagonal.
        while np.linalg.norm(A[i, :i]) >= tol:
            sigma = A[i, i]  # shift taken from the trailing diagonal entry
            Q, R = np.linalg.qr(A - sigma*np.eye(n, n))
            A = np.dot(R, Q) + sigma*np.eye(n, n)
    return np.diag(A)
tol = 1e-16  # convergence tolerance for the off-diagonal norm
# Example 1: a non-symmetric 3x3 matrix; compare against numpy's eigvals.
A_1 = np.array([[2,3,2],[10,3,4],[3,6,1]])
eigenvalues_1 = qr_iteration(A_1.copy(), tol)
print "Matrix ="
print A_1
print "Computed eigenvalues: ", eigenvalues_1
print "Actual eigenvalues: ", np.linalg.eigvals(A_1)
# Example 2: a symmetric 3x3 matrix.
A_2 = np.array([[6,2,1],[2,3,1],[1,1,1]])
eigenvalues_2 = qr_iteration(A_2.copy(), tol)
print "Matrix ="
print A_2
print "Computed eigenvalues: ", eigenvalues_2
print "Actual eigenvalues: ", np.linalg.eigvals(A_2)
13,444 | 1fa35d0d288b5464dbb6da4f654b93f39c847535 | '''
ํผ๋ณด๋์น ์๋ 0๊ณผ 1๋ก ์์ํ๋ค. 0๋ฒ์งธ ํผ๋ณด๋์น ์๋ 0์ด๊ณ , 1๋ฒ์งธ ํผ๋ณด๋์น ์๋ 1์ด๋ค. ๊ทธ ๋ค์ 2๋ฒ์งธ ๋ถํฐ๋ ๋ฐ๋ก ์ ๋ ํผ๋ณด๋์น ์์ ํฉ์ด ๋๋ค.
์ด๋ฅผ ์์ผ๋ก ์จ๋ณด๋ฉด Fn = Fn-1 + Fn-2 (n>=2)๊ฐ ๋๋ค.
n=17์ผ๋ ๊น์ง ํผ๋ณด๋์น ์๋ฅผ ์จ๋ณด๋ฉด ๋ค์๊ณผ ๊ฐ๋ค.
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597
n์ด ์ฃผ์ด์ก์ ๋, n๋ฒ์งธ ํผ๋ณด๋์น ์๋ฅผ ๊ตฌํ๋ ํ๋ก๊ทธ๋จ์ ์์ฑํ์์ค.
์ฒซ์งธ ์ค์ n์ด ์ฃผ์ด์ง๋ค. n์ 20๋ณด๋ค ์๊ฑฐ๋ ๊ฐ์ ์์ฐ์ ๋๋ 0์ด๋ค.
'''
import sys
def fibonachi(n):
    """Return the n-th Fibonacci number (F0 = 0, F1 = 1).

    Iterative O(n) implementation; the original naive double recursion
    was O(2^n), which matters even for the problem's n <= 20 bound.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Read n from stdin and print the n-th Fibonacci number.
N = int(sys.stdin.readline())
print(fibonachi(N))
|
13,445 | a84e61d28571af1d0f51591745022556bc234ea8 | import numpy as np
from smart.ops import SealOps
from smart.seal_matrix import CipherMatrix
class SealKernel:
    """SVM kernel evaluator for SEAL-based homomorphic encryption.

    `vectors` are the support vectors and `seal_ops` provides homomorphic
    matrix operations.  Only `linear` goes through seal_ops; the other
    kernels apply numpy directly (NOTE(review): they appear to assume
    plaintext `features` despite the CipherMatrix annotation -- confirm).
    """
    def __init__(self, vectors, gamma, coef0, degree, kernel_name, seal_ops: SealOps):
        self.coef0 = coef0
        self.gamma = gamma
        self.degree = degree
        self.vectors = vectors
        self.seal_ops = seal_ops
        # Resolve the kernel function once at construction time.
        self.func = self.get_kernel_func(kernel_name=kernel_name)
    def linear(self, features: CipherMatrix):
        """Linear kernel evaluated homomorphically via seal_ops."""
        # Instead of vectors * features^T we calculate features * vectors^T and get W^T
        return self.seal_ops.dot_matrix_with_plain_matrix_transpose(features, self.vectors)
    def poly(self, features: CipherMatrix):
        """Polynomial kernel: (gamma * <v, x> + coef0) ** degree per vector."""
        return [pow((self.gamma * k) + self.coef0, self.degree) for k in np.dot(self.vectors, features)]
    def rbf(self, features: CipherMatrix):
        """RBF kernel: exp(-gamma * ||v - x||^2) per support vector."""
        return [np.exp(-self.gamma * k) for k in [np.sum(np.power(vector - features, 2)) for vector in self.vectors]]
    def sigmoid(self, features: CipherMatrix):
        """Sigmoid kernel: tanh(gamma * <v, x> + coef0) per vector."""
        return [np.tanh((self.gamma * k) + self.coef0) for k in np.dot(self.vectors, features)]
    def get_kernel_func(self, kernel_name):
        """Map a kernel name to its bound method; raises KeyError if unknown."""
        kernel_func_switcher = {'linear': self.linear, 'poly': self.poly, 'rbf': self.rbf, 'sigmoid': self.sigmoid}
        return kernel_func_switcher[kernel_name]
    def __call__(self, features):
        """Evaluate the configured kernel on `features`."""
        return self.func(features)
|
13,446 | 925d220066b3902f44a9645b1ac59f152025dcd3 | import requests
import time
import datetime
from Send_Notifications import Get_Technical_Owner ,First_mail
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from DBConnection_Sagar import Base,cert_data_sagar
import logging
# Two file loggers: one for errors, one for informational progress messages.
# NOTE(review): these Windows paths mix escaped and unescaped backslashes
# ("E:\\4-FlaskForms\logs\..."); the unescaped ones happen to be harmless
# sequences here, but raw strings would be safer -- confirm the paths.
# (Also note the trailing space inside the first logger name.)
logger_Insert_db_error = logging.getLogger('Certrenewal_Insert_db_error ')
Insert_db_error = logging.FileHandler('E:\\4-FlaskForms\logs\Certrenewal_warnign_Insert_db_error.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
Insert_db_error.setFormatter(formatter)
logger_Insert_db_error.addHandler(Insert_db_error)
logger_Insert_db_error.setLevel(logging.WARNING)
logger_Insert_db = logging.getLogger('Certrenewal_Insert_db_info ')
Insert_db_info = logging.FileHandler('E:\\4-FlaskForms\logs\Insert cert detail db.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
Insert_db_info.setFormatter(formatter)
logger_Insert_db.addHandler(Insert_db_info)
logger_Insert_db.setLevel(logging.INFO)
# F5 REST API credentials (left blank in source control).
USERNAME = ""
PASSWORD = ""
# Date/time format strings used to parse certificate expiry values.
formatDate = "%Y-%m-%d"
formatTime = "%H-%M-%S"
formatDateTime = "%b %d %H:%M:%S %Y %Z"
fmt = "%Y-%m-%d %H:%M:%S"
# Management IPs of the load balancers to scan.
prodServers = ['10.119.251.185','10.119.251.161','10.119.251.162',"10.119.251.166"]
# Default certificate-subject values (note: 'ogranistation' spelling is
# as in the original source).
ogranistation =""
city= "Stockholm"
state ="Stockholm"
country ="SE"
CN=""
SAN=""
# SQLAlchemy session bound to the local SQLite certificate database.
engine = create_engine(r'sqlite:///E:\Scripts\Project_Cert\certdetail_sagar.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def Insert_Into_Database (cert_name,common_name,organisation,ou,country,locality,email,state,technical_owner,expiration_date,renewal_status,serverip,partition):
    """Persist one certificate's details as a new cert_data_sagar row.

    Uses the module-level SQLAlchemy session and commits immediately.
    NOTE(review): 'orangnisation' is the (misspelled) attribute name as
    declared on the model class; the spelling here must match the model.
    """
    cert_data_obj= cert_data_sagar()
    cert_data_obj.cert_name=cert_name
    cert_data_obj.common_name=common_name
    cert_data_obj.orangnisation=organisation
    cert_data_obj.ou=ou
    cert_data_obj.country=country
    cert_data_obj.locality=locality
    cert_data_obj.email=email
    cert_data_obj.state=state
    cert_data_obj.technical_owner=technical_owner
    cert_data_obj.expiration_date=expiration_date
    cert_data_obj.renewal_status=renewal_status
    cert_data_obj.serverip=serverip
    cert_data_obj.partition=partition
    session.add(cert_data_obj)
    session.commit()
def getServerName(server):
    """Map a load-balancer management IP to its human-readable name.

    Returns None for an unknown IP, matching the original fall-through.
    """
    names = {
        "10.119.251.162": "Internet/DMZ Production Load Balancer (seistolbp01)",
        "10.119.251.161": "Internet/DMZ Verification Load Balancer (seistolbv01)",
        "10.119.251.185": "Datacenter Production Load Balancer (sezstolbp01)",
        "10.119.251.166": "Datacenter Test Load Balancer (sezstolbt02)",
    }
    return names.get(server)
def getCertInfo(server):
    """Fetch every SSL cert file entry from an F5 via its REST API.

    NOTE: verify=False disables TLS certificate validation for the
    management connection.
    :return: the 'items' list of the JSON response
    """
    req = requests.get("https://" + server + "/mgmt/tm/sys/file/ssl-cert?expandSubcollections=true",
                       auth=(USERNAME, PASSWORD), verify=False)
    res = req.json()
    return res['items']
def get_cert_parameters(json):
    """Parse an F5 certificate entry's subject string into a dict.

    Splits the 'subject' field ("K=V,K=V,...") into a dict, then fills in
    organization defaults for any missing standard subject keys.

    :param json: one certificate item as returned by the F5 REST API
    :return: (subject dict, partition name)
    """
    subjectraw = json['subject']
    cert_name = json['name']
    partition = json['partition']
    attributes = subjectraw.split(",")
    subject = {}
    try:
        for attr in attributes:
            subject[attr.split("=")[0]] = attr.split("=")[1]
        # Fill in defaults for any missing subject components.
        if 'O' not in subject:
            print("Missing O key, setting O to ICA")
            subject['O'] = "ICA AB"
        if 'OU' not in subject:
            print("Missing OU key ,setting OU to NETWORK INFRA")
            subject['OU'] = "NETWORK INFRA"
        if 'CN' not in subject:
            print("Missing CN key ,print invalid certificate")
        if 'L' not in subject:
            print("Missing L key ,setting L to Stockholm")
            subject['L'] = "Stockholm"
        if 'C' not in subject:
            print("Missing C key ,setting C to Sweden")
            subject['C'] = 'SE'
        if 'emailAddress' not in subject:
            print("Missing email key ,setting emailaddress to cominfo@ica.se ")
            subject['emailAddress'] = 'cominfo@ica.se'
        if 'ST' not in subject:
            print("Missing ST key ,setting ST to Stockholm ")
            subject['ST'] = 'Stockholm'
    except IndexError:
        # Fix: this was a bare `except:` that swallowed every error
        # (including KeyboardInterrupt).  Only a malformed "K=V" pair with
        # no '=' can raise here, via IndexError from split("=")[1].
        print(cert_name, " inside exception", len(subject))
    return subject, partition
def validate_certname():
    """Placeholder validation hook; currently always reports False."""
    return False
def Cert_Info_To_DB():
    """Scan every load balancer, store soon-expiring certificate details in
    the database, and notify the technical owner.

    NOTE(review): the log messages say "60 days" but the actual window
    checked below is 450-800 days, and the insert is restricted to one
    hard-coded test certificate -- both look like temporary test settings;
    confirm before production use.
    """
    logger_Insert_db.info("Attempting to collect data from Load Balancers")
    for server in prodServers:
        print(server)
        logger_Insert_db.info("Connecting to load Balancer "+ server)
        items = getCertInfo(server)
        logger_Insert_db.info("Received certificate data from " + server)
        for item in items:
            cert_name = item['name']
            # Only consider certs issued by the internal device CA.
            if "CN=Issuing CA Device"in item['issuer'] :
                # Normalize the expiration string to a date-only datetime.
                temp = datetime.datetime.strptime(item['expirationString'] , formatDateTime)
                certExpirationTime = datetime.datetime.strptime(str(temp.date()), formatDate)
                logger_Insert_db.info("Checking whether certificate expires witin 60 days :"+ cert_name)
                if (int((certExpirationTime.date() - datetime.datetime.date(datetime.datetime.now())).days)) > 450 and int(
                        (certExpirationTime.date() - datetime.datetime.date(datetime.datetime.now())).days) <= 800:
                    logger_Insert_db.info("Certificate expires with in 60 days " + cert_name)
                    logger_Insert_db.info("Collecting data for " + cert_name)
                    subject,partition = get_cert_parameters(item)
                    technical_owner = Get_Technical_Owner(subject['OU'])
                    print(technical_owner,"technucal owner" ,cert_name)
                    if cert_name=="testSndt.ica.ia-hc.net20180219.crt" :
                        try:
                            logger_Insert_db.info("Inserting collected certificate details in Database : "+ cert_name +" "+server)
                            Insert_Into_Database(cert_name,subject['CN'],subject['O'],subject['OU'],subject['C'],subject['L'],subject['emailAddress'],subject['ST'],technical_owner,certExpirationTime,
                                                 "initial",server,partition)
                            logger_Insert_db.info("Insertion Sucessfully completed "+cert_name+" "+server )
                            First_mail(cert_name,certExpirationTime.date(),technical_owner)
                        except Exception as e :
                            #print("Error While inserting data for certifcate "+ cert_name )
                            # A primary-key violation just means the row exists already.
                            if "UNIQUE constraint failed:" in str(e):
                                logger_Insert_db.info("Data for Vertificate already exists in database " + cert_name + " : Primary Key Violation can be ignored"+"\n\n")
                            else :
                                logger_Insert_db_error.error("Error While inserting data for certifcate "+ cert_name)
                                logger_Insert_db_error.error(e)
                else :
                    logger_Insert_db.info("certificate not expiring in 60 days " + cert_name + " " + server+"\n")
    session.close()
def get_database_data():
    """Print stored details for the hard-coded test certificate.

    Opens its own session (shadowing the module-level one) and closes it.
    """
    session = DBSession()
    cert_d = session.query(cert_data_sagar).all()
    for cert in cert_d:
        # Filter down to the single certificate used for testing.
        if cert.cert_name== "testSndt.ica.ia-hc.net20180219.crt" :
            print(cert.common_name ,cert.country,cert.cert_name ," ", cert.expiration_date ," ", cert.technical_owner ," ", cert.partition ,cert.verifier_email ,cert.dateTime1 ,cert.dateTime2 )
    session.close()
# Script entry: collect cert data into the DB, dump it, then close the session.
Cert_Info_To_DB()
get_database_data()
session.close()
|
13,447 | 70e458602947075475efae3d984038ac70dcce33 | import serial.tools.list_ports
import urllib
from md5 import md5
from time import time
import socket
IPADDR = '209.20.80.141'
PORTNUM = 11311
def usage():
    """Print usage help, list the machine's serial ports, and exit(1).

    (Python 2 syntax: print statements.)
    """
    print 'Usage: cicada.py <serial device> "me@example.com" "My Name" "160 Varick, New York, NY 10031"'
    print
    print "Run cicada.py to upload your sensor's temperature to WNYC and learn how to check on the status of your upload"
    print
    print "Your system has the following serial ports; your thermometer will be one of them:"
    serial.tools.list_ports.main()
    import sys
    sys.exit(1)
def send(temp, email, name, addr):
    """Fire one UDP "cicada" packet with the reading and uploader identity.

    :param temp: integer temperature in Fahrenheit
    :param email, name, addr: uploader identity; URL-quoted before sending
    """
    email, name, addr = map(urllib.quote_plus, (email, name, addr))
    packet = "cicada 0 0 2 %s %s %s %s" % (temp, email, name, addr)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    s.connect((IPADDR, PORTNUM))
    s.send(packet)
    s.close()
def main(argv):
    """Parse CLI args, read the sensor serial stream, and upload readings.

    Expects argv = [script, device, email, name, addr].  Lines starting
    with 'Temp in F' are parsed and uploaded; a 60-second watchdog aborts
    when no valid line arrives.  (Python 2 syntax.)
    """
    if len(argv) != 5:
        usage()
    _, device, email, name, addr = argv
    if '@' not in email:
        usage()
    print "You can track your data uploads at http://project.wnyc.net/cicada/raw/upload/" + md5(email).hexdigest()
    print
    print "Trying to connect to sensor"
    ser = serial.Serial(device, 19200, timeout=5)
    t = time()          # time of the last valid reading (watchdog)
    first_time = True   # True until the first valid line is seen
    while True:
        line = ser.readline()
        if line.startswith('Temp in F'):
            if first_time:
                print "Temp sensor detected!"
                first_time = False
            temp = int(float(line.split()[-1]))
            send(temp, email, name, addr)
            print temp, "F"
            t = time()  # reset the watchdog on every valid reading
        if time() - t > 60:
            if first_time:
                print "No temp sensor detected"
            else:
                print "Temp sensor died"
            break
# Script entry point.
if __name__ == "__main__":
    import sys
    main(sys.argv)
|
13,448 | 8a59fe51813b23d00a0f55a603a18a2a3bd93554 | import random
def numero_aleatorio():
    """Build the secret number: a 5-character string of distinct digits 1-8.

    Fix: the original tested membership of one random draw (an int, against
    a list of strings -- always absent) and then appended a *different*
    fresh draw, so repeated digits could slip into the secret.
    """
    lista = []
    while len(lista) != 5:
        num = str(random.randrange(1, 9))
        if num not in lista:
            lista.append(num)
    numero = "".join(lista)
    return numero
def comprueba(secreto, numero):
    """Score a guess against the secret number.

    Returns a dict with:
      'muertos' -- digits matching in value AND position
      'heridos' -- digits present in the secret but at a different position

    Fix: the original counting loops were commented out, so the function
    always reported 0/0 unless the guess was an exact match.  This
    implements the commented-out intent (secret digits are unique, so each
    guess digit is counted at most once).
    """
    comprobar = {
        'muertos': 0,
        'heridos': 0
    }
    # An exact match means all five digits are "dead".
    if secreto == numero:
        comprobar['muertos'] = 5
    else:
        muertos = sum(1 for s, n in zip(secreto, numero) if s == n)
        heridos = sum(1 for i, n in enumerate(numero)
                      if n in secreto and secreto[i] != n)
        comprobar['muertos'] = muertos
        comprobar['heridos'] = heridos
    return comprobar
if __name__ == "__main__":
    # Attempt counter
    intentos=0
    # Loop-exit flag
    acierto=False
    # Draw the secret number
    n_ale=numero_aleatorio()
    # Keep asking until the player guesses it
    while acierto!=True:
        numero=input("introduzca un numero:\t")
        # Check that the guess has five characters
        # NOTE(review): only the length is validated, not that all
        # characters are digits.
        if len(numero)!=5:
            print("Ese numero no tiene cinco cifras")
        else:
            # Score the guess
            diccionario=comprueba(n_ale,numero)
            # Count the attempt
            intentos+=1
            # Five "muertos" means the guess is exact
            if diccionario['muertos']==5:
                print('Acertaste en',intentos,'intentos')
                # Flip the flag so the loop ends
                acierto=True
            else:
                print(diccionario['muertos'],'muertos',diccionario['heridos'],'heridos')
13,449 | 56312a8ac462a5e62acd84aa4459b659ee9bba3f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: David Beam, db4ai
Date: 18 January 2018
Description:
"""
# Include files
import gym
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from random import uniform
# Include custom files
import functions as func
import Configuration as cfg
import StateModel as sm
# -------------------------------------------------------------------------
# Action Network Functions
def action_output(w1, w2, X):
    """Forward pass of the two-layer action network.

    Both layers apply the bipolar sigmoid (1-e^-z)/(1+e^-z).
    :return: (u, g) -- network output and hidden-layer activations
    """
    def squash(z):
        # Bipolar sigmoid, identical to the original expression.
        return (1 - np.exp(-z)) / (1 + np.exp(-z))
    g = squash(np.dot(X, w1))
    u = squash(np.dot(g, w2))
    return u, g
def action_cost(J):
    """Quadratic cost of the critic signal J: J**2 / 2."""
    return (J ** 2) / 2.0
def action_update(action_w1, action_w2, critic_factor, error, X, u, g):
    """One learning step for the action network weights.

    :param critic_factor: backpropagated sensitivity from critic_update
    :param error: scalar training error for the action objective
    :param X: 1x4 state input; u: network output; g: hidden activations
    :return: (w1, w2) updated and L1-normalized weight matrices
    (Shapes are hard-coded for the 24-unit hidden layer.)
    """
    # Change in w2
    d_w2 = (0.5 * error * (1 - np.power(u,2)) * g * critic_factor).reshape(24,1)
    # Change in w1
    d_w1 = np.outer(X,0.5 * error * (1 - np.power(u,2)) * action_w2 * 0.5 * (1 - np.power(g.reshape(24,1),2)) * critic_factor)
    # Normalize the weights
    w1 = (action_w1 + d_w1) / np.linalg.norm(action_w1 + d_w1, ord=1)
    w2 = (action_w2 + d_w2) / np.linalg.norm(action_w2 + d_w2, ord=1)
    return w1, w2
# -------------------------------------------------------------------------
# Critic Network Functions
def critic_output(w1, w2, input):
    """Forward pass of the critic network.

    :return: (J, _p) -- estimated cost-to-go and hidden activations
    """
    def squash(z):
        # Bipolar sigmoid, identical to the original expression.
        return (1 - np.exp(-z)) / (1 + np.exp(-z))
    _p = squash(np.dot(input, w1))
    J = np.dot(_p, w2)
    return J, _p
def critic_cost(alpha, J, Jlast, r):
    """Squared temporal-difference error: 0.5*(alpha*J - (Jlast - r))**2."""
    td_error = alpha * J - (Jlast - r)
    return 0.5 * td_error ** 2
def critic_update(critic_w1, critic_w2, error, x_a, _p, alpha):
    """One learning step for the critic network weights.

    :param error: temporal-difference error
    :param x_a: 6-element state+action input vector
    :param _p: hidden activations from critic_output
    :param alpha: TD discount factor
    :return: (w1, w2, critic_factor) -- L1-normalized weights plus the
        scalar sensitivity passed on to action_update
    (Shapes are hard-coded: 6 inputs, 24 hidden units.)
    """
    # Change in w2
    d_w2 = (alpha*error * _p).reshape(24,1)
    # Change in w1
    temp_a = x_a.reshape(6,1)
    temp_b = alpha * error * critic_w2 * (0.5*(1-np.power(_p,2).reshape(24,1)))
    d_w1 = np.outer(temp_a,temp_b)
    # Normalize the weights
    w1 = (critic_w1 + d_w1) / np.linalg.norm(critic_w1 + d_w1, ord=1)
    w2 = (critic_w2 + d_w2) / np.linalg.norm(critic_w2 + d_w2, ord=1)
    # Compute the critic factor used to update the action network.
    # NOTE(review): w1[4,:] looks like the row for the 5th input (the
    # action u) -- confirm the input ordering.
    critic_factor = np.sum( 0.5*w2*(1-np.power(_p,2)) * w1[4,:] )
    # Output
    return w1, w2, critic_factor
# -------------------------------------------------------------------------
# Other Functions
# Plot the best results
def plot_results(angle_hist,vel_hist,j_hist,u_hist,x_hist,aw1_hist,aw2_hist,cw1_hist,cw2_hist):
    """Plot histories of the best episode.

    Only the pendulum angle and cart x-distance plots are active; the
    remaining panels are kept commented out for optional debugging.
    """
    plt.title("pendulum angle over time", fontsize=14)
    plt.plot(angle_hist)
    #plt.show()
    #plt.title("cart vel over time", fontsize=14)
    #plt.plot(vel_hist)
    #plt.show()
    #plt.title("cost-to-go", fontsize=14)
    #plt.plot(j_hist)
    #plt.show()
    #plt.title("force over time", fontsize=14)
    #plt.plot(u_hist)
    #plt.show()
    plt.title("x-dist over time", fontsize=14)
    plt.plot(x_hist)
    plt.show()
    #plt.title("action w1 mean over time", fontsize=14)
    #plt.plot(aw1_hist)
    #plt.show()
    #plt.title("action w2 mean over time", fontsize=14)
    #plt.plot(aw2_hist)
    #plt.show()
    #plt.title("critic w1 mean over time", fontsize=14)
    #plt.plot(cw1_hist)
    #plt.show()
    #plt.title("critic w2 mean over time", fontsize=14)
    #plt.plot(cw2_hist)
    #plt.show()
# ---------------------------------------------------------------------------
# Main training script: actor-critic learning on the OpenAI Gym CartPole task.
# NOTE(review): indentation was lost in this copy of the file; the nesting
# below (everything from the action selection through `Jlast = J` inside the
# while loop) is a reconstruction -- confirm against the original source.
# ---------------------------------------------------------------------------
func.print_header()
print("Defining Variables")
# Initialize variables
alpha = 0.9  # scaling factor used in the critic cost (see critic_cost)
print("Defining Model")
# Setup the NN Model: action net 4->24->1, critic net 6->24->1, with
# normally distributed initial weights (mean/std taken from cfg)
action_w1 = np.ones((4,24),dtype=float) * np.random.normal(cfg.init_weights_bias_mean_val,cfg.init_weights_bias_std_dev,(4,24))
action_w2 = np.ones((24,1),dtype=float) * np.random.normal(cfg.init_weights_bias_mean_val,cfg.init_weights_bias_std_dev,(24,1))
critic_w1 = np.ones((6,24),dtype=float) * np.random.normal(cfg.init_weights_bias_mean_val,cfg.init_weights_bias_std_dev,(6,24))
critic_w2 = np.ones((24,1),dtype=float) * np.random.normal(cfg.init_weights_bias_mean_val,cfg.init_weights_bias_std_dev,(24,1))
# Create the CartPole environment for the physics model
env = gym.make('CartPole-v0')
print("Training Model")
# Train the Model -- histories of the best (longest) run seen so far
best_angle_hist = []
best_vel_hist = []
best_j_hist = []
best_u_hist = []
best_x_hist = []
best_aw1_hist = []
best_aw2_hist = []
best_cw1_hist = []
best_cw2_hist = []
Done = False  # set once a run reaches the maximum step count
max_i = 0     # longest run (in steps) seen so far
for epoch in range(cfg.epochs):
    if Done == True:
        break
    # Initial Values
    i = 0
    fail = False
    t = 0
    dt = 0.02  # physics timestep in seconds
    Jlast = np.array([0]).reshape(1,1)
    # Initial state of the physics model
    state = np.array([0,0,0,0,0]).reshape(1,5) # angle,ang_vel,ang_acc,x,x_vel
    # Random initial force: +-10 N, mapped to OpenAI discrete action 1/0
    if np.random.uniform(0,1) < 0.5:
        u = 10
        action_u = 1
    else:
        u = -10
        action_u = 0
    # Calculate the change in state then the new state matrix
    # Use the OpenAI Cart-Pole model
    # state = [x, xdot, theta, thedadot]
    observation = env.reset()
    env._max_episode_steps = 600001  # keep gym from terminating before our own cap
    state, reward, done, info = env.step(action_u)  # old gym 4-tuple step API
    # Network input: [angle(deg), angular vel(deg/s), x, x velocity]
    X = np.array([np.rad2deg(state[2]),np.rad2deg(state[3]),state[0],state[1]]).reshape(1,4)
    # Placeholders for this run's histories
    angle_hist = []
    vel_hist = []
    j_hist = []
    u_hist = []
    x_hist = []
    aw1_hist = []
    aw2_hist = []
    cw1_hist = []
    cw2_hist = []
    # Loop through the iterations until fail or pass
    while fail==False and i < 600000:
        # Action: network output decides the sign of a fixed +-10 N force
        u, g = action_output(action_w1,action_w2,X)
        if u >= 0:
            u = 10 # force
            action_u = 1 # OpenAI action state
        elif u < 0:
            u = -10 # force
            action_u = 0 # OpenAI action state
        # Render the OpenAI movie
        if cfg.renderOpenAImodel:
            env.render()
        # Calculate the change in state then the new state matrix
        # Use the OpenAI Cart-Pole model
        # state = [x, xdot, theta, thedadot]
        state, reward, done, info = env.step(action_u)
        X = np.array([np.rad2deg(state[2]),np.rad2deg(state[3]),state[0],state[1]]).reshape(1,4)
        # Determine the success feedback, r: 0 while the pole stays within
        # +-12 degrees and the cart within +-2.4 m, otherwise -1 (failure)
        #angle = np.rad2deg(X[0,0])%360
        angle = X[0,0]%360
        if angle > 180:
            angle = angle - 360  # map to (-180, 180]
        if angle <= 12 and angle >= -12 and X[0,2]>-2.4 and X[0,2]<2.4:
            r = 0
            update_range = 1    # single weight update while balancing
        else:
            r = -1
            update_range = 100  # many corrective updates on failure
        # Critic, create the critic input [X, u, r] and evaluate the network
        critic_input = np.concatenate((X,np.array([u,r],dtype=float).reshape((1,2))),axis=1)
        J, _p = critic_output(critic_w1,critic_w2,critic_input)
        # Calculate the action and critic error
        Ea = action_cost(J)
        Ec = critic_cost(alpha, J, Jlast, r)
        # Update the weights (critic first: its factor feeds the action update)
        for update in range(update_range):
            critic_w1, critic_w2, critic_factor = critic_update(critic_w1, critic_w2, Ec, critic_input, _p, 0.001)
            action_w1, action_w2 = action_update(action_w1, action_w2, critic_factor, 0.1*Ea, X, u, g)
        # Save history
        angle_hist.append(angle)
        vel_hist.append(X[0,3])
        j_hist.append(J[0,0])
        u_hist.append(u)
        x_hist.append(X[0,2])
        aw1_hist.append(np.mean(action_w1))
        aw2_hist.append(np.mean(action_w2))
        cw1_hist.append(np.mean(critic_w1))
        cw2_hist.append(np.mean(critic_w2))
        # Break the loop if we fail to keep the angle in range
        if r == -1:
            fail = True
        # Print a summary
        # NOTE(review): in this reconstruction the print runs every timestep;
        # it was probably intended to sit after the while loop (once per
        # epoch) -- confirm placement against the original.
        print("Epoch:", '%04d' % (epoch+1), "max was:", '%06d' % (max_i + 1), "steps, this epoch was:", '%06d' % (i + 1))
        # Save best run only
        if i > max_i:
            max_i = i
            best_angle_hist = angle_hist
            best_vel_hist = vel_hist
            best_j_hist = j_hist
            best_u_hist = u_hist
            best_x_hist = x_hist
            best_aw1_hist = aw1_hist
            best_aw2_hist = aw2_hist
            best_cw1_hist = cw1_hist
            best_cw2_hist = cw2_hist
        # Check if we reached the max time step
        # NOTE(review): the loop condition guarantees i < 600000 here (the
        # increment happens below), so this branch can never fire at this
        # position -- confirm intended placement.
        if i == 600000:
            Done = True
            print("Epoch:", '%04d' % (epoch+1), " MAX STEP COUNT REACHED, 600,000!")
        # Increment the time index and save variables
        i = i + 1
        t = t + dt
        Jlast = J
    # Done with one trial, loop back
    #plot_results(angle_hist,vel_hist,j_hist,u_hist,x_hist,aw1_hist,aw2_hist,cw1_hist,cw2_hist)
    #temp = 1
plot_results(best_angle_hist,best_vel_hist,best_j_hist,best_u_hist,best_x_hist,best_aw1_hist,best_aw2_hist,best_cw1_hist,best_cw2_hist)
|
13,450 | 4e1e078cdd8a892e5523c32bafb48f48f1ad347b | def str_rev(str):
    # Build the reversal of *str* by appending characters from last to first.
    # NOTE(review): the parameter shadows the builtin `str`; the whole body is
    # equivalent to `return str[::-1]`.
    rstr = ''
    index = len(str)
    while index > 0:
        rstr += str[ index - 1 ]
        index = index - 1
    return rstr
print(str_rev('string'))
|
13,451 | b4745c4fce15537556d9906216ddb979934937a7 | #!/usr/bin/python
import matplotlib.pyplot
import matplotlib.mlab
import collections
def Import(filename):
    # Load a tab-separated CAN log into a numpy record array and report its size.
    # NOTE(review): Python 2 syntax (print statement); matplotlib.mlab.csv2rec
    # was removed in matplotlib 3.x, so this only runs on legacy installs.
    datas=matplotlib.mlab.csv2rec(filename,delimiter='\t')
    print "LogCan20:", filename, "OK n=", len(datas)
    return datas
def PlotT(datas ):
    # Plot every data channel against its timestamp on a single subplot,
    # skipping bookkeeping/unused columns.
    matplotlib.pyplot.figure()
    ax1 = matplotlib.pyplot.subplot(1,1,1)
    ignore_fields=set(['date_hour', 'timestamp', 'cycle', 'unuseddata', 'unuseddata_1', 'unuseddata_2', 'unuseddata_3', 'unuseddata_4', 'unuseddata_5', 'unuseddata_6'])
    for field in datas.dtype.names:
        if field not in ignore_fields:
            ax1.plot(datas.timestamp,datas[field], label=field)
    ax1.legend()
    ax1.grid(True)
# Interactive plotting on, then a best-effort load of the three VCU log
# files; a missing file simply ends the session (Python 2 print statements).
matplotlib.pyplot.ion()
try:
    infosVCU=Import("AgentCanArcelorInfoFromVcu.txt")
    inputVCU=Import("AgentCanArcelorInputFromVcu.txt")
    outputVCU=Import("AgentCanArcelorOutputToVcu.txt")
    print 'OK'
except IOError:
    print 'End.'
|
13,452 | 8439fca8bd57db3b86645f8d6d154be74d8bb377 | import json
import logging
# Module logger that also echoes records to stderr via a stream handler
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_rule_access(as_user):
    """A regular user must be denied (403) both read and write access to /rules."""
    response = as_user.get('/rules')
    assert response.status_code == 403
    response = as_user.post('/rules', json={'test': 'rule'})
    assert response.status_code == 403
|
13,453 | d401eeedfee373acc0953abbb318a551c6509d90 | input = open('allvectors.in', 'r')
# Writes every binary vector of length s (s read from allvectors.in), one per
# line in ascending binary order, to allvectors.out.
# NOTE(review): `input` shadows the builtin, and neither file is ever closed;
# a `with` block would be safer.
output = open('allvectors.out', 'w')
s=int(input.read())
b=[]
for i in range(2**s):
    # Collect the s bits of i, least-significant first
    j=2
    while(j<=2**s):
        if (i%j<=(j/2-1)):
            b.append(0)
        else:
            b.append(1)
        j*=2
    # Emit most-significant bit first
    for k in range(s):
        output.write(str(b[s-k-1]))
    output.write("\n")
    b.clear()
|
13,454 | a6a7924aa8e329ca4caa47e69b0063fa76bf7b0f | #!/usr/bin/env python3
import sys
from markup_processor import process
from formatters.html import HtmlFormat
def main():
    # Convert markup read from stdin into HTML written to stdout.
    process(HtmlFormat, sys.stdin, sys.stdout)
if __name__ == '__main__':
    main()
|
13,455 | b9dd9907dd3bd0dbfd91cc2e5a2a07daafa2634e | # 10.SQL IS NULL Query:
# IS NULL Syntax:
'''
SELECT column_names
FROM table_name
WHERE column_name IS NULL;
'''
# The IS NULL Operator:
# The IS NULL operator is used to test for empty values (NULL values).
# Always use IS NULL -- never "= NULL" -- when looking for NULL values.
'''
SELECT CustomerName, ContactName, Address
FROM Customers
WHERE Address IS NULL;
'''
# SQL lists all customers with a NULL value in the "Address" field: |
13,456 | 764c08e08cda7355219352dd3e5ecd2aa6d66d84 | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.views.generic import RedirectView
admin.autodiscover()
# Root URLconf: social-auth mounted at the root, one namespaced include per
# app, and the admin site.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# this module targets older Django releases.
urlpatterns = patterns('',
    url(r'', include('social_auth.urls')),
    url(r'^$', RedirectView.as_view(url='/accounts/profile')),
    url(r'^accounts/', include('accounts.urls', namespace="accounts")),
    url(r'^trip/', include('trip.urls', namespace="trip")),
    url(r'^invite/', include('invite.urls', namespace="invite")),
    url(r'^finder/', include('finder.urls', namespace="finder")),
    url(r'^tasks/', include('tasks.urls', namespace="tasks")),
    # Examples:
    # url(r'^$', 'socialecom.views.home', name='home'),
    # url(r'^socialecom/', include('socialecom.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
13,457 | 18cf94acdd3ad8c9968909428b8598d019d9867e | #--------------------------------
# Functions for plotting
#--------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_dist_3p(
    hst,
    xi,
    yi,
    ax=None,
    filled=False,
    fcolors=None,
    **kwargs,
):
    """Plot a bi-dimensional histogram. Show the contours of the histogram
    which enclose the highest 30%, 60%, and 90% centered distribution.

    :hst: (2D numpy array) bi-dimensional histogram
    :xi: (1D numpy array) centers of x dimension
    :yi: (1D numpy array) centers of y dimension
    :ax: (matplotlib.axes, optional) axis to plot figure on
    :filled: (bool) filled contour if True
    :fcolors: (list, optional) color string or sequence of colors
    :return: (matplotlib figure object) figure
    """
    # Thin wrapper: delegate with the fixed 30/60/90% levels.
    return plot_dist_xp(
        hst, xi, yi, ax=ax, levels=[0.3, 0.6, 0.9],
        filled=filled, fcolors=fcolors, **kwargs,
    )
def plot_dist_4p(
    hst,
    xi,
    yi,
    ax=None,
    filled=False,
    fcolors=None,
    **kwargs,
):
    """Plot a bi-dimensional histogram. Show the contours of the histogram
    which enclose the highest 30%, 60%, 90% and 99% centered distribution.

    :hst: (2D numpy array) bi-dimensional histogram
    :xi: (1D numpy array) centers of x dimension
    :yi: (1D numpy array) centers of y dimension
    :ax: (matplotlib.axes, optional) axis to plot figure on
    :filled: (bool) filled contour if True
    :fcolors: (list, optional) color string or sequence of colors
    :return: (matplotlib figure object) figure
    """
    # Thin wrapper: delegate with the fixed 30/60/90/99% levels.
    return plot_dist_xp(
        hst, xi, yi, ax=ax, levels=[0.3, 0.6, 0.9, 0.99],
        filled=filled, fcolors=fcolors, **kwargs,
    )
def plot_dist_xp(
    hst,
    xi,
    yi,
    ax=None,
    levels=None,
    filled=False,
    fcolors=None,
    **kwargs,
):
    """Plot a bi-dimensional histogram. Show the contours of the histogram
    which enclose the highest p1%, p2%, ... and pN% centered distribution.

    :hst: (2D numpy array) bi-dimensional histogram
    :xi: (1D numpy array) centers of x dimension
    :yi: (1D numpy array) centers of y dimension
    :ax: (matplotlib.axes, optional) axis to plot figure on
    :levels: (list of float, optional) enclosed-mass fractions in (0, 1];
             defaults to [0.3, 0.6, 0.9] when omitted
    :filled: (bool) filled contour if True
    :fcolors: (list, optional) color string or sequence of colors
    :return: (matplotlib figure object) figure
    :raises ValueError: if `fcolors` length does not match len(levels) + 1
    """
    # use current axis if not specified
    if ax is None:
        ax = plt.gca()
    # FIX: passing levels=None previously crashed on len(None); fall back to
    # the standard 30/60/90% levels (matches plot_dist_3p).
    if levels is None:
        levels = [0.3, 0.6, 0.9]
    hsum = np.sum(hst)
    # bin fractions sorted densest-first, and their cumulative mass
    hlist = -np.sort(-hst.flatten())/hsum
    hcum = np.cumsum(hlist)
    vl = levels
    nv = len(vl)
    vlev = np.zeros(nv)
    # for each requested mass fraction, find the density threshold whose
    # cumulative mass is closest to it
    for i in np.arange(nv):
        ind = np.argmin(abs(hcum-vl[i]))
        vlev[i] = hlist[ind]
    pdfData = hst/hsum
    pdfData[pdfData==0] = 1e-12  # avoid log10(0) below
    if not filled:
        fig = ax.contour(xi, yi, np.log10(np.transpose(pdfData)), levels=np.log10(vlev[::-1]), **kwargs)
    else:
        if fcolors is None:
            cmap = cm.get_cmap('bone')
            fcolors = cmap(np.linspace(1.0, 0.0, 11)[0:nv+1])
        else:
            nfc = len(fcolors)
            if nfc != nv+1:
                raise ValueError('Length of fcolors should equal to number of levels + 1.')
        fig = ax.contourf(xi, yi, np.log10(np.transpose(pdfData)), levels=np.log10(vlev[::-1]),
                          colors=fcolors, extend='both', **kwargs)
    return fig
def plot_regime_diagram_background_BG12(
    ax=None,
):
    """Plot the background of the regime diagram
    following Fig. 3 of Belcher et al., 2012

    :ax: (matplotlib.axes, optional) axis to plot figure on
    """
    if ax is None:
        ax = plt.gca()
    # range of power
    xpr = [-1, 1]
    ypr = [-3, 3]
    # range
    xlims = [10**i for i in xpr]
    ylims = [10**i for i in ypr]
    # size of x and y
    nx = 500
    ny = 500
    xx = np.logspace(xpr[0], xpr[1], nx)
    yy = np.logspace(ypr[0], ypr[1], ny)
    # Turbulence contributions on the (La_t, h/L_L) grid, vectorized
    # (replaces an O(nx*ny) pure-Python double loop with identical values):
    # zz1 (shear) and zz2 (Langmuir) vary along x only; zz3 (convection)
    # is separable in x and y.
    zz1 = np.tile((2*(1-np.exp(-0.5*xx)))[:, np.newaxis], (1, ny))
    zz2 = np.tile((0.22*xx**(-2))[:, np.newaxis], (1, ny))
    zz3 = np.outer(0.3*xx**(-2), yy)
    zz = zz1 + zz2 + zz3
    ax.contourf(xx, yy, np.transpose(np.log10(zz)),
                levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],
                cmap='summer', extend='both')
    ax.contour(xx, yy, np.transpose(np.log10(zz)),
               levels=[-0.1, 0, 0.1, 0.25, 0.5, 1, 2, 3, 4],
               colors='darkgray')
    # 90%-dominance boundaries of each mechanism
    ax.contour(xx, yy, np.transpose(zz1/zz), levels=0.9, colors='k',
               linestyles='-', linewidths=2)
    ax.contour(xx, yy, np.transpose(zz2/zz), levels=0.9, colors='k',
               linestyles='-', linewidths=2)
    ax.contour(xx, yy, np.transpose(zz3/zz), levels=0.9, colors='k',
               linestyles='-', linewidths=2)
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel('La$_t$')
    ax.set_ylabel('$h/L_L$')
    ax.set_aspect(aspect=1/3)
    # contour-level annotations
    ax.text(0.85, 3e-3, '0', color='k', fontsize=8, rotation=-90)
    ax.text(1.6, 1e-2, '0.1', color='k', fontsize=8, rotation=-90)
    ax.text(3.8, 1e-1, '0.25', color='k', fontsize=8, rotation=-90)
    ax.text(4, 1e2, '0.5', color='k', fontsize=8, rotation=33)
    ax.text(3.2, 3e2, '1', color='k', fontsize=8, rotation=36)
    ax.text(0.53, 1e2, '2', color='k', fontsize=8, rotation=38)
    ax.text(0.3, 3.1e2, '3', color='k', fontsize=8, rotation=39)
    ax.text(0.12, 5e2, '4', color='k', fontsize=8, rotation=40)
    # regime labels
    ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle="square",ec='k',fc='w'))
    ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle="square",ec='k',fc='w'))
    ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle="square",ec='k',fc='w'))
def plot_regime_diagram_background_L19(
    ax=None,
):
    """Plot the background of the regime diagram
    in Li et al., 2019

    :ax: (matplotlib.axes, optional) axis to plot figure on
    """
    # FIX: from_levels_and_colors was referenced below but never imported
    # anywhere in this module, raising NameError at call time.
    from matplotlib.colors import from_levels_and_colors
    if ax is None:
        ax = plt.gca()
    # range of power
    xpr = [-1, 1]
    ypr = [-3, 3]
    # range
    xlims = [10**i for i in xpr]
    ylims = [10**i for i in ypr]
    # background following Fig. 3 of Belcher et al., 2012
    nx = 500
    ny = 500
    xx = np.logspace(xpr[0], xpr[1], nx)
    yy = np.logspace(ypr[0], ypr[1], ny)
    # Turbulence contributions, vectorized (replaces an O(nx*ny) pure-Python
    # double loop with identical values): zz1 (shear) and zz2 (Langmuir) vary
    # along x only; zz3 (convection) is separable in x and y.
    zz1 = np.tile((2*(1-np.exp(-0.5*xx)))[:, np.newaxis], (1, ny))
    zz2 = np.tile((0.22*xx**(-2))[:, np.newaxis], (1, ny))
    zz3 = np.outer(0.3*xx**(-2), yy)
    zz = zz1 + zz2 + zz3
    rz_ST = zz1/zz
    rz_LT = zz2/zz
    rz_CT = zz3/zz
    # Classify each grid cell by which mechanisms contribute >= 25%
    fr = np.ones(zz.shape) * 7
    cfrac = 0.25
    fr[(rz_LT<cfrac) & (rz_CT<cfrac)] = 1
    fr[(rz_ST<cfrac) & (rz_CT<cfrac)] = 2
    fr[(rz_ST<cfrac) & (rz_LT<cfrac)] = 3
    fr[(rz_ST>=cfrac) & (rz_LT>=cfrac) & (rz_CT<cfrac)] = 4
    fr[(rz_ST>=cfrac) & (rz_CT>=cfrac) & (rz_LT<cfrac)] = 5
    fr[(rz_LT>=cfrac) & (rz_CT>=cfrac) & (rz_ST<cfrac)] = 6
    color_list = ['firebrick','forestgreen','royalblue','gold','orchid','turquoise','w']
    cb_ticks = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]
    cmap, norm = from_levels_and_colors(cb_ticks, color_list)
    ax.contourf(xx, yy, np.transpose(fr), cmap=cmap, norm=norm)
    ax.contour(xx, yy, np.transpose(fr), colors='darkgray')
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel('La$_t$')
    ax.set_ylabel('$h/L_L$')
    ax.set_aspect(aspect=1/3)
    ax.text(0.11, 4e-3, 'Langmuir', bbox=dict(boxstyle="square",ec='k',fc='w'))
    ax.text(3, 4e-3, 'Shear', bbox=dict(boxstyle="square",ec='k',fc='w'))
    ax.text(0.13, 1e2, 'Convection', bbox=dict(boxstyle="square",ec='k',fc='w'))
def set_ylabel_multicolor(
    ax,
    strings,
    colors,
    anchorpad = 0.,
    **kwargs,
):
    """Set a y-axis label whose text segments are drawn in different colors.

    :ax: (matplotlib.axes) axis to set ylabel
    :strings: (list of strings) strings for the label
    :colors: (list of strings) name of colors for the label
    :anchorpad: (float) pad between the text and the frame as fraction of the font size
    """
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker
    # Build one rotated TextArea per segment; reversed so the first string
    # ends up at the top of the vertical stack.
    text_boxes = []
    for label_text, label_color in zip(reversed(strings), reversed(colors)):
        props = dict(color=label_color, ha='left', va='bottom', rotation=90, **kwargs)
        text_boxes.append(TextArea(label_text, textprops=props))
    stacked = VPacker(children=text_boxes, align="center", pad=0, sep=5)
    anchored = AnchoredOffsetbox(loc=3, child=stacked, pad=anchorpad, frameon=False,
                                 bbox_to_anchor=(-0.15, -0.05),
                                 bbox_transform=ax.transAxes, borderpad=0.)
    ax.add_artist(anchored)
|
13,458 | ddbdb452c9053c042391cb2c246eaac1665441c8 | import unittest
from config.config import Config
# Checked-in template config used as a fixture by the tests below
TEMPLATE_CONFIG_FILE = 'config/config.ini.template'
class TestConfig(unittest.TestCase):
    """Verify that Config exposes every field of the template config file."""

    def setUp(self):
        self.config = Config(TEMPLATE_CONFIG_FILE)

    # assertEqual is used throughout: the assertEquals alias was deprecated
    # since Python 3.2 and removed entirely in Python 3.12.
    def test_get_mongo_url(self):
        self.assertEqual(self.config.get_mongo_url(), 'mongodb://USER:PASSWORD@HOSTNAME/AUTH_DB')

    def test_get_db_host(self):
        self.assertEqual(self.config.get_db_host(), 'HOSTNAME')

    def test_get_auth_db_name(self):
        self.assertEqual(self.config.get_auth_db_name(), 'AUTH_DB')

    def test_get_db_user(self):
        self.assertEqual(self.config.get_db_user(), 'USER')

    def test_get_db_password(self):
        self.assertEqual(self.config.get_db_password(), 'PASSWORD')

    def test_challonge_api_key(self):
        self.assertEqual(self.config.get_challonge_api_key(), 'API_KEY')

    def test_get_fb_app_id(self):
        self.assertEqual(self.config.get_fb_app_id(), 'FB_APP_ID')

    def test_get_fb_app_token(self):
        self.assertEqual(self.config.get_fb_app_token(), 'FB_APP_TOKEN')
|
13,459 | 87076944a9c5f77061eae8120cc7d8536c944421 | # -*- coding: utf-8 -*-
"""
Created on Fri May 24 09:46:47 2019
@author: CDEC
"""
import cv2
import numpy as np
kernel = np.ones((5,5),np.uint8)
for i in range(1,21):
path = 'D:\Documents\OPENCV\Placas\Placa ('+str(i)+").jpg"
img = cv2.imread(path)
numero = 0
caracteres = []
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 25, 4)
edgess = cv2.dilate(thresh, None,iterations=1)
yu = cv2.GaussianBlur(thresh, (5, 5), 0)
edge = cv2.Canny(yu, 180, 260)
imageContours, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,255,0), 1, cv2.LINE_AA)
for cnt in contours:
area = cv2.contourArea(cnt)
(x, y, w, h) = cv2.boundingRect(cnt)
aspect_ratio = float(w)/h
rect_area = w*h
extension = float(area)/rect_area
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
equi_diametro = np.sqrt(4*area/np.pi)
mask = np.zeros(gray.shape,np.uint8)
cv2.drawContours(mask,[cnt],0,255,-1)
pixelpoints = np.transpose(np.nonzero(mask))
pixelpoints = cv2.findNonZero(mask)
if(aspect_ratio > 0.1 and aspect_ratio < 1 and area >= 650 and equi_diametro >= 32 and extension >= 0.1):
(x, y, w, h) = cv2.boundingRect(cnt)
#cv2.drawContours(img,[cnt],0,(0,0,255),-1,4)
cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
solidez = float(area)/hull_area
Plate = img[y:y+h,x:x+w]
caracteres.append(img[y:y+h,x:x+w])
numero += 1
print (i)
print (numero ,'\n')
print("X: ", x ,'\n'
"Y: ", y, '\n'
"W: ", w , '\n'
"H: ", h, '\n'
"Y+H: ",y+h, '\n'
'X+W: ',x+w, '\n'
"area: ",area ,'\n'
'aspect_ratio...1: ',aspect_ratio, '\n'
'Extension: ',extension, '\n'
'Solidez: ',solidez, '\n'
'Diametro: ',equi_diametro, '\n'
'PIXEL: ',len(pixelpoints), '\n'
"-------------------------------------------------------",'\n')
print ('caracteres: ',len(caracteres), '\n')
cv2.namedWindow('img', cv2.WINDOW_NORMAL)
cv2.imshow('img', img)
cv2.waitKey()
'''cv2.namedWindow('img4', cv2.WINDOW_NORMAL)
cv2.imshow('img4', edge)
cv2.waitKey()'''
cv2.destroyAllWindows() |
13,460 | ee95a54f73b3c68ff1d2dcd386b972f7b3d51dc5 | from PIL import Image
# Binarize a camera frame, then flag vertical stripes with many white pixels.
# NOTE(review): indentation was lost in this copy; the nesting below is a
# reconstruction -- confirm against the original source.
w = 640
h = 480
image = Image.open('/home/pi/0.jpg')
pixels = image.load()
# Threshold: a pixel becomes white only if ALL channels are outside [80, 155]
for i in range(0,w):
    for j in range(0,h):
        white = True
        for each in pixels[i,j]:
            if not (each < 80 or each > 155):
                white = False
        if(white):
            pixels[i,j] = (255,255,255)
        else:
            pixels[i,j] = (0,0,0)
image.save('/home/pi/0_filtered.jpg')
#for i in range(0,w):
#    for j in range(0,h):
#        if ( i < 120 or j < 120 or i > w - 120 or j > h - 80):
#            #pixels[i,j] = (0,0,0)
# Scan the central region column by column; paint a column red when it holds
# more than 90 white pixels (a detected target stripe)
for i in range(120,w-120):
    count = 0
    for j in range(120,h-80):
        if (pixels[i,j] == (255,255,255)):
            count += 1
    if (count > 90):
        for j in range(0,h):
            pixels[i,j] = (255,0,0)
        # NOTE(review): reassigning the loop variable does NOT stop a Python
        # for loop, so every qualifying column is marked; use `break` if only
        # the first column was intended (that would change current output).
        i = w
image.save('/home/pi/0_targeted.jpg')
|
13,461 | 065bb34ed1cd9a4e2037f9492a6e3cca6892f787 |
# time: O(nlogn)
# space: O(n)
from collections import Counter
class Solution:
    def findLHS(self, nums: list[int]) -> int:
        """Return the length of the longest harmonious subsequence of nums.

        A harmonious subsequence is one whose maximum and minimum differ by
        exactly 1. Time O(n), space O(n).
        """
        # FIX: the original annotation used `List[int]` without importing it
        # from typing, raising NameError when the class body executed; the
        # builtin generic `list[int]` (Python 3.9+) needs no import.
        counter = Counter(nums)
        longest = 0
        for value, count in counter.items():
            # Only value / value+1 pairs can form a harmonious subsequence;
            # membership is tested directly on the Counter (the original
            # built a redundant set(list(counter.keys())) copy).
            if value + 1 in counter:
                longest = max(longest, count + counter[value + 1])
        return longest
13,462 | dcf26389c0f841e33f9a2ecc759db2a16e88a295 | # Copyright 2019 Markus Liljergren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date
# Version template; "MMDD" is replaced with today's month/day at build time.
driverVersion = "v1.0.MMDDTb"

def getDriverVersion(driverVersionSpecial=None):
    """Return the driver version string, substituting "MMDD" with today's
    date; `driverVersionSpecial` overrides the module-level template."""
    version = driverVersion if driverVersionSpecial is None else driverVersionSpecial
    if "MMDD" in version:
        version = version.replace("MMDD", date.today().strftime("%m%d"))
    return version
from hubitat_codebuilder import HubitatCodeBuilderError
"""
Snippets used by hubitat-driver-helper-tool
"""
def getHeaderLicense(driverVersionSpecial=None):
    # Return the Apache-2.0 license header (a Groovy block comment) with the
    # resolved driver version embedded.
    driverVersionActual = getDriverVersion(driverVersionSpecial)
    return """/**
* Copyright 2020 Markus Liljergren
*
* Code Version: """ + driverVersionActual + """
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/"""
def getDefaultImports():
    # Groovy import block shared by all generated drivers.
    return """/** Default Imports */
import groovy.json.JsonSlurper
import groovy.json.JsonOutput
// Used for MD5 calculations
import java.security.MessageDigest
"""
#import java.math.MathContext NOT ALLOWED!!! WHY?
#import groovy.transform.TypeChecked
#import groovy.transform.TypeCheckingMode
def getChildComponentDefaultUpdatedContent():
    # Groovy snippet that every child driver must run from updated().
    return """
    // This is code needed to run in updated() in ALL Child drivers
    getDriverVersion()
"""
def getDefaultParentImports():
    # Default imports plus the parent-driver marker comment.
    return getDefaultImports() + """/* Default Parent Imports */
"""
def getUpdateNeededSettingsTasmotaHeader():
    # Generic Groovy header for updateNeededSettings() in Tasmota drivers:
    # loads current properties, optionally wipes all settings on RESET, and
    # prepares the device network id.
    return """// updateNeededSettings() Generic header BEGINS here
def currentProperties = state.currentProperties ?: [:]
state.settings = settings
def configuration = new XmlSlurper().parseText(configuration_model_tasmota())
def isUpdateNeeded = "NO"
if(runReset != null && runReset == 'RESET') {
for ( e in state.settings ) {
logging("Deleting '${e.key}' with value = ${e.value} from Settings", 50)
// Not sure which ones are needed, so doing all...
device.clearSetting("${e.key}")
device.removeSetting("${e.key}")
state?.settings?.remove("${e.key}")
}
}
prepareDNI()
// updateNeededSettings() Generic header ENDS here
"""
def getUpdateNeededSettingsTasmotaModuleCommand(moduleNumber):
    # Groovy snippet that selects a fixed Tasmota Module number; the Python
    # argument is baked into the generated Groovy source via concatenation.
    return '''
// Tasmota Module selection command (autogenerated)
moduleNumber = '''+str(moduleNumber)+''' // Defined earlier
getAction(getCommandString("Module", null))
getAction(getCommandString("Template", null))
if(disableModuleSelection == null) disableModuleSelection = false
if(disableModuleSelection == false) {
logging("Setting the Module soon...", 10)
logging(device.currentValue('module'), 10)
if(device.currentValue('module') != null && !device.currentValue('module').startsWith("[${moduleNumber}:")) {
logging("This DOESN'T start with [${moduleNumber} ${device.currentValue('module')}",10)
getAction(getCommandString("Module", "${moduleNumber}"))
} else {
logging("This starts with [${moduleNumber} ${device.currentValue('module')}",10)
}
} else {
logging("Setting the Module has been disabled!", 10)
}
'''
def getUpdateNeededSettingsTasmotaDynamicModuleCommand(moduleNumber = -1, defaultDeviceTemplate = ''):
    # Groovy snippet that selects a Tasmota Module AND/OR a device Template at
    # runtime; the Python defaults are baked into the generated Groovy source.
    return """
// Tasmota Module and Template selection command (autogenerated)
getAction(getCommandString("Module", null))
getAction(getCommandString("Template", null))
if(disableModuleSelection == null) disableModuleSelection = false
def moduleNumberUsed = moduleNumber
if(moduleNumber == null || moduleNumber == -1) moduleNumberUsed = """+str(moduleNumber)+"""
boolean useDefaultTemplate = false
def defaultDeviceTemplate = ''
if(deviceTemplateInput != null && deviceTemplateInput == "0") {
useDefaultTemplate = true
defaultDeviceTemplate = ''
}
if(deviceTemplateInput == null || deviceTemplateInput == "") {
// We should use the default of the driver
useDefaultTemplate = true
defaultDeviceTemplate = '""" + defaultDeviceTemplate + """'
}
if(deviceTemplateInput != null) deviceTemplateInput = deviceTemplateInput.replaceAll(' ','')
if(disableModuleSelection == false && ((deviceTemplateInput != null && deviceTemplateInput != "") ||
(useDefaultTemplate && defaultDeviceTemplate != ""))) {
def usedDeviceTemplate = defaultDeviceTemplate
if(useDefaultTemplate == false && deviceTemplateInput != null && deviceTemplateInput != "") {
usedDeviceTemplate = deviceTemplateInput
}
logging("Setting the Template (${usedDeviceTemplate}) soon...", 100)
logging("templateData = ${device.currentValue('templateData')}", 10)
if(usedDeviceTemplate != '') moduleNumberUsed = 0 // This activates the Template when set
// Checking this makes installs fail: device.currentValue('templateData') != null
if(usedDeviceTemplate != null && device.currentValue('templateData') != usedDeviceTemplate) {
logging("The template is currently NOT set to '${usedDeviceTemplate}', it is set to '${device.currentValue('templateData')}'", 100)
// The NAME part of th Device Template can't exceed 14 characters! More than that and they will be truncated.
// TODO: Parse and limit the size of NAME???
getAction(getCommandString("Template", usedDeviceTemplate))
} else if (device.currentValue('module') == null){
// Update our stored value!
getAction(getCommandString("Template", null))
}else if (usedDeviceTemplate != null) {
logging("The template is set to '${usedDeviceTemplate}' already!", 100)
}
} else {
logging("Can't set the Template...", 10)
logging(device.currentValue('templateData'), 10)
//logging("deviceTemplateInput: '${deviceTemplateInput}'", 10)
//logging("disableModuleSelection: '${disableModuleSelection}'", 10)
}
if(disableModuleSelection == false && moduleNumberUsed != null && moduleNumberUsed >= 0) {
logging("Setting the Module (${moduleNumberUsed}) soon...", 100)
logging("device.currentValue('module'): '${device.currentValue('module')}'", 10)
// Don't filter in this case: device.currentValue('module') != null
if(moduleNumberUsed != null && (device.currentValue('module') == null || !(device.currentValue('module').startsWith("[${moduleNumberUsed}:") || device.currentValue('module') == '0'))) {
logging("Currently not using module ${moduleNumberUsed}, using ${device.currentValue('module')}", 100)
getAction(getCommandString("Module", "${moduleNumberUsed}"))
} else if (moduleNumberUsed != null && device.currentValue('module') != null){
logging("This starts with [${moduleNumberUsed} ${device.currentValue('module')}",10)
} else if (device.currentValue('module') == null){
// Update our stored value!
getAction(getCommandString("Module", null))
} else {
logging("Module is set to '${device.currentValue('module')}', and it's set to be null, report this to the creator of this driver!",10)
}
} else {
logging("Setting the Module has been disabled!", 10)
}
"""
def getUpdateNeededSettingsTelePeriod(forcedTelePeriod=None):
    # Groovy snippet setting TelePeriod: either honor the user preference
    # (defaulting to 300 s) or force a fixed value baked in at build time.
    if (forcedTelePeriod==None):
        return """
// updateNeededSettings() TelePeriod setting
getAction(getCommandString("TelePeriod", (telePeriod == '' || telePeriod == null ? "300" : telePeriod)))
"""
    else:
        return '''
// updateNeededSettings() TelePeriod setting
getAction(getCommandString("TelePeriod", "''' + str(forcedTelePeriod) + '''"))
'''
def getUpdateNeededSettingsTHMonitor():
    # Groovy snippet applying the temperature-resolution preference.
    return """
// updateNeededSettings() Temperature/Humidity/Pressure setting
getAction(getCommandString("TempRes", (tempRes == '' || tempRes == null ? "1" : tempRes)))
"""
def getUpdateNeededSettingsTasmotaFooter():
    # Generic Groovy footer for updateNeededSettings(): pushes Hubitat
    # host/port and misc Tasmota options, then schedules child refreshes.
    return """
getAction(getCommandString("TelePeriod", "${getTelePeriodValue()}"))
// updateNeededSettings() Generic footer BEGINS here
getAction(getCommandString("SetOption113", "1")) // Hubitat Enabled
// Disabling Emulation so that we don't flood the logs with upnp traffic
getAction(getCommandString("Emulation", "2")) // Hue Emulation Enabled, REQUIRED for device discovery
getAction(getCommandString("HubitatHost", device.hub.getDataValue("localIP")))
logging("HubitatPort: ${device.hub.getDataValue("localSrvPortTCP")}", 1)
getAction(getCommandString("HubitatPort", device.hub.getDataValue("localSrvPortTCP")))
getAction(getCommandString("FriendlyName1", device.displayName.take(32))) // Set to a maximum of 32 characters
// We need the Backlog inter-command delay to be 20ms instead of 200...
getAction(getCommandString("SetOption34", "20"))
// Just make sure we update the child devices
logging("Scheduling refreshChildren...", 1)
runIn(30, "refreshChildren")
runIn(60, "refreshChildrenAgain")
logging("Done scheduling refreshChildren...", 1)
if(override == true) {
sync(ipAddress)
}
//logging("Cmds: " +cmds,1)
sendEvent(name:"needUpdate", value: isUpdateNeeded, displayed:false, isStateChange: false)
// updateNeededSettings() Generic footer ENDS here
"""
#configuration.Value.each
#{
# if ("${it.@setting_type}" == "lan" && it.@disabled != "true"){
# if (currentProperties."${it.@index}" == null)
# {
# if (it.@setonly == "true"){
# logging("Setting ${it.@index} will be updated to ${it.@value}", 2)
# cmds << getAction("/configSet?name=${it.@index}&value=${it.@value}")
# } else {
# isUpdateNeeded = "YES"
# logging("Current value of setting ${it.@index} is unknown", 2)
# cmds << getAction("/configGet?name=${it.@index}")
# }
# }
# else if ((settings."${it.@index}" != null || it.@hidden == "true") && currentProperties."${it.@index}" != (settings."${it.@index}" != null? settings."${it.@index}".toString() : "${it.@value}"))
# {
# isUpdateNeeded = "YES"
# logging("Setting ${it.@index} will be updated to ${settings."${it.@index}"}", 2)
# cmds << getAction("/configSet?name=${it.@index}&value=${settings."${it.@index}"}")
# }
# }
#}
def getGenericOnOffFunctions():
    # Groovy on()/off() pair for single-switch devices.
    return """
/* Generic On/Off functions used when only 1 switch/button exists */
def on() {
logging("on()", 50)
def cmds = []
cmds << getAction(getCommandString("Power", "On"))
return cmds
}
def off() {
logging("off()", 50)
def cmds = []
cmds << getAction(getCommandString("Power", "Off"))
return cmds
}
"""
def getRGBWOnOffFunctions():
    # Groovy on()/off() pair for RGBW lights: on() restores level/color state
    # (minimum 20% brightness) before powering up.
    return """
/* RGBW On/Off functions used when only 1 switch/button exists */
def on() {
logging("on()", 50)
def cmds = []
def h = null
def s = null
def b = 100
if(state != null) {
//h = state.containsKey("hue") ? state.hue : null
//s = state.containsKey("saturation") ? state.saturation : null
b = state.containsKey("level") ? state.level : 100
}
if(b < 20) b = 20
if(state.colorMode == "CT") {
state.level = b
cmds << setColorTemperature(colorTemperature ? colorTemperature : 3000)
cmds << setLevel(state.level, 0)
} else {
cmds << setHSB(h, s, b)
}
cmds << getAction(getCommandString("Power", "On"))
return cmds
}
def off() {
logging("off()", 50)
def cmds = []
cmds << getAction(getCommandString("Power", "Off"))
return cmds
}
"""
def getDefaultFunctions(comment="", driverVersionSpecial=None):
driverVersionActual = getDriverVersion(driverVersionSpecial)
return '''/* Default Driver Methods go here */
private String getDriverVersion() {
//comment = "''' + comment + '''"
//if(comment != "") state.comment = comment
String version = "''' + driverVersionActual + '''"
logging("getDriverVersion() = ${version}", 100)
sendEvent(name: "driver", value: version)
updateDataValue('driver', version)
return version
}
'''
def getDefaultAppMethods(driverVersionSpecial=None):
    """Return the Groovy getAppVersion() method with the resolved app version
    embedded in the template."""
    driverVersionActual = getDriverVersion(driverVersionSpecial)
    head = '''/* Default App Methods go here */
private String getAppVersion() {
String version = "'''
    tail = '''"
logging("getAppVersion() = ${version}", 50)
return version
}
'''
    return head + driverVersionActual + tail
def getLoggingFunction(specialDebugLevel=True):
    """Return the Groovy logging() helper included in every driver.

    When *specialDebugLevel* is True, an extra `case 100` branch is inserted
    for special debug messages (e.g. IR and RF codes).
    """
    specialCase = """
case 100: // Only special debug messages, eg IR and RF codes
if (level == 100 ) {
log.info "$message"
didLogging = true
}
break
"""
    extraDebug = specialCase if specialDebugLevel else ""
    return """/* Logging function included in all drivers */
private boolean logging(message, level) {
boolean didLogging = false
Integer logLevelLocal = (logLevel != null ? logLevel.toInteger() : 0)
if(!isDeveloperHub()) {
logLevelLocal = 0
if (infoLogging == true) {
logLevelLocal = 100
}
if (debugLogging == true) {
logLevelLocal = 1
}
}
if (logLevelLocal != "0"){
switch (logLevelLocal) {
case -1: // Insanely verbose
if (level >= 0 && level < 100) {
log.debug "$message"
didLogging = true
} else if (level == 100) {
log.info "$message"
didLogging = true
}
break
case 1: // Very verbose
if (level >= 1 && level < 99) {
log.debug "$message"
didLogging = true
} else if (level == 100) {
log.info "$message"
didLogging = true
}
break
case 10: // A little less
if (level >= 10 && level < 99) {
log.debug "$message"
didLogging = true
} else if (level == 100) {
log.info "$message"
didLogging = true
}
break
case 50: // Rather chatty
if (level >= 50 ) {
log.debug "$message"
didLogging = true
}
break
case 99: // Only parsing reports
if (level >= 99 ) {
log.debug "$message"
didLogging = true
}
break
""" + extraDebug + """}
}
return didLogging
}
"""
def getSpecialDebugEntry(label=None):
    """Return the <Item> XML entry for the special debug level (100).

    If *label* is None, the default label "descriptionText" is used.
    """
    # `is None` is the idiomatic identity test; `== None` invokes __eq__ and
    # can misbehave for objects with custom equality.
    if label is None:
        label = "descriptionText"
    # Single template instead of two near-duplicate return statements.
    return '<Item label="' + label + '" value="100" />'
def getCreateChildDevicesCommand(childType='component'):
    """Return the Groovy snippet that creates a child device inside a
    try/catch guarding against a missing child driver.

    childType == 'component' marks the child as a component device
    (isComponent: true); 'not_component' creates a stand-alone child
    (isComponent: false). Any other value raises HubitatCodeBuilderError.
    """
    # Validate first so we fail before building anything.
    if childType == 'component':
        isComponent = 'true'
    elif childType == 'not_component':
        isComponent = 'false'
    else:
        # Fixed: the message previously named the function with a wrong
        # (lower-case) spelling, "getcreateChildDevicesCommand".
        raise HubitatCodeBuilderError(
            'Unknown childType specified in getCreateChildDevicesCommand(childType={})'.format(str(childType)))
    start = "try {\n"
    end = """
} catch (com.hubitat.app.exception.UnknownDeviceTypeException e) {
log.error "'${getChildDriverName()}' driver can't be found! Did you forget to install the child driver?"
}"""
    # Single template; only the isComponent flag differed between the two
    # previous near-duplicate return statements.
    body = ('addChildDevice("${getDeviceInfoByName("namespace")}", "${getChildDriverName()}", '
            '"$device.id-$i", [name: "${getFilteredDeviceDriverName()} #$i", '
            'label: "${getFilteredDeviceDisplayName()} $i", isComponent: ' + isComponent + '])')
    return start + body + end
def getGetChildDriverNameMethod(childDriverName='default'):
    """Return the Groovy getChildDriverName() method.

    With 'default' the child driver name is derived at runtime from the
    parent driver name (stripping a trailing " (parent)"); otherwise the
    supplied name is hard-coded into the generated method.
    """
    if childDriverName != 'default':
        return """String getChildDriverName() {
String childDriverName = '""" + childDriverName + """ (Child)'
logging("childDriverName = '$childDriverName'", 1)
return(childDriverName)
}"""
    return """String getChildDriverName() {
String deviceDriverName = getDeviceInfoByName('name')
if(deviceDriverName.toLowerCase().endsWith(' (parent)')) {
deviceDriverName = deviceDriverName.substring(0, deviceDriverName.length()-9)
}
String childDriverName = "${deviceDriverName} (Child)"
logging("childDriverName = '$childDriverName'", 1)
return(childDriverName)
}"""
def getCalculateB0():
    """Return the Groovy calculateB0() helper, which converts a Sonoff RF
    Bridge B1 capture string into the B0 command format."""
    groovy = """String calculateB0(String inputStr, repeats) {
// This calculates the B0 value from the B1 for use with the Sonoff RF Bridge
logging('inputStr: ' + inputStr, 0)
inputStr = inputStr.replace(' ', '')
//logging('inputStr.substring(4,6): ' + inputStr.substring(4,6), 0)
Integer numBuckets = Integer.parseInt(inputStr.substring(4,6), 16)
List buckets = []
logging('numBuckets: ' + numBuckets.toString(), 0)
String outAux = String.format(' %02X ', numBuckets.toInteger())
outAux = outAux + String.format(' %02X ', repeats.toInteger())
logging('outAux1: ' + outAux, 0)
Integer j = 0
for(i in (0..numBuckets-1)){
outAux = outAux + inputStr.substring(6+i*4,10+i*4) + " "
j = i
}
logging('outAux2: ' + outAux, 0)
outAux = outAux + inputStr.substring(10+j*4, inputStr.length()-2)
logging('outAux3: ' + outAux, 0)
String dataStr = outAux.replace(' ', '')
outAux = outAux + ' 55'
Integer length = (dataStr.length() / 2).toInteger()
outAux = "AA B0 " + String.format(' %02X ', length.toInteger()) + outAux
logging('outAux4: ' + outAux, 0)
logging('outAux: ' + outAux.replace(' ', ''), 10)
return(outAux)
}"""
    return groovy
def getGenerateLearningPreferences(types='["Default", "Toggle", "Push", "On", "Off"]', default_type='Default'):
    """Return the Groovy methods that render the Learning Mode preferences
    and resolve the currently selected Action name.

    *types* is the Groovy list literal of selectable actions; *default_type*
    is the action used when none has been selected.
    """
    parts = [
        '''// Methods for displaying the correct Learning Preferences and returning the
// current Action Name
def generateLearningPreferences() {
input(name: "learningMode", type: "bool", title: addTitleDiv("Learning Mode"), description: '<i>Activate this to enter Learning Mode. DO NOT ACTIVATE THIS once you have learned the codes of a device, they will have to be re-learned!</i>', displayDuringSetup: false, required: false)
if(learningMode) {
input(name: "actionCurrentName", type: "enum", title: addTitleDiv("Action To Learn"),
description: addDescriptionDiv("Select which Action to save to in Learn Mode."),
options: ''',
        types,
        ''', defaultValue: "''',
        default_type,
        '''",
displayDuringSetup: false, required: false)
input(name: "learningModeAdvanced", type: "bool", title: addTitleDiv("Advanced Learning Mode"),
description: '<i>Activate this to enable setting Advanced settings. Normally this is NOT needed, be careful!</i>',
defaultValue: false, displayDuringSetup: false, required: false)
if(learningModeAdvanced) {
input(name: "actionCodeSetManual", type: "string", title: addTitleDiv("Set Action Code Manually"),
description: '<i>WARNING! For ADVANCED users only!</i>',
displayDuringSetup: false, required: false)
input(name: "actionResetAll", type: "bool", title: addTitleDiv("RESET all Saved Actions"),
description: '<i>WARNING! This will DELETE all saved/learned Actions!</i>',
defaultValue: false, displayDuringSetup: false, required: false)
}
}
}
String getCurrentActionName() {
String actionName
if(!binding.hasVariable('actionCurrentName') ||
(binding.hasVariable('actionCurrentName') && actionCurrentName == null)) {
logging("Doesn't have the action name defined... Using ''',
        default_type,
        '''!", 1)
actionName = "''',
        default_type,
        '''"
} else {
actionName = actionCurrentName
}
return(actionName)
}''',
    ]
    return "".join(parts)
def getChildComponentMetaConfigCommands():
    """Return the Groovy snippet that initialises metaConfig for child
    component devices, hiding the internal data fields."""
    snippet = """
// metaConfig is what contains all fields to hide and other configuration
// processed in the "metadata" context of the driver.
def metaConfig = clearThingsToHide()
metaConfig = setDatasToHide(['metaConfig', 'isComponent', 'preferences', 'label', 'name'], metaConfig=metaConfig)
"""
    return snippet
13,463 | ffa669bb82cae0d7b8436040849409a61b4e024b | '''
Task: Jump over numbers
You are given a list of non-negative integers and you start at the left-most integer in this list. After that you need to perform the following step:
Given that the number at the position where you are now is P you need to jump P positions to the right in the list. For example, if you are at position 6 and the number at position 6 has the value 3, you need to jump to position 6 + 3 = 9. Repeat this operation until you reach beyond the right-side border of the list.
Your program must return the number of jumps that it needs to perform following this logic. Note that the list may contain the number 0, which means that you can get stuck at that position forever. In such cases you must return the number -1.
The length N of the input list will be in the range [1, 1000].
SAMPLE INPUT
3 4 1 2 5 6 9 0 1 2 3 1
SAMPLE OUTPUT
4
Note: In the sample example you start at position 1, where the number is 3. Then you must jump to position 4, where the number is 2. After that you jump to position 6 where the number is 6. This will lead you to position 12, which is the last number in the list and has the value 1. From there you jump 1 position to the right and must stop. This is a total of 4 jumps.
'''
def jump_over_numbers(list):
    """Count the jumps needed to walk off the right end of the list.

    Starting at index 0, repeatedly jump forward by the value at the current
    index. Returns the number of jumps performed, or -1 if a 0 is reached
    (jumping 0 positions would loop forever). An empty list needs 0 jumps.
    """
    # The parameter shadows the builtin `list`; the name is kept for
    # backward compatibility (keyword callers), but we bind it to a
    # non-shadowing local for the body.
    seq = list
    jumps = 0
    idx = 0
    while idx < len(seq):
        step = seq[idx]
        if step == 0:
            return -1
        idx += step
        jumps += 1
    return jumps
if __name__ == '__main__':
    # Smoke tests: empty list exits immediately (0 jumps); the sample from
    # the problem statement answers 4; a reachable 0 traps the walker (-1).
    assert jump_over_numbers([]) == 0
    assert jump_over_numbers([3,4,1,2,5,6,9,0,1,2,3,1]) == 4
    assert jump_over_numbers([3,4,5,1,2,4,0,1,3,4]) == -1
|
13,464 | 04996dc529114fa67376ae400c0def154ffe6ede | from astropy import cosmology as cosmo
import logging
from autoconf import conf
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autolens.lens.model.analysis import AnalysisDataset
from autolens.lens.model.preloads import Preloads
from autolens.interferometer.model.result import ResultInterferometer
from autolens.interferometer.model.visualizer import VisualizerInterferometer
from autolens.interferometer.fit_interferometer import FitInterferometer
from autolens.lens.model.settings import SettingsLens
from autolens import exc
logger = logging.getLogger(__name__)
logger.setLevel(level="INFO")
class AnalysisInterferometer(AnalysisDataset):
    """Analysis fitting a lens model to an interferometer dataset.

    Combines the dataset, optional hyper-dataset results (used to scale
    noise / model visibilities), positions used for tracer resampling
    checks, and the pixelization / inversion / lens settings applied to
    every fit performed by the non-linear search.
    """

    def __init__(
        self,
        dataset,
        positions: aa.Grid2DIrregular = None,
        hyper_dataset_result=None,
        cosmology=cosmo.Planck15,
        settings_pixelization=aa.SettingsPixelization(),
        settings_inversion=aa.SettingsInversion(),
        settings_lens=SettingsLens(),
    ):
        super().__init__(
            dataset=dataset,
            positions=positions,
            hyper_dataset_result=hyper_dataset_result,
            cosmology=cosmology,
            settings_pixelization=settings_pixelization,
            settings_inversion=settings_inversion,
            settings_lens=settings_lens,
        )
        # Hyper visibilities only exist when a previous result is supplied;
        # otherwise the hyper attributes are explicitly left unset.
        if self.hyper_dataset_result is not None:
            self.set_hyper_dataset(result=self.hyper_dataset_result)
        else:
            self.hyper_galaxy_visibilities_path_dict = None
            self.hyper_model_visibilities = None

    @property
    def interferometer(self):
        # Alias: the generic `dataset` attribute is an interferometer here.
        return self.dataset

    def modify_before_fit(self, paths: af.DirectoryPaths, model: af.AbstractPriorModel):
        """One-off setup before the search runs: refresh hyper images,
        visualize the dataset and configure preloads. Returns self."""
        self.check_and_replace_hyper_images(paths=paths)
        if not paths.is_complete:
            visualizer = VisualizerInterferometer(visualize_path=paths.image_path)
            visualizer.visualize_interferometer(interferometer=self.interferometer)
            visualizer.visualize_hyper_images(
                hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,
                hyper_model_image=self.hyper_model_image,
            )
            logger.info(
                "PRELOADS - Setting up preloads, may take a few minutes for fits using an inversion."
            )
            self.set_preloads(paths=paths, model=model)
        return self

    def set_hyper_dataset(self, result):
        """Copy hyper images (via super) and hyper visibilities from *result*."""
        super().set_hyper_dataset(result=result)
        self.hyper_model_visibilities = result.hyper_model_visibilities
        self.hyper_galaxy_visibilities_path_dict = (
            result.hyper_galaxy_visibilities_path_dict
        )

    def associate_hyper_visibilities(
        self, instance: af.ModelInstance
    ) -> af.ModelInstance:
        """
        Takes visibilities from the last result, if there is one, and associates
        them with galaxies in this search where full-path galaxy names match.

        If the galaxy collection has a different name then an association is not
        made. e.g. ``galaxies.lens`` will match with ``galaxies.lens`` but not
        with ``galaxies.source``.

        Parameters
        ----------
        instance
            A model instance with 0 or more galaxies in its tree.

        Returns
        -------
        instance
            The input instance with visibilities associated with galaxies where
            possible.
        """
        if self.hyper_galaxy_visibilities_path_dict is not None:
            for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
                ag.Galaxy
            ):
                if galaxy_path in self.hyper_galaxy_visibilities_path_dict:
                    galaxy.hyper_model_visibilities = self.hyper_model_visibilities
                    galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[
                        galaxy_path
                    ]
        return instance

    def log_likelihood_function(self, instance):
        """
        Determine the fit of a lens galaxy and source galaxy to the
        interferometer in this lens.

        Parameters
        ----------
        instance
            A model instance with attributes.

        Returns
        -------
        fit : Fit
            A fractional value indicating how well this model fit and the model
            interferometer itself.
        """
        try:
            return self.fit_interferometer_for_instance(
                instance=instance
            ).figure_of_merit
        except (
            exc.PixelizationException,
            exc.InversionException,
            exc.GridException,
            OverflowError,
        ) as e:
            # Numerical failures become FitException so the non-linear search
            # rejects the sample instead of crashing.
            raise exc.FitException from e

    def fit_interferometer_for_instance(
        self,
        instance,
        use_hyper_scalings=True,
        preload_overwrite=None,
        check_positions=True,
    ):
        """Create a tracer from *instance* and fit it to the interferometer.

        When *check_positions* is True, the positions threshold check may
        raise to discard unphysical mass models early.
        """
        # NOTE(review): the return value of associate_hyper_images is
        # discarded here but rebound in stochastic_log_evidences_for_instance
        # — confirm this relies on in-place association.
        self.associate_hyper_images(instance=instance)
        tracer = self.tracer_for_instance(instance=instance)
        if check_positions:
            self.settings_lens.check_positions_trace_within_threshold_via_tracer(
                tracer=tracer, positions=self.positions
            )
        hyper_background_noise = self.hyper_background_noise_for_instance(
            instance=instance
        )
        return self.fit_interferometer_for_tracer(
            tracer=tracer,
            hyper_background_noise=hyper_background_noise,
            use_hyper_scalings=use_hyper_scalings,
        )

    def fit_interferometer_for_tracer(
        self,
        tracer,
        hyper_background_noise,
        use_hyper_scalings=True,
        preload_overwrite=None,
    ):
        """Fit the given tracer to the dataset, optionally overriding preloads."""
        preloads = self.preloads if preload_overwrite is None else preload_overwrite
        return FitInterferometer(
            interferometer=self.dataset,
            tracer=tracer,
            hyper_background_noise=hyper_background_noise,
            use_hyper_scaling=use_hyper_scalings,
            settings_pixelization=self.settings_pixelization,
            settings_inversion=self.settings_inversion,
            preloads=preloads,
        )

    @property
    def fit_func(self):
        # Generic hook used by the parent class to perform fits.
        return self.fit_interferometer_for_instance

    def stochastic_log_evidences_for_instance(self, instance):
        """Repeat the fit with stochastic pixelization seeds and collect the
        log evidences; returns None when no (brightness-based) pixelization
        is present."""
        instance = self.associate_hyper_images(instance=instance)
        tracer = self.tracer_for_instance(instance=instance)
        if not tracer.has_pixelization:
            return None
        # Only VoronoiBrightnessImage pixelizations are stochastic.
        if not any(
            [
                isinstance(pix, aa.pix.VoronoiBrightnessImage)
                for pix in tracer.pixelization_list
            ]
        ):
            return
        hyper_background_noise = self.hyper_background_noise_for_instance(
            instance=instance
        )
        settings_pixelization = (
            self.settings_pixelization.settings_with_is_stochastic_true()
        )
        log_evidences = []
        for i in range(self.settings_lens.stochastic_samples):
            try:
                log_evidence = FitInterferometer(
                    interferometer=self.dataset,
                    tracer=tracer,
                    hyper_background_noise=hyper_background_noise,
                    settings_pixelization=settings_pixelization,
                    settings_inversion=self.settings_inversion,
                    preloads=self.preloads,
                ).log_evidence
            except (
                exc.PixelizationException,
                exc.InversionException,
                exc.GridException,
                OverflowError,
            ) as e:
                # A failed stochastic draw is simply skipped.
                log_evidence = None
            if log_evidence is not None:
                log_evidences.append(log_evidence)
        return log_evidences

    def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):
        """Output fit, tracer, inversion (and optionally no-hyper fit)
        visualization for the current instance."""
        instance = self.associate_hyper_images(instance=instance)
        fit = self.fit_interferometer_for_instance(instance=instance)
        visualizer = VisualizerInterferometer(visualize_path=paths.image_path)
        visualizer.visualize_fit_interferometer(
            fit=fit, during_analysis=during_analysis
        )
        visualizer.visualize_tracer(
            tracer=fit.tracer, grid=fit.grid, during_analysis=during_analysis
        )
        if fit.inversion is not None:
            visualizer.visualize_inversion(
                inversion=fit.inversion, during_analysis=during_analysis
            )
        visualizer.visualize_contribution_maps(tracer=fit.tracer)
        if visualizer.plot_fit_no_hyper:
            # Refit without hyper scalings for comparison plots.
            fit = self.fit_interferometer_for_tracer(
                tracer=fit.tracer,
                hyper_background_noise=None,
                use_hyper_scalings=False,
                preload_overwrite=Preloads(use_w_tilde=False),
            )
            visualizer.visualize_fit_interferometer(
                fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper"
            )

    def save_results_for_aggregator(
        self,
        paths: af.DirectoryPaths,
        samples: af.OptimizerSamples,
        model: af.Collection,
    ):
        """Persist stochastic outputs when enabled in the general config."""
        if conf.instance["general"]["hyper"]["stochastic_outputs"]:
            self.save_stochastic_outputs(paths=paths, samples=samples)

    def make_result(
        self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch
    ):
        """Wrap the search output in a ResultInterferometer."""
        return ResultInterferometer(
            samples=samples, model=model, analysis=self, search=search
        )

    def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
        """Persist dataset attributes (uv wavelengths, mask, positions) for
        later aggregator use."""
        super().save_attributes_for_aggregator(paths=paths)
        paths.save_object("uv_wavelengths", self.dataset.uv_wavelengths)
        paths.save_object("real_space_mask", self.dataset.real_space_mask)
        paths.save_object("positions", self.positions)
|
13,465 | e6ba87b2b552723a46c7a8a3e005a520fae25bf6 | import json
def hash_str(x):
    """Return a deterministic 16-hex-char digest of a JSON-serialisable value.

    The previous implementation used the builtin hash(), whose string hashing
    is salted per process (PYTHONHASHSEED), so the "hash" changed between
    runs. SHA-256 is stable everywhere; the original 8-byte (16 hex chars)
    output width is preserved. Key order is normalised via sort_keys.
    """
    import hashlib  # local import: keeps the module's import block unchanged
    canonical = json.dumps(x, sort_keys=True)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:16]
|
13,466 | bc32faadb10d168466977da0825e7ef4e1b6002e | height = [int(input()) for _ in range(9)]
check = [False] * 9
result = []
def recursive(index):
    # Depth-first subset search over the 9 module-level `height` values: at
    # each index the value is either included (check[index] True) or excluded.
    # A subset of exactly 7 heights summing to 100 is recorded (sorted
    # ascending) in the module-level `result`.
    global result
    if index == 9:
        # All nine include/exclude decisions made: evaluate this subset.
        answer = 0
        count = 0
        demo = []
        for i in range(9):
            if check[i]:
                answer += height[i]
                count += 1
                demo.append(height[i])
        if answer == 100 and count == 7:
            demo.sort()
            result = demo
        return
    # Branch 1: include height[index]; Branch 2: exclude it.
    check[index] = True
    recursive(index + 1)
    check[index] = False
    recursive(index + 1)
recursive(0)
for i in result:
print(i)
|
13,467 | 4231d0b652ab9071d0443d91d93b89bcbfba615b | import torch
import torch_geometric
from torch_geometric.profile import benchmark
from torch_geometric.testing import (
disableExtensions,
onlyFullTest,
onlyLinux,
withCUDA,
withPackage,
)
from torch_geometric.utils import scatter
# Basic "Gather-Apply-Scatter" patterns commonly used in PyG:
def gather_scatter(x, edge_index, reduce='sum'):
    """Gather source-node features along each edge and scatter-aggregate
    them back onto the target nodes."""
    source, target = edge_index[0], edge_index[1]
    gathered = x[source]
    return scatter(gathered, target, dim_size=x.size(0), reduce=reduce)
def gather_cat_scatter(x, edge_index, reduce='sum'):
    """Concatenate target- and source-node features per edge, then
    scatter-aggregate onto the target nodes."""
    source, target = edge_index[0], edge_index[1]
    edge_feats = torch.cat([x[target], x[source]], dim=-1)
    return scatter(edge_feats, target, dim_size=x.size(0), reduce=reduce)
def gather_weight_scatter(x, edge_index, edge_weight, reduce='sum'):
    """Scale gathered source features by per-edge weights before
    scatter-aggregating onto the target nodes."""
    source, target = edge_index[0], edge_index[1]
    weighted = x[source] * edge_weight.view(-1, 1)
    return scatter(weighted, target, dim_size=x.size(0), reduce=reduce)
def gather_transform_scatter(x, edge_index, matrix, reduce='sum'):
    """Linearly transform gathered source features, then scatter-aggregate
    onto the target nodes."""
    source, target = edge_index[0], edge_index[1]
    transformed = torch.matmul(x[source], matrix)
    return scatter(transformed, target, dim_size=x.size(0), reduce=reduce)
def fused_gather_scatter(x, edge_index, reduce=('sum', 'mean', 'max')):
    """Gather source features once, aggregate with several reducers and
    concatenate the per-reducer outputs along the feature dimension.

    The default *reduce* is a tuple rather than a list: mutable default
    arguments are shared across calls and are a classic Python pitfall.
    Iteration behaviour is identical, and callers passing their own list
    are unaffected.
    """
    row, col = edge_index
    x_j = x[row]
    outs = [scatter(x_j, col, dim_size=x.size(0), reduce=r) for r in reduce]
    return torch.cat(outs, dim=-1)
@withCUDA
@onlyLinux
@onlyFullTest
@disableExtensions
@withPackage('torch>=2.0.0')
def test_torch_compile(device):
    # Each gather/scatter pattern must produce the same result (up to float
    # tolerance) whether run eagerly or through torch_geometric.compile().
    x = torch.randn(10, 16, device=device)
    edge_index = torch.randint(0, x.size(0), (2, 40), device=device)
    edge_weight = torch.rand(edge_index.size(1), device=device)
    matrix = torch.randn(x.size(-1), x.size(-1), device=device)
    expected = gather_scatter(x, edge_index)
    compiled_op = torch_geometric.compile(gather_scatter)
    out = compiled_op(x, edge_index)
    assert torch.allclose(out, expected, atol=1e-6)
    expected = gather_cat_scatter(x, edge_index)
    compiled_op = torch_geometric.compile(gather_cat_scatter)
    out = compiled_op(x, edge_index)
    assert torch.allclose(out, expected, atol=1e-6)
    expected = gather_weight_scatter(x, edge_index, edge_weight)
    compiled_op = torch_geometric.compile(gather_weight_scatter)
    out = compiled_op(x, edge_index, edge_weight)
    assert torch.allclose(out, expected, atol=1e-6)
    expected = gather_transform_scatter(x, edge_index, matrix)
    compiled_op = torch_geometric.compile(gather_transform_scatter)
    out = compiled_op(x, edge_index, matrix)
    assert torch.allclose(out, expected, atol=1e-6)
    expected = fused_gather_scatter(x, edge_index)
    compiled_op = torch_geometric.compile(fused_gather_scatter)
    out = compiled_op(x, edge_index)
    assert torch.allclose(out, expected, atol=1e-6)
if __name__ == '__main__':
    # Benchmark each vanilla pattern against its torch.compile'd version,
    # once per aggregation mode. CPU runs use fewer steps than GPU runs.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--backward', action='store_true')
    args = parser.parse_args()
    num_nodes, num_edges = 10_000, 200_000
    x = torch.randn(num_nodes, 64, device=args.device)
    edge_index = torch.randint(num_nodes, (2, num_edges), device=args.device)
    edge_weight = torch.rand(num_edges, device=args.device)
    matrix = torch.randn(64, 64, device=args.device)
    for reduce in ['sum', 'mean', 'max']:
        print(f'Aggregator: {reduce}')
        benchmark(
            funcs=[
                gather_scatter,
                torch_geometric.compile(gather_scatter),
            ],
            func_names=['Vanilla', 'Compiled'],
            args=(x, edge_index, reduce),
            num_steps=50 if args.device == 'cpu' else 500,
            num_warmups=10 if args.device == 'cpu' else 100,
            backward=args.backward,
        )
        benchmark(
            funcs=[
                gather_cat_scatter,
                torch_geometric.compile(gather_cat_scatter),
            ],
            func_names=['Vanilla Cat', 'Compiled Cat'],
            args=(x, edge_index, reduce),
            num_steps=50 if args.device == 'cpu' else 500,
            num_warmups=10 if args.device == 'cpu' else 100,
            backward=args.backward,
        )
        benchmark(
            funcs=[
                gather_weight_scatter,
                torch_geometric.compile(gather_weight_scatter),
            ],
            func_names=['Vanilla Weight', 'Compiled Weight'],
            args=(x, edge_index, edge_weight, reduce),
            num_steps=50 if args.device == 'cpu' else 500,
            num_warmups=10 if args.device == 'cpu' else 100,
            backward=args.backward,
        )
        benchmark(
            funcs=[
                gather_transform_scatter,
                torch_geometric.compile(gather_transform_scatter),
            ],
            func_names=['Vanilla Transform', 'Compiled Transform'],
            args=(x, edge_index, matrix, reduce),
            num_steps=50 if args.device == 'cpu' else 500,
            num_warmups=10 if args.device == 'cpu' else 100,
            backward=args.backward,
        )
        # The fused variant uses its default reducers, so it does not take
        # the per-iteration `reduce` argument.
        benchmark(
            funcs=[
                fused_gather_scatter,
                torch_geometric.compile(fused_gather_scatter),
            ],
            func_names=['Vanilla Fused', 'Compiled Fused'],
            args=(x, edge_index),
            num_steps=50 if args.device == 'cpu' else 500,
            num_warmups=10 if args.device == 'cpu' else 100,
            backward=args.backward,
        )
13,468 | 7441555938981f54f76db26f8bf7201306bf6080 | import pytest
from scripts.use_pd_array_in_core import use_pd_array
BAD_FILE_0 = "import pandas as pd\npd.array"
BAD_FILE_1 = "\nfrom pandas import array"
GOOD_FILE_0 = "from pandas import array as pd_array"
GOOD_FILE_1 = "from pandas.core.construction import array as pd_array"
PATH = "t.py"
@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1])
def test_inconsistent_usage(content, capsys):
    """Files using pd.array / bare `array` import must be reported on stdout
    (with file:line:col) and cause a SystemExit."""
    # Both bad files place the offending usage on line 2, column 0.
    result_msg = (
        "t.py:2:0: Don't use pd.array in core, import array as pd_array instead\n"
    )
    with pytest.raises(SystemExit, match=None):
        use_pd_array(content, PATH)
    expected_msg, _ = capsys.readouterr()
    assert result_msg == expected_msg
@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1])
def test_consistent_usage(content):
    """Files importing `array as pd_array` pass the check silently."""
    # should not raise
    use_pd_array(content, PATH)
|
# Interactive guide to the five-step tango sequence: prompt for a step
# number and print its description (anything else ends the lesson).
STEPS = {
    1: "Leader takes a step back.",
    2: "Side step towards centre of floor.",
    3: "Leader steps outside of follower.",
    4: "Preparation of the cross with the forward step.",
    5: "Leader closes his feet, follower completes cross step.",
}

print("Learn the steps of the 5 sequence tango.")  # fixed typo: "hte" -> "the"
print("What step do you wish to learn?")
whichStep = int(input())
# Dict dispatch replaces the if/elif chain; out-of-range ends the sequence.
print(STEPS.get(whichStep, "Terminate the sequence."))
13,470 | 19747cea3b21d22a495f9ada22bfef7bc7efafe8 | def login_disponivel(login,lista):
login = input('Qual รฉ o seu login? ')
i=1
if login in lista:
login1 = login + str(i)
lista.append(login1)
else:
lista.append(login) |
13,471 | db4b1431be679f2484c1f6bc2e1b74ac7a3ee45b | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright https://github.com/VPerrollaz
#
# Distributed under terms of the %LICENSE% license.
"""
Gรฉnรฉration du graphe permettant de coder un algorithme glouton.
"""
import random as rd
from enum import Enum, auto
class Genre(Enum):
    """Kind of transition: swap the start node, or swap two neighbours."""

    Demarrage = auto()  # move exchanging the starting node
    Voisinage = auto()  # move exchanging two neighbours of one node
def echange(liste, el1, el2):
    """Swap (the first occurrences of) el1 and el2 in-place in *liste*."""
    i, j = liste.index(el1), liste.index(el2)
    liste[i], liste[j] = el2, el1
class Mouvement:
    """A candidate move on a Graphe.

    A 2-tuple payload describes a start-node swap (Genre.Demarrage); a
    3-tuple payload describes a neighbour swap (Genre.Voisinage).
    """

    def __init__(self, donnees):
        self.genre = Genre.Demarrage if len(donnees) == 2 else Genre.Voisinage
        self.donnees = donnees

    def __repr__(self):
        return f"Mouvement({self.donnees})"

    def __str__(self):
        if self.genre is Genre.Demarrage:
            premier, second = self.donnees
            return f"Demarrage: {premier} <-> {second}"
        noeud, voisin_a, voisin_b = self.donnees
        return f"Voisinage de {noeud} : {voisin_a} <-> {voisin_b}"
class Graphe:
    """Graph parameterising a greedy algorithm.

    `demarrage` is the current start node; `voisinage` maps each node to an
    ordered list of neighbours (the ordering is what mutations permute);
    `admissibles` lists nodes with at least two neighbours, i.e. those whose
    neighbour order can actually be swapped; `dernier` remembers the last
    applied move so it can be undone.
    """

    def __init__(self, demarrage, voisinage):
        self.plus_grand = max(voisinage.keys())
        self.demarrage = demarrage
        self.voisinage = voisinage
        self.dernier = None
        self.admissibles = list()
        for nombre in self.voisinage.keys():
            if len(self.voisinage[nombre]) > 1:
                self.admissibles.append(nombre)

    @classmethod
    def default(cls, nb_sommets):
        """Initialise the divisibility graph on 1..nb_sommets, starting at 1.

        Nodes i and j are neighbours when one divides the other.
        """
        demarrage = 1
        voisinage = dict()
        for i in range(1, nb_sommets + 1):
            voisinage[i] = list()
            for j in range(1, nb_sommets + 1):
                if i == j:
                    continue
                if (i % j == 0) or (j % i == 0):
                    voisinage[i].append(j)
        return cls(demarrage, voisinage)

    def __repr__(self):
        return f"Graphe({self.demarrage, self.voisinage})"

    def __str__(self):
        return "{}\n{}".format(self.demarrage, self.voisinage)

    def modification(self, mouvement: Mouvement):
        """Apply *mouvement*: swap the start node, or swap two neighbours."""
        if mouvement.genre is Genre.Demarrage:
            entier1, entier2 = mouvement.donnees
            if self.demarrage == entier1:
                self.demarrage = entier2
            elif self.demarrage == entier2:
                self.demarrage = entier1
            else:
                raise ValueError("Aucun des deux nombres n'est le dรฉmarrage.")
        else:
            entier, voisin1, voisin2 = mouvement.donnees
            echange(self.voisinage[entier], voisin1, voisin2)
        # Remember the move so inversion() can undo it.
        self.dernier = mouvement

    def mutation(self):
        """Pick a random transition and apply it (recording it in `dernier`).

        With probability ~0.1 the start node is swapped with a random node;
        otherwise two neighbours of a random admissible node are swapped.
        """
        if rd.random() > 0.9:
            donnees = (self.demarrage, rd.randint(1, self.plus_grand))
        else:
            entier = rd.choice(self.admissibles)
            voisin1, voisin2 = rd.sample(self.voisinage[entier], 2)
            donnees = (entier, voisin1, voisin2)
        mouv = Mouvement(donnees)
        self.modification(mouv)

    def inversion(self):
        """Undo the last move (each swap is its own inverse) and forget it."""
        self.modification(self.dernier)
        self.dernier = None
13,472 | 06c424dc5adf8d87e932159faa2ccba85d68dc80 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multi-task and semi-supervised NLP model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from model import encoder
from model import shared_inputs
class Inference(object):
    """Builds the shared sentence encoder plus one task-specific module per
    task, each under its own variable scope."""

    def __init__(self, config, inputs, pretrained_embeddings, tasks):
        with tf.variable_scope('encoder'):
            self.encoder = encoder.Encoder(config, inputs, pretrained_embeddings)
        # Maps task name -> task-specific prediction module built on top of
        # the shared encoder.
        self.modules = {}
        for task in tasks:
            with tf.variable_scope(task.name):
                self.modules[task.name] = task.get_module(inputs, self.encoder)
class Model(object):
    """Multi-task, semi-supervised model: a shared encoder feeding per-task
    modules, trained with supervised (labeled) losses and a consistency
    (unlabeled) loss, optionally using EMA weights for teacher/test."""

    def __init__(self, config, pretrained_embeddings, tasks):
        self._config = config
        self._tasks = tasks
        self._global_step, self._optimizer = self._get_optimizer()
        self._inputs = shared_inputs.Inputs(config)
        with tf.variable_scope('model', reuse=tf.AUTO_REUSE) as scope:
            inference = Inference(config, self._inputs, pretrained_embeddings,
                                  tasks)
            self._trainer = inference
            self._tester = inference
            self._teacher = inference
            if config.ema_test or config.ema_teacher:
                # Maintain exponential-moving-average copies of the weights;
                # the EMA model can act as teacher and/or be used at test time.
                ema = tf.train.ExponentialMovingAverage(config.ema_decay)
                model_vars = tf.get_collection("trainable_variables", "model")
                ema_op = ema.apply(model_vars)
                tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)

                def ema_getter(getter, name, *args, **kwargs):
                    # Custom getter substituting each variable with its EMA.
                    var = getter(name, *args, **kwargs)
                    return ema.average(var)

                scope.set_custom_getter(ema_getter)
                inference_ema = Inference(
                    config, self._inputs, pretrained_embeddings, tasks)
                if config.ema_teacher:
                    self._teacher = inference_ema
                if config.ema_test:
                    self._tester = inference_ema
        self._unlabeled_loss = self._get_consistency_loss(tasks)
        self._unlabeled_train_op = self._get_train_op(self._unlabeled_loss)
        self._labeled_train_ops = {}
        for task in self._tasks:
            task_loss = self._trainer.modules[task.name].supervised_loss
            self._labeled_train_ops[task.name] = self._get_train_op(task_loss)

    def _get_consistency_loss(self, tasks):
        """Sum of the per-task unsupervised (consistency) losses."""
        return sum([self._trainer.modules[task.name].unsupervised_loss
                    for task in tasks])

    def _get_optimizer(self):
        """Momentum optimizer with linear warm-up and sqrt learning-rate decay."""
        global_step = tf.get_variable('global_step', initializer=0, trainable=False)
        warm_up_multiplier = (tf.minimum(tf.to_float(global_step),
                                         self._config.warm_up_steps)
                              / self._config.warm_up_steps)
        decay_multiplier = 1.0 / (1 + self._config.lr_decay *
                                  tf.sqrt(tf.to_float(global_step)))
        lr = self._config.lr * warm_up_multiplier * decay_multiplier
        optimizer = tf.train.MomentumOptimizer(lr, self._config.momentum)
        return global_step, optimizer

    def _get_train_op(self, loss):
        """Clipped-gradient train op that also runs UPDATE_OPS (e.g. EMA)."""
        grads, vs = zip(*self._optimizer.compute_gradients(loss))
        grads, _ = tf.clip_by_global_norm(grads, self._config.grad_clip)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            return self._optimizer.apply_gradients(
                zip(grads, vs), global_step=self._global_step)

    def _create_feed_dict(self, mb, model, is_training=True):
        """Build the feed dict for minibatch *mb*; a labeled batch feeds only
        its own task module, an unlabeled batch feeds all modules."""
        feed = self._inputs.create_feed_dict(mb, is_training)
        if mb.task_name in model.modules:
            model.modules[mb.task_name].update_feed_dict(feed, mb)
        else:
            for module in model.modules.values():
                module.update_feed_dict(feed, mb)
        return feed

    def train_unlabeled(self, sess, mb):
        """One consistency-training step; returns the unlabeled loss."""
        return sess.run([self._unlabeled_train_op, self._unlabeled_loss],
                        feed_dict=self._create_feed_dict(mb, self._trainer))[1]

    def train_labeled(self, sess, mb):
        """One supervised step for mb's task; returns the supervised loss."""
        return sess.run([self._labeled_train_ops[mb.task_name],
                         self._trainer.modules[mb.task_name].supervised_loss,],
                        feed_dict=self._create_feed_dict(mb, self._trainer))[1]

    def run_teacher(self, sess, mb):
        """Run the teacher and store its float16 predictions on the minibatch."""
        result = sess.run({task.name: self._teacher.modules[task.name].probs
                           for task in self._tasks},
                          feed_dict=self._create_feed_dict(mb, self._teacher,
                                                           False))
        # `.items()` works on both Python 2 and 3 (this file imports
        # __future__ features for 2/3 compatibility); the previous
        # `.iteritems()` is Python-2-only and breaks under Python 3.
        for task_name, probs in result.items():
            mb.teacher_predictions[task_name] = probs.astype('float16')

    def test(self, sess, mb):
        """Return [supervised_loss, predictions] for mb under the test model."""
        return sess.run(
            [self._tester.modules[mb.task_name].supervised_loss,
             self._tester.modules[mb.task_name].preds],
            feed_dict=self._create_feed_dict(mb, self._tester, False))

    def get_global_step(self, sess):
        """Return the current value of the global step counter."""
        return sess.run(self._global_step)
|
13,473 | b4fc575a56530582016e5b7044169e033c5a976d | #!/usr/bin/env python
from __future__ import print_function
import pdb
import tracer_func as tracer
import ConfigParser
import argparse
import sys
import os
import subprocess
import pipes
import glob
import shutil
import re
from collections import defaultdict, Counter
from time import sleep
import warnings
import pickle
from prettytable import PrettyTable
from operator import attrgetter
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, generic_dna
def main():
    """Summarise TraCeR cell pickles into DNA and protein FASTA files.

    Walks each cell subdirectory under <DIR>, loads the filtered TCR pickle,
    and writes one FASTA record per recombinant (loci A/B/D/G) to
    <prefix>.dna_seq.fa and <prefix>.protein_seq.fa.
    """
    parser = argparse.ArgumentParser(description = "get cell inform from pickle file")
    parser.add_argument('dir', metavar="<DIR>", help='directory containing subdirectories for each cell to be summarised')
    parser.add_argument('--ignore_inkt', '-i', help='ignore iNKT cells ', action="store_true")
    parser.add_argument("--sample", dest="sample_id", type=str, nargs='?', default="SAMPLE", help="sample id")
    parser.add_argument("-o", "--output_prefix", metavar="STR", type=str, dest="output_prefix",
                        default="output_prefix", help="output prefix [default: %(default)s]")
    args = parser.parse_args()
    root_dir = os.path.abspath(args.dir)
    # BUG FIX: generator.next() is Python-2-only; next() works on 2.6+ and 3.
    subdirectories = next(os.walk(root_dir))[1]
    pkl_dir = "filtered_TCR_seqs"
    outdir = "{}/filtered_TCR_summary".format(root_dir)
    tracer.makeOutputDir(outdir)
    # BUG FIX: the output files were never closed; context managers guarantee
    # they are flushed and closed even if an exception is raised mid-run.
    with open("%s.dna_seq.fa" % args.output_prefix, "w") as out1, \
         open("%s.protein_seq.fa" % args.output_prefix, "w") as out2:
        for d in subdirectories:
            cell_pkl = "{root_dir}/{d}/{pkl_dir}/{d}.pkl".format(pkl_dir=pkl_dir, d=d, root_dir=root_dir)
            if not os.path.isfile(cell_pkl):
                continue
            # BUG FIX: close the pickle file handle ('rb' is correct on both
            # Python 2 and 3).
            with open(cell_pkl, 'rb') as pkl_file:
                cl = pickle.load(pkl_file)
            if cl.is_empty or (cl.is_inkt and args.ignore_inkt):
                continue
            for locus in ['A', 'B', 'D', 'G']:
                if cl.all_recombinants[locus] is None:
                    continue
                for recombinant in cl.all_recombinants[locus]:
                    aaseq = Seq(str(recombinant.dna_seq), generic_dna).translate()
                    seqAnn = "productive=%s in_frame=%s stop_codon=%s cdr3=%s" % (
                        recombinant.productive, recombinant.in_frame,
                        recombinant.stop_codon, recombinant.cdr3)
                    header = "|".join([cl.name, locus, recombinant.contig_name, recombinant.identifier])
                    print(">%s %s\n%s" % (header, seqAnn, recombinant.dna_seq), file = out1)
                    print(">%s %s\n%s" % (header, seqAnn, str(aaseq)), file = out2)
        print("", file = out1)
        print("", file = out2)


if __name__ == '__main__':
    main()
|
13,474 | 0358674298b2f64a864a58fcee4bf25de745d5b1 | # 4/2009 BAS
# read() replicates behavior of pyserial
# readexactly() added, which is probably more useful
# readbuf() dumps current buffer contents
# readpacket() has old broken behavior of read() - lowest level / fastest
import socket
import time
MOXA_DEFAULT_TIMEOUT = 1.0
# Socket modes:
# Nonblocking
# s.setblocking(0) means s.settimeout(0)
# Read returns as much data as possible, does not fail
# Doesn't work for me on windows. Don't use.
# Timeout
# s.settimeout(n)
# Waits until buffer has enough data, then returns
# Throws exception (caught by read) if not enough data is ready after n second.
# Read returns '' on fail
# Blocking
# s.setblocking(1) or s.settimeout(None)
# Waits forever until buffer has enough data, then returns
# This is the default mode for sockets
# Check socket.getdefaulttimeout() to see what mode sockets are created in
# pyserial style wrapper over IA 5250 TCP Server mode
class Serial_TCPServer(object):
    """Class to speak with the moxa serial / Ethernet converter.

    Set up the moxa box ports according to the specifications of the device
    hooked into each serial port.

    A typical sequence of messages for dealing with a device. Create the
    socket once::

        >>> moxa = moxa_serial.Serial_TCPServer(('IP',port),timeout=1.0)

    Then do this sequence, complicated in some way by an individual device's
    hand shaking needs::

        >>> moxa.flushInput()
        >>> moxa.write(msg)
        >>> moxa.readexactly(n)

    I write a "cmd" methods that handle the proper sequence with checksums
    etc.  Most devices require a certain delay between commands, which is
    left to the user. If using multithreading, wrap your delays in mutexes.

    Args:
        port (tuple): (IP addr, TCP port)
        timeout (float): Timeout for reading from the moxa box
    """

    def __init__(self, port, timeout=MOXA_DEFAULT_TIMEOUT):
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Created non-blocking, then settimeout() switches to timeout mode
        # and records the value in self.__timeout for later restores.
        self.sock.setblocking(0)
        self.settimeout(timeout)
        self.sock.connect(self.port)

    def readexactly(self, n):
        """Tries to read exactly n bytes within the timeout.

        Args:
            n: Number of bytes to read.

        Returns:
            str: Returned message if n bytes were read. Empty string if
            ``len(message) != n``.
        """
        t0 = time.time()
        msg = ""
        timeout = self.gettimeout()
        # Peek (MSG_PEEK does not consume) until n bytes are buffered or the
        # overall deadline passes; each retry gets only the remaining budget.
        while len(msg) < n:
            newtimeout = timeout - (time.time() - t0)
            if newtimeout <= 0.0:
                break
            self.settimeout(newtimeout)
            try:
                msg = self.sock.recv(n, socket.MSG_PEEK)
            except BaseException:
                pass
        # Flush the message out if you got everything
        # NOTE(review): .decode() assumes the payload is text in the default
        # encoding — confirm for binary protocols.
        if len(msg) == n:
            msg = self.sock.recv(n).decode()
        # Otherwise tell nothing and leave the data in the buffer
        else:
            msg = ''
        self.settimeout(timeout)
        return msg

    def readbuf_slow(self, n):
        """Reads whatever is in the buffer right now, but is O(N) in buffer
        size.

        Args:
            n: Number of bytes to read.
        """
        msg = ''
        self.sock.setblocking(0)
        try:
            for i in range(n):
                # NOTE(review): '' + bytes raises TypeError on Python 3 and
                # is silently swallowed by the bare except below — this looks
                # like un-ported Python 2 code; confirm before relying on it.
                msg += self.sock.recv(1)
        except BaseException:
            pass
        self.sock.setblocking(1)  # belt and suspenders
        self.settimeout(self.__timeout)
        return msg

    def readbuf(self, n):
        """Returns whatever is currently in the buffer. Suitable for large
        buffers.

        Args:
            n: Number of bytes to read.
        """
        if n == 0:
            return ''
        try:
            msg = self.sock.recv(n)
        except BaseException:
            msg = ''
        # Recurse on at most half the remaining request so a peer trickling
        # data cannot keep us here indefinitely.
        # NOTE(review): ``n / 2`` is float division on Python 3 and
        # sock.recv() rejects floats; presumably should be ``n // 2`` —
        # confirm before running under Python 3.
        n2 = min(n - len(msg), n / 2)
        return msg + self.readbuf(n2)

    def readpacket(self, n):
        """Like ``read()``, but may not return everything if the moxa box
        flushes too soon.

        Will probably read whatever arrives in the buffer, up to n or the
        timeout. Use ``read()`` for certainty.
        """
        try:
            msg = self.sock.recv(n)
        except BaseException:
            msg = ''
        return msg

    def read(self, n):
        """Like ``readexactly()``, but returns whatever is in the buffer if it
        can't fill up.

        This replicates the behavior of the read method in pyserial. I feel
        that ``readexactly()`` has better behavior for most applications
        though.

        Args:
            n: Number of bytes to read. Will read at most n bytes.

        Returns:
            str: Returned message of up to n bytes.
        """
        msg = self.readexactly(n)
        n2 = n - len(msg)
        if n2 > 0:
            msg += self.readbuf(n2)
        return msg

    def readline(self, term='\n'):
        """Read until *term* or a timeout; the terminator is not included.

        readexactly() returns '' on timeout, which also ends the line.
        """
        msg = ''
        while True:
            c = self.readexactly(1)
            if c == term or c == '':
                return msg
            msg += c

    def readall(self):
        """Read until a carriage return.

        Returns the message without the '\\r', or False (note: not '') if a
        timeout occurs first.
        """
        msg = ""
        while True:
            c = self.readexactly(1)
            if c == '\r':
                return msg
            if c == '':
                return False
            msg += c
        return msg

    def write(self, msg):
        """Sends message to the moxa box.

        Args:
            msg (str): Message to send, including terminator (i.e. ``\\r\\n``)
                if needed.
        """
        self.sock.send(msg.encode())

    def writeread(self, msg):
        """Convenience: flush stale input, send *msg*, read reply up to '\\r'."""
        self.flushInput()
        self.write(msg)
        return self.readall()

    def flushInput(self):
        """Erases the input buffer at this moment.

        Before I ask for new info from a device, I flush my
        receive buffer to make sure I don't get any garbage in
        front.
        """
        # Temporarily non-blocking so recv() raises as soon as it is empty.
        self.sock.setblocking(0)
        try:
            while len(self.sock.recv(1)) > 0:
                pass
        except BaseException:
            pass
        self.sock.setblocking(1)
        self.sock.settimeout(self.__timeout)

    def settimeout(self, timeout):
        """Sets the socket in timeout mode."""
        assert timeout > 0.0
        # Remember the value so flushInput()/readexactly() can restore it.
        self.__timeout = timeout
        self.sock.settimeout(timeout)
        # We don't query the socket's timeout or check that they're still
        # correct. Since self.sock e is public this could be the wrong
        # timeout!

    def gettimeout(self):
        """Return the last timeout set via settimeout() (see caveat above)."""
        return self.__timeout

    timeout = property(gettimeout, settimeout,
                       doc='Communication timeout. Only use timeout mode '
                       + 'with ``timeout > 0.0``.')
|
13,475 | bf0300b9271eadd489dd1579ad47fa8cca8cdfb2 | from . import views
from django.urls.conf import include, path
import debug_toolbar
# URL routes for the auth app, plus the django-debug-toolbar mount.
urlpatterns = [
    path('register', views.register, name='register'),
    path('login', views.login, name='login'),
    path('logout', views.logout, name='logout'),
    # BUG FIX: this include() was also given name='logout', colliding with the
    # real logout route and making reverse('logout') ambiguous; an include()
    # takes no route name.
    path('__debug__/', include(debug_toolbar.urls)),
]
|
13,476 | 4c0d61fe0c2233b9b09ddf313e0396aa9fc87e23 | # Generated by Django 2.1.5 on 2019-02-09 14:23
from django.db import migrations, models
import django_mysql.models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the ListCharField holding the
    # "mission, vision and values" descriptions (up to 5 entries of 500
    # chars each; max_length 2505 = 5*500 + 4 separators + 1).

    dependencies = [
        ('whoweare', '0003_whowearefields_fourthsectionmissionvisionvaluesdescriptionsinlist'),
    ]

    operations = [
        migrations.AlterField(
            model_name='whowearefields',
            name='fourthsectionmissionvisionvaluesdescriptionsinlist',
            field=django_mysql.models.ListCharField(models.CharField(max_length=500), blank=True, max_length=2505, null=True, size=5, verbose_name='Fourth Section mission vision and values descriptions in list *'),
        ),
    ]
|
13,477 | 2eb0eb97b9ca727e6d002ac2929a0d9771631ff9 | from models import *
from constants import *
from utils import MyDataset
import matplotlib.pyplot as plt
import random
import torch
import itertools
# ---- experiment configuration ----
vae_type = 'conv'  # 'conv' -> UNet_VAE, anything else -> DenseVAE
dataset = 'Total'  # 'Total' pairs MNIST inputs with Fashion-MNIST targets
subset = True      # load pre-saved subset tensors instead of full datasets
model_name = 'Total_VAE__2021-02-04 11:05:04.531770.pt'
model_path = MODELS_ROOT + model_name

# ---- dataset selection ----
if subset:
    if dataset == 'Total':
        # "Total": x = MNIST digits, y = Fashion-MNIST images (paired by index).
        mnist_train_dataset = torch.load(DATA_ROOT + 'subsets/' + 'MNIST' + '/training.pt')
        mnist_test_dataset = torch.load(DATA_ROOT + 'subsets/' + 'MNIST' + '/test.pt')
        fashion_mnist_train_dataset = torch.load(DATA_ROOT + 'subsets/' + 'Fashion_MNIST' + '/training.pt')
        fashion_mnist_test_dataset = torch.load(DATA_ROOT + 'subsets/' + 'Fashion_MNIST' + '/test.pt')
        train_dataset = MyDataset(x=mnist_train_dataset.data, y=fashion_mnist_train_dataset.data)
        test_dataset = MyDataset(x=mnist_test_dataset.data, y=fashion_mnist_test_dataset.data)
    else:
        train_dataset = torch.load(DATA_ROOT + 'subsets/' + dataset + '/training.pt')
        test_dataset = torch.load(DATA_ROOT + 'subsets/' + dataset + '/test.pt')
else:
    if dataset == 'MNIST':
        from mnist_downloader import train_dataset, test_dataset
    elif dataset == 'Fashion_MNIST':
        from fashion_mnist_downloader import train_dataset, test_dataset
    else:
        raise NotImplementedError

batch_size = 1  # one sample at a time: each iteration plots a single image
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# ---- model restore ----
if vae_type == 'conv':
    # hidden_size must match the flattened conv feature map of UNet_VAE.
    hidden_size = 32 * 11 * 11
    vae = UNet_VAE(hidden_size=hidden_size, latent_size=LATENT_SIZE)
else:
    vae = DenseVAE(out_features=100)
vae.load_state_dict(torch.load(model_path))
vae.eval()

# ---- plot 4 random (original, reconstruction) pairs side by side ----
plt.figure(figsize=(15, 5))
for i in range(1, 8, 2):
    rand_sample_idx = random.randint(0, 5000)
    # islice walks the (shuffled) loader up to the random index.
    rand_sample = next(itertools.islice(train_loader, rand_sample_idx, None))
    rand_sample_prime = vae(rand_sample[0])[0]
    plt.subplot(1, 8, i)
    plt.title('Original')
    plt.imshow(rand_sample[0].reshape(28, 28).detach().numpy())
    plt.subplot(1, 8, i + 1)
    plt.title('Reconstruction')
    plt.imshow(rand_sample_prime.reshape(28, 28).detach().numpy())
plt.show()
|
13,478 | a95dc47f786c33ffc3b523d45bd418c9e4656a0a | import math
def calcula_trabalho(F, teta, s):
    """Return the mechanical work W = F * cos(teta) * s.

    Args:
        F: magnitude of the applied force.
        teta: angle between force and displacement, in radians.
        s: displacement magnitude.
    """
    return F * math.cos(teta) * s
13,479 | 5517b041a7ee292d1c00b6c3bd7acf4ad6bf42e0 | # Create your views here.
from django.shortcuts import render, redirect, get_object_or_404
from sculptqr.models import QRCode
from django import forms
from django.http import HttpResponse
from django.conf import settings
# For image upload
import os
import shutil
import Image as PILImage
import ImageFilter
import StringIO
from django.core.files.base import ContentFile
from django.core.files import File
import random
def generated_rnd_base32(length):
    """Return a random base32-style string of *length* characters.

    Alphabet omits the easily confused letters I, L, O and U
    (Crockford-style base32).
    """
    alphabet = '0123456789ABCDEFGHJKMNPQRSTVWXYZ'
    return ''.join(alphabet[random.randrange(0, 32)] for _ in range(length))
def redirect_service(request, code):
    """Resolve a short QR code string to its stored URL and redirect there."""
    qr_code = QRCode.objects.get(code=code)
    return redirect(qr_code.url)
#-----------------------------------
#
# IMAGES
#
#-----------------------------------
class ImageUploadForm(forms.Form):
    """Upload form: the target URL plus the source image for the QR code."""
    url = forms.URLField()
    image = forms.ImageField()
def homepage(request, qr_code_id):
    """Render the upload form; on POST, build a QR code from the upload.

    Pipeline: validate form -> store source image -> 16x16 1-bit thumbnail ->
    external ``encode`` binary renders the QR to /tmp/foo.png -> upscale to
    400x400 and attach to the QRCode row.
    """
    qr_code = None
    if (qr_code_id) : qr_code = QRCode.objects.get(pk=qr_code_id)
    if request.method == 'POST':
        image_form = ImageUploadForm(request.POST,request.FILES)
        if image_form.is_valid():
            file = image_form.cleaned_data['image']
            qr_code = QRCode()
            #image.description = ""
            qr_code.url = image_form.cleaned_data['url']
            qr_code.code = generated_rnd_base32(8)
            qr_code.source_image = file
            (image_path,file_name) = os.path.split(qr_code.source_image.path)
            (file_name_prefix,file_name_ext) = file_name.rsplit('.',1)
            cropped_image_file_name = file_name_prefix + '_cropped.jpg'
            # Downscale to a 16x16 black/white bitmap that seeds the QR art.
            image_data = PILImage.open(qr_code.source_image)
            image_data.thumbnail((16,16),PILImage.ANTIALIAS)
            image_data = image_data.convert('1')
            image_io = StringIO.StringIO()
            image_data.save(image_io,'png')
            image_file = ContentFile(image_io.getvalue())
            qr_code.cropped_image.save(cropped_image_file_name,image_file)
            qr_image_file_name = 'qr_code_'+ str(qr_code.id) +'.png'
            sculptqr_url = 'http://' + request.get_host() + '/c/' + qr_code.code + '/'
            # NOTE(review): shell command assembled by string concatenation
            # from request.get_host() and a stored file name — command
            # injection risk; prefer subprocess with an argument list.
            os.system('/server/sculptqr/project/c/encode ' + sculptqr_url + ' ' + settings.MEDIA_ROOT + qr_code.cropped_image.name)
            shutil.move('/tmp/foo.png','/tmp/' + qr_image_file_name);
            # Upscale with NEAREST so the QR modules stay crisp squares.
            image_data = PILImage.open('/tmp/' + qr_image_file_name,'r')
            image_data = image_data.resize((400,400),PILImage.NEAREST)
            image_io = StringIO.StringIO()
            image_data.save(image_io,'png')
            image_file = ContentFile(image_io.getvalue())
            qr_code.qr_image.save(qr_image_file_name,image_file)
            qr_code.save()
            os.remove('/tmp/' + qr_image_file_name)
            return render(request,'sculptqr/homepage.html',{'form':image_form, 'qr_code':qr_code})
    else:
        image_form = ImageUploadForm()
    return render(request,'sculptqr/homepage.html',{'form':image_form,'qr_code':qr_code})
#def image_crop(request, qr_code_id):
#
# #XXX Need error checking
# crop_rect = eval(request.POST['crop_rect'])
# target_size = eval(request.POST['target_size'])
# qr_code = QRCode.objects.get(pk=qr_code_id)
#
# (image_path,file_name) = os.path.split(qr_code.source_image.path)
# (file_name_prefix,file_name_ext) = file_name.rsplit('.',1)
#
# image_data = PILImage.open(qr_code.source_image)
#
# # check if this is a qurious image as we need to adjust the names
# #if image.role == Image.IMAGE_ROLE_QURIOUS_THUMBNAIL or image.role == Image.IMAGE_ROLE_QURIOUS_GALLERY:
# # is_lo_res = False
# # cropped_image_file_name = file_name_prefix + '_cropped-hd.jpg'
# # crop_and_save(image, image_data, cropped_image_file_name, crop_rect, target_size, is_lo_res)
# # # if this is a qurious image, also crop at half res
# # is_lo_res = True
# # target_size = (target_size[0]/2, target_size[1]/2)
# # cropped_image_file_name = file_name_prefix + '_cropped.jpg' # lo-res images just omit -hd suffix
# # crop_and_save(image, image_data, cropped_image_file_name, crop_rect, target_size, is_lo_res)
# #else:
# cropped_image_file_name = file_name_prefix + '_cropped.jpg'
# is_lo_res = False
# crop_and_save(qr_code, image_data, cropped_image_file_name, crop_rect, target_size, is_lo_res)
#
# return redirect('sculptqr.views.stage2',qr_code_id=qr_code_id)
# #return HttpResponse('{"cropped_image":{"url":"%s","id":%d}}' % (qr_code.cropped_image.url, qr_code.id))
#
#
#def crop_and_save(qr_code, image_data, cropped_image_file_name, crop_rect, target_size, is_lo_res):
# cropped_image_data = image_data.convert('L')
# cropped_image_data = cropped_image_data.filter(ImageFilter.FIND_EDGES)
# cropped_image_data = cropped_image_data.crop(crop_rect)
# cropped_image_data.thumbnail(target_size,PILImage.ANTIALIAS)
#
# cropped_image_io = StringIO.StringIO()
# # Check if the image has an alpha layer and if so make it white.
# if cropped_image_data.mode in ["RGBA","LA"] :
# if cropped_image_data.mode == "RGBA" : index = 3
# else : index = 1
# cropped_image_data.load()
# background = PILImage.new("RGB", cropped_image_data.size, (255, 255, 255))
# background.paste(cropped_image_data, mask=cropped_image_data.split()[index]) # 3 is the alpha channel
# background = background.convert('1')
# background.save(cropped_image_io,'png')
# else:
# if cropped_image_data.mode != "RGB": cropped_image_data = cropped_image_data.convert("RGB") # Convert GIF/PNG w/pallette to JPEG friendly RGB
# #cropped_image_data = cropped_image_data.convert('1')
# #cropped_image_data = cropped_image_data.filter(ImageFilter.FIND_EDGES)
# cropped_image_data = cropped_image_data.point(lambda i: ((i > 10) and 1) or 255)
# cropped_image_data = cropped_image_data.convert('1')
# cropped_image_data.save(cropped_image_io,'png')
# cropped_image_file = ContentFile(cropped_image_io.getvalue())
#
# qr_code.cropped_image.save(cropped_image_file_name,cropped_image_file)
# qr_code.save()
|
13,480 | 2347d1d881781ddbf231e164a511edef0c66ef65 | from urllib.request import urlretrieve
import os
from os.path import exists, join
import tarfile
# Download and unpack the NCAR storm-track sample archives into ./data.
if not exists("data"):
    os.mkdir("data")

csv_tar_file = "https://storage.googleapis.com/track_data_ncar_ams_3km_csv_small/track_data_ncar_ams_3km_csv_small.tar.gz"
nc_tar_file = "https://storage.googleapis.com/track_data_ncar_ams_3km_nc_small/track_data_ncar_ams_3km_nc_small.tar.gz"

# Local paths mirror the remote file names.
csv_local = join("data", csv_tar_file.split("/")[-1])
nc_local = join("data", nc_tar_file.split("/")[-1])

print("Get csv files")
urlretrieve(csv_tar_file, csv_local)
print("Get nc files")
urlretrieve(nc_tar_file, nc_local)

print("Extract csv tar file")
with tarfile.open(csv_local) as csv_tar:
    csv_tar.extractall("data/")
print("Extract nc tar file")
with tarfile.open(nc_local) as nc_tar:
    nc_tar.extractall("data/")
|
13,481 | f8124ff68bbed7e3633b3f9473bf0eaa9816c03b | '''
240. Write a program to Read a Text File and Print all the Numbers Present in the Text File
'''
|
13,482 | c89081083dfcb1eb21fc5252abe25fb922209ef4 | from django.apps import AppConfig
class BizzConfig(AppConfig):
    """Django AppConfig for the ``bizz`` application."""
    name = 'bizz'
|
13,483 | 4223de2c4bc64fb1b06900f7b98f112c136b2686 | from django.contrib import admin
from .models import Album, Song
#username : admin
#password : admin1234
# Expose the Album and Song models in the Django admin site.
admin.site.register(Album)
admin.site.register(Song)
13,484 | 9b2e32e481f57e7e901bd7881fba4e9c170048de | #!/usr/bin/env python
prime=[2,3,5,7]
n=10000
for i in xrange(11,n):
l=len(prime)
flag=0
for j in xrange(l):
if i % prime[j] == 0:
flag=1
if flag == 0:
prime+=[i]
print len(prime)
print max(prime)
|
13,485 | 68e227171d80be555b737f95fc15ee21367ed784 | from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import relationship
from wewallet.application.models import Model
class Billing(Model):
    """A billing account that owns a collection of bills (one-to-many)."""
    __tablename__ = 'billings'

    id = Column(Integer, primary_key=True)
    # backref gives each Bill a ``.billing`` attribute back to its parent.
    bills = relationship("Bill", backref="billing")
class Bill(Model):
    """A single bill belonging to a Billing account."""
    __tablename__ = 'bills'

    id = Column(Integer, primary_key=True)
    billing_id = Column(Integer, ForeignKey('billings.id'))  # parent account
    date = Column(Date(), nullable=False)  # date the bill was incurred
    place = Column(String)  # free-form merchant / location name
|
13,486 | 05084057b80c237ef12f749eb4bd25e2264fd6bb | # ๅๅฐ็นๅพ https://zh.wikipedia.org/wiki/%E5%93%88%E5%B0%94%E7%89%B9%E5%BE%81
import numpy as np
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature
# Build the single 'type-4' Haar feature on a 2x2 grid and rasterise it.
feature_coord, _ = haar_like_feature_coord(2, 2, 'type-4')
image = draw_haar_like_feature(np.zeros((2, 2)),
                               0, 0, 2, 2,
                               feature_coord,
                               max_n_features=1)
print(image)
|
13,487 | c9ec8624aa734f68254c1ca27da802247557698e | import time, sys, os
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from wifi24 import Wifi24
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "{}/{}".format(os.path.pardir,os.path.pardir))))
from assertion import Assert
from network import NetworkOps as conn
from PageObjects.security import SecurityPage
from PageObjects.login import LoginPage
from PageObjects.radio import RadioPage
from PageObjects.network import NetworkPage
from PageObjects.software import SoftwarePage
from exceptions import (NetworkError, WifiConnError, WebElementError,SeleniumServerError, ElementMatchError)
class TestWPA2toWPA(Wifi24):
    """Switch the DUT's 2.4GHz security from WPA2-only to mixed WPA/WPA2
    and verify the workstation keeps wireless connectivity throughout."""

    def test_wpa2_to_wpa(self, setUp):
        """Connect WS to DUT then change sec wpa2 to wpa/wpa2
        """
        network = conn()
        assertion = Assert()
        # select wireless interface and enable wireless
        radio_page = RadioPage(self.firefox)
        radio_page.select_wifi_interface(iface="2.4GHZ")
        radio_page.enable(radio_page.get_wireless())
        radio_page.apply_changes()
        # assert wireless is enabled and wifi interface is 2.4Ghz
        assertion.is_equal(radio_page.get_wifi_interface(), "2.4 Ghz")
        wireless = radio_page.get_wireless()
        assertion.is_true(radio_page.is_enabled(wireless), "Wireless")
        # enable primary network and wpa2 and disable wpa
        network_page = NetworkPage(self.firefox)
        network_page.enable(network_page.get_primary_network())
        network_page.enable(network_page.get_wpa2())
        network_page.disable(network_page.get_wpa())
        network_page.apply_changes()
        # check primary network and wpa2 are enabled and encryption is AES
        netwrk = network_page.get_primary_network()
        assertion.is_true(network_page.is_enabled(netwrk), 'Primary Network')
        wpa2 = network_page.get_wpa2()
        assertion.is_true(network_page.is_enabled(wpa2), 'WPA2 enabled')
        wpa = network_page.get_wpa()
        assertion.is_false(network_page.is_enabled(wpa), 'WPA disabled')
        # Wifi connection attempt
        network.reset_network_mngr()
        wifi_connection = network.wifi_connection(
            ssid=self.SSID, pswd=self.SSID_PASS, timeout=20)
        try:
            assertion.is_wificonnected(wifi_connection)
        except WifiConnError:
            # Leave the DUT/WS in a known state before failing the test.
            self.reset_wifisession(self.firefox, self.SSID)
            raise
        # enable wpa
        # NOTE(review): fixed 30s wait — presumably lets the DUT settle after
        # the reconfiguration; confirm whether polling would be safer.
        time.sleep(30)
        network_page.enable(network_page.get_wpa())
        # Set encryption to TKIP
        network_page.set_encryption("TKIP")
        network_page.apply_changes()
        # check wpa-psk is enabled and encryption is TKIP
        print(network_page.is_enabled(
            network_page.get_wpa()))
        assertion.is_true(network_page.is_enabled(
            network_page.get_wpa()), 'WPA is enabled')
        assertion.is_equal(network_page.get_encryption(), 'TKIP+AES')
        # Disconnect wired interface
        eth_iface = network.eth_iface_name()  # get name of wired iface
        eth_disc_attempt = network.disconnect_iface(eth_iface)
        try:
            assertion.is_sucessful(eth_disc_attempt, "ethernet disconnect")
        except NetworkError:
            self.reset_wifisession(self.firefox, self.SSID)
            raise
        # ping attempt
        ip = 'www.google.com'
        wifi_iface = network.wifi_iface_name()  # get name of wifi iface
        ping_attempt = network.ping_attempt(wifi_iface, ip)
        try:
            assertion.is_sucessful(ping_attempt, "ping attempt")
        finally:
            # Always restore the wired link and the wifi session.
            network.connect_iface(eth_iface)
            self.reset_wifisession(self.firefox, self.SSID)
|
13,488 | ac76d43729ff54487238008aaed7ff2f9a01c7de | import notes_pakets.corp_notes as modelo
class Action:
def new_note(self, usuario):
print(f"Hola {usuario[1]}\nIniciamos")
title = input("Introduce el titulo de nota: ")
description = input("Ingrese la nota a guardar: ")
nota = modelo.Note(usuario[0], title, description)
guardar = nota.guardar()
if guardar[0] >= 1:
print(f"Se a creado la nota {nota.titulo}")
else:
print(f"La nota no se guardo{usuario[1]}")
def mostrar(self, usuario):
print(f"{usuario[1]} estas son tus notas")
nota = modelo.Note(usuario[0])
notas = nota.listar()
for nota in notas:
print(f"""
###########################
{nota[2]}
{nota[3]}
""")
def borrar(self, usuario):
print(f"\n Bien {usuario[1]}, selecciona la nota a borrar")
nota = modelo.Note(usuario[0], titulo)
eliminar = nota.eliminar()
if eliminar[0] >= 1:
print(f"Se elimino la nota {nota.titulo}")
|
13,489 | b5644dc2ff6701ea7774832d523db37e91c8efb0 | import json
from pathlib import Path
import cv2
import pandas as pd
from tqdm import tqdm
def load_web_icon_dataset(dataset_path):
    """Load (image, alt-text) pairs from a directory of icon metadata.

    Each ``*.json`` file holds an ``attributes`` dict; when it carries a
    non-empty ``alt`` text and the sibling ``<stem>.jpg`` loads via OpenCV,
    the pair is collected. Files that fail to parse are skipped silently
    (best-effort scraping data).

    Returns:
        (images, alt_texts): parallel lists of cv2 images and strings.
    """
    root = Path(dataset_path)
    images, alt_texts = [], []
    for json_path in tqdm(list(root.glob('*.json'))):
        try:
            with open(str(json_path), 'r') as handle:
                attributes = json.load(handle)['attributes']
            if not attributes.get('alt'):
                continue
            image_path = str(json_path.parent) + '/' + '{}.jpg'.format(json_path.name.split(".")[0])
            image = cv2.imread(str(image_path))
            if image is not None:
                images.append(image)
                alt_texts.append(attributes['alt'])
        except Exception:
            pass
    return images, alt_texts
|
13,490 | d5cdb46b01411bd58d69f1cb1437ecdf730a21ed | # coding: utf-8
import sys
import web
import url
# Make the controller modules importable before the URL map is used.
sys.path.append('./controllers')
urls = url.urls  # URL-pattern tuple defined in url.py

if __name__ == "__main__":
    # Start the web.py development server, dispatching via ``urls``.
    app = web.application(urls, globals())
    app.run()
|
13,491 | 84093f2c0f5bed38cba70de6282953a3764b2a3c | # -*- coding: utf-8 -*-
from setuptools import setup
# Use the README as the long description so PyPI renders it on the project page.
with open('README.rst') as f:
    long_description = f.read()

# Packaging metadata for the single-module dj-email-url distribution.
setup(
    name='dj-email-url',
    version='0.1.0',
    url='https://github.com/migonzalvar/dj-email-url',
    license='BSD',
    author='Miguel Gonzalez',
    author_email='migonzalvar@gmail.com',
    description='Use an URL to configure email backend settings in your '
                'Django Application.',
    long_description=long_description,
    py_modules=['dj_email_url'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
13,492 | 5cc2c95d912ba6692a2f9827f192a05b2e3c6b82 | from datetime import datetime
from flask import jsonify, make_response, request, render_template
from flask_httpauth import HTTPTokenAuth
from flask_login import login_required
import json
from app.mod_user.models import AuthorizationError, User, UserEntry
from . import api_module as mod_api
from . import controllers as controller
from . import error_handler
from .models import *
from app import CONFIG
# Token-based HTTP authentication for the API endpoints.
auth = HTTPTokenAuth(scheme='Token')

# Canned response status / message strings shared by the handlers below.
success_text = "Success"
error_text = "Error"
failure_text = "Error"
banned_text = "You're banned from lampPost. Please contact a developer."
internal_failure_message = "Something went wrong. Please contact a developer."
event_dne_text = "No event with that id exists."
def gen_response(status):
    """Return the base response payload carrying only a status string."""
    return {"status": status}
def gen_data_response(data):
    """JSON success envelope: {"status": "Success", "data": data}."""
    return jsonify(dict(gen_response(success_text), data=data))
def gen_error_response(error_msg):
    """JSON error envelope: {"status": "Error", "error_msg": error_msg}."""
    return jsonify(dict(gen_response(error_text), error_msg=error_msg))
def gen_failure_response(failure_msg):
    """Internal-failure response.

    Expose the real failure message only in DEBUG mode; otherwise return a
    canned message so internals do not leak to clients.
    """
    exposed = failure_msg if CONFIG["DEBUG"] else internal_failure_message
    return gen_error_response(exposed)
@auth.verify_token
def verify_token(token):
    """Accept a request iff its token maps to a known user.

    In DEBUG mode with BYPASS_API_AUTH set, authentication is skipped.
    """
    # TODO: Make this less scary.
    if CONFIG["DEBUG"] and CONFIG["BYPASS_API_AUTH"]:
        return True
    return User.verify_auth_token(token) is not None
# TODO: Show some sort of error message in browser if a search fails.
# More of a job for frontend, but I didn't want it to get lost in the HTML.
@auth.error_handler
def unauthorized():
    """Return a 403 JSON error when token verification fails."""
    return make_response(jsonify({'error': 'Unauthorized access'}), 403)
def get_user_in_token(request):
    """Return the User authorized by *request*'s token, or None on failure."""
    try:
        return User.get_user_in_token(request)
    except AuthorizationError:
        return None
@mod_api.route("/event/add", methods=["PUT"])
@auth.login_required
def add_event():
    """PUT /event/add — create an event from a JSON body.

    The authenticated user must match the body's ``creator`` field and must
    not be banned. Returns ``{"id": ...}`` of the new event on success.
    """
    if not request.is_json:
        return gen_error_response("Request was not JSON.")
    try:
        data = request.get_json()
        if isinstance(data, str):
            # TODO: better error message
            return gen_failure_response("Request must be JSON, not string.")
    except:  # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt
        return gen_error_response("Request was malformatted.")
    try:
        # Check that the correct parameters have been given.
        missing_fields = get_missing_fields(data)
        if len(missing_fields) > 0:
            return gen_error_response("Request was missing %s parameter(s)." % ",".join(missing_fields))
        # Make sure creator matches authorized user.
        user = User.get_user_in_token(request)
        if user is None:
            return gen_error_response("Invalid authorization.")
        if user.netid != data["creator"]:
            return gen_error_response("Attempted to create event for different user.")
        if controller.is_banned(user):
            return gen_error_response(banned_text)
        # Try to add new event.
        new_event = controller.add_event(data)
        # Return id of newly added event.
        return gen_data_response({"id": str(new_event.id)})
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/event/get/<id>", methods=["GET"])
def get_event(id):
    """GET /event/get/<id> — fetch one event, honoring visibility rules.

    Invisible events are reported identically to missing ones so existence
    is not leaked.
    """
    try:
        user = get_user_in_token(request)
        event = controller.get_event(id)
        # Make sure event is visible.
        if event is not None and not controller.is_visible(event, user):
            event = None
        if event is None:
            return gen_error_response(event_dne_text)
        return gen_data_response(get_raw_event(event));
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/event/edit/<id>", methods=["POST"])
@auth.login_required
def edit_event(id):
    """POST /event/edit/<id> — update an event; only its creator may edit.

    Returns the updated event's raw representation on success.
    """
    if not request.is_json:
        return gen_error_response("Request was not JSON.")
    try:
        data = request.get_json()
    except Exception as e:
        return gen_failure_response("Request was malformatted.")
    # Make sure creator matches authorized user.
    try:
        event = controller.get_event(id)
        if event is None:
            return gen_error_response(event_dne_text)
        user = User.get_user_in_token(request)
        if user is None:
            return gen_error_response("Invalid authorization.")
        if user.netid != event.creator:
            return gen_error_response("Attempted to edit event for different user.")
        if controller.is_banned(user):
            return gen_error_response(banned_text)
        updated_event = controller.edit_event(id, data)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
    if updated_event is None:
        return gen_error_response(event_dne_text)
    return gen_data_response(get_raw_event(updated_event))
@mod_api.route("/event/delete/<id>", methods=["DELETE"])
@auth.login_required
def delete_event(id):
    """DELETE /event/delete/<id> — remove an event; only its creator may.

    Returns the deleted event's raw representation on success.
    """
    try:
        event = controller.get_event(id)
        if event is None:
            return gen_error_response("No event with that id exists.")
        # Make sure it is the creator that is deleting the event.
        event_creator_netid = controller.get_event_creator(id)
        try:
            user = User.get_user_in_token(request)
            if user is None:
                return gen_error_response("Invalid authorization.")
            if user.netid != event_creator_netid:
                return gen_error_response("Attempted to delete event for different user.")
        except AuthorizationError:
            return gen_error_response("Invalid authorization.")
        event = controller.delete_event(id)
        if event is None:
            return gen_error_response(event_dne_text)
        return gen_data_response(get_raw_event(event))
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
# BUG FIX: the route defaults previously called datetime.now() at import time,
# which froze the default search window at whatever time the server started.
# Use None as a sentinel and resolve "now" per request instead.
@mod_api.route("/event/search/", defaults={"query": "", "start_datetime": None}, methods=["GET", "POST"])
@mod_api.route("/event/search/<query>", defaults={"start_datetime": None}, methods=["GET", "POST"])
@mod_api.route("/event/search/<query>/<start_datetime>", methods=["GET", "POST"])
def event_search(query, start_datetime):
    """Search events by text query and start time.

    A JSON body may override ``query``, ``start_datetime`` and ``tags``.
    Visibility is filtered for the (optional) authenticated user.
    """
    if start_datetime is None:
        start_datetime = datetime.now()
    tags = None
    # Alternatively, allow user to send json parameters.
    if request.is_json:
        try:
            data = request.get_json()
            # TODO: Uncomment this stuff when we have test coverage.
            if "query" in data:
                query = data["query"]
            if "start_datetime" in data:
                start_datetime = data["start_datetime"]
            if "tags" in data:
                tags = data["tags"]
        except Exception as e:
            print(type(e))
            return gen_failure_response("Request was malformatted.")
    try:
        user = get_user_in_token(request)
        events = controller.search_events(query, start_datetime, user=user, tags=tags)
        events = [get_raw_event(event) for event in events]
        return gen_data_response(events)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/user/get_events/<userid>", defaults={"include_past":True}, methods=["GET"])
@mod_api.route("/user/get_events/<userid>/<include_past>", methods=["GET"])
@auth.login_required
def get_created_events(userid, include_past):
    """GET /user/get_events/<userid>[/<include_past>] — a user's own events.

    Only the authenticated user may list their events. ``include_past``
    arrives as a string from the URL and is coerced below.
    """
    try:
        user = controller.get_user_by_uid(userid)
        if user is None:
            return gen_error_response("No user with that id exists.")
        # Make sure creator matches authorized user.
        try:
            token_user = User.get_user_in_token(request)
            if token_user is None or token_user.netid != user.netid:
                return gen_error_response("Attempted to get created events for different user.")
        except AuthorizationError:
            return gen_error_response("Invalid authorization.")
        if isinstance(include_past, str):
            # include_past defaults to True when an invalid value is passed.
            include_past = include_past != "False"
        events = controller.get_events_by_creator(str(user.netid), include_past)
        events = [get_raw_event(event) for event in events]
        return gen_data_response(events)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/user/fav/add/<userid>/<eventid>")
@auth.login_required
def add_event_fav(userid, eventid):
    """Add `eventid` to the favorites of `userid` (must be the caller)."""
    try:
        target_event = controller.get_event(eventid)
        target_user = controller.get_user_by_uid(userid)
        if target_event is None:
            return gen_error_response("No event with that id exists.")
        if target_user is None:
            return gen_error_response("No user with that id exists.")
        # Make sure favoriter matches authorized user.
        try:
            caller = User.get_user_in_token(request)
        except AuthorizationError:
            return gen_error_response("Invalid authorization.")
        if caller is None or caller.netid != target_user.netid:
            return gen_error_response("Attempted to add a favorite for different user.")
        # Re-favoriting an already-favorited event is a silent no-op.
        if eventid not in target_user.favorites:
            controller.add_user_favorite(target_user, eventid)
        return gen_data_response(target_event.favorites) # need to return something or views gets angry
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/user/fav/remove/<userid>/<eventid>")
@auth.login_required
def remove_event_fav(userid, eventid):
    """Remove `eventid` from the favorites of `userid` (must be the caller)."""
    try:
        target_event = controller.get_event(eventid)
        target_user = controller.get_user_by_uid(userid)
        if target_event is None:
            return gen_error_response("No event with that id exists.")
        if target_user is None:
            return gen_error_response("No user with that id exists.")
        # Make sure favoriter matches authorized user.
        try:
            caller = User.get_user_in_token(request)
        except AuthorizationError:
            return gen_error_response("Invalid authorization.")
        if caller is None or caller.netid != target_user.netid:
            return gen_error_response("Attempted to remove a favorite for different user.")
        # Unlike add, removing a non-favorite is an explicit error.
        if eventid not in target_user.favorites:
            return gen_error_response("You can't un-favorite an event that isn't in your favorites!")
        controller.remove_user_favorite(target_user, eventid)
        return gen_data_response(target_event.favorites)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
@mod_api.route("/user/fav/get/<userid>")
@auth.login_required
def get_favorites(userid):
    """Return the full event objects for the given user's favorites.

    Only the authenticated user may read their own favorites list.
    """
    try:
        user = controller.get_user_by_uid(userid)
        if user is None:
            return gen_error_response("No user with that id exists.")
        # Make sure caller matches authorized user.
        try:
            token_user = User.get_user_in_token(request)
            if token_user is None or token_user.netid != user.netid:
                return gen_error_response("Attempted to get a different user's favorites.")
        except AuthorizationError:
            return gen_error_response("Invalid authorization.")
        # FIX: a second, byte-identical `try/except Exception` used to wrap
        # the lines below; it duplicated the outer handler and has been
        # folded into it — behavior is unchanged.
        events = controller.get_favorite_events(user.favorites)
        events = [get_raw_event(event) for event in events]
        return gen_data_response(events)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
# Allow a user to report an event.
@mod_api.route("/event/report/<eventid>", methods=["PUT"])
@auth.login_required
def report_event(eventid):
    """File a report against an event on behalf of the authenticated user.

    Expects a JSON body with a "reason" field. RateError and
    ValidationError messages are surfaced to the client verbatim.
    """
    try:
        if not request.is_json:
            return gen_error_response("Request was not JSON.")
        try:
            data = request.get_json()
        except Exception as e:
            return gen_error_response("JSON was malformatted.")
        if "reason" not in data:
            return gen_error_response("Request was missing field 'reason'.")
        try:
            user = User.get_user_in_token(request)
            report = controller.add_report(user, data["reason"], eventid)
        except RateError as e:
            # Presumably raised when the caller reports too often — confirm
            # against controller.add_report.
            return gen_error_response(str(e))
        return gen_data_response(report)
    except ValidationError as e:
        # Must stay before the generic Exception handler so validation
        # messages reach the client as-is.
        return gen_error_response(str(e))
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
# Get trending events.
@mod_api.route("/event/trending", methods=["GET"])
def trending_events():
    """Return the current trending events for the calling user (if any)."""
    try:
        caller = get_user_in_token(request)
        raw_events = [
            get_raw_event(event)
            for event in controller.get_trending_events(caller)
        ]
        return gen_data_response(raw_events)
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
# Send in feedback.
@mod_api.route("/feedback/", methods=["PUT"])
@mod_api.route("/feedback", methods=["PUT"])
def send_feedback():
    """Store user feedback supplied as a JSON request body."""
    try:
        if not request.is_json:
            return gen_error_response("Request was not JSON.")
        try:
            payload = request.get_json()
        except Exception:
            return gen_error_response("JSON was malformatted.")
        return gen_data_response(controller.add_feedback(payload))
    except Exception as e:
        return gen_error_response(error_handler.main_handler(e))
|
13,493 | 9561c85e08a529b565b5e6da9e5d22e79b3c42b4 | import pandas as pd
import numpy as np
import logging
import os
import tarfile
from tempfile import TemporaryFile
from kgx.utils import make_path
from .transformer import Transformer
from typing import Dict, List, Optional
# Separator used when (de)serializing list-valued columns to flat text.
LIST_DELIMITER = '|'

# Target Python type for known columns; any column not listed here is
# coerced to str by build_kwargs / build_export_row.
_column_types = {
    'publications' : list,
    'qualifiers' : list,
    'category' : list,
    'synonym' : list,
    'provided_by' : list,
    'same_as' : list,
    'negated' : bool,
}
class PandasTransformer(Transformer):
    """
    Implements Transformation from a Pandas DataFrame to a NetworkX graph
    """

    # NOTE: "extention" is a typo for "extension"; the name is kept for
    # backward compatibility. Maps file extension -> column delimiter.
    _extention_types = {
        'csv' : ',',
        'tsv' : '\t',
        'txt' : '|'
    }

    def parse(self, filename: str, input_format='csv', **kwargs):
        """
        Parse a CSV/TSV
        May be either a node file or an edge file
        """
        if 'delimiter' not in kwargs:
            kwargs['delimiter'] = self._extention_types[input_format]
        if filename.endswith('.tar'):
            # A .tar archive must contain only 'nodes.csv' and/or 'edges.csv'.
            with tarfile.open(filename) as tar:
                for member in tar.getmembers():
                    f = tar.extractfile(member)
                    df = pd.read_csv(f, comment='#', **kwargs)  # type: pd.DataFrame
                    if member.name == 'nodes.csv':
                        self.load_nodes(df)
                    elif member.name == 'edges.csv':
                        self.load_edges(df)
                    else:
                        raise Exception('Tar file contains unrecognized member {}'.format(member.name))
        else:
            df = pd.read_csv(filename, comment='#', **kwargs)  # type: pd.DataFrame
            self.load(df)

    def load(self, df: pd.DataFrame):
        """Load `df` as edges if it has a 'subject' column, else as nodes."""
        if 'subject' in df:
            self.load_edges(df)
        else:
            self.load_nodes(df)

    def build_kwargs(self, data: dict) -> dict:
        """
        Coerce a raw row dict into typed attributes per `_column_types`:
        pipe-delimited strings become lists, 'negated' becomes bool, other
        known columns become str. NaN values are dropped.
        """
        data = {k : v for k, v in data.items() if v is not np.nan}
        for key, value in data.items():
            if key in _column_types:
                if _column_types[key] == list:
                    if isinstance(value, (list, set, tuple)):
                        data[key] = list(value)
                    elif isinstance(value, str):
                        data[key] = value.split(LIST_DELIMITER)
                    else:
                        data[key] = [str(value)]
                elif _column_types[key] == bool:
                    try:
                        # NOTE(review): bool("False") is True — string-typed
                        # booleans are not parsed here; confirm inputs carry
                        # real booleans before relying on this column.
                        data[key] = bool(value)
                    except Exception:  # was a bare except; keep best-effort default
                        data[key] = False
                else:
                    data[key] = str(value)
        return data

    def load_nodes(self, df: pd.DataFrame):
        """Add every row of `df` to the graph as a node."""
        # BUG FIX: orient must be 'records'; the abbreviated 'record' was
        # removed from pandas and raises on current versions.
        for obj in df.to_dict('records'):
            self.load_node(obj)

    def load_node(self, obj: Dict):
        """Add a single node (dict with an 'id' key) to the graph."""
        kwargs = self.build_kwargs(obj.copy())
        n = kwargs['id']
        self.graph.add_node(n, **kwargs)

    def load_edges(self, df: pd.DataFrame):
        """Add every row of `df` to the graph as an edge."""
        for obj in df.to_dict('records'):  # BUG FIX: 'records', not 'record'
            self.load_edge(obj)

    def load_edge(self, obj: Dict):
        """Add a single edge (dict with 'subject'/'object' keys) to the graph."""
        kwargs = self.build_kwargs(obj.copy())
        s = kwargs['subject']
        o = kwargs['object']
        self.graph.add_edge(s, o, **kwargs)

    def build_export_row(self, data: dict) -> dict:
        """
        Casts all values to primitive types like str or bool according to the
        specified type in `_column_types`. Lists become pipe delimited strings.
        """
        data = {k : v for k, v in data.items() if v is not np.nan}
        for key, value in data.items():
            if key in _column_types:
                if _column_types[key] == list:
                    if isinstance(value, (list, set, tuple)):
                        data[key] = LIST_DELIMITER.join(value)
                    else:
                        data[key] = str(value)
                elif _column_types[key] == bool:
                    try:
                        data[key] = bool(value)
                    except Exception:  # was a bare except; keep best-effort default
                        data[key] = False
                else:
                    data[key] = str(value)
        return data

    def export_nodes(self, encode_header_types=False) -> pd.DataFrame:
        """Export the graph's nodes as a DataFrame (one row per node)."""
        rows = []
        for n, data in self.graph.nodes(data=True):
            row = self.build_export_row(data.copy())
            row['id'] = n
            rows.append(row)
        df = pd.DataFrame.from_dict(rows)
        return df

    def export_edges(self, encode_header_types=False) -> pd.DataFrame:
        """Export the graph's edges as a DataFrame, core columns first."""
        rows = []
        for s, o, data in self.graph.edges(data=True):
            row = self.build_export_row(data.copy())
            row['subject'] = s
            row['object'] = o
            rows.append(row)
        df = pd.DataFrame.from_dict(rows)
        cols = df.columns.tolist()
        cols = self.order_cols(cols)
        df = df[cols]
        return df

    def order_cols(self, cols: List[str]):
        """Move the well-known columns to the front, preserving the rest.

        Note: mutates `cols` in place (matching the original behavior).
        """
        ORDER = ['id', 'subject', 'predicate', 'object', 'relation']
        ordered = []
        for c in ORDER:
            if c in cols:
                ordered.append(c)
                cols.remove(c)
        return ordered + cols

    def save(self, filename: str, extention='csv', zipmode='w', **kwargs):
        """
        Write two CSV/TSV files representing the node set and edge set of a
        graph, and zip them in a .tar file.
        """
        if extention not in self._extention_types:
            raise Exception('Unsupported extention: ' + extention)
        if not filename.endswith('.tar'):
            filename += '.tar'
        delimiter = self._extention_types[extention]
        nodes_content = self.export_nodes().to_csv(sep=delimiter, index=False)
        edges_content = self.export_edges().to_csv(sep=delimiter, index=False)
        nodes_file_name = 'nodes.' + extention
        edges_file_name = 'edges.' + extention

        def add_to_tar(tar, filename, filecontent):
            # Stage the content in a temp file so tarfile can stream it in.
            content = filecontent.encode()
            with TemporaryFile() as tmp:
                tmp.write(content)
                tmp.seek(0)
                info = tarfile.TarInfo(name=filename)
                info.size = len(content)
                tar.addfile(tarinfo=info, fileobj=tmp)

        make_path(filename)
        with tarfile.open(name=filename, mode=zipmode) as tar:
            add_to_tar(tar, nodes_file_name, nodes_content)
            add_to_tar(tar, edges_file_name, edges_content)
        return filename

    def save_csv(self, filename: str, type='n', **args):
        """
        Write a CSV/TSV
        May be either a node file or an edge file
        """
        # `type` shadows the builtin but is kept for API compatibility:
        # 'n' exports nodes, anything else exports edges.
        if type == 'n':
            df = self.export_nodes()
        else:
            df = self.export_edges()
        df.to_csv(filename, index=False)
|
13,494 | f92603209d8d298a8858bbd6cba8d72ac58253b9 | import random
# Monte-Carlo estimate of the probability that, on a given day, the first
# `max_unanswered_calls` calls all go unanswered (draw > p) and the very
# next call is answered (draw <= p).
p = 0.5
max_unanswered_calls = 4
max_nr_days = 100
successes = 0
for _ in range(max_nr_days):
    # FIX: the original hardcoded four draws and never used the
    # `max_unanswered_calls` constant it defined; drive the loop from it.
    unanswered = 0
    while unanswered < max_unanswered_calls and random.random() > p:
        unanswered += 1
    if unanswered == max_unanswered_calls and random.random() <= p:
        successes += 1
print(successes / max_nr_days)
|
13,495 | 2641713d05c390402c4ef075bc672335462e63ef | '''
1. ๋ฌธ์ ๋ถ์
- ํ ์ธ๋ฐ์ ๊ธ์ก์ด ์ผ๋ง์ธ๊ฐ?
- ํ ์ธ๋ ๊ธ์ก์ด ์ผ๋ง์ธ๊ฐ?
2. ํ ์ธ๋ฐ์ ๊ธ์ก : ๊น์์ค ๊ธ์ก์ ์๋ฏธ
3. ํ ์ธ๋ ๊ธ์ก : ํ ์ธ๋ฐ์ ๊ธ์ก์ ๋นผ๊ณ ์ค์ ์ง๋ถํ ๊ธ์ก
4. ํ ์ธ์จ = (ํ ์ธ์ก / ์ํ์ก) * 100 = (2,000 / 10,000) * 100 = 0.2 * 100 = 20%
5. ํ ์ธ ์ ์ฉ๋ ๊ฐ๊ฒฉ = ์ํ์ก * (100% - ํ ์ธ์จ) = 10,000 * (1.00 - 0.1) = 9,000์
1) ๋งค๊ฐ๋ณ์์ ์ดํด
2) ๋ฐํ์ ์ดํด
'''
#๋ค์๊ณผ ๊ฐ์ด import๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค.
#import math
# def solution(price, grade):
# # ์ฌ๊ธฐ์ ์ฝ๋๋ฅผ ์์ฑํด์ฃผ์ธ์.
# answer = 0
# if grade == 'V':
# sale = price * 0.15
# answer += (price - int(sale))
# elif grade == 'G':
# sale = price * 0.1
# answer += (price - int(sale))
# elif grade == 'S':
# sale = price * 0.05
# answer += (price - int(sale))
# return answer
def solution(price, grade):
    """Return the price after the membership discount, truncated to int.

    Discount rates: 'S' -> 5%, 'G' -> 10%, 'V' -> 15%.
    Any other grade yields 0 (same as the original if/elif chain).
    """
    # Fraction of the price each grade actually pays.
    pay_rate = {'S': 0.95, 'G': 0.9, 'V': 0.85}
    if grade in pay_rate:
        return int(price * pay_rate[grade])
    return 0
#์๋๋ ํ
์คํธ์ผ์ด์ค ์ถ๋ ฅ์ ํด๋ณด๊ธฐ ์ํ ์ฝ๋์
๋๋ค.
# Test case 1: a "V" (VIP) grade purchase.
price1 = 2500
grade1 = "V"
ret1 = solution(price1, grade1)
print(ret1)
# Press the [Run] button to see the output value.
# print("solution 함수의 반환 값은", ret1, "입니다.")
# Test case 2: an "S" (Silver) grade purchase.
price2 = 96900
grade2 = "S"
ret2 = solution(price2, grade2)
print(ret2)
# Press the [Run] button to see the output value.
# print("solution 함수의 반환 값은", ret2, "입니다.")
13,496 | 804b6acbf9057afd432c20bb08e02cfbe0390509 | '''
6. ZigZag Conversion
The string "PAYPALISHIRING" is written in a zigzag pattern
on a given number of rows like this: (you may want to display
this pattern in a fixed font for better legibility)
P A H N
A P L S I I G
Y I R
P I N
A L S I G
Y A H R
P I
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion
given a number of rows:
string convert(string text, int nRows);
convert("PAYPALISHIRING", 3) should return "PAHNAPLSIIGYIR".
'''
class Solution(object):
    def convert(self, s, numRows):
        """Read `s` written in a zigzag over `numRows` rows, row by row.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        # One row (or fewer chars than rows) means the output equals the input.
        if s == "" or len(s) <= numRows or numRows == 1:
            return s
        # FIX: removed the leftover debug print that fired on every character.
        rows = [[] for _ in range(numRows)]
        row = 0
        step = 1
        for ch in s:
            rows[row].append(ch)
            # Bounce the row index between the top and bottom rows.
            if row == 0:
                step = 1
            elif row == numRows - 1:
                step = -1
            row += step
        return "".join(map("".join, rows))
if __name__ == "__main__":
    # Ad-hoc manual check; note only `b` is actually exercised below.
    b = "ab"
    a = "PAYPALISHIRING"
    sol = Solution()
    # numRows == 1 must return the input unchanged.
    print(sol.convert(b, 1))
13,497 | 75caa344cd346acbbd07b413133d6c4306576ad3 | s=input()
# Reverse the previously-read input string and echo it back.
s = "".join(reversed(s))
print(s)
13,498 | a3dfcbcafaaa634f396f1d7320a67293b2d952b1 | from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from .models import Vehicle
# Create your tests here.
class ReserveTestCase(TestCase):
    def setUp(self):
        # BUG FIX: mommy.make(..., _quantity=10) returns a *list* of model
        # instances, not a single instance — name it accordingly.
        self.vehicles = mommy.make(
            'vehicle.Vehicle', reservation_code='1', _quantity=10
        )
        self.client = Client()

    def tearDown(self):
        # BUG FIX: the old code called .delete() on the list itself, which
        # raises AttributeError; delete each created instance instead.
        for vehicle in self.vehicles:
            vehicle.delete()
13,499 | b65c5d2453428a07ddefe880529a53e4340f8c4d | import sqlite3
import glob
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import scipy.spatial.distance as distance
def insert_similarity_name(cur, sim_name):
    """Register a similarity measure by name and return its row id."""
    cur.execute("INSERT INTO similarity (name) VALUES (?)",
                (sim_name,))
    row = cur.execute("SELECT * FROM similarity WHERE name = ?",
                      (sim_name,)).fetchone()
    # Only the id (first column), not the whole row, is of interest.
    return row[0]
def insert_similarities(cur, corpus_path, sim_name, sim_id):
    """Compute pairwise cosine similarities between all corpus pages and
    insert them into the `sim` table under similarity id `sim_id`.

    `sim_name` encodes the vectorizer settings, e.g. 'char_ngram_1,8'
    means analyzer 'char' with ngram range (1, 8).
    """
    # sim_name : char_ngram_1,8
    analyzer = sim_name.split('_')[0]
    low, high = sim_name.split('_')[-1].split(',')
    ngram_range = (int(low), int(high))

    corpus = []
    for file in glob.glob(corpus_path + '*.txt'):
        txt = open(file).read()
        corpus.append(txt)
    vectorizer = CountVectorizer(analyzer=analyzer, ngram_range=ngram_range, max_features=50000)
    vectorizer.fit(corpus)

    def page_id(path):
        # FIX: this name-parsing + lookup block was duplicated verbatim for
        # file1 and file2; factored into one helper.
        # File names look like: Moreau65_GALL.pdf-10.txt
        file_name = path.split('/')[-1]
        id_moreau = file_name.split('_')[0].replace('Moreau', '')
        num_page = file_name.split('-')[-1].replace('.txt', '')
        row = cur.execute("SELECT * FROM page WHERE item = ? AND id_page = ?",
                          (id_moreau, num_page,)).fetchone()
        return row[0]  # only the id, not the whole row

    for file1 in glob.glob(corpus_path + '*.txt'):
        id_p1 = page_id(file1)
        for file2 in glob.glob(corpus_path + '*.txt'):
            id_p2 = page_id(file2)
            if file1 != file2:
                txt1 = open(file1).read()
                txt2 = open(file2).read()
                vec1 = vectorizer.transform([txt1]).toarray()
                vec2 = vectorizer.transform([txt2]).toarray()
                sim = 1 - distance.cosine(vec1, vec2)
                cur.execute("INSERT INTO sim (id_sim, id_p1, id_p2, val_sim) VALUES (?, ?, ?, ?)",
                            (sim_id, id_p1, id_p2, sim)
                )
connection = sqlite3.connect('database.db')

with open('schema.sql') as f:
    connection.executescript(f.read())

# Walk the spreadsheet holding the metadata of Moreau's bibliography.
liste_maz = {}
with open('./static/ListeMazarinades_all_22juin2021.csv') as f:
    line = f.readline()
    while line != '':
        line = line.strip().split(',')
        if len(line) > 5:
            # Keyed by Moreau id; keeps columns 1, 2, 4, 5 of the row.
            liste_maz[line[0]] = [line[1], line[2], line[4], line[5]]
        line = f.readline()

cur = connection.cursor()

# Insert the items into the ITEM table.
corpus_path = './static/Corpus/1-100/'
for file in glob.glob(corpus_path + '*.pdf'):
    id_moreau = file.split('/')[-1].split('_')[0].replace('Moreau', '')
    if id_moreau in liste_maz.keys() and '-' not in id_moreau:
        info = liste_maz[id_moreau]  # NOTE: currently unused
        cur.execute("INSERT INTO item (id_moreau, nb_page, titre, date, lieu) VALUES (?, ?, ?, ?, ?)",
        (id_moreau, liste_maz[id_moreau][3], liste_maz[id_moreau][0], liste_maz[id_moreau][1], liste_maz[id_moreau][2])
        )

# Insert the pages into the PAGE table.
for file in glob.glob(corpus_path + '*.png'):
    # e.g. ./sources/Corpus/1-100/Moreau65_GALL.pdf-10.png
    file_name = file.split('/')[-1]
    id_moreau = file_name.split('_')[0].replace('Moreau', '')
    num_page = file_name.split('-')[-1].replace('.png', '')
    cur.execute("INSERT INTO page (id_page, item, url_num, url_txt, url_alto) VALUES (?, ?, ?, ?, ?)",
    (num_page, id_moreau, file, file.replace('.png', '.txt'), file.replace('.png', '.alto'))
    )

# Similarity 1: char_ngram_1,8
sim_name = 'char_ngram_1,8'
sim_id = insert_similarity_name(cur, sim_name)
insert_similarities(cur, corpus_path, sim_name, sim_id)

connection.commit()
connection.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.