index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
11,400
2d540f18f09bd700504a6873e951c91f9129d064
def dataSegmentation(str):
    """Tokenize *str*: each maximal run of lowercase letters becomes one
    token, spaces are skipped, and every other character is its own token.

    The parameter is kept named ``str`` for backward compatibility even
    though it shadows the builtin; we never need the builtin here.

    :param str: input string
    :return: list of tokens in order of appearance (empty list for "")
    """
    res = []
    i = 0
    n = len(str)
    while i < n:
        ch = str[i]
        if ch == ' ':
            # Whitespace separates tokens and is never emitted.
            i += 1
        elif 'a' <= ch <= 'z':
            # Consume the whole lowercase run in one slice; this avoids the
            # original's duplicated inner-loop bounds checks, which could
            # mishandle a word ending exactly at the end of the string.
            start = i
            while i < n and 'a' <= str[i] <= 'z':
                i += 1
            res.append(str[start:i])
        else:
            # Any non-space, non-lowercase character is a standalone token.
            res.append(ch)
            i += 1
    return res
11,401
bb9ec5c651023041114877c79d8ce189d3ba4739
# Compute the constant per-period growth rate needed to turn 1500 into
# 1042000 over `tempo` periods, then classify it: at most 1% per period
# is considered attainable ("Real"), anything above is not ("Irreal").
tempo = int(input("Tempo: "))
rate = (1042000 / 1500) ** (1 / tempo) - 1
print(round(rate, 5))
print("Real" if rate <= 0.01 else "Irreal")
11,402
d7ca0f562622d44ca33d08b96289d949c5447f91
# End-to-end smoke test driver for a toy GFS (Google File System) clone.
# Python 2 code: relies on the builtin reload(), generator .next(), and
# indexing dict.keys() -- none of which work unchanged on Python 3.
# NOTE(review): reconstructed from a flattened dump; nesting is best-effort.
import socket
import chunkserver
import master
import client
import net
import random
import log
import msg

try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    # NOTE(review): `sys` is never imported in this chunk -- this handler
    # would itself raise NameError; confirm `import sys` exists upstream.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. This is required\n" % __file__)
    sys.exit(1)

if(settings.DEBUG):
    reload(settings)

# testing, remove
# execute()

if(settings.DEBUG):
    # Re-import everything so edits are picked up in a long-lived session.
    reload(settings)
    reload(chunkserver)
    reload(master)
    reload(client)
    reload(net)
    reload(msg)

if(settings.TESTING):
    import thread

    def random_port():
        # Random port in [32000, 64000).
        return int(random.random()*32000 + 32000)

    # set up debug data
    settings.MASTER_META_FNAME = 'meta_test.obj'
    settings.MASTER_CHUNK_PORT = random_port()
    settings.MASTER_CLIENT_PORT = settings.MASTER_CHUNK_PORT + 1
    settings.CHUNK_CLIENT_PORT = settings.MASTER_CLIENT_PORT + 1
    log.log("[gfs testing] chunk port %i, master client port %i chunk client port %i" % (settings.MASTER_CHUNK_PORT, settings.MASTER_CLIENT_PORT,settings.CHUNK_CLIENT_PORT))
    master.write_test_meta()

    # The test advances both servers manually by calling tick() in the
    # exact order messages are expected to flow.
    global master
    global chunk
    log.log("master-chunkserver handshake")
    master = master.MasterServer()  # start + load meta
    chunk = chunkserver.ChunkServer()  # init + load chunks
    chunk.write_test_chunk()
    master.tick()  # connection from chunkserver
    chunk.tick()  # send ChunkConnect
    master.tick()  # recv ChunkConnect

    log.log("dropping chunkserver")
    master.drop_chunkserver(master.chunkservers.keys()[0])
    chunk.tick()  # lost conn to master, reconnecting
    master.tick()
    chunk.tick()  # send ChunkConn msg
    master.tick()  # recv ChunkConn msg, add chunkserver

    log.log("testing FileInfo fetch")
    global fir, fi
    fir = client.file_info('foo')
    fir.next()
    master.tick()
    fi = fir.next()
    log.log("fetched. length %i" % fi.length())

    log.log("client read")
    global r
    r = client.read("foo",0,32)
    r.next()  # connects to master, sends ReadReq
    master.tick()  # get conn
    master.tick()  # get ReadReq
    r.next()  # send ReadChunk to chunkserver
    chunk.tick()  # get ReadChunk, send response
    s = r.next()
    log.log("received: %s" %s)
    expected = "abcdefghijklmnopqrstuvwxyzABCDEF"
    if s != expected:
        log.err("got %s expected %s" % (s,expected))

    log.log("appending data")
    data = "1234567890"
    global a
    a = client.append("foo",data)
    a.next()  # connect to master, send request
    master.tick()  # get AppendReq,
    a.next()  # connecting to chunkserver, SendData
    chunk.tick()  # recv SendData
    a.next()  # get success for mutate 1, send commit to master
    master.tick()  # send commit msg to chunk
    chunk.tick()  # write data
    master.tick()  # get response, done with commit,
    master.tick()  # send client success
    res = a.next()  # receive success
    log.log("wrote mutate_id(%i)" % res.mutate_id)

    log.log("second FileInfo fetch")
    global fi2r, fi2
    fi2r = client.file_info('foo')
    fi2r.next()
    master.tick()
    fi2 = fi2r.next()
    log.log("fetched. length %i" % fi2.length())
    expected = fi.length() + len(data)
    if expected != fi2.length():
        log.err("file length mismatch. expected %i, received %i" % (expected,fi2.length()))

    log.log("read what was written")
    global r
    r = client.read("foo",fi.length(),len(data))
    # Pump both servers until the read generator yields the data.
    for data2 in r:
        log.log("ticking...")
        if data2:
            break
        master.tick()
        chunk.tick()
    log.log("read2 %s" % data2)
    if data2 != data:
        log.err("data mismatch: appended %s, read %s" % (data, data2))

    # todo: try the failure states for each step of the read
    # todo: chunk_info still has server info for two servers as of the append test.
11,403
67d063769410fc186f630335bd88aa215ff82a4e
class Device:
    """Plain record describing a device attached to a task.

    All fields default to 0 / '' so a blank Device() can be filled later.
    """

    def __init__(self, d_id=0, d_task_id=0, d_device_type='',
                 d_device_width='', d_device_long='',
                 d_regulation_type='', d_notice=''):
        # Copy every constructor argument onto the instance in one go.
        (self.id,
         self.task_id,
         self.device_type,
         self.device_width,
         self.device_long,
         self.regulation_type,
         self.notice) = (d_id, d_task_id, d_device_type, d_device_width,
                         d_device_long, d_regulation_type, d_notice)
11,404
114f0947619ac4bbbc04691fd3766e289d3ae311
# Grader harness (Python 2 syntax): copy the solution source into the
# current directory, compile it with g++, run it against 10 prepared test
# cases and diff the produced output against the expected .ok files.
from sh import cp, rm, diff, ErrorReturnCode
import sh
import os

# Name of the solution file under review, located one directory up.
SURSA_VERIFICATA = 'conjectura_and.cpp'

cp('../' + SURSA_VERIFICATA, '.')
os.system('g++ ' + SURSA_VERIFICATA)  # produces ./a.out

filename = 'grader_test'
for i in range(1, 11):
    print 'Testul ', i
    # The compiled program reads 'conjectura.in' and writes 'conjectura.out'.
    cp(filename + str(i) + '.in', 'conjectura.in')
    os.system('./a.out')
    # _ok_code=[0, 1]: diff exits 1 when the files differ -- treat that as
    # a reportable result, not an exception.
    print diff('conjectura.out', filename + str(i) + '.ok', _ok_code=[0, 1])

# Clean up the working copies.
for extension in ['in', 'out']:
    rm('conjectura.' + extension)
rm(SURSA_VERIFICATA)
rm('a.out')
11,405
2dc0606e42fce311a59ee1e6909a2dcaf2b667a4
# Minimal demonstration of Python name scoping: a function can *read* a
# module-level variable without any `global` declaration.
x = 2

def f():
    y = 3  # purely local; assigned and never used
    print(x)  # resolves to the module-level x, prints 2

f()
print(x)  # unchanged by the call: prints 2
11,406
d978abbc0c95b55756f6d64b5ebeaa124b60507d
# -*- coding: utf8 -*-
#
# Tornado handler that renders a 4-character image captcha with PIL.
# Python 2 code (cStringIO, old-style PIL imports).
import Image, ImageDraw, ImageFont
import random
from cStringIO import StringIO
import tornado
from Base import BaseHandler


class Captcha( BaseHandler ):
    def create_captcha( self, charNum, fontSize ):
        """Render a captcha of `charNum` glyphs at `fontSize`, store the
        answer in the session under 'captcha', and return PNG bytes.
        Visually ambiguous glyphs (I, Q, U, V, Y, 0, 7, 9) are excluded."""
        alphArr = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'R', 'S', 'T', 'W', 'X', 'Z', '1', '2', '3', '4', '5', '6', '8']
        #fontfile = "%s/tools"
        # NOTE(review): assumes self.homedir is provided by BaseHandler -- confirm.
        font = ImageFont.truetype( '%s/sysdir/fonts/acidic.ttf' % self.homedir, fontSize )
        width, height = font.getsize( "W" )
        interval = width
        width, height = ( width + 7 ) * charNum, height + 20
        image = Image.new( "RGB", ( width, height ), ( 192, 192, 192 ) )
        draw = ImageDraw.Draw( image )
        captcha = ''
        for i in range( 0, charNum ):
            # (translated) create a background tile for the letter:
            charImg = Image.new( "RGB", ( fontSize + 10, fontSize + 10 ), "black" )
            tmpDraw = ImageDraw.Draw( charImg )
            # (translated) place a random letter of a random colour on it:
            buk = random.choice( alphArr )
            captcha = '%s%s' % ( captcha, buk )
            tmpDraw.text( ( 3, 1 ), buk, font = font, fill = ( random.randint( 20, 150 ), random.randint( 20, 140 ), random.randint( 160, 200 ) ) )
            charImg = charImg.rotate( random.randint( -30, 30 ) )
            # (translated) build a mask so the letter's background becomes transparent:
            mask = Image.new( 'L', ( fontSize + 10, fontSize + 10 ), 0 )
            mask.paste( charImg, ( 0, 0 ) )
            hpos = 10 + ( i * interval + random.randint( 10, interval - 10 ) )
            vpos = random.randint( 10, 20 )
            image.paste( charImg, ( hpos, vpos ), mask )
            # (translated) paste twice for better visibility
            image.paste( charImg, ( hpos, vpos ), mask )
        # Random noise lines over the whole image.
        for i in range( 0, random.randint( 4, 8 ) ):
            draw.line( ( random.randint( 6, width - 6 ), random.randint( 3, height - 3 ), random.randint( 6, width - 6 ), random.randint( 2, height - 3 ) ), fill = ( random.randint( 70, 150 ), random.randint( 20, 220 ), random.randint( 160, 220 ) ), width = random.randint( 1, 2 ) )
        #image.save("captcha.jpg")
        out = StringIO()
        self.set_session( 'captcha', captcha )
        #import tools.webpy as web
        #web.setcookie('ss0', hashlib.md5(captcha).hexdigest(), 600)
        image.save( out, 'PNG' )
        return out.getvalue()

    def get( self ):
        # Serve a fresh 4-character captcha as a PNG response.
        self.set_header( 'Content-Type', 'image/png' )
        self.write( self.create_captcha( 4, 35 ) )


if __name__ == '__main__':
    # NOTE(review): calls an instance method on the class without an
    # instance -- this raises TypeError; looks like leftover debug code.
    Captcha.create_captcha( 7, 40 )
11,407
cdbe692d6085ae19604db8a65932b8ab93ea9dc8
import numpy as np


class Parenthesis(object):
    """Checks 'valid parenthesis string' inputs where '*' is a wildcard
    that may stand for '(', ')' or the empty string (LeetCode 678),
    using an interval dynamic program."""

    def is_valid(self, string):
        """Return True when `string` (made of '(', ')', '*') can be read
        as a balanced parenthesis sequence."""
        n = len(string)
        # dp[i][j]: substring string[i..j] (inclusive) can form a valid sequence.
        dp = [[False] * n for _ in range(n)]

        # Length-1 substrings: only '*' can be valid (as the empty string).
        for i, ch in enumerate(string):
            dp[i][i] = (ch == '*')
        # Length-2 substrings: an opener-like char followed by a closer-like char.
        for i in range(n - 1):
            dp[i][i + 1] = string[i] in "(*" and string[i + 1] in "*)"

        # Longer substrings: either the outer pair wraps a valid interior,
        # or the interval splits into two valid halves.
        for span in range(3, n + 1):
            for i in range(n - span + 1):
                j = i + span - 1
                if string[i] in '(*' and string[j] in "*)" and dp[i + 1][j - 1]:
                    dp[i][j] = True
                    continue
                for k in range(i, j + 1):
                    if dp[i][k] and dp[k + 1][j]:
                        dp[i][j] = True
                        break
        return dp[0][-1]


if __name__ == "__main__":
    checker = Parenthesis()
    print(checker.is_valid("(*)"))
11,408
adbda5b3a5d6649b880ce69c73ae82b64bcec2bb
import sys
from operator import itemgetter

# Module-level fuel -> powerplant-type mapping, attached to the module
# object so it can be referenced as `this.fuel_map`.
this = sys.modules[__name__]
this.fuel_map = {"gas(euro/MWh)": ["gasfired"],
                 "kerosine(euro/MWh)": ["turbojet"],
                 "wind(%)": ["windturbine"],
                 "co2(euro/ton)": ["gasfired", "turbojet"]
                 }

# Due to the multiplication by 10 to avoid floating points calculation issues,
# We have a great number of levels of recursion. This should be OK (just playing, would
# never do it for real - or use recursion for that matter)
sys.setrecursionlimit(10000)


class PayLoad:
    """ Represents a payload problem with target payload, fuel prices and
    powerstations with all the necessary logic

    NOTE(review): reconstructed from a flattened dump; block nesting below
    is best-effort and should be checked against the original repository.
    """

    def __init__(self, payload):
        # Verify proper format for the payload...
        self.load = 0
        self.fuels = payload["fuels"]
        self.payload = payload
        self.powerplants = []
        self.feasible = True
        self.unfeasible_reason = ""
        if self.verify_payload():
            # Get the load
            self.load = payload["load"]
            # Attach a cost-per-MWh to every plant so we can sort by merit order.
            for powerplant in payload["powerplants"]:
                powerplant["price_per_Mw"] = 0.0
                powerplant["p"] = 0
                for fuel in payload["fuels"]:
                    if powerplant["type"] in this.fuel_map[fuel]:
                        if fuel == "co2(euro/ton)":
                            # 0.3 ton of CO2 per MWh generated.
                            powerplant["price_per_Mw"] += payload["fuels"][fuel] * 0.3
                        elif fuel == "wind(%)":
                            # Wind Turbines are ON/OFF
                            # This should not be done, it is done ONLY to have a simplified
                            # visual representation and have no problems with FP arithmetics
                            powerplant["pmax"] = int(powerplant["pmax"] * payload["fuels"][fuel] / 10)
                            powerplant["pmin"] = powerplant["pmax"]
                        else:
                            powerplant["price_per_Mw"] += payload["fuels"][fuel] * (1 / powerplant["efficiency"])
                        # Notice that this may not be correct because we are assigning co2 costs for MW generated
                        # It should be related to amount of fuel burnt in my opinion
            sorted_pp = sorted(payload["powerplants"], key=itemgetter("price_per_Mw"))
            self.powerplants.extend(sorted_pp)
            self.scale_up()
            self.verify_load_feasibility()
            if self.feasible:
                self.calculate_vals()
                self.allocate_powerplants(0, 0)
                self.scale_down_and_clean()

    def sol_val(self):
        """ Calculate the value of a solution """
        val = 0
        for pp in self.powerplants:
            if "index" in pp:
                val += pp["vals"][pp["index"]]
        return val

    @staticmethod
    def possible_vals(pp):
        """ Allocates a list of all possible values admitted for a Powerplant
        it's very inefficient... """
        if pp["type"] == "w":
            vals = [0, pp["pmax"]]
        elif pp["type"] == "windturbine":
            # pmin == pmax after __init__, so this is effectively ON/OFF.
            vals = [0, pp["pmin"]]
            for i in range(pp["pmin"], pp["pmax"] - pp["pmin"] + 1):
                vals.append(pp["pmin"] + i)
        else:
            # Turbojet
            vals = [0]
            for i in range(pp["pmin"], pp["pmax"] - pp["pmin"]):
                vals.append(pp["pmin"] + i)
        return vals

    def calculate_vals(self):
        """ Adding the possible values to the powerplants, shall be removed afterwards """
        for pp in self.powerplants:
            pp["vals"] = self.possible_vals(pp)
            pp["index"] = 0

    def allocate_powerplants(self, pp_number, pp_move_index):
        """ This is the allocation of the load to the powerplants simple backtracking,
        not complete but works on the examples and a number of other tests"""
        self.powerplants[pp_number]["index"] = pp_move_index
        if self.sol_val() == self.load:
            return True
        else:
            if self.sol_val() < self.load:
                # Under target: either push this plant higher or move to the next one.
                if pp_move_index < len(self.powerplants[pp_number]["vals"]) - 1:
                    if self.allocate_powerplants(pp_number, pp_move_index + 1):
                        return True
                else:
                    if pp_number < len(self.powerplants) - 1:
                        if self.allocate_powerplants(pp_number + 1, 0):
                            return True
            else:
                # Backtrack py putting previous to smaller satisfying...
                while self.sol_val() > self.load:
                    if pp_number > 0:
                        if self.powerplants[pp_number - 1]["index"] > 0:
                            self.powerplants[pp_number - 1]["index"] = self.powerplants[pp_number - 1]["index"] - 1
                        else:
                            self.powerplants[pp_number]["index"] = 0
                    else:
                        self.powerplants[pp_number]["index"] = 0
                if pp_number < len(self.powerplants) - 1:
                    if self.allocate_powerplants(pp_number + 1, 0):
                        return True

    def verify_load_feasibility(self):
        """ Checks if we can achieve it """
        max_load = 0
        for pp in self.powerplants:
            max_load += pp["pmax"]
        min_load = max_load
        for pp in self.powerplants:
            min_load = min(pp["pmin"], min_load)
        if self.load > max_load:
            self.feasible = False
            self.unfeasible_reason = f"Requested load {self.load/10} too high for our powerstations "
            return False
        if self.load < min_load:
            self.feasible = False
            self.unfeasible_reason = f"Requested load {self.load/10} too low for our powerstations "
            return False
        return True

    @staticmethod
    def verify_payload():
        """ Should verify that payload contains all required fields
        nothing done, just to remember that it should be there... """
        return True

    def scale_up(self):
        """ Multiply by 10 so to avoid floating point arithmetics tricks """
        self.load *= 10
        for pp in self.powerplants:
            if pp["type"] != "windturbine":
                pp["pmin"] *= 10
                pp["pmax"] *= 10

    def scale_down_and_clean(self):
        """ Divide by 10 to present the results with 1 decimal """
        self.load /= 10.0
        for pp in self.powerplants:
            pp["pmin"] /= 10.0
            pp["pmax"] /= 10.0
            if "index" in pp and "vals" in pp:
                pp["p"] = pp["vals"][pp["index"]] / 10
                del (pp["vals"])
                del (pp["index"])
            else:
                pp["p"] = 0
11,409
e2242209111734d51fa5804bb9c746bbfa58234e
import pymongo


class Mongo:
    """Thin wrapper around a local MongoDB: database 'Ecologic',
    collection 'Bomba'."""

    def __init__(self):
        # Connection settings for the local server.
        MONGODB_HOST = '127.0.0.1'
        MONGODB_PORT = '27027'  # NOTE(review): non-default port (default is 27017) -- confirm intentional
        MONGODB_TIMEOUT = 1000  # currently unused
        URI_CONNECTION = "mongodb://" + MONGODB_HOST + ":" + MONGODB_PORT + "/"
        try:
            self._client = pymongo.MongoClient(URI_CONNECTION)
            print ('OK -- Connected to MongoDB at server %s' % (MONGODB_HOST))
            self._db = self._client['Ecologic']
            self._coleccion = self._db['Bomba']
        except pymongo.errors.ServerSelectionTimeoutError as error:
            print ('Error with MongoDB connection: %s' % error)
        except pymongo.errors.ConnectionFailure as error:
            print ('Could not connect to MongoDB: %s' % error)

    def insertarDatos(self, segundos, fecha):
        # Insert one record with a sequential _id.
        # NOTE(review): count()+1 as _id is race-prone, and Cursor.count()
        # and Collection.insert() were removed in PyMongo 4 -- this code
        # requires PyMongo 3.x.
        c = self._coleccion.find()
        d = self._coleccion.insert({"_id":c.count()+1,"segundos":segundos, "fecha":fecha})

    def consultarCantidadRegistros(self):
        # Cap the collection at 300 documents by wiping it entirely
        # once the threshold is reached (also deprecated PyMongo 3 APIs).
        c = self._coleccion.count()
        if c >= 300:
            d = self._coleccion.remove()
11,410
929d068104901bf257cc7eabd66b9c9dca734fc4
class Config(object):
    """Hyper-parameters shared by the char-RNN training and generation scripts."""
    init_scale = 0.04
    learning_rate = 0.001
    max_grad_norm = 15
    num_layers = 3
    num_steps = 30
    hidden_size = 800  # hidden layer size
    last_iteration = 0  # iteration count of the previous run: 0 to train from scratch, else the previous run's count for incremental training
    iteration = 30  # number of additional training iterations for this run
    save_freq = 1  # auto-save every this many iterations
    keep_prob = 0.5
    batch_size = 128
    model_path = './Model'

    # parameters for generation
    save_time = 210  # from here on, generation settings: which saved iteration's model to load (must be one actually saved per save_freq above)
    is_sample = True
    is_beams = True
    beam_size = 2
    len_of_generation = 49  # length of the generated text
    start_sentence = u'。\n故'  # the character(s) the generated text starts with
11,411
d4777f4746c2aaef7d48ee367e4761ecf328bed5
# Flask-WTF form definitions for an e-commerce app (addresses, accounts,
# registration/login, contact, cart and customer/password updates).
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, RadioField,\
    SelectField, FloatField, IntegerField
from wtforms.validators import DataRequired, Length, Optional, InputRequired, ValidationError, Email, EqualTo
# NOTE(review): wtforms.fields.html5 is removed in WTForms 3 -- pin WTForms 2.x or import DateField from wtforms.fields.
from wtforms.fields.html5 import DateField
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user


class AddressForm(FlaskForm):
    """Shipping address entry."""
    first_name=StringField('First Name', validators=[DataRequired(), InputRequired()])
    last_name=StringField('Last Name', validators=[DataRequired(), InputRequired()])
    street=StringField('Street', validators=[DataRequired(), InputRequired()])
    zipcode=StringField('Zip', validators=[DataRequired(), InputRequired()])
    state=SelectField('District', validators=[DataRequired()], choices=[("Arua", "Arua"), ("Kampala", "Kampala"), ("Tororo","Tororo")])
    country=SelectField('Country', validators=[DataRequired()], choices=[("Uganda", "Uganda"), ("Kenya","Kenya"), ("Tanzania", "Tanzania"),("Rwanda","Rwanda")])
    telephone=StringField('Telephone', validators=[DataRequired(), InputRequired(), Length(min=10, max=13)])
    email=StringField('Email', validators=[Email(), InputRequired()])
    submit=SubmitField('Save Address')


class AccountForm(FlaskForm):
    """Account profile editing; state/country choices are filled at runtime."""
    firstname=StringField('First Name', validators=[DataRequired(), InputRequired()])
    lastname=StringField('Last Name', validators=[DataRequired(), InputRequired()])
    company=StringField('Company', validators=[DataRequired(), InputRequired()])
    street=StringField('Street', validators=[DataRequired(), InputRequired()])
    zipcode=StringField('Zip', validators=[DataRequired(), InputRequired()])
    state=SelectField('State', validators=[DataRequired()], choices=[])
    country=SelectField('Country', validators=[DataRequired()], choices=[])
    tel=StringField('Telephone', validators=[DataRequired(), InputRequired()])
    email=StringField('Email', validators=[Email(), InputRequired()])
    submit=SubmitField('Save changes')


class RegistrationForm(FlaskForm):
    """New-user sign-up, including profile picture upload."""
    firstname=StringField('First Name', validators=[ InputRequired(), Length(min=3, max=20)])
    lastname=StringField('Last Name', validators=[ InputRequired(), Length(min=3, max=20)])
    username=StringField('Username', validators=[ InputRequired(), Length(min=3, max=20)])
    email=StringField('Email', validators=[ InputRequired(), Email()])
    tel=StringField('Telephone', validators=[InputRequired(), Length(min=10, max=13)])
    password=PasswordField('Password',validators=[ InputRequired(), Length(min=3, max=30)])
    confirm_password=PasswordField('Confirm password',validators=[InputRequired(), EqualTo('password')])
    countries=SelectField('Countries', validators=[InputRequired()], choices=[("Kenya","Kenya"), ("Uganda", "Uganda"), ("Rwanda", "Rwanda"), ("Tanzania", "Tanzania")])
    district=SelectField('District', validators=[InputRequired()], choices=[("Arua","Arua"), ("Nebbi", "Nebbi"), ("Yumbe", "Yumbe"), ("Koboko", "Koboko")])
    profile_picture=FileField('Select Profile Picture', validators=[InputRequired(),FileAllowed(['jpg', 'png'])])
    submit=SubmitField('Sign-up')


class LoginForm(FlaskForm):
    """Email/password login."""
    email=StringField('Email', validators=[Email(), InputRequired()])
    password=PasswordField('Password',validators=[ InputRequired(), Length(min=3, max=30)])
    submit=SubmitField('Login')


class ContactForm(FlaskForm):
    """Contact-us message form."""
    firstname=StringField('First Name', validators=[DataRequired(), InputRequired()])
    lastname=StringField('Last Name', validators=[DataRequired(), InputRequired()])
    email=StringField('Email', validators=[Email(), InputRequired()])
    subject=StringField('Subject', validators=[DataRequired(), InputRequired()])
    message=TextAreaField('Message', validators=[DataRequired(), InputRequired()])
    submit=SubmitField('Send Message')


class AddToCartForm(FlaskForm):
    """Product options when adding an item to the cart; radio choices are
    expected to be populated per-product at runtime."""
    product_name=StringField("Product Name", validators=[InputRequired()])
    product_image=StringField("Product Image", validators=[InputRequired()])
    size=RadioField('Size', validators=[ InputRequired()])
    color=RadioField('Color', validators=[ InputRequired()])
    quantity=IntegerField(validators=[InputRequired()])
    unit_price=IntegerField(validators=[InputRequired()])
    weight=RadioField('weight', validators=[ Optional()])
    submit=SubmitField('submit')


class UpdateCustomerForm(FlaskForm):
    """Admin-side customer record update."""
    name = StringField("Name", validators=[InputRequired()])
    email = StringField("Email", validators=[InputRequired()])
    contact = StringField("Telephone", validators=[InputRequired()])
    country = SelectField('Country', validators=[InputRequired()], choices=[("Kenya","Kenya"), ("Uganda", "Uganda"), ("Rwanda", "Rwanda"), ("Tanzania", "Tanzania")])
    city = StringField("City", validators=[InputRequired()])
    submit=SubmitField('Update')


class UpdatePasswordForm(FlaskForm):
    """Password change: requires the current password and a confirmed new one."""
    current = PasswordField('Current Password', validators=[ InputRequired(), Length(min=3, max=30)])
    new = PasswordField('New Password', validators=[ InputRequired(), Length(min=3, max=30)])
    repeat = PasswordField('Repeat Password', validators=[ InputRequired(), Length(min=3, max=30), EqualTo('new')])
    submit=SubmitField('Update')

    def validate_current(form, field):
        # Inline WTForms validator: reject the form when the supplied
        # current password does not match the logged-in user's.
        customer = current_user
        if not customer.verify_password(field.data):
            raise ValidationError('Incorrect current password.')
11,412
19f2e6424aab75ecfa442bebd67b4708e8129c33
import sys
sys.path.insert(0, './')

from rlf import BehavioralCloningFromObs
from rlf import BasicPolicy
from rlf import run_policy
from tests.test_run_settings import TestRunSettings
from rlf.policies.actor_critic.dist_actor_critic import DistActorCritic
from rlf.rl.model import MLPBase


class BcoRunSettings(TestRunSettings):
    """Run settings wiring a BasicPolicy to the Behavioral Cloning from
    Observation (BCO) algorithm for the rlf test harness."""

    def get_policy(self):
        # Non-recurrent MLP torso with hidden sizes (400, 300); the policy
        # is stochastic only when --stoch-policy is passed.
        return BasicPolicy(
                is_stoch=self.base_args.stoch_policy,
                get_base_net_fn=lambda i_shape, recurrent: MLPBase(
                    i_shape[0], False, (400, 300))
                )

    def get_algo(self):
        return BehavioralCloningFromObs()

    def get_add_args(self, parser):
        # Extend the base CLI with the BCO-specific flag.
        super().get_add_args(parser)
        parser.add_argument('--stoch-policy', default=False, action='store_true')


if __name__ == "__main__":
    run_policy(BcoRunSettings())
11,413
7cd14c01aecb9b0399ad5cc1e05db334abcb93b0
# Checks that every "$..." parameter referenced by test-case XML files is
# actually declared in the corresponding Operation XML file.
# BUG FIX: xml.etree.cElementTree was removed in Python 3.9; plain
# ElementTree transparently uses the C accelerator when available.
import xml.etree.ElementTree as ET
import os


class case_info:
    """Record describing one operation reference found in a case file."""

    def __init__(self, p_file_name, p_oper_path, p_oper_para):
        self.file_name = p_file_name
        self.oper_path = p_oper_path
        # BUG FIX: the original assigned the undefined name `p_oper_name`,
        # which raised NameError on every construction. The shared
        # class-level mutable default list was dropped for the same reason.
        self.oper_para = p_oper_para


def find_dir(src_path, tag_path):
    """Walk `src_path` and return the path of the first directory named
    `tag_path` (Windows-style separator, as the original); None if absent."""
    for fpathe, dirs, fs in os.walk(src_path):
        for d in dirs:
            if d == tag_path:
                return fpathe + "\\" + tag_path


def get_xml_list(xml_dir):
    """Return paths of all .xml files directly inside `xml_dir`."""
    return [xml_dir + "/" + file_name
            for file_name in os.listdir(xml_dir)
            if file_name.endswith(".xml")]


def open_xml(path):
    """Parse `path` as XML; print the error and return None on failure.

    Note: the original also caught FileNotFoundError after IOError, but
    FileNotFoundError is a subclass of IOError/OSError so that branch was
    unreachable and has been removed (behavior unchanged).
    """
    try:
        return ET.parse(path)
    except IOError as io:
        print(io)
    except Exception as e:
        print(path, ":", e)


if __name__ == "__main__":
    startdir = input('Please input startdir: ')
    target = input('Please input target: ')
    cases_path = find_dir(startdir, target)
    cases_path = cases_path.replace("\\", "/")
    cases_path_list = get_xml_list(cases_path)  # all case files
    oper_path_list = get_xml_list(cases_path + "/" + "Operation")  # all operation files

    # Map operation name (file stem) -> concatenation of its "$..." texts.
    oper_dict = {}
    for xml_abs_path in oper_path_list:
        oper_tree = open_xml(xml_abs_path)
        if oper_tree is None:
            continue
        list_oper_text = ""
        for element in oper_tree.iter():
            if isinstance(element.text, str) and element.text.startswith("$"):
                list_oper_text += element.text
        pos = xml_abs_path.rfind("/")
        oper_dict[xml_abs_path[pos + 1:].split(".")[0]] = list_oper_text

    # For every <operation path=...> in every case, verify each named
    # parameter appears in the referenced operation's declarations.
    for case in cases_path_list:
        case_tree = open_xml(case)
        if case_tree is None:
            continue
        for case_e in case_tree.iter():
            if case_e.tag == "operation" and "path" in case_e.attrib:
                check_case_list = []
                pos = case.rfind("/")
                check_case_list.append(case[pos + 1:])  # first item: case name
                check_case_list.append(case_e.attrib["path"])  # second item: path
                for p in case_e:
                    if "name" in p.attrib:
                        check_case_list.append(p.attrib["name"])  # remaining: params
                for item in check_case_list[2:]:
                    try:
                        if item not in oper_dict[check_case_list[1]]:
                            print(check_case_list[0] + "---" + check_case_list[1] + "---" + "NO")
                    except KeyError as e:
                        print(e)
                    except Exception as e:
                        print(e)
11,414
a553d091917236326dbc2c5a36a95b7f265a82a8
# Spreadsheet-based work-hours logger. Relies on helpers star-imported
# from `functions` (insert_hours, print_sheet, initialize, and `op`,
# which is presumably openpyxl re-exported -- TODO confirm).
import datetime
from functions import *

zero_time = datetime.time(hour=0, minute=0, second=0)

filename = 'init.xlsx'
# filename = 'new-init.xlsx'
workbook = op.load_workbook(filename)
sheet1 = workbook.active


def log_hours(date, time):
    # Record one (start, end) interval for `date` on the active sheet.
    insert_hours(time[0], time[1], date, sheet1)


def show_sheet():
    print_sheet(sheet1)


# def check_hours(date):
#     row = 0
#     col = 0
#     times = []
#     for i in range(2, sheet1.max_row+1):
#         row = i
#         if sheet1.cell(row=i, column=2).value == date:
#             break
#     for j in range(2, sheet1.max_column):
#         col = j


def test1():
    # Fill every sheet row with a fixed 1:30-4:45 interval on consecutive days.
    start = datetime.time(hour=1, minute=30)
    end = datetime.time(hour=4, minute=45)
    # BUG: `date` is not defined in this scope -- this raises NameError
    # the moment test1() runs, and test1() IS called at module import below.
    time_now = datetime.datetime.combine(date, zero_time)
    curr_date = time_now
    for x in range(sheet1.max_row):
        insert_hours(start, end, curr_date, sheet1)
        curr_date += datetime.timedelta(days=1)


def date_parser():
    # Prompt for MM/DD/YYYY; return a datetime at midnight, or False on bad input.
    date = input('Date (MM/DD/YYYY): ')
    date_array = date.split('/')
    try:
        date = datetime.date(int(date_array[2]), int(date_array[0]), int(date_array[1]))
    except:
        print('Enter valid date')
        return False
    return datetime.datetime.combine(date, zero_time)


def time_parser():
    # Prompt for a start/end pair; return (start, end) times or False on bad input.
    # NOTE(review): the first prompt says "Time End" but reads the START time.
    start = input('Time End (HH:MM): ')
    end = input('Time End (HH:MM): ')
    start_arr = start.split(':')
    end_arr = end.split(':')
    try:
        start = datetime.time(hour=int(start_arr[0]), minute=int(start_arr[1]))
        end = datetime.time(hour=int(end_arr[0]), minute=int(end_arr[1]))
    except:
        print('Enter valid time')
        return False
    return (start, end)


def main1():
    # Interactive entry point: prompt for a date and interval, then log it.
    date = date_parser()
    if date == False:
        return False
    time = time_parser()
    if time == False:
        return False
    initialize(sheet1)
    log_hours(date, time)


# main1()
test1()
# print_sheet(sheet1)
workbook.save('new-' + filename)
# workbook.save(filename)
workbook.close()
11,415
624c1c114d37ba5e80a7f218f91c075b4a29b65d
from classes.Helpers import printVerbose


def parseGroupsFileToDictOfCounts(groups_file):
    """Given a .groups file, returns a dictionary mapping each seed to the number of
    children it represents.

    :param groups_file: A groups file.
    :return: A dictionary where each seed name is a key to a count of its children.
    """
    return parseGroupsFileToDict(groups_file, "counts")


def parseGroupsFileToDictOfChilden(groups_file):
    """Given a .groups file, returns a dictionary mapping each seed to a space-delimited
    string of its children's names.

    (Name typo 'Childen' kept for backward compatibility with existing callers.)

    :param groups_file: A groups file.
    :return: A dictionary where each seed name is a key to a space-delimited string of
        its children's names.
    """
    return parseGroupsFileToDict(groups_file, "children")


def parseGroupsFileToDict(groups_file, thing_to_map):
    """Given a .groups file, returns a dictionary mapping each seed to either a count of
    its children, or a space-delimited string of its children's names.

    :param groups_file: A .groups file (tab-separated: seed, then children).
    :param thing_to_map: 'children' to map seed names to a space-delimited string of
        children names, or 'counts' to map seed names to a count of children.
    :return: The mapping described above.
    """
    groups = {}
    printVerbose("Reading count file: %s" % groups_file)
    # collect the seed names, and the children sequence names
    nb_lines = 0
    # BUG FIX: the file handle was previously opened without ever being
    # closed; `with` guarantees closure. A dead `i = 0` was also removed.
    with open(groups_file, 'r') as fh:
        for line in fh:
            nb_lines += 1
            data = line.rstrip().split("\t")
            seed = data[0]
            children = ""
            if thing_to_map == "children":
                if len(data) > 1:
                    # De-duplicate child names (order is not preserved).
                    children = ' '.join(list(set(data[1:])))
                groups[seed] = children
            if thing_to_map == "counts":
                if len(data) > 1:
                    children = data[1]
                # NOTE: a seed with no children still counts as 1 here
                # (len("".split(" ")) == 1), preserved from the original.
                groups[seed] = len(children.split(" "))
            if nb_lines % 100000 == 0:
                printVerbose("%s lines processed" % nb_lines)
    printVerbose("Done reading count file.")
    return groups
11,416
1c1957a0689086cc4362d806d896561ae1abddf9
# Averages the weights of several EfficientNet-B7 checkpoints (simple
# stochastic-weight-averaging style), evaluates the averaged model on the
# validation fold, and saves it. Runs entirely at import time.
# NOTE(review): reconstructed from a flattened dump; loop nesting is best-effort.
import os
import torch
import pandas as pd
from albumentations.pytorch import ToTensorV2
import albumentations as alb
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from dataloader import ImageDataset
from efficient_net import EfficientNetNoisyStudent
from train_functions import eval_model

# Checkpoints from consecutive epochs to be averaged.
checkpoint_paths = [
    'tf_efficientnet_b7_noisy_student_640/tf_efficientnet_b7_noisy_student_640_epoch17_val_auc0.964_loss0.12_train_auc0.894_loss0.156.pth',
    'tf_efficientnet_b7_noisy_student_640/tf_efficientnet_b7_noisy_student_640_epoch18_val_auc0.964_loss0.12_train_auc0.897_loss0.155.pth',
    'tf_efficientnet_b7_noisy_student_640/tf_efficientnet_b7_noisy_student_640_epoch19_val_auc0.965_loss0.119_train_auc0.901_loss0.154.pth',
    'tf_efficientnet_b7_noisy_student_640/tf_efficientnet_b7_noisy_student_640_epoch20_val_auc0.965_loss0.119_train_auc0.899_loss0.155.pth',
]

# Load each checkpoint and collect its state dict.
state_dicts = []
for checkpoint_path in checkpoint_paths:
    model = EfficientNetNoisyStudent(11, pretrained_backbone=True,
                                     mixed_precision=True, model_name='tf_efficientnet_b7_ns',
                                     checkpoint_path=checkpoint_path)
    model.eval()
    model = model.float()
    state_dicts.append(model.state_dict())

# Element-wise average of all state dicts into a fresh model.
avg_model = EfficientNetNoisyStudent(11, pretrained_backbone=True,
                                     mixed_precision=True, model_name='tf_efficientnet_b7_ns')
avg_state_dict = avg_model.state_dict()
for key in avg_state_dict:
    # NOTE(review): torch.zeros produces float tensors; integer buffers
    # (e.g. BatchNorm num_batches_tracked) end up float here -- confirm
    # load_state_dict tolerates that in this project.
    avg_state_dict[key] = torch.zeros(avg_state_dict[key].shape)
    for state_dict in state_dicts:
        avg_state_dict[key] += state_dict[key]
    avg_state_dict[key] = avg_state_dict[key] / float(len(state_dicts))
avg_model.load_state_dict(avg_state_dict)

# Validation data: fold 1 of the RANZCR training set.
ranzcr_df = pd.read_csv('train_folds.csv')
ranzcr_valid_df = ranzcr_df[ranzcr_df['fold'] == 1]
valid_image_transforms = alb.Compose([
    alb.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2()
])
valid_set = ImageDataset(ranzcr_valid_df, valid_image_transforms, '../ranzcr/train', width_size=640)
valid_loader = DataLoader(valid_set, batch_size=12, num_workers=12,
                          pin_memory=False, drop_last=False)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
criterion = torch.nn.BCEWithLogitsLoss()
scaler = GradScaler()
if torch.cuda.device_count() > 1:
    avg_model = torch.nn.DataParallel(avg_model)
avg_model = avg_model.to(device)

# Evaluate the averaged model and save it with its metrics in the filename.
val_loss, val_avg_auc, val_auc, val_rocs, val_data_pr, val_duration = eval_model(
    avg_model, valid_loader, device, criterion, scaler)
torch.save(avg_model.module.state_dict(),
           os.path.join('effnet7_wa_val_auc{}_loss{}.pth'.format(
               round(val_avg_auc, 3), round(val_loss, 3))))
11,417
f692a9317e13817b0c0b3a747c7a8f8845f6e595
#program to convert tuple to a string # add items in tuple print('Enter the space seperated value.') dic2 = tuple([int(e) for e in input().strip().split()]) print('Tuple: ', dic2) list1 = list(dic2) value = int(input('Enter the value to add: ').strip()) list1.append(value) print(tuple(list1))
11,418
b81a5f80feb40cf3c3e758f12b1229f82743c210
class Node:
    """Single element of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list supporting insertion, deletion, search and count.

    Error conditions (missing items, out-of-range positions, empty list) are
    reported by printing a message, matching the original behaviour.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print every element on one line; report when the list is empty."""
        temp = self.head
        if not temp:
            print('List is empty')
        while temp:
            print(temp.data, end=' ')
            temp = temp.next

    def push(self, data):
        """Method to insert a new node at the beginning."""
        node = Node(data)
        node.next = self.head
        self.head = node

    def insert_after(self, node_to_insert_after, data):
        """Insert a new node after the first node holding the given value."""
        node = Node(data)
        temp = self.head
        while temp:
            if temp.data == node_to_insert_after:
                break
            temp = temp.next
        if not temp:
            print('Item does not exist')
            return
        node.next = temp.next
        temp.next = node

    def insert_before(self, node_to_insert_before, data):
        """Insert a new node before the first node holding the given value."""
        node = Node(data)
        temp = self.head
        # Fix: guard the empty list (previously crashed reading temp.data).
        if not temp:
            print('Item doesn\'t exist')
            return
        if temp.data == node_to_insert_before:
            node.next = temp
            self.head = node
            return
        while temp.next:
            if temp.next.data == node_to_insert_before:
                break
            temp = temp.next
        if not temp.next:
            # Fix: message previously contained a literal tab ('doesn\t').
            print('Item doesn\'t exist')
            return
        node.next = temp.next
        temp.next = node

    def insert_at_position(self, position, data):
        """Insert a new node at a 1-based position (head when list is empty)."""
        node = Node(data)
        if not self.head:
            self.head = node
            return
        if position == 1:
            node.next = self.head
            self.head = node
            return
        temp = self.head
        for _ in range(1, position - 1):
            if not temp:
                print('Index out of bound')
                return
            temp = temp.next
        # Fix: also guard walking off the end (position far beyond length),
        # which previously raised AttributeError on temp.next.
        if not temp:
            print('Index out of bound')
            return
        node.next = temp.next
        temp.next = node

    def append(self, data):
        """Method to append a new node at the end."""
        node = Node(data)
        if not self.head:
            self.head = node
            return
        temp = self.head
        while temp.next:
            temp = temp.next
        temp.next = node

    def delete_node_at_start(self):
        """Method to delete head of linked list."""
        if not self.head:
            print('List already empty.')
            return
        self.head = self.head.next

    def delete_node_at_end(self):
        """Method to delete the tail."""
        if not self.head:
            print('List already empty')
            return
        # Fix: a single-element list previously survived deletion because the
        # walk never ran and head was left untouched.
        if not self.head.next:
            self.head = None
            return
        temp = self.head
        while temp.next.next:
            temp = temp.next
        temp.next = None

    def delete_node(self, key):
        """Delete the first occurrence of key; report when it is absent."""
        if not self.head:
            print('List is empty. No item to delete')
            return
        if self.head.data == key:
            self.head = self.head.next
            return
        temp = self.head
        while temp.next:
            if temp.next.data == key:
                break
            temp = temp.next
        # Fix: previously raised AttributeError when the key was not present.
        if not temp.next:
            print('Item does not exist')
            return
        temp.next = temp.next.next

    def delete_node_position(self, position):
        """Delete the node at a 1-based position; report when out of range."""
        if not self.head:
            print('List is empty. No item to delete')
            return
        if position == 1:
            self.head = self.head.next
            return
        temp = self.head
        count = 1
        while temp and count < position - 1:
            count += 1
            temp = temp.next
        # Fix: also require a node *at* the position, not just before it
        # (position == length + 1 previously crashed).
        if not temp or not temp.next:
            print('Node doesn\'t exist')
            return
        temp.next = temp.next.next

    def get_count(self):
        """Method to count the number of nodes."""
        count = 0
        temp = self.head
        while temp:
            count += 1
            temp = temp.next
        return count

    def search(self, x):
        """Return True when the value x is present in the linked list."""
        temp = self.head
        while temp:
            if temp.data == x:
                return True
            temp = temp.next
        return False

    def search_recursive(self, llist, key):
        """Recursively check whether key is present, starting at node llist."""
        if not llist:
            return False
        if llist.data == key:
            return True
        return self.search_recursive(llist.next, key)
11,419
13ffcf3040ca67c623ea322c280fb34c19bfa8dd
#rc.local : screen -dm -t "punch" bash -c "python /home/pi/button_click.py;sleep 10000" import RPi.GPIO as GPIO from time import sleep # this lets us have a time delay (see line 12) import time import os import datetime import httplib GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button) GPIO.setup(24, GPIO.OUT) #Just so we know if it is up yet GPIO.output(24, 1) time.sleep(0.5) GPIO.output(24, 0) last_state = False try: while True: # this will carry on until you hit CTRL+C if GPIO.input(25) and not last_state : # if port 25 == 1 #click_time = time.strftime("%H:%M:%S") last_state = True os.system('date >> data.txt') print "We got a click" conn = httplib.HTTPConnection("www.yourdomain.com",8000) conn.request("GET","/punchin") print res.status, res.reason data = res.read() print len(data) GPIO.output(24, 1) elif not GPIO.input(25) and last_state: #print "Port 25 is 0/LOW/False - LED OFF" last_state = False GPIO.output(24, 0) sleep(0.1) # wait 0.1 seconds finally: # this block will run no matter how the try block exits GPIO.cleanup() # clean up after yourself
11,420
310852880c1db27b8335523a7d5841d4706d13a7
'''
Created on 2014. 10. 31.

@author: biscuit
'''
import Orange
import networkx as nx


def generateTreeLoc(uristr, loc_sq, rcode):
    """Expand a request URI into the list of path prefixes leading to it.

    loc_sq is the accumulator (mutated in place and also returned); rcode is
    the HTTP response code as a string. E.g. '/a/b' with loc_sq=['/'] yields
    ['/', '/a/', '/a/b'].
    """
    # 404s (other than favicon probes) collapse into a single marker node.
    if rcode == '404' and uristr != '/favicon.ico':
        return ['not_found']
    # Ignore everything that is not a 2xx success.
    if rcode[0] != '2':
        return []
    # Absolute-URL requests seen at the root get their own marker.
    if uristr.startswith("http") and loc_sq == ['/']:
        return ["http://.."]
    if uristr == None:
        return loc_sq
    if uristr == "":
        #loc_sq.append(loc_sq[-1]+"i")
        return loc_sq
    '''
    if(uristr.find("?")>=0):
        uri_path = uristr[:uristr.find("?")]
        temp = generateTreeLoc(uri_path, loc_sq, rcode)
        #temp.append(uri_arg)
        return temp
    '''
    if (uristr[0] == '/'):
        # Leading slash: skip it at the root, otherwise record a new level.
        if loc_sq == ['/']:
            return generateTreeLoc(uristr[1:], loc_sq, rcode)
        loc_sq.append(loc_sq[-1] + '/')
        return generateTreeLoc(uristr[1:], loc_sq, rcode)
    if uristr.find('/') < 0:
        # Last path segment: append and finish.
        loc_sq.append(loc_sq[-1] + uristr)
        return loc_sq
    else:
        # Intermediate segment: append with trailing '/', recurse on the rest.
        loc_sq.append(loc_sq[-1] + uristr[:uristr.find('/')] + '/')
        return generateTreeLoc(uristr[uristr.find('/') + 1:], loc_sq, rcode)


def getURITree(uri_dataSet):
    """Build a directed graph whose paths mirror the URI hierarchy of the
    (Orange) dataset rows, using each row's query_uri and response code.

    NOTE(review): DiGraph.add_path was removed in networkx 2.x — this needs
    nx.add_path(G, ...) on modern networkx; confirm the pinned version.
    """
    G = nx.DiGraph()
    for d in uri_dataSet:
        G.add_path(generateTreeLoc(d['query_uri'].value, ['/'], d['rcode'].value))
    return G
11,421
ae2031bbfe8e2b7735bfe724b8437e688a95c938
import settings


class Camera(object):
    """2D camera that can follow a target entity, clamped to map boundaries."""

    def __init__(self, renderer):
        """Create a camera at the origin and register it with the renderer."""
        self.position = (0, 0)
        self.lastPosition = (0, 0)
        self.mode = "follow"
        self.target = None
        self.bounds = (0, 0, settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
        self.renderer = renderer
        self.renderer.set_camera_position_and_size(
            self.position[0], self.position[1],
            settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, "topleft")

    def SetBoundaries(self, left, top, width, height):
        """Set camera boundaries."""
        self.bounds = (left, top, width, height)

    def SetTarget(self, entity):
        """Sets an entity as the target of the camera."""
        self.target = entity

    def Update(self):
        """Re-centre on the target (follow mode only), clamp to the bounds
        and push the new position to the renderer when it changed."""
        if self.mode != "follow" or self.target is None:
            return
        # Centre of the target, shifted so it lands mid-screen.
        target = self.target
        cam_x = (target.mapLocation[0] - target.size[0] / 2) - settings.SCREEN_WIDTH / 2
        cam_y = (target.mapLocation[1] - target.size[1] / 2) - settings.SCREEN_HEIGHT / 2
        self.position = (cam_x, cam_y)
        if self.position == self.lastPosition:
            return
        left, top, right, bottom = self.bounds
        self.position = (max(left, min(cam_x, right)), max(top, min(cam_y, bottom)))
        self.renderer.set_camera_position(self.position[0], self.position[1], 'topleft')
        self.lastPosition = self.position
11,422
3a73b154b8362236504528731226cd075bd65448
from random import shuffle

# Scratch script experimenting with strings: shuffle, ord/chr shifting,
# split/join and list conversion.

sentence = 'The world is too big'
list_1 = ['Hello', 'my', 'name', 'is', 'Osse']
# print(sentence[0:2:5])
shuffle(list_1)
print(list_1)
print(ord('e'))
print(ord('E'))
print('*********************************')

password = 'Banana'
newPassword = ''
#make every character slide 3 Unicode numbers
# for n in password:
#     uni = ord( n )
#     print(n, ":", uni)
#     newUni = uni + 3
#     print(chr(newUni), ":", newUni)
#     print('-------')
#     newPassword = newPassword + chr(newUni)
#     print(newPassword)

# Shift every even-indexed character up by 3 code points.
i = 0
while i < len(password):
    n = password[i]
    if i % 2 == 0:
        uni = ord( n )
        newUni = uni + 3
        newPassword = newPassword + chr( newUni )
    else:
        # NOTE(review): this discards everything built so far and restarts
        # from the full password — probably meant `newPassword + n`; confirm
        # the intended cipher before changing it.
        newPassword = password + n
    i += 1
print(newPassword)

# list split and join
stri = 'Hello Bob, How are you?'
print(stri)
print( stri.split(" "))
myList = stri.split(' ')
print( myList)
print("-".join(myList))
shuffle( myList)
print( myList )

# string -> list of single characters
stro = 'Hello Bob, How are you?'
print(list(stro))
print("*************")
for n in stro:
    print( n )
11,423
3da261dca5b77211e983b47b6ec3ef6dd1788c6a
import webbrowser

# Open the ingredient-registration page of the web app with a scanned barcode
# pre-filled as the `id` query parameter.
barcode = 123123  # NOTE(review): hard-coded sample value — presumably replaced by a scanner read

url = "https://unexpected-fyp.firebaseapp.com/new-ingredient" + "?id=" + str(barcode)
webbrowser.open(url, new=2)  # new=2: open in a new browser tab, if possible
11,424
daa11fe5777a354df05f827198478f28ef35fa68
import copy import subprocess # RDLU DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)] def print_tracks(carts, tracks): subprocess.call('clear', shell=True) for i, line in enumerate(tracks): printed_line = '' for j, character in enumerate(line): print_track = True for cart in carts: if (i, j) == cart['location']: printed_line += ['>', 'v', '<', '^'][cart['direction']] print_track = False if print_track: printed_line += character print(printed_line) input() def key(cart): return cart['location'][0] + (cart['location'][1] * 0.0001) def a(carts, tracks): while True: carts = [cart for cart in carts if 'should_remove' not in cart] carts.sort(key=key) for cart in carts: cart['location'] = tuple(sum(coordinate) for coordinate in zip(cart['location'], DIRECTIONS[cart['direction']])) x, y = cart['location'] if tracks[x][y] == '/': if cart['direction'] == 0: cart['direction'] = 3 elif cart['direction'] == 1: cart['direction'] = 2 elif cart['direction'] == 2: cart['direction'] = 1 elif cart['direction'] == 3: cart['direction'] = 0 if tracks[x][y] == '\\': if cart['direction'] == 0: cart['direction'] = 1 elif cart['direction'] == 1: cart['direction'] = 0 elif cart['direction'] == 2: cart['direction'] = 3 elif cart['direction'] == 3: cart['direction'] = 2 if tracks[x][y] == '+': if cart['turn'] == 0: cart['direction'] = (cart['direction'] - 1) % 4 if cart['turn'] == 2: cart['direction'] = (cart['direction'] + 1) % 4 cart['turn'] = (cart['turn'] + 1) % 3 for cart2 in carts: if cart['location'] == cart2['location'] and cart is not cart2: return ','.join([str(coordinate) for coordinate in cart['location']][::-1]) def b(carts, tracks): directions = [(0, 1), (1, 0), (0, -1), (-1, 0)] while True: carts = [cart for cart in carts if 'should_remove' not in cart] carts.sort(key=key) if len(carts) == 1: return ','.join([str(coordinate) for coordinate in carts[0]['location']][::-1]) #print_tracks(carts, tracks) for cart in carts: cart['location'] = tuple(sum(coordinate) for coordinate in 
zip(cart['location'], DIRECTIONS[cart['direction']])) x, y = cart['location'] if tracks[x][y] == '/': if cart['direction'] == 0: cart['direction'] = 3 elif cart['direction'] == 1: cart['direction'] = 2 elif cart['direction'] == 2: cart['direction'] = 1 elif cart['direction'] == 3: cart['direction'] = 0 if tracks[x][y] == '\\': if cart['direction'] == 0: cart['direction'] = 1 elif cart['direction'] == 1: cart['direction'] = 0 elif cart['direction'] == 2: cart['direction'] = 3 elif cart['direction'] == 3: cart['direction'] = 2 if tracks[x][y] == '+': if cart['turn'] == 0: cart['direction'] = (cart['direction'] - 1) % 4 if cart['turn'] == 2: cart['direction'] = (cart['direction'] + 1) % 4 cart['turn'] = (cart['turn'] + 1) % 3 for cart2 in carts: if cart['location'] == cart2['location'] and cart is not cart2: cart['should_remove'] = True cart2['should_remove'] = True if len(carts) == 1: return ','.join([str(coordinate) for coordinate in carts[0]['location']][::-1]) with open('input/13', 'r') as file: lines = [line.replace('\n', '') for line in file.readlines()] height = len(lines) width = max(len(line) for line in lines) carts = [] tracks = [[' ' for _ in range(width)] for _ in range(height)] for i, line in enumerate(lines): for j, character in enumerate(line): if character in ('^', 'v'): carts.append({ 'location': (i, j), 'direction': 3 if character == '^' else 1, 'turn': 0 }) tracks[i][j] = '|' elif character in ('>', '<'): carts.append({ 'location': (i, j), 'direction': 0 if character == '>' else 2, 'turn': 0 }) tracks[i][j] = '-' else: tracks[i][j] = character print(a(copy.deepcopy(carts), tracks)) print(b(copy.deepcopy(carts), tracks))
11,425
65df29097aa7a5f0e4de70b7a811ed898de73f6c
# English UI strings, endpoints, user-agent identifiers and CLI help texts
# for DeviantShow, a slideshow viewer for DeviantArt galleries.

# --- Main window / controls ---
UI_TITLE = "DeviantShow"
UI_URL = "URL: "
UI_PLAY = "Play"
# Playback-order button captions: sequential, reverse, random.
UI_MODE_SEQ = "1 2 3 ..."
UI_MODE_REV = "9 8 7 ..."
UI_MODE_RND = "4 7 2 ..."
UI_TIMER = "⌚"
UI_PAUSED = "Slideshow paused"

# --- About dialog ---
UI_ABOUT = "About"
UI_ABOUT_NAME = "DeviantShow v.0.1"
UI_ABOUT_AUTHOR = "\nA free open-source tool by:\nEric Pöhlsen"
UI_ABOUT_USE = "Slideshow tool for:"
UI_ABOUT_DA_URL = "https://www.deviantart.com"
UI_ABOUT_GITHUB = "Sourcecode and help available at:"
UI_ABOUT_GITHUB_URL = "https://www.github.com/EricPoehlsen/DeviantShow"
UI_ABOUT_CLOSE = "Close"

# --- Informational messages ---
UI_NSFW_INFO = "This image was marked as\n" \
               "'mature content'\n" \
               "by its creator.\n\n" \
               "To display mature content start\n" \
               "DeviantShow with flag --nsfw "
UI_BUILD_GALLERY = "Creating gallery links and indices\n"\
                   "This might take a few moments ..."

# --- Network endpoints and user agents ---
RSS_BASE = "https://backend.deviantart.com/rss.xml?q="
UA_IMAGELOADER = "DeviantShow ImageLoader/0.1"
UA_RSSLOADER = "DeviantShow RSS Reader/0.1"

# --- Error messages ---
ERROR = "Error"
ERR_NO_CONNECTION = "Can't connect to DeviantArt ..."
ERR_INVALID_URL = "This does not look like a DeviantArt URL"
ERR_INVALID_PATH = "Specified path '{path}' is not valid "\
                   "and can't be created automatically!\n"\
                   "Please provide a valid --path argument!"

TERM_TITLE = "Terminal"

# --- argparse help strings ---
ARGPARSE_DESCRIPTION = "DeviantShow is a slide show for DeviantArt."
ARGPARSE_INTERVAL = "Display interval in seconds. Default is 10, minimum is 3."
ARGPARSE_GALLERYLIMIT = "Maximum number of RSS request. Default is 3.\n" \
                        "The RSS response yields a maximum of 60 gallery entries. This defines how " \
                        "many chunks of a larger gallery should be loaded."
ARGPARSE_NSFW = "This flag denotes that you want to recieve images marked " \
                "as adult content. Please be aware that there is content on DeviantArt that " \
                "might not be labeled as 'adult content' but is NSFW anyway!"
ARGPARSE_PATH = "Where to store (and look for) the images."\
                "Can be a relative or absolute path"
ARGPARSE_CREDITS = "Don't display image title and creator."
11,426
52e2e683ca2117146b371e82372bd126f598d344
"""Write a program that prompts for two numbers. Add them together and print the result."""

print("Give me two numbers, and I'll add them")
print("Enter 'q' to quit")

while True:
    first_number = input("\nFirst Number: ")
    if first_number == 'q':
        break
    second_number = input("Second Number: ")
    # NOTE(review): 'q' at the second prompt is not honoured — it falls into
    # the ValueError branch instead of quitting; confirm intended behaviour.
    try:
        answer = int(first_number) + int(second_number)
    except ValueError:
        # Non-numeric input: report and re-prompt instead of crashing.
        print("You must enter a number")
    else:
        print(answer)
11,427
285ee19234d9fa9bc2fcbd742ea8bbed924cc943
import pandas as pd
import numpy as np
import yfinance_loader as yfl
import matplotlib.pyplot as plt
import seaborn as sns


def get_returns(prices):
    """Simple (percentage) returns of a price series/frame, NaN rows dropped."""
    cleaned = pd.DataFrame(prices).dropna()
    return cleaned.pct_change(fill_method="ffill").dropna()


def get_covar_matrix(returns):
    """Sample covariance matrix of the return series."""
    return pd.DataFrame(returns).cov()


def min_var_weights(cov_matrix):
    """Closed-form global minimum-variance weights: w proportional to
    inv(Cov) @ 1, normalised to sum to 1 (pseudo-inverse for robustness)."""
    ones = np.ones(len(cov_matrix))
    inverted_cov = pd.DataFrame(np.linalg.pinv(cov_matrix.values), cov_matrix.columns, cov_matrix.index)
    weights = np.dot(inverted_cov, ones)
    return weights/sum(weights)


def efficient_weights(cov_matrix, mean_returns):
    """Weights proportional to inv(Cov) @ mean_returns, normalised to sum
    to 1. Unconstrained, so individual weights may be negative (shorts)."""
    mean = np.array(mean_returns.values)
    inverted_cov = pd.DataFrame(np.linalg.pinv(cov_matrix.values), cov_matrix.columns, cov_matrix.index)
    weights = np.dot(inverted_cov, mean)
    return weights/sum(weights)


def monthly_returns(prices):
    """Mean month-over-month return per asset.

    NOTE(review): when the monthly resample leaves NaNs this falls back to
    the *whole-period* return — a different scale than a monthly mean;
    confirm that mix is intended.
    """
    returns = prices.asfreq('D').ffill().asfreq('M').pct_change().dropna(how='all').mean()
    if returns.isna().sum() > 0:
        return prices.iloc[-1, :]/prices.iloc[0, :]-1
    return returns


#def dynamic_portfolio_value(endowment, weights, prices):
#    if isinstance(weights, np.ndarray):
#        weights = pd.DataFrame(np.full((prices.shape[0], prices.shape[1]), weights), index=prices.index)
#        weights.columns = prices.columns
#        print(weights)
#    amounts = [(endowment * weights.iloc[0, :]).divide(prices.iloc[0, :])]
#    value = [endowment]
#    #initial_investment = endowment * weights
#    for i in range(1, len(prices)):
#        value.append((amounts[-1]*prices.iloc[i, :]).sum())
#        amounts.append(weights.iloc[i, :]*value[-1]/prices.iloc[i, :])
#
#    return pd.DataFrame(value)


def portfolio_amounts(endowment, weights, prices):
    """Shares bought per asset when the endowment is split by `weights` at
    the first available price (buy-and-hold)."""
    initial_investment = endowment*weights
    return initial_investment/prices.iloc[0, :]


def portfolio_value(endowment, weights, prices):
    """Mark-to-market value of the buy-and-hold portfolio over time."""
    amounts = portfolio_amounts(endowment, weights, prices)
    return pd.DataFrame(amounts*prices).sum(axis=1)


def split_prices(prices, start_window = 0, len_window = 600):
    """Row-slice `prices` to len_window rows starting at start_window."""
    return prices.iloc[start_window:start_window+len_window, :]
# --- Script: build and compare minimum-variance vs 'efficient' portfolios ---

#yfl.clean_raw_files()
#tickers = pd.read_csv('ticker_lists/c25.csv', header=None)
#tickers = tickers[0].to_list()
#tickers = [x.replace(' ', '-') + '.ST' for x in tickers]
#tickers = ['VOO']
#tickers = ['JYSK.CO', 'DANSKE.CO', 'RILBA.CO', 'JUTBK.CO', 'NDA-DK.CO', 'VJBA.CO',
#           'SYDB.CO', 'SPNO.CO', 'MNBA.CO', 'FYNBK.CO', 'SKJE.CO', 'LOLB.CO', 'SALB.CO', 'DJUR.CO']
tickers = ['MBS', 'MGK', 'REMEDY.HE', 'PDX.ST']

# Adjusted close prices for the chosen tickers (rows with any NaN dropped).
prices = yfl.download_tickers(tickers, False, return_only=['Adj Close'], min_period='2Y').dropna()

start = 800
weights = []
rebalance_index = []
# (Disabled) rolling re-estimation of efficient weights every 30 rows.
# for i in range(start, len(prices)-start, 30):
#     price = split_prices(prices, i, start)
#     print(price)
#     rebalance_index.append(price.index[-1])
#     cov = get_covar_matrix(get_returns(price))
#     efficient = efficient_weights(cov, yearly_returns(price)).tolist()
#     weights.append(efficient)

#eff_port_weights_raw = pd.DataFrame(weights, index=rebalance_index)
#dynamic_eff_weights = pd.DataFrame(eff_port_weights_raw, index=prices.index).ffill(axis=0)\
#    .fillna(1/len(eff_port_weights_raw.columns))
#print(dynamic_eff_weights)
#dynamic_eff_weights = yfl.create_stock_columns(dynamic_eff_weights, tickers, cols=['Adj Close'])
#print(len(dynamic_eff_weights), len(prices))

# Static weights estimated over the full sample.
cov = get_covar_matrix(get_returns(prices))
min_var = min_var_weights(cov)
efficient = efficient_weights(cov, monthly_returns(prices))

endowment = 100000  # initial capital
portfolio_min = portfolio_value(endowment, min_var, prices)
portfolio_eff = portfolio_value(endowment, efficient, prices)
#portfolio_eff = portfolio_value(endowment, dynamic_eff_weights, prices)

# Benchmark against an S&P 500 ETF, forward-filling gaps after the join.
compare = 'SPY'
spx = yfl.download_tickers([compare], False, return_only=['Adj Close'])[compare]
combined = pd.concat([portfolio_min, portfolio_eff], axis=1).join(spx, how='left').ffill()
# NOTE(review): 'Porfolio eff' is a typo in a user-visible label — fix is a
# behaviour change (column name), so flagged only.
combined.columns = ['Portfolio min', 'Porfolio eff', compare]
normalized = combined*100/combined.iloc[0, :]  # index every series to 100 at the start

print('\nEfficient:')
for stock in zip(tickers, efficient):
    print(stock[0], f'{stock[1]*100:.2f}%')
print('\nMin var:')
for stock in zip(tickers, min_var):
    print(stock[0], f'{stock[1]*100:.2f}%')

# Top panel: indexed performance; bottom panel: distribution of the
# efficient-portfolio weights.
plt.style.use('seaborn-ticks')
fig, ax = plt.subplots(2, 1)
ax[0].plot(normalized)
#ax[0].set_yscale('log')
ax[0].legend(['Min var port', 'Efficient port', 'S&P500'])
sns.distplot(efficient, ax=ax[1])
fig.text(0.05, 0.95, tickers, fontsize=8)
plt.show()
11,428
c955e9e1700e024c5416f757e6e70d06bd302c67
from django.conf.urls import patterns, url

# URL routes for the daw app's approval-workflow views; all views resolve by
# dotted path under the 'daw.views' prefix.
# NOTE(review): patterns() and string view references were deprecated in
# Django 1.8 and removed in 1.10 — this only works on an older pinned Django.
urlpatterns = patterns('daw.views',
    # Kick off the approval chain for one object's state field.
    url(
        r'^init_object_approvements/(?P<content_type_id>[$a-zA-Z0-9]+)/(?P<obj_pk>[$a-zA-Z0-9]+)/(?P<state_field>[$a-zA-Z0-9_]+)/(?P<callback_url>[$a-zA-Z0-9%:_.]+)/$',
        'init_object_approvements_view'),
    # Approve / reject a pending transition (optional explicit next state).
    url(
        r'^approve_transition/(?P<content_type_id>[$a-zA-Z0-9]+)/(?P<obj_pk>[$a-zA-Z0-9]+)/(?P<state_field>[$a-zA-Z0-9_]+)/(?:(?P<next_state_id>[$a-zA-Z0-9]+)/)?$',
        'approve_transition_view'),
    url(
        r'^reject_transition/(?P<content_type_id>[$a-zA-Z0-9]+)/(?P<obj_pk>[$a-zA-Z0-9]+)/(?P<state_field>[$a-zA-Z0-9_]+)/(?:(?P<next_state_id>[$a-zA-Z0-9]+)/)?$',
        'reject_transition_view'),
    # Read-only state queries.
    url(
        r'^get_current_state/(?P<content_type_id>[$a-zA-Z0-9]+)/(?P<obj_pk>[$a-zA-Z0-9]+)/(?P<state_field>[$a-zA-Z0-9_]+)/?$',
        'get_current_state'),
    url(
        r'^get_state_by_label/(?P<label>[$a-zA-Z0-9]+)/?$',
        'get_state_by_label'),
    # Skip the current transition for an object.
    url(
        r'^skip_transition/(?P<content_type_id>[$a-zA-Z0-9]+)/(?P<object_id>[$a-zA-Z0-9]+)/(?P<state_field>[$a-zA-Z0-9_]+)/?$',
        'skip_transition'),
)
11,429
11a858b60a4d8e2d007c29abee24767251766a0b
import torch.utils.data as data
from config.option import args
import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import utils
import utils_align


class ZoomDataset(data.Dataset):
    """Paired LR-raw / HR-JPEG dataset for SR-RAW-style 'zoom' data.

    Each item is (lr_raw_patch, lr_rgb_patch, aligned_hr_rgb_patch, raw_path),
    where the HR image is warped onto the LR frame via the scene's tform.txt.
    Arrays are returned channels-first, scaled by 1/255.
    """

    def __init__(self, args, isTrain, transform=None):
        self.isTrain = isTrain
        self.up_ratio = int(args.scale)  # super-resolution factor: 4 or 8
        #self.patch_size = 128
        self.patch_size = args.patch_size
        self.transform = transform
        # Train and test splits live in different sub-directories.
        if self.isTrain:
            self.dir_path = os.path.join(args.dir_data, args.data_train)
        else:
            self.dir_path = os.path.join(args.dir_data, args.data_test)
        dir_names = os.listdir(self.dir_path)
        #print(dir_names)
        dir_names.sort()
        self.file_names = []
        # Build (lr_raw, hr_jpeg, scene_dir, lr_id, hr_id) tuples per scene.
        # x4: pairs (0000{i+4}.ARW, 0000{i}.JPG) for i in 1..3;
        # x8: the single pair (00006.ARW, 00001.JPG).
        if self.up_ratio == 4:
            for dir_name in dir_names:
                d_path = os.path.join(self.dir_path, dir_name)
                for i in range(1,4):
                    self.file_name = []
                    lr_raw_path = os.path.join(d_path, "0000"+str(i+4)+".ARW")
                    if not os.path.exists(lr_raw_path):
                        continue  # some scenes lack the deeper zoom levels
                    hr_path = os.path.join(d_path, "0000"+str(i)+'.JPG')
                    self.file_name.append(lr_raw_path)
                    self.file_name.append(hr_path)
                    self.file_name.append(d_path)
                    self.file_name.append(i+4)
                    self.file_name.append(i)
                    self.file_names.append(self.file_name)
        elif self.up_ratio == 8:
            for dir_name in dir_names:
                d_path = os.path.join(self.dir_path, dir_name)
                self.file_name = []
                lr_raw_path = os.path.join(d_path, "0000"+str(6)+".ARW")
                hr_path = os.path.join(d_path, "0000"+str(1)+'.JPG')
                self.file_name.append(lr_raw_path)
                self.file_name.append(hr_path)
                self.file_name.append(d_path)
                self.file_name.append(6)
                self.file_name.append(1)
                self.file_names.append(self.file_name)
        else:
            raise ValueError("arg.scale should be 4 or 8")

    ## file_name : [lr_raw, HR, d_path, lr_id, hr_id]
    def __getitem__(self,i):
        file_name = self.file_names[i]
        height = width = self.patch_size
        LRAW_path = file_name[0]
        #LRAW_path = "/store/dataset/zoom/test/00134/00005.ARW"
        # The LR camera JPEG sits next to its ARW with the same stem.
        LR_path = LRAW_path.replace(".ARW",".JPG")
        HR_path = file_name[1]
        #HR_path = "/store/dataset/zoom/test/00134/00001.JPG"
        tform_txt = os.path.join(file_name[2],"tform.txt")
        #tform_txt = "/store/dataset/zoom/test/00134/tform.txt"
        # Demosaic-free raw read: subtract black level, normalise by white level.
        white_lv, black_lv = utils.read_wb_lv("sony")
        input_bayer = utils.get_bayer(LRAW_path, black_lv, white_lv)
        #print(input_bayer.shape)
        LR_raw = utils.reshape_raw(input_bayer)
        LR_img = np.array(Image.open(LR_path))
        #with shape [self.patch_size, self.patch_size, 4]
        input_raw = utils.crop_center_wh(LR_raw, height, width)
        # RGB patch is 2x the raw patch (each raw cell covers a 2x2 Bayer block).
        cropped_lr_hw = utils.crop_center_wh(LR_img, height*2, width*2)
        #ground truth
        #with shape [self.patch_size*2*self.up_ratio, self.patch_size*2*self.up_ratio, 3]
        #HR_path = file_name[1]
        ## crop and resize according 00001.JPG
        rgb_camera_hr = np.array(Image.open(HR_path))
        # 240.0 appears to be the reference focal length used to normalise FOV
        # across zoom levels — TODO confirm against utils.readFocal_pil.
        crop_ratio = 240.0 / utils.readFocal_pil(HR_path)
        cropped_input_rgb_hr = utils.crop_fov(rgb_camera_hr, 1./crop_ratio)
        #cropped_input_rgb_hr = utils.image_float(cropped_input_rgb_hr)
        input_camera_rgb_hr = Image.fromarray(np.uint8(utils.clipped(cropped_input_rgb_hr)))
        input_camera_rgb_naive = input_camera_rgb_hr.resize((int(input_camera_rgb_hr.width * crop_ratio),
            int(input_camera_rgb_hr.height * crop_ratio)), Image.ANTIALIAS)
        #input_camera_rgb_naive.save("align_arw_test/input_rgb_camera_HR.png")
        hr = np.array(input_camera_rgb_naive)
        ## Align HR Image to LR Image and crop the corresponding patches
        ### Resize to corresponding up_ratio size
        zoom_ratio = 240.0 / utils.readFocal_pil(LR_path)
        aligned_hr_hw, _ = utils_align.imgAlign(hr, tform_txt, file_name[4], file_name[3], True,
            int(height*2*zoom_ratio), int(width*2*zoom_ratio))
        aligned_image = Image.fromarray(np.uint8(utils.clipped(aligned_hr_hw)))
        aligned_image = aligned_image.resize((int(height *2 * self.up_ratio), int(width * 2 * self.up_ratio)), Image.ANTIALIAS)
        # aligned_image.save("align_arw_test/input_rgb_camera_alignedLHR.png")
        aligned_img_np = np.array(aligned_image)
        # [H,W,C] => [C,H,W]
        input_raw = np.transpose(input_raw, (2,0,1)) / 255.0
        cropped_lr_hw = np.transpose(cropped_lr_hw, (2,0,1)) / 255.0
        aligned_img_np = np.transpose(aligned_img_np, (2,0,1)) / 255.0
        ##ToTensor
        return input_raw, cropped_lr_hw, aligned_img_np, LRAW_path

    def __len__(self):
        return len(self.file_names)


if __name__=="__main__":
    # Manual sanity check: visualise one LR/HR pair and their difference map.
    images = []
    zoomData = ZoomDataset(args, isTrain=True)
    print(len(zoomData))
    LR_raw,LR,HR,_ = zoomData[3]
    LR = np.transpose(LR, (1,2,0))
    HR = np.transpose(HR, (1,2,0))
    # Upsample the LR RGB patch to the HR size for a visual comparison.
    aligned_image = Image.fromarray(np.uint8(utils.clipped(LR)*255))
    aligned_image = aligned_image.resize((HR.shape[1], HR.shape[0]), Image.ANTIALIAS)
    LR = np.array(aligned_image)
    LR = utils.image_float(LR)
    HR = utils.image_float(HR)
    images.append(LR)
    images.append(HR)
    #sum_img_t, _ = utils_align.sum_aligned_image(images,images)
    min_img_t = np.abs(HR - LR)
    min_img_t_scale = (min_img_t - np.min(min_img_t)) / (np.max(min_img_t) - np.min(min_img_t))
    #print(min_img_t)
    #print(min_img_t_scale)
    #cv2.imwrite('aligned.jpg', np.uint8(sum_img_t * 255))
    #sum_img_t = np.uint8(255.*utils.clipped(sum_img_t))
    #
    plt.subplot(221)
    plt.imshow(LR)
    plt.subplot(222)
    plt.imshow(HR)
    plt.subplot(223)
    plt.imshow(min_img_t)
    plt.subplot(224)
    plt.imshow(min_img_t_scale)
    plt.show()
11,430
e88067b216f23393ee81be3ec34c34bf0e90837a
#!/usr/bin/python3
# Demo / manual test harness for ltr_properties: defines example classes
# exercising __slots__, type hints, Links, enums and Optionals, then opens
# them in the Qt-based property editor.
import ltr_properties
from PyQt5.QtWidgets import QApplication

import json
import sys
from typing import List, Dict, Optional
from enum import Enum, auto

filename = "data/mainOutput.json"


def printLoadedClass(obj):
    # Debug helper: print the class name plus every assigned __slots__ value.
    classDesc = type(obj).__name__ + ":"
    for slot in obj.__slots__:
        if hasattr(obj, slot):
            classDesc += " " + slot + "=" + str(getattr(obj, slot))
    print("Loaded " + classDesc)


class Color():
    # RGB colour; shown with a custom editor (EditorColor, mapped in main).
    __slots__ = "r", "g", "b"

    def __init__(self, r=0, g=0, b=0):
        self.setRgb(r, g, b)

    def postLoad(self):
        # Called by the deserializer after loading; logs for the demo.
        printLoadedClass(self)

    def getRgb(self):
        return self.r, self.g, self.b

    def setRgb(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b


class Vector():
    # 3D vector; shown with the horizontal slotted-class editor.
    __slots__ = "x", "y", "z"

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z

    def postLoad(self):
        printLoadedClass(self)


class Baz():
    __slots__ = "x"

    def __init__(self):
        self.x = 10000

    def postLoad(self):
        printLoadedClass(self)


class FancyBaz(Baz):
    # Subclass demonstrating an ltr_properties Link-typed slot.
    __slots__ = "fanciness"
    fanciness: ltr_properties.Link[Color]


class Bar(object):
    __slots__ = "a", "b", "c", "d", "e", "f", "_hidden"

    # Type hints are optional, but are checked when deserializing. For lists and
    # dicts, they allow empty lists/dicts to be filled with new elements, rather
    # than requiring an existing element to duplicate.
    a: Dict[str, str]
    b: str
    c: List[Color]
    d: List[Vector]
    e: Baz
    f: Optional[Vector]

    def __init__(self):
        self.a = {"one": "a", "two": "b"}
        self.b = "two"
        self.c = [Color(0, 150, 255), Color(), Color(255, 255, 255)]
        self.d = [Vector(), Vector(1, 4, 9), Vector(255, 0, -255)]
        self.e = Baz()
        # Leading underscore: excluded from the editor UI.
        self._hidden = "Shouldn't show up"

    def postLoad(self):
        printLoadedClass(self)


class EnumVal(Enum):
    Val1 = auto()
    Val2 = auto()
    Val3 = auto()


class Foo(object):
    # Top-level demo object combining primitives, containers and nested objects.
    __slots__ = "x", "y", "z", "w", "s", "b", "v", "ev"
    ev: EnumVal

    def __init__(self):
        self.x = 0
        self.y = -25.1
        self.z = [-100, 20, 3]
        self.w = True
        self.s = "test"
        self.b = Bar()
        self.v = Vector(1, 4, 9)
        self.ev = EnumVal.Val1

    def postLoad(self):
        printLoadedClass(self)


class OptionalTest(object):
    __slots__ = "f", "of"
    f: float
    of: Optional[float]

    def postLoad(self):
        printLoadedClass(self)


if __name__ == '__main__':
    # Launch the editor on the 'data' directory using this module's classes
    # as the schema.
    app = QApplication(sys.argv)
    currentModule = sys.modules[__name__]
    ltrEditor = ltr_properties.LtrEditor("data", currentModule, serializerIndent=4)
    ltrEditor.addCustomEditorMapping(Color, ltr_properties.EditorColor)
    ltrEditor.addCustomEditorMapping(Vector, ltr_properties.EditorSlottedClassHorizontal)
    ltrEditor.setGeometry(300, 200, 900, 900)
    ltrEditor.setWindowTitle('LtRandolph Property Editor')
    ltrEditor.show()
    app.exec_()
11,431
06408572f715cc243564fe4266165c197a5351ed
def make_brick(small, big, goal):
    """Return True if a row of exactly `goal` inches can be built from
    `small` 1-inch bricks and `big` 5-inch bricks."""
    # Reachable iff the total material suffices and the remainder after using
    # 5-inch bricks can be covered by the 1-inch ones.
    return goal <= small + 5 * big and goal % 5 <= small


def lone_sum(a, b, c):
    """Sum of the three values, ignoring any value that occurs more than once."""
    total = 0  # renamed from 'sum', which shadowed the builtin
    if a not in (b, c):
        total += a
    if b not in (a, c):
        total += b
    if c not in (a, b):
        total += c
    return total


def lucky_sum(a, b, c):
    """Sum of the values left-to-right; a 13 stops the sum — neither it nor
    anything to its right counts."""
    total = 0
    for value in (a, b, c):
        if value == 13:
            break
        total += value
    return total


def fix_teen(n):
    """Map the 'teen' values 13-19 to 0, except 15 and 16 which stay as-is."""
    return 0 if n in (13, 14, 17, 18, 19) else n


def no_teen_sum(a, b, c):
    """Sum of the three values with fix_teen applied to each."""
    return fix_teen(a) + fix_teen(b) + fix_teen(c)


def round10(num):
    """Round to the nearest multiple of 10; exact .5 boundaries round up.
    Equivalent closed form of the original two-branch floor/ceil logic."""
    return (num + 5) // 10 * 10


def round_sum(a, b, c):
    """Sum of the three values, each rounded to the nearest 10."""
    return round10(a) + round10(b) + round10(c)


def close_far(a, b, c):
    """True if exactly one of b/c is 'close' to a (differs by at most 1)
    while the other is 'far' (differs from BOTH other values by 2 or more).

    Fix: the original never checked that the remaining value was close to a
    in its else-branch, so e.g. close_far(1, 10, 20) wrongly returned True.
    (It also used `while` where only an `if` was meant.)
    """
    b_close = abs(a - b) <= 1
    c_close = abs(a - c) <= 1
    b_far = abs(a - b) >= 2 and abs(b - c) >= 2
    c_far = abs(a - c) >= 2 and abs(b - c) >= 2
    return (b_close and c_far) or (c_close and b_far)


def make_chocolate(small, big, goal):
    """Number of 1-kilo (small) bars needed to reach `goal` after using as
    many 5-kilo (big) bars as fit, or -1 when the goal cannot be met."""
    # Closed form of the original decrement loop: cap bigs at what fits.
    used_big = min(big, goal // 5)
    remainder = goal - used_big * 5
    return remainder if remainder <= small else -1
11,432
853065740b916680dea7fee126ca404dea912770
from django.contrib import admin
from .models import EmployeeEval, Category, SubCategory, EvaluationCriteria, Evaluation, Comments

# Register your models here.


class EmployeeEvalView(admin.ModelAdmin):
    """Admin for evaluation submissions, filterable by year and period."""

    def employee_name(self, obj):
        # "Last, First (Nickname)" computed display column.
        return obj.employee.last_name + ', ' + obj.employee.first_name +' (' + obj.employee.nick_name +')'

    list_display = ('employee','employee_name','user_submit','lead_submit','yr','period')
    # NOTE(review): the 'empl__' prefixes don't match the 'employee' relation
    # used above, and 'nickname' differs from the 'nick_name' field — these
    # search fields likely break admin search; verify against the model.
    search_fields=('employee__last_name','employee__first_name','empl__middle_name','empl__nickname')
    list_filter=['yr','period']


admin.site.register(EmployeeEval,EmployeeEvalView)
admin.site.register(Category)
admin.site.register(SubCategory)


class EvaluationCriteriaView(admin.ModelAdmin):
    """Admin for evaluation criteria, filterable by category name and group."""

    def category(self, obj):
        # Category reached through the sub-category relation.
        return obj.sub_category.category

    def sub_category(self, obj):
        # NOTE(review): this method shadows the model field of the same name
        # within the admin — confirm it is intentional.
        return obj.sub_category

    list_display = ('sub_category', 'description', 'group')
    list_filter = ['sub_category__category__name','group']


admin.site.register(EvaluationCriteria, EvaluationCriteriaView)


class EvaluationView(admin.ModelAdmin):
    """Admin for individual evaluation scores per reviewer role."""
    list_display=('employee', 'criteria', 'area_lead', 'project_lead', 'manager')
    list_filter=('employee',)


admin.site.register(Evaluation,EvaluationView)


class CommentsView(admin.ModelAdmin):
    """Admin for yearly free-text comments with status tracking."""

    def employee_name(self, obj):
        # Same "Last, First (Nickname)" display as EmployeeEvalView.
        return obj.employee.last_name + ', ' + obj.employee.first_name +' (' + obj.employee.nick_name +')'

    list_display=('employee_name','yr','status')
    list_filter =('yr','status')
    # NOTE(review): same suspect 'empl__' search fields as above.
    search_fields=('employee__last_name','employee__first_name','empl__middle_name','empl__nickname')


admin.site.register(Comments, CommentsView)
11,433
ddad0825c3018cce62230115e519fe38f8cd0cb0
# Points Django at this app's AppConfig subclass.
# NOTE(review): default_app_config has been deprecated since Django 3.2
# (AppConfig auto-discovery) — safe to remove once the project upgrades.
default_app_config = 'applications.documentation.apps.DocumentationConfig'
11,434
0c1e588170091322c9f6a0512b4a10f96233d352
# Beginner practice / scratch file: loops, *args, dicts, lambdas, numpy
# basics, classes & inheritance, iterators, scoping rules and datetime.
# NOTE(review): depends on a local `module` providing fun2, and the final
# statement raises TypeError (str + int) — presumably deliberate experiments.

# --- for/else ---
for a in range(1,4,2):
    print(a)
else:
    print("datta")

for x in range(2): #hii this datta
    pass
print("its working")


# --- variadic functions ---
def fun(*args):
    for a in args:
        print(a)


fun("datta","kakkad")


def fun4(*kwargs):
    # NOTE(review): the name suggests keyword args, but these are positional *args.
    print(kwargs[0:2])
    print(kwargs[1])


fun4("asewe", "faaw")


def fun33(s="kkr"):
    print(s)


fun33("gass")
fun33("fjsn")
fun33()


def fun44(food):
    # Prints every value of a mapping.
    for a in food.values():
        print(a)


# --- dict basics ---
lis={1:"datta" ,2:"komal",3:"pooja",4:{1:"mom",2:"dad"},5:"man the game"}
tup=("dataa","soat","ram")
fun44(lis)
#lis[4[1]]="kurqrt"
#fun33(lis)
for x,y in lis.items():
    print(x,y)
if "katta" in lis.values():
    print("this is working")
else:
    print("katta is not present in the dcst.")
print(len(lis))
lis["mna"]="this is the new value"
#print(lis)
lis.pop(2)
print(lis)
lis.popitem()
print(lis)
lis.popitem()
"""print(lis)
del lis[1]
print(lis)
del lis
print(lis)"""
li={1:"datta",2:"pooja",3:"komal",4:{1:"mom",2:"dad"}}
print(li)
print(len(li))
ne=li.copy()
print(ne)
li["k"]="have fun"
me=li  # alias, not a copy: changes to li show through me
print(li)
print(me)
li[5]="haiii"
print(me)
me=dict(lis)  # now a real (shallow) copy of lis
print(me)
mer={"first":me,"second":li,"third":ne}
print(mer)
print("\n\n",mer["first"])
ke=dict(datta="hiii",komal="byyy",pooja="niii")
print(ke)
me.clear()
print(me)
val=("datta","prasad","amit","suraj")
va=("friend")  # note: parentheses alone do not make a tuple — this is a str
crea = dict.fromkeys(val,va)
print(crea)
print(crea.get("datta"))
crea.items()
print(crea.items())
print(crea.keys())
crea.setdefault("mannu")
print(crea)
crea.update({"kedar":"friend","kishore":"friend"})
print(crea)
var={"car":"audy","game":"fun"}
lis.update(var)
print(lis)
print(crea.values())

# --- lambdas and closures ---
a=lambda v:v%2
print(a(2))
print(a(5))
x=lambda a,b,c,d: a+b+c+d
print(x(1,2,4,3))


def fun1(n):
    # Returns a multiplier closure bound to n.
    return lambda a: a*n


doubler=fun1(4)
print(doubler(2))
"""a=2
b=3
c=add(a,b)
print(c)"""

# --- numpy basics ---
import numpy as kp

arr=kp.array([1,3,2,4,5])
print(arr)
print(kp.__version__)
arr1=kp.array(["datta","Ram","soat"])
print(arr1)
arra2=kp.array([2.3,4.3,3.00,3.4])
print(arra2)
print(type(arra2))
arr3=kp.array(("datta","arra"))
print(arr3)
a=(1,2,3,4,5)
print(a[2])
arr4=kp.array(34)
print(arr4)
# NOTE(review): ragged nested lists — newer numpy versions raise here
# instead of silently building an object array.
arra1=kp.array([[[1,2,3,4],["data","kedar"]],[["kakkad","pakkad"],[3.3,2.3,4.2]]])
print(arra1)
print(arra1.ndim)
print(arr4.ndim)
arra7=kp.array([1,2,3,4],ndmin=5)
print(arra7)
print("the no of dimentin are",arra7.ndim)


# --- classes ---
class myclass:
    v=22  # class attribute shared by all instances


obj=myclass()
print(obj.v)


class anewc:
    def __init__(se,name,age):  # 'se' plays the role of the usual 'self'
        se.name1=name
        se.age1=age

    def my1(se):
        print("is't Working!!!")


obj2=anewc("datta",20)
print(obj2.name1)
print(obj2.age1)
obj3=anewc("kedar",20)
obj3.my1()
obj3.age=22  # attributes can be added to an instance after creation
print(obj3.age)
del obj3
#print(obj3)


class my2:
    pass


v3=my2()
"""import math
a=2
b=4
print(math.add(a+b))"""


# --- inheritance ---
class parent():
    def __init__(self,fname,lname):
        self.fname=fname
        self.lname=lname


obj0=parent("Datta","Soat")
print(obj0.fname)


class child(parent):
    def __init__(self,fname,lname,year):
        #self.mname=fname
        #self.fname=mname
        super().__init__(fname,lname)
        self.graduationyear=year

    def welcome(self):
        print("heiiii!!!!! it's working")


objc=child("kedar","Tandle",2022)
print(objc.fname)
print(objc.graduationyear)
objd=child("datta","soat",2022)
print(objd.fname)
print(objd.graduationyear)
objd.welcome()

# --- iterators ---
mytup=(23,32,44,45,5)
myit=iter(mytup)
print(next(myit))
print(next(myit))
print(next(myit))
sttr="datascience"
nit=iter(sttr)
print(next(nit))
print(next(nit))
print(next(nit))
print(next(nit))
"""class my1():
    def __iter__(self):
        self.a=1
        return self
    def __next__(self):
        if self.a <= 20:
            x=self.a;
            self.a +=1
            return x
        else:
            raise StopIteration
ob=my1()
ite=iter(ob)
for x in ite:
    print(x)
print(next(ite))
print(next(ite))
print(next(ite))"""

# --- scoping: global vs enclosing vs local ---
x1=3


def myfun2():
    global x3
    x3= 22
    x1=4
    print("this is the global one",x1)  # actually prints the *local* x1 (=4)
    x=2
    print("thsi one is outer one",x)

    def myfun3():
        # Reads x1 and x from the enclosing myfun2 scope (closure).
        print("this one is global one used in ineer fun ",x1)
        print("this is my ineer fun variable",x)

    myfun3()


myfun2()
print("this is the global dec in local scope",x3)

# Local helper module, not part of the stdlib.
from module import fun2

fun2("datta")

# --- datetime ---
import datetime

x1=datetime.datetime.now()
print(x1)
print(x1.year)
print(x1.strftime("%A"))
x=datetime.datetime(2020,5,16)
print(x)
print(x1.strftime("%%"))
d=datetime.datetime.now()
print(d)
string ="darta"
# NOTE(review): str + int raises TypeError — this line crashes the script.
string+332
11,435
532db45efdf939e80c07069d912013747bd0e75e
from Node import Node


# Inner routine of Iterative Deepening Search: a depth-limited DFS.
def depth_limited_search(problem, limit):
    """Run a depth-first search that never expands below *limit*.

    Returns a tuple (solution, expanded_count, path_cost); when no goal is
    found within the limit, returns (None, expanded_count, None).
    """
    expanded = 0
    stack = [Node(problem.s_start)]  # LIFO frontier -> depth-first order
    while stack:
        current = stack.pop()
        expanded += 1
        if problem.is_goal(current.state):
            return current.solution(), expanded, current.path_cost
        # Only expand nodes that are still above the depth cutoff.
        if current.depth < limit:
            # Reversed so children are popped in their original order.
            for child in reversed(current.expand(problem)):
                stack.append(child)
    return None, expanded, None


# Outer routine of IDS: repeat the limited search with growing depth limits.
def iterative_deepening_search(problem):
    """Iteratively deepen the DFS cutoff (1..19) until a solution appears.

    Returns (solution, total_expanded, path_cost), or
    (None, total_expanded, None) when no depth up to 19 succeeds.
    """
    total_expanded = 0
    for limit in range(1, 20):
        solution, expanded, cost = depth_limited_search(problem, limit)
        total_expanded += expanded
        if solution:
            return solution, total_expanded, cost
    return None, total_expanded, None
11,436
9b24063c3001642b42f0dcd9d0d7ac5d05305e66
import numpy as np

# Placeholder submission so the template is valid Python: replace this with
# your model's predictions for the 2000 test samples.
# (The original line `prediction = # THAT'S YOUR JOB` was a syntax error.)
prediction = np.zeros(2000)

# MAKE SURE THAT YOU HAVE THE RIGHT FORMAT
assert prediction.ndim == 1
assert prediction.shape[0] == 2000

# AND SAVE EXACTLY AS SHOWN BELOW
np.save('prediction.npy', prediction)
11,437
6628dc248c400c5e21665499677395bec9244388
from django.contrib import admin # Register your models here. # from .models import Customer # from .models import Product # from .models import Order from .models import * # equal to line 5 to 7 admin.site.register(Customer) admin.site.register(Tag) admin.site.register(Product) admin.site.register(Order)
11,438
c0d11507a3616a8feef8b390fee646c56ee4b0e5
import pandas as pd
import numpy as np
from keras import layers
from keras import models
import librosa
import sys
import os
import csv
import pathlib
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
import keras
import warnings
warnings.filterwarnings('ignore')

# Genre names in alphabetical order -- the same order LabelEncoder assigns
# integer labels, so argmax over the softmax output indexes directly into it.
CATEGORIES = ['Blues', 'Classical', 'Country', 'Disco', 'Hiphop', 'Jazz', 'Metal', 'Pop', 'Reggae', 'Rock']
genres = 'blues classical country disco hiphop jazz metal pop reggae rock'.split()

# CSV column names: filename, 6 aggregate features, 20 MFCCs, and the label.
header = 'filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'
for j in range(1, 21):
    header += f' mfcc{j}'
header += ' label'
header = header.split()

# One-off feature extraction that produced data.csv (kept for reference).
'''
file = open('data.csv', 'w', newline='')
with file:
    writer = csv.writer(file)
    writer.writerow(header)
genres = 'blues classical country disco hiphop jazz metal pop reggae rock'.split()
for g in genres:
    for filename in os.listdir(f'../genres/{g}'):
        songname = f'../genres/{g}/{filename}'
        y, sr = librosa.load(songname, mono=True, duration=30)
        chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
        rmse = librosa.feature.rms(y=y)[0]
        spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
        spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
        rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
        zcr = librosa.feature.zero_crossing_rate(y)
        mfcc = librosa.feature.mfcc(y=y, sr=sr)
        to_append = f'{filename} {np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
        for e in mfcc:
            to_append += f' {np.mean(e)}'
        to_append += f' {g}'
        file = open('data.csv', 'a', newline='')
        with file:
            writer = csv.writer(file)
            writer.writerow(to_append.split())
'''

data = pd.read_csv('data.csv')
data.head()
data.shape

# filename is an identifier, not a feature.
data = data.drop(['filename'], axis=1)

# label: encode the genre strings to integers 0-9 (alphabetical order).
genre_list = data.iloc[:, -1]
encoder = LabelEncoder()
y = encoder.fit_transform(genre_list)

# feature columns: standardize to zero mean / unit variance.
# NOTE: this fitted scaler must also be applied to any new sample we predict on.
scaler = StandardScaler()
X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype=float))

# train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# First model: small dense network trained on the full training split.
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(24, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=100, batch_size=128)
test_loss, test_acc = model.evaluate(X_test, y_test)
model.save('model.h5')
print('test_acc: ', test_acc)

# validation split carved out of the training data
x_val = X_train[:200]
partial_x_train = X_train[200:]
y_val = y_train[:200]
partial_y_train = y_train[200:]

# Second, deeper model trained with an explicit validation set.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(X_train.shape[1],)))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(24, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(partial_x_train, partial_y_train, epochs=100, batch_size=512, validation_data=(x_val, y_val))
results = model.evaluate(X_test, y_test)
# model.save("model.h5")
# print("Saved model to disk")

# Predict the genre of the audio file named on the command line.
# (Note: this rebinds `y`, previously the label vector, as the audio signal --
# kept from the original, harmless because training has already finished.)
filename = sys.argv[1]
y, sr = librosa.load(filename, mono=True, duration=30)
rmse = librosa.feature.rms(y=y)[0]
spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
zcr = librosa.feature.zero_crossing_rate(y)
spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
mfcc = librosa.feature.mfcc(y=y, sr=sr)
rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
to_append = f'{np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
for element in mfcc:
    to_append += f' {np.mean(element)}'

# BUG FIXES vs. the original:
#  * np.fromstring is deprecated for text parsing -- build the array directly;
#  * the new sample must be transformed with the SAME scaler fitted on the
#    training data, otherwise the network sees raw, out-of-distribution values;
#  * casting the softmax probabilities to int zeroed them out, so
#    np.argmax always returned 0 ("Blues"); argmax on the float probabilities.
X_new = np.array(to_append.split(), dtype=float)
X_new = scaler.transform(X_new.reshape(1, -1))
print(X_new)
ynew = model.predict(X_new)
print(ynew)
predicted_index = int(np.argmax(ynew))
print("X=%s, Predicted=%s" % (X_new, predicted_index))
print(CATEGORIES[predicted_index])
11,439
07eb83ba2d3680f2470701f7e181fbecb42e949a
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 20:37:36 2021

@author: Josey
"""

# The block below was pasted console output (a printed numpy string array),
# not valid Python: evaluating it raised "list indices must be integers"
# because adjacent bracketed lists parse as a subscript. It is kept here as
# a comment so the module imports cleanly.
#
# [['R' 'B' 'R' 'B' 'R']
#  ['B' 'R' 'B' 'R' 'B']
#  ['R' 'B' 'R' 'B' 'R']
#  ['B' 'R' 'B' 'R' 'B']
#  ['R' 'B' 'R' 'B' 'R']]
11,440
95c589dea95d20ece298948a7a83da06a07c3b02
import numpy as np
import matplotlib.pyplot as plt
import keras


def evaluate_model(model, split_sets):
    """Print the model's error on the training and testing splits.

    model: a compiled Keras model.
    split_sets: dict with keys 'X_train', 'y_train', 'X_test', 'y_test'.
    """
    training_error = model.evaluate(split_sets['X_train'], split_sets['y_train'], verbose=0)
    print('training error = ' + str(training_error))

    testing_error = model.evaluate(split_sets['X_test'], split_sets['y_test'], verbose=0)
    print('testing error = ' + str(testing_error))


def output_plot(dataset, y, window_size, train_percent, predictions):
    """Plot the original series alongside the model's fitted predictions.

    dataset: the full (normalized) price series to plot as ground truth.
    y: the target values used to size the train/validation splits.
    window_size: number of leading samples consumed by the sliding window;
        predictions start this many steps into the series.
    train_percent: fraction of y used for training.
    predictions: dict of per-split prediction arrays. With more than two
        entries it is assumed to hold 'train', 'valid' and 'test'; otherwise
        only 'train' and 'test' are plotted.
    """
    if len(predictions) > 2:
        # Three-way split: validation gets half of the non-training samples.
        train_split = int(np.ceil(len(y)*train_percent)) + window_size
        valid_split = int(np.ceil(len(y)*((1-train_percent)/2))) + train_split

        # plot original series
        plt.plot(dataset, color='k')

        # plot training set prediction
        plt.plot(np.arange(window_size, train_split, 1), predictions['train'], color='b')

        # plot validation set prediction
        plt.plot(np.arange(train_split, valid_split, 1), predictions['valid'], color='g')

        # plot testing set prediction
        plt.plot(np.arange(valid_split, valid_split + len(predictions['test']), 1), predictions['test'], color='r')

        # pretty up graph
        plt.xlabel('day')
        plt.ylabel('(normalized) price')
        plt.legend(['original series', 'training fit', 'Validation fit', 'testing fit'], loc='center left', bbox_to_anchor=(1, 0.5))
        plt.show()
    else:
        # Two-way split: everything after the training region is test data.
        train_split = int(np.ceil(len(y)*train_percent)) + window_size

        # plot original series
        plt.plot(dataset, color='k')

        # plot training set prediction
        plt.plot(np.arange(window_size, train_split, 1), predictions['train'], color='b')

        # plot testing set prediction
        plt.plot(np.arange(train_split, train_split + len(predictions['test']), 1), predictions['test'], color='r')

        # pretty up graph
        plt.xlabel('day')
        plt.ylabel('(normalized) price')
        plt.legend(['original series', 'training fit', 'testing fit'], loc='center left', bbox_to_anchor=(1, 0.5))
        plt.show()
11,441
38a93c25d601cc9549531354c71a50deddbd6fb0
import time
import serial

# Simple serial console: forwards "on"/"off" commands typed by the user to a
# device on /dev/ttyACM0 (presumably an Arduino -- TODO confirm) and echoes
# each reply line. Loops until Ctrl-C.
ser = serial.Serial("/dev/ttyACM0", baudrate=9600)

try:
    while True:
        comando = input("Ingresar comando (on/off): ")
        # Append a newline terminator and encode to bytes for the wire.
        comando = comando + "\n"
        comandoBytes = comando.encode()
        ser.write(comandoBytes)
        # Give the device a moment to answer before reading.
        time.sleep(0.1)
        read = ser.readline()
        print(read)
except KeyboardInterrupt:
    print("\nInterrupcion por teclado")
except ValueError as ve:
    print(ve)
    print("Otra interrupcion")
finally:
    # Always release the serial port, even on error.
    ser.close()
11,442
ce3b6741eb57747c3bf63d0dce60c89b1772a942
from rest_framework import permissions


class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to modify/edit it.
    Read only is allowed to other users.
    """

    def has_object_permission(self, request, view, obj):
        # GET, HEAD and OPTIONS are safe methods: everyone may read.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Writes require an authenticated user who owns the object.
        if not request.user.is_authenticated:
            return False
        return obj.owner == request.user


class IsRecipeOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow the owner of a recipe associated to an
    object to modify/edit it. Read only is allowed to other users.
    """

    def has_object_permission(self, request, view, obj):
        # GET, HEAD and OPTIONS are safe methods: everyone may read.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Writes require an authenticated user who owns the related recipe.
        if not request.user.is_authenticated:
            return False
        return obj.recipe.owner == request.user
11,443
aad11e06fce27cdfb29721be3fad4408b852840e
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Michael Pechner <mikey@mikey.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
module: ecs_tag
version_added: 1.0.0
short_description: create and remove tags on Amazon ECS resources
notes:
  - none
description:
  - Creates and removes tags for Amazon ECS resources.
  - Resources are referenced by their cluster name.
author:
  - Michael Pechner (@mpechner)
requirements: [ boto3, botocore ]
options:
  cluster_name:
    description:
      - The name of the cluster whose resources we are tagging.
    required: true
    type: str
  resource:
    description:
      - The ECS resource name.
      - Required unless I(resource_type=cluster).
    type: str
  resource_type:
    description:
      - The type of resource.
    default: cluster
    choices: ['cluster', 'task', 'service', 'task_definition', 'container']
    type: str
  state:
    description:
      - Whether the tags should be present or absent on the resource.
    default: present
    choices: ['present', 'absent']
    type: str
  tags:
    description:
      - A dictionary of tags to add or remove from the resource.
      - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value.
    type: dict
  purge_tags:
    description:
      - Whether unspecified tags should be removed from the resource.
      - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
    type: bool
    default: false
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''

EXAMPLES = r'''
- name: Ensure tags are present on a resource
  community.aws.ecs_tag:
    cluster_name: mycluster
    resource_type: cluster
    state: present
    tags:
      Name: ubervol
      env: prod

- name: Remove the Env tag
  community.aws.ecs_tag:
    cluster_name: mycluster
    resource_type: cluster
    tags:
      Env:
    state: absent

- name: Remove the Env tag if it's currently 'development'
  community.aws.ecs_tag:
    cluster_name: mycluster
    resource_type: cluster
    tags:
      Env: development
    state: absent

- name: Remove all tags except for Name from a cluster
  community.aws.ecs_tag:
    cluster_name: mycluster
    resource_type: cluster
    tags:
      Name: foo
    state: absent
    purge_tags: true
'''

RETURN = r'''
tags:
  description: A dict containing the tags on the resource
  returned: always
  type: dict
added_tags:
  description: A dict of tags that were added to the resource
  returned: If tags were added
  type: dict
removed_tags:
  description: A dict of tags that were removed from the resource
  returned: If tags were removed
  type: dict
'''

from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags

try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # Handled by AnsibleAWSModule


def get_tags(ecs, module, resource):
    """Return the tags on *resource* (an ARN) as a plain Ansible dict."""
    try:
        return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))


def get_arn(ecs, module, cluster_name, resource_type, resource):
    """Resolve *resource* of *resource_type* within *cluster_name* to its ARN.

    Fails the module when the resource cannot be found or the API call errors.
    """
    try:
        if resource_type == 'cluster':
            description = ecs.describe_clusters(clusters=[resource])
            resource_arn = description['clusters'][0]['clusterArn']
        elif resource_type == 'task':
            description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
            resource_arn = description['tasks'][0]['taskArn']
        elif resource_type == 'service':
            description = ecs.describe_services(cluster=cluster_name, services=[resource])
            resource_arn = description['services'][0]['serviceArn']
        elif resource_type == 'task_definition':
            description = ecs.describe_task_definition(taskDefinition=resource)
            resource_arn = description['taskDefinition']['taskDefinitionArn']
        elif resource_type == 'container':
            # Fix: describe_container_instances takes cluster= and
            # containerInstances=; the original passed clusters=[resource],
            # which is not a valid parameter for this API call.
            description = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[resource])
            resource_arn = description['containerInstances'][0]['containerInstanceArn']
    except (IndexError, KeyError):
        module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))

    return resource_arn


def main():
    """Reconcile the requested tags with the tags currently on the resource."""
    argument_spec = dict(
        cluster_name=dict(required=True),
        resource=dict(required=False),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
        resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
    )
    required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]

    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)

    resource_type = module.params['resource_type']
    cluster_name = module.params['cluster_name']
    # A cluster is identified by its own name; everything else needs 'resource'.
    if resource_type == 'cluster':
        resource = cluster_name
    else:
        resource = module.params['resource']
    tags = module.params['tags']
    state = module.params['state']
    purge_tags = module.params['purge_tags']

    result = {'changed': False}

    ecs = module.client('ecs')

    resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)

    current_tags = get_tags(ecs, module, resource_arn)

    add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)

    # Build the removal set: explicitly-absent tags (null value, or value
    # matching the current one) plus anything compare_aws_tags flagged to purge.
    remove_tags = {}
    if state == 'absent':
        for key in tags:
            if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
                remove_tags[key] = current_tags[key]

    for key in remove:
        remove_tags[key] = current_tags[key]

    if remove_tags:
        result['changed'] = True
        result['removed_tags'] = remove_tags
        if not module.check_mode:
            try:
                ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))

    if state == 'present' and add_tags:
        result['changed'] = True
        result['added_tags'] = add_tags
        current_tags.update(add_tags)
        if not module.check_mode:
            try:
                tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
                ecs.tag_resource(resourceArn=resource_arn, tags=tags)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))

    result['tags'] = get_tags(ecs, module, resource_arn)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
11,444
f176ac000941e0c8c12690a06f6b2801cee33a23
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 08/20/10 njensen Initial Creation.
#
#

__all__ = ['SerializationException']

from . import dstypes, adapters
from . import DynamicSerializationManager


class SerializationException(Exception):
    """Raised when dynamic (de)serialization of an object fails."""

    def __init__(self, message=None):
        self.message = message

    def __str__(self):
        # An unset message renders as the empty string.
        return self.message if self.message else ""


def serialize(obj):
    """Serialize *obj* to bytes via a fresh DynamicSerializationManager."""
    manager = DynamicSerializationManager.DynamicSerializationManager()
    return manager.serializeObject(obj)


def deserialize(objbytes):
    """Deserialize *objbytes* back into an object via a fresh manager."""
    manager = DynamicSerializationManager.DynamicSerializationManager()
    return manager.deserializeBytes(objbytes)
11,445
6c9055797f8147717919e75b673f8134cd4ad8b6
class Solution:
    def duplicateZeros(self, arr: List[int]) -> None:
        """
        Do not return anything, modify arr in-place instead.

        Each zero is duplicated in place, shifting the remaining elements
        right; elements pushed beyond the original length are discarded.
        """
        n = len(arr)
        zeros = arr.count(0)
        # Copy from the back of the (virtual) expanded array of length
        # n + zeros; writes past index n-1 fall off the end and are skipped.
        read = n - 1
        write = n + zeros - 1
        while read >= 0:
            if write < n:
                arr[write] = arr[read]
            if arr[read] == 0:
                # A zero occupies two slots in the expanded array.
                write -= 1
                if write < n:
                    arr[write] = arr[read]
            read -= 1
            write -= 1
11,446
b27dae62b516aa24edce37aed85bc001de4f2183
# Print "positive" if the entered text contains two adjacent vowels,
# otherwise print "negative".
text = input("Please enter the text :").lower()
vowels = set("aeiou")

# zip(text, text[1:]) yields every adjacent character pair; empty or
# one-character input produces no pairs, so result stays False -- this
# replaces the original's explicit len(text) <= 1 branch.
result = any(a in vowels and b in vowels for a, b in zip(text, text[1:]))

if result:
    print("positive")
else:
    print("negative")
11,447
412b59f47030b97f58e666f3d8eb7bfdac001bbc
# Author: kk.Fang(fkfkbill@gmail.com) __all__ = [ "OracleStatsCMDBSQLExecutionCostRank" ] from typing import Generator, Union from mongoengine import StringField, FloatField from models.sqlalchemy import * from .base import * from ..base import * from ...capture import OracleSQLStat @OracleBaseStatistics.need_collect() class OracleStatsCMDBSQLExecutionCostRank( OracleStatsMixOfLoginUserAndCurrentTaskRank): """登录用户与当前库的SQL执行效率排名统计""" BY_WHAT = ("elapsed_time_total", "elapsed_time_delta") LIMITATION_PER = 10 by_what = StringField(choices=BY_WHAT) sql_id = StringField() time = FloatField() meta = { "collection": "oracle_stats_cmdb_sql_exec_cost_rank" } @classmethod def generate( cls, task_record_id: int, cmdb_id: Union[int, None], **kwargs) -> Generator[ "OracleStatsCMDBSQLExecutionCostRank", None, None]: with make_session() as session: for the_user in cls.users(session, cmdb_id=cmdb_id): schemas = cls.schemas( session, cmdb_id=cmdb_id, login_user=the_user.login_user ) for by_what in cls.BY_WHAT: stat_q = OracleSQLStat.filter( task_record_id=task_record_id, schema_name__in=schemas ).order_by(f"-{by_what}")[:cls.LIMITATION_PER] for i, stat in enumerate(stat_q): doc = cls(by_what=by_what) doc.sql_id = stat.sql_id doc.time = getattr(stat, by_what, None) cls.post_generated( doc=doc, task_record_id=task_record_id, cmdb_id=cmdb_id, target_login_user=the_user.login_user, rank=i ) yield doc
11,448
5a983425189fbacd7f171afda865837f8366ca7d
import csv
import os
from os.path import join
import requests
import time


def fetch_media_for_feed(feed_dict):
    """Download the tweet images and profile images listed in a feed's CSVs.

    feed_dict: dict with at least 'feed_dir' (path to the feed's directory)
    and 'short_name' (label used in log output). Expects
    <feed_dir>/media/tweet_images.csv (url, filename rows) and
    <feed_dir>/media/profile_images.csv (url, profile_dir, filename rows).
    Files that already exist on disk are skipped; one second of sleep between
    downloads keeps the request rate polite.
    """
    feed_dir = feed_dict['feed_dir']
    media_dir = join(feed_dir, 'media')
    short_name = feed_dict['short_name']
    image_dir = join(media_dir, 'tweet_images')
    profile_images_dir = join(media_dir, 'profile_images')

    # exist_ok avoids the race-prone exists()-then-makedirs() dance.
    os.makedirs(image_dir, exist_ok=True)
    os.makedirs(profile_images_dir, exist_ok=True)

    media_urls_csv = join(media_dir, 'tweet_images.csv')
    profile_image_csv = join(media_dir, 'profile_images.csv')

    # url -> local filename
    media_urls = {}
    with open(media_urls_csv, 'r', newline="") as csvfile:
        for row in csv.reader(csvfile):
            media_urls[row[0]] = row[1]

    # url -> {profile_dir, filename}
    profile_image_urls = {}
    with open(profile_image_csv, 'r', newline="") as csvfile:
        for row in csv.reader(csvfile):
            profile_image_urls[row[0]] = {'profile_dir': row[1], 'filename': row[2]}

    with requests.Session() as s:
        for url in media_urls:
            target = join(image_dir, media_urls[url])
            # os.path.exists avoids re-listing the whole directory per URL.
            if os.path.exists(target):
                print("{0} Media URLs: {1} has already been fetched".format(short_name, url))
                continue
            print("{0} Media URLs: Fetching {1}".format(short_name, url))
            media = s.get(url)
            with open(target, 'wb') as media_out:
                media_out.write(media.content)
            time.sleep(1)

    with requests.Session() as s:
        for url in profile_image_urls:
            profile_dir_name = profile_image_urls[url]['profile_dir']
            filename = profile_image_urls[url]['filename']
            profile_dir = join(profile_images_dir, profile_dir_name)
            os.makedirs(profile_dir, exist_ok=True)
            target = join(profile_dir, filename)
            if os.path.exists(target):
                print("{0} Profile Images: {1} has already been fetched".format(short_name, url))
                continue
            print("{0} Profile Images: Fetching {1}".format(short_name, url))
            profile_image = s.get(url)
            with open(target, 'wb') as profile_image_out:
                profile_image_out.write(profile_image.content)
            time.sleep(1)


def fetch_media(feeds):
    """Fetch media for every feed in the *feeds* mapping (name -> feed dict)."""
    for feed in feeds:
        print("Fetching media for {0}".format(feed))
        feed_dict = feeds[feed]
        fetch_media_for_feed(feed_dict)
11,449
a87503175c808879ce5fc58c6387f6e855dc440a
#!/usr/bin/env python3
# Hadoop-streaming reducer: input is sorted "token docID count" lines;
# emits one summed count per (token, docID) group.
# Modernized from Python 2 print statements; output format is unchanged.
import sys

prev = ""
total = 0
doc_prev = ""

for line in sys.stdin:
    token, docID, count = line.strip().split()
    count = int(count)
    if prev == token:
        if doc_prev == docID:
            # Same (token, doc) group: accumulate the count.
            total += count
        else:
            # Same token, new document: flush the finished group.
            print(prev, doc_prev, total, " ")
            doc_prev = docID
            total = count
    else:
        # New token: flush the previous group, if any.
        if prev:
            print(prev, doc_prev, total, " ")
        prev = token
        doc_prev = docID
        total = count

# Flush the final group. Guarding on `prev` (instead of comparing against the
# loop variable) avoids a NameError when stdin is empty.
if prev:
    print(prev, doc_prev, total, " ")
11,450
08e3c5ad9f04d3cf610e4c2422e83672eeb35320
# Demonstrate the three basic file modes on the employees list.
# Context managers replace the manual open()/close() pairs so the files are
# closed even if an exception is raised mid-block.

# Open the file and read values
with open("employees.txt", "r") as employees_file:
    if employees_file.readable():
        for employee in employees_file.readlines():
            print(employee)

# Appending to file ("a" adds to the end without touching existing content)
with open("employees.txt", "a") as employees_file:
    employees_file.write("\nLeah - Human Resources")

# Writing file ("w" creates the file or truncates any existing content)
with open("employees-new.txt", "w") as employees_file:
    employees_file.write("Leah - Human Resources")
11,451
a627612570582832ab8c32b0b4af2b3038bbd1c6
from sqlalchemy import create_engine
from datetime import datetime
from TweetCollector_FullArchiveAPI.Tables import Base, Tweet, Place
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import json

"""
The TweetLoader class takes care of transforming the fields from the response
objects to adhere to the data schema represented by the Tweet class.
It uses sqlAlchemy to load the tweets to a DB.
"""


class TweetLoader():

    ### CONSTRUCTOR ###
    def __init__(self, database_url):
        """database_url: SQLAlchemy connection string for the target DB."""

        ### INSTANCE VARIABLES ###
        # Tracks whether recreate_database() has already run this session,
        # so a recreate_db request only wipes the tables once.
        self.recreated_tables = False

        # an engine to communicate with PostgreSQL
        self.engine = create_engine(database_url)

        # a Session object to manage connections (session starts)
        self.Session = sessionmaker(bind=self.engine)

    ### METHODS ###
    # 1. start_load()
    # 2. transform_and_load()
    # 3. recreate_database()
    # 4. session_scope()

    """
    It handles the actual data loading into the DB.
    It is called by the transform_and_load() method.
    """
    # START LOAD
    # 1.
    def start_load(self, tweet_to_add, recreate_db):
        # print("Transformasion okay! Loading to start!")

        # if only interested in the new data, recreate_db deletes data streamed before
        if recreate_db == True and self.recreated_tables == False:
            self.recreate_database()
            print("Recreate db ran!")
            self.recreated_tables = True

        # connect to DB with session
        with self.session_scope() as s:
            # add tweet to DB
            # Fix: narrowed the original bare `except:` -- it also swallowed
            # KeyboardInterrupt/SystemExit, hiding real shutdown requests.
            try:
                s.add(tweet_to_add)
                # print("Tweet Loading Successful!")
            except Exception:
                print("Error in Loading!")

    """
    Transforms the received JSON response to abide the data schema in line
    with what's defined in the Tweet object.
    """
    # TRANSFORM AND LOAD
    # 2.
    def transform_and_load(self, json_response, query_tag, recreate_db):
        # inspect response line (optional)
        # print("json printed: ", json.dumps(json_response, indent=4, sort_keys=True))

        # MAIN LOOP: for every tweet (data_item is a tweet)
        for data_item in json_response["data"]:
            # print("tweet: ", json.dumps(data_item, indent=4, sort_keys=True))

            #####
            # TWEET FIELDS (guaranteed by the API)
            tweet_id = data_item["id"]
            tweet_text = data_item["text"]
            tweet_created_at = data_item["created_at"]

            # TWEET FIELDS: placeholders for non-guaranteed fields such as place (default is None)
            tweet_geo_coordinates_type = None
            tweet_geo_coordinates_coords = None
            tweet_place_id = None

            # PLACE FIELDS: non-guaranteed fields
            tweet_place_geo_bbox = None
            tweet_place_full_name = None
            tweet_place_type = None
            tweet_country_code = None

            # if coordinates are present
            if "geo" in data_item:
                if "coordinates" in data_item["geo"]:
                    # set coordinates fields
                    tweet_geo_coordinates_type = data_item["geo"]["coordinates"]["type"]
                    tweet_geo_coordinates_coords = data_item["geo"]["coordinates"]["coordinates"]
                # otherwise, they are set as None

                # if there is place_id
                if "place_id" in data_item["geo"]:
                    # set tweet_place_id
                    tweet_place_id = data_item["geo"]["place_id"]
                # otherwise, it is set as None

            # CONTEXT_ANNOTATIONS:
            context_domain_array = []  # array to collect context_annotations

            # if there is a context_annotations array
            if "context_annotations" in data_item:
                # for each domain annotation
                for annotation in data_item["context_annotations"]:
                    # append it to local variable
                    context_domain_array.append(annotation["domain"]["name"])
            # if there is no context_annotations
            else:
                # make it NULL
                # print("context_annotation is null")
                context_domain_array = None

            # PLACES
            # if tweet_place_id is not None
            if tweet_place_id is not None:
                # Places for loop (includes.places) contains all place objects
                for places_item in json_response["includes"]["places"]:
                    # find the place
                    if (places_item["id"] == tweet_place_id):
                        # get other place related fields
                        tweet_place_geo_bbox = places_item["geo"]["bbox"]
                        tweet_place_full_name = places_item["full_name"]
                        tweet_place_type = places_item["place_type"]
                        tweet_country_code = places_item["country_code"]

            #####
            # CONSTRUCT TWEET (per tweet, for Tweet and Place)
            tweet_data_dict = {'tweet_id': tweet_id,
                               'text': tweet_text,
                               'created_at': tweet_created_at,
                               'context_annotations': context_domain_array,
                               'geo_coordinates_type': tweet_geo_coordinates_type,
                               'geo_coordinates_coords': tweet_geo_coordinates_coords,
                               'geo_place_id': tweet_place_id,
                               'stream_rule_tag': query_tag}

            place_data_dict = {'places_geo_place_id': tweet_place_id,
                               'places_geo_bbox': tweet_place_geo_bbox,
                               'places_full_name': tweet_place_full_name,
                               'places_place_type': tweet_place_type,
                               'places_country_code': tweet_country_code}

            # construct a Tweet() object
            # data passed in to Tweet() has to be in a dictionary format
            single_tweet = Tweet(**tweet_data_dict)
            single_place = Place(**place_data_dict)

            # inspect transformed Tweet() object
            # print("single_tweet: ", single_tweet)
            # print("single_place: ", single_place)

            # LOAD TWEETs and PLACEs
            self.start_load(single_tweet, recreate_db)
            self.start_load(single_place, recreate_db)

    """
    Recreates the database. It drops all tables and creates them again.
    If run for the first time, set this as true.
    """
    # RECREATE DATABASE
    # 3.
    def recreate_database(self):
        # drops all tables
        Base.metadata.drop_all(self.engine)
        # creates all tables
        Base.metadata.create_all(self.engine)

    """
    A context manager for the session.
    It ensures that all connections are closed.
    """
    # A CONTEXT MANAGER
    # 4.
    @contextmanager
    def session_scope(self):
        # local scope creates and uses a session
        session = self.Session()
        try:
            yield session
            session.commit()
        except Exception:
            # Roll back the partial transaction, then re-raise for the caller.
            session.rollback()
            raise
        finally:
            session.close()
11,452
655141c4e0dfc8827c457c0f286dc1f6d4d595f9
from pytransform import pyarmor_runtime pyarmor_runtime() __pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x06\x00\x33\x0d\x0d\x0a\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x5e\x15\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x50\x8c\x64\x26\x42\xd6\x01\xf3\x0d\x68\x2c\x25\x9f\x19\xaa\x05\x4c\x7b\x3c\x45\xce\x09\xfe\x15\x7c\xd5\xc1\x94\x8b\xc4\xdb\x36\xc6\xd4\x60\x7a\x7f\x48\xcf\xe1\x6e\x5e\x39\x72\xb7\xcf\x73\xda\x86\xda\xd4\xf4\x34\x09\xa3\x6d\xa6\xf1\xb5\x48\x14\x33\x0f\xc6\x97\x6b\xf7\x5e\x4f\xd1\x29\x95\xc8\x1c\x8c\x91\xee\x48\xb2\x37\xc2\x30\x76\xad\x64\x0c\x24\x3b\x4f\x41\xd2\xce\xd8\x83\xbe\xfb\xe0\x4c\x72\xf8\x04\x21\x23\x2d\xff\x1e\x0d\xae\x45\x13\x91\x9e\x62\x5f\xbc\x6b\x6b\xa6\x24\x43\x37\x74\x13\x7b\x85\xca\xf3\x8f\xc4\x35\x08\xb3\x55\xca\x8f\xea\x9b\x74\xc5\x07\xbd\x01\x79\xb1\x3c\xb3\xcf\xe0\xf6\x96\x14\xe9\x24\x2d\x3a\xbb\x85\xe0\x1f\xf6\x45\x0e\x26\x34\x46\x51\x4d\xe8\xf6\x54\xbb\x10\x67\x23\x06\xba\xdd\x4b\xd0\x00\x07\x4a\xea\xae\xb7\xfa\x04\x26\xab\xa2\x95\xc2\x99\xa1\xef\x71\xd0\xb9\xe5\xfb\xde\xbe\xef\x6d\xbc\xdd\x02\x95\x5a\x99\xc0\x90\x04\x51\x03\xe4\x1c\x55\x24\x2a\x90\xbe\xee\x74\xf3\x2a\x3a\x41\xcf\xba\xc0\x0f\x40\x1e\xa1\xb8\x76\xac\x08\x34\xbf\x19\x25\x45\x2f\xf1\x89\x52\xfa\x32\x2b\xa0\xdb\xa9\x74\xdc\x38\x4d\xac\xa0\xf6\x47\x42\xeb\x8e\x83\x2c\x18\x0e\xf4\xfe\x8d\xcb\x4b\xd5\x74\xa1\x43\x0a\xdf\x56\x0c\x8e\xd8\xb2\xff\x3b\x60\xab\xc8\xab\xb9\xf4\x19\x53\xce\x46\x21\x1b\xa0\xa3\x5a\xce\xce\xaf\x92\xde\x16\xea\x87\xb9\xfc\x33\x3a\xe5\x4e\x32\xc8\x79\xf9\x3b\xb3\xfa\x91\x5b\x18\x4e\xf5\x44\x99\x31\x81\x45\x1a\x92\x74\x81\x4b\x63\xca\x32\x3e\x0e\x95\xd7\x1b\x5e\xa4\x31\x17\x00\x8b\xad\x26\x0e\x6a\x44\x81\xbe\x44\x8d\x6a\x67\x92\xf8\x8b\xc6\x5b\x6f\xb2\x46\xfd\x9f\xf8\xfa\xf3\xbf\x6f\xff\xde\xd9\x69\x38\x4d\xd7\xe1\xd0\x64\xcf\x10\xf5\x1a\x33\xbb\x94\x59\xbd\xa7\xfb\xb7\x7d\xf1\x49\x82\xa0\x1d\x6f\x8a\x89\xe8\x4c\x56
\x86\x03\x31\x2f\x2e\x00\x78\x46\x3f\xf8\xd8\xba\xfe\x5a\x20\x1e\x2d\x5e\x7b\x62\x08\xec\xbe\xf5\x35\x38\x24\x7a\xc1\xb5\x0e\x42\x86\x64\xa4\x79\x08\x6f\x7b\x08\x49\xed\xd1\x2d\x31\xe7\xd8\xcb\xe9\x2e\x87\xe6\xab\xf2\xe1\xf7\x22\x64\x68\x90\xcf\xbb\x8b\xa6\x94\xee\x9e\xd9\xee\x0a\x9c\xe2\x07\xb1\x8e\x55\x31\x4d\xa9\x15\x82\x21\xfe\xef\xc1\xbf\x5f\x45\x89\x4c\xdf\xdc\xae\xd0\xd6\xed\x6b\x4c\xe9\x26\x61\x46\x6e\x7c\x4b\x25\x7c\xcb\x85\x14\x6f\xfd\xce\x0d\x0c\xd5\xd7\xa1\x64\x48\x79\xf6\xd6\x63\x17\xc2\x13\x1f\x3c\x79\x27\x62\x28\xac\xb6\xac\x47\x61\x11\x3d\x3f\xc9\xb4\x5d\x82\x40\xfb\x04\x82\x1b\xea\xba\x87\x9e\xe1\xdb\x78\x1f\x3c\x78\xd0\xfa\xce\xc9\x40\x07\x32\xe1\x30\x64\x87\x2a\xae\x5d\x9a\x1b\x70\x5e\x42\xbf\x29\x1d\x02\xa0\x2c\x55\x91\x5a\x61\xd4\x8b\xd7\x59\x7f\x7f\xc5\x14\xba\xdd\x65\x88\xe2\x86\x34\xb9\xce\x12\x6b\x6a\x8c\xb7\x5d\xd2\x69\xd0\x17\x6f\x61\xf3\xb2\xc6\x30\x09\xdc\xe1\x48\xf2\x75\xe8\x7b\xdd\xe8\x0e\x22\xd2\x0a\x11\x5d\x0b\x8c\xe6\x14\xbc\x0a\x01\x42\x4d\xf2\xff\x27\xcf\x66\x74\x7c\x76\x8e\xc4\x4c\xe5\x7b\xa2\x63\x49\x40\x1a\xc4\x7a\xbe\x35\xab\x19\xbe\x96\x34\x51\x7c\xae\x27\xa4\x12\x07\xc1\x28\x59\xbc\xf2\x04\x04\x42\xa2\x7f\xad\x89\x63\xc4\x2c\x9b\xd0\x44\x5f\x7c\x43\x2f\xa9\xd7\x0a\x64\xa4\xa5\xa2\xdb\x2e\xb9\xc2\xfe\xe2\x20\x8c\x00\x55\x7a\xf6\x45\x9b\xdf\xc9\xac\xa2\xb9\xc8\x0a\x57\xa8\x89\xfb\x85\xfa\x74\x8a\xff\xb4\xfe\xb5\x48\x2e\xd5\xd0\xa1\x1c\xf8\x0d\x1e\x46\xec\x2f\x84\x05\xda\xc3\xb2\x1f\x2f\x48\xe5\x97\x9b\xd3\xdd\x5b\xb1\xc6\x7e\x4b\x60\x57\xb8\x30\xd5\x73\x46\x9b\xfa\x81\x92\x07\x0e\xc1\xa5\x56\x32\x42\xe9\x3d\x93\x65\x5b\x1b\xa5\x91\xa2\x5f\xfa\xb1\xd1\x1b\xbf\x13\x9b\x1a\xbd\x83\x0c\x51\xf8\x0e\x1e\xf0\xf0\x44\x82\x5e\xe7\xfc\x25\x8c\x44\x92\x19\x30\xa2\xeb\x52\x39\x52\xa0\xae\xa1\xff\xc0\x04\xc8\x8a\xdf\x88\xae\xc3\x0c\x88\xd6\xd3\x87\x03\x7d\x34\xfd\xf7\x89\xd8\xe5\x09\x7d\x89\xb1\xb4\x5e\x10\x39\xd0\x93\x42\xd8\x37\xbf\x0a\xf1\xc0\xd2\xd6\xce\x92\x10\x6b\x22\x0c\xa6\xde\x58\x1a\x34\x66\xad\x87\x90\x18\x52\xe9\x62\xce\x32\xf7
\x87\xc0\xa6\x0f\x49\xa3\x18\x01\x42\xcf\xdb\x98\x06\x2a\x7c\x50\x9d\xb8\xc9\xd7\x0d\x38\x63\xfc\x3b\x94\x38\xaa\xcb\xbe\x1c\x24\xb6\xbb\x3d\x88\x6f\x1c\xa1\x2f\xaa\x25\x5d\x45\x54\x10\xe3\xce\xe1\x7c\xdb\xc7\xb4\x3a\x6f\xcd\x57\x53\x29\x43\xdd\x63\x26\x90\x95\x6b\xf6\x3f\xa6\xd5\x4a\x2c\x11\x0c\x07\x87\x88\x2a\x46\x5c\x80\x8a\xaf\xe7\xe7\x7c\xd2\xc4\xa5\x37\xfc\x11\x0a\xa2\x2d\xc5\x53\xdf\x99\x39\x7f\xd4\x36\x6a\x59\x31\x79\x9f\x1f\x5f\x69\xad\x33\x9f\x49\x8f\xa7\x52\xff\x78\x1b\xfd\x63\x6b\xa5\x14\xd6\x26\x8e\x28\xfb\xfe\x96\xaa\x6b\xd0\x8b\x57\x5f\x82\x44\xeb\xea\x58\x08\xdc\xc8\x39\x95\x96\xdd\x04\x2b\x7e\x6d\x60\x2e\x10\xae\xc0\xe3\xe2\x2c\xc4\x84\x91\x85\x56\x96\x78\xc9\x33\xbe\xa3\x52\x98\x8e\x18\x10\xf5\x8d\xe0\x0b\x67\xcc\x58\xb9\x91\xcd\xf0\x37\x8e\x64\xaa\xe5\x42\x20\xd1\xb3\x2a\x37\x9e\xa4\xd4\x01\x4e\x45\xb6\x3f\x36\x9c\xf3\x44\x61\xee\xf9\x83\xb9\x57\x68\x26\xc9\x33\xe3\xe4\xf6\x92\xb1\xbd\x1f\x58\x9b\x59\x02\xf3\xd8\x42\xc9\x17\xd5\x66\x49\xed\x8a\x4f\xb8\xcc\x19\x8c\xfd\x92\x4f\xb6\xc3\x2d\xac\x66\xf6\xd9\x77\x4e\xe9\x2f\x47\x61\xfb\x35\xf6\xe2\x2f\x8e\xd5\x27\x06\x03\x71\x5c\x1a\x83\x73\x69\xed\xc5\xd7\x01\xe6\xf6\xde\x87\xa9\xfd\x3b\xc6\x27\xe5\xda\x5a\x7e\x00\x35\x5a\x6e\x19\x50\xf0\x77\x29\x12\x97\x44\x76\x4d\xec\x19\x95\x9c\xa0\xc2\xd3\x1f\xaa\xd4\xba\xcd\x2f\xbc\x61\x99\xcb\x55\x31\xad\xe7\xa0\x92\xee\xa3\xe4\x36\x66\x45\x77\x02\xa6\x42\x46\x15\xa8\x2a\xec\x4a\x23\xd7\x8b\x98\xc2\x35\x58\x05\xc8\x77\xc6\x0b\xd3\x6b\xb9\xd6\x39\x0a\xa1\xd2\x14\xa9\xf5\x3d\x74\x07\xc4\x91\x4b\xb4\x30\x8c\xcd\x44\x4d\x4e\x08\x12\x34\x01\xdc\xa0\x79\x62\xe9\xc2\xd5\x31\x46\x4c\x7d\x2c\xc7\x1c\x7f\x80\xdc\xea\xab\xb5\xd8\xc3\x61\x5d\xda\xfd\xd0\x3b\x81\x2a\x2c\x79\x3d\x69\x50\xda\x9b\xf1\x12\xcd\x2d\x23\xb1\xa0\xc2\xcb\x78\x2b\x33\x08\xc5\x28\x7c\x4b\xf0\x61\x19\x8f\xfe\xb3\x7a\x77\x2a\x4f\x0c\xe7\x63\x8f\x13\x61\x24\xe0\x38\x0b\x83\x70\x18\x35\x62\x4b\x46\x54\x60\xf8\x9d\xa4\xa4\xd7\x39\x75\xf6\x3b\xb9\x58\x69\x7a\xb8\x77\x30\x39\x69\x29\x03\x1f\xb5\x0c\xa5\x14\xd4
\x88\xea\xd1\x22\xbd\x49\x8c\x93\xe4\x96\xa9\x15\x45\xca\xb2\xbf\x65\x70\x0c\xc3\x96\xcf\x9a\x74\x03\xf0\xda\x2c\x15\xc7\x72\x70\xfe\x17\x94\xbc\x80\x3b\x75\x81\xf8\x6e\x62\x33\xe4\x1b\x29\x95\x3e\x01\x02\x02\x9d\x98\x57\x4d\x42\x1d\x15\xff\xb1\xfe\x73\x1b\x64\xf1\xa6\xdb\x67\x6c\xca\x74\x3b\xd0\x83\x29\x22\xab\x0a\x48\x4b\x21\x8a\x6a\x5b\xe0\x7f\x16\xf5\xd6\x29\x66\x48\xc7\x1d\x97\x5e\x0b\x44\x48\x2a\xe2\x69\x69\x75\x3b\x3a\x4f\x3a\xde\x2f\x15\x99\xee\x1b\x1e\x26\x69\x44\xca\xe4\x4e\xa9\xf1\x37\xfb\x3a\x6c\xc0\x4e\x29\xf6\xd3\x79\x19\xad\x5f\x0b\x7e\x2b\x12\x11\x95\x8e\x34\x0d\x88\x4b\x5e\x74\x38\x93\x1f\x5f\xfa\x42\xc3\x51\x69\x99\x51\x0c\x87\xc5\x0e\x68\x3c\xb9\x98\x50\x14\x20\xb0\x94\x58\xcf\x80\x4b\x60\x57\xfb\xb3\x78\x81\xcb\x63\x5a\x1b\x39\x46\xd7\x31\x65\x02\xde\x32\x41\xf5\x8e\xa9\xca\x6a\x13\xcb\x92\xfb\x77\x8a\xbd\x06\x7c\x37\x0d\xd6\xcc\x4d\x27\x9a\x49\x31\x0f\xca\xdb\x17\x5b\x4f\x5a\x8d\x42\xd7\x70\x85\xe2\x7d\xcf\x76\x23\x73\xd2\x6d\x77\xee\x32\x93\xe9\xed\xd8\x7d\xab\x85\x81\x7b\x40\xdc\x72\xe5\x20\x15\xca\x4f\xf3\x31\x8a\xa8\xd8\x6c\xf1\x3f\xe8\xa7\x35\x67\x81\x96\x91\x4e\xbc\x9c\x90\xa2\x53\x90\x66\x9f\xd7\xb4\x25\xff\xae\x46\xb3\x52\x4e\xb2\x1e\x9c\xe7\x00\x9d\x73\x09\xd8\x91\xd7\x81\x1c\x1e\x39\x69\x87\xb9\xe6\xd9\x20\xc3\x27\xcb\x84\x0e\xbc\xc0\x92\xcd\x9c\x78\x9c\x12\x71\xb8\xcf\x56\x88\x4f\xad\x3d\xfd\xb9\x3e\x3e\xe9\x94\x57\x93\xe3\x0b\xbf\xaa\x29\x51\x48\x3e\xf7\xe1\xec\xec\x6c\xea\x9a\x0d\x31\xa4\xfa\xd0\x37\xc5\x02\x18\x48\xc6\xb2\x39\x0a\xab\x66\x25\xfc\xd0\xc8\x38\xc8\x5a\x1b\x65\xd6\x57\xc6\x22\x99\x7e\x6d\xe1\x46\xcb\x55\xe6\x1b\xeb\xb3\xc5\x65\x92\xf7\xd2\x54\xfd\x46\xdf\xeb\x7b\x4f\xe8\xce\xd6\x93\x18\x4d\x9d\x75\x7f\xa5\x7d\x0e\xb0\x3f\x11\x88\x75\xe0\x3e\x0c\xfb\x47\xc4\x40\x5b\x26\xec\x0f\x6d\xb6\xc2\xa2\x1c\x48\x63\xb3\x77\x02\xed\xc4\x4f\xda\xda\xc0\x71\x01\xa0\xd3\x27\x70\x24\x0b\x53\x06\x2a\xfb\x5f\xab\xab\x0f\x55\x83\x3f\x36\xf4\xe8\x2e\x53\xac\xc0\x20\x03\x7b\x7a\x25\xe4\x5a\xf4\x4b\x0f\xe1\x05\xce\xdd\x5d\xa3\x4a\x5f\x99\x32
\x62\xbe\xc0\x05\x1f\x22\x31\x4f\x6f\x06\xab\x8a\xdd\xef\xaa\x86\x95\xeb\x8c\xe1\x56\x23\xf1\xa8\xcf\xab\xec\x69\xbe\xa2\xea\xb6\xe8\x26\xd4\x62\x29\x8b\x5c\x86\xe6\xca\xef\x22\x95\x93\x5e\x0e\x38\x19\x92\x8a\x49\x06\x01\xcb\x20\x23\xea\x44\x01\xf9\xa1\x10\x8a\x48\xf2\xab\x48\x08\xe2\x8d\xc5\xb5\x6b\xd6\xe0\x73\x7e\x1e\xb3\x90\xce\x5f\xef\x50\x11\xa9\x6f\x5b\x91\xde\x3c\x50\x93\xa0\x7a\x8f\x0c\x19\xc9\x10\xe9\xf1\xff\xfb\xb7\x1f\x62\x63\x59\xde\x61\x1d\xee\x4b\xf1\xfa\xf9\xf3\xe4\x68\x44\x6a\x21\x57\xf2\x06\x71\x5c\xac\x09\x8a\xa7\xe6\xe9\x43\x6a\xae\x3d\xba\xba\x3e\xa6\xbb\x00\xdd\x17\xef\x24\xbe\x4f\x4c\x3e\xfd\x46\xba\x26\xa2\x9a\x85\x2c\x60\xc6\x73\x73\x38\x2a\x77\x2f\x02\x5d\x12\x49\x9c\x95\x59\x48\x02\xe5\xa5\x52\xfc\x12\xbf\x73\x8a\x76\xc4\x40\x6e\x45\x79\x0f\xb8\x43\xc2\x10\x1f\xb0\xd9\x78\xbb\x7d\x86\x9d\x66\x1a\x21\xbe\xc4\x0a\x53\xce\x74\x9b\x81\x59\x52\x0d\xd5\x3c\x68\x91\x59\xe9\x7c\xc6\x69\x14\x19\x5d\x2e\x26\x8d\xfb\xff\x33\xac\x7d\x5f\xc1\x52\x26\xf7\x8f\x96\x32\x83\x15\xee\x3c\x4c\xbb\xc0\x48\xb6\x98\x54\xe4\xee\x10\x74\x9a\xa9\xc1\x9a\xbe\xc2\x22\xdc\x74\x67\x28\x10\x1e\xed\xa0\xe2\x97\x76\x2a\x79\x27\xf8\x0c\x9c\x87\x85\x7c\x59\xb9\xbb\x10\x4b\x98\x8a\xf2\xc1\xcf\x88\xde\xc6\x6f\x1f\x34\x69\x50\x75\x57\x9b\x5e\x76\xee\x78\x10\xa6\xf0\x8c\xc3\xe0\x81\x37\x6a\xe3\x81\xb2\x75\x81\xb0\xb3\x38\xf8\x0b\xde\x30\x5b\xdc\x53\xe4\xb0\xc9\xe3\x77\xe6\x41\xa8\x5a\x41\x15\xc0\x35\xea\x03\xce\xd3\x4a\x32\x33\x2e\x85\xd2\xe8\xd9\x28\x5e\x8e\xfc\xed\xfd\xa9\x84\xcd\xb3\x58\x66\x76\xb0\x1a\xfd\xa9\xd1\xc2\x16\xee\x77\xfd\x38\xd5\x54\xcc\x1f\x8d\xf7\x02\x8f\x45\x22\xca\x9c\x9f\x0e\xd4\xbf\x75\xb3\x5b\xed\x27\x78\xae\x48\xe4\x51\x75\xe1\x44\xa4\xc9\x16\xce\x1e\x05\x3c\x90\x71\xf7\x35\x54\xb5\xba\x82\xae\x4f\x50\xfa\x7d\x02\xd4\xcf\x99\x6e\xfe\xb4\x82\x64\xcb\x6d\x34\xc8\xb8\x76\xa9\x90\x6a\x00\xb0\xf3\xec\x61\xae\x1d\xbe\xa5\x20\xed\x82\x70\x16\xb0\xf6\x36\xab\x32\x69\x6e\x49\x7d\x30\x10\x3d\xc6\x00\x91\x49\x3c\x41\xf8\x5d\xba\x86\x76\x91\x9d\xc4\xa4\xfe\xb0\x5a\x82
\xaf\x98\x12\xa9\x7e\x1c\xb3\x7f\x95\xe3\x7c\x71\x00\x5c\x3a\xdd\xed\x2a\x3e\x10\xff\x09\xcb\xe4\x9f\x33\xba\x7f\xb7\xcb\x0d\xc9\xc1\x86\x91\xbe\x76\xb3\x27\x5c\x14\x14\xdb\xa9\xfc\x7e\xc9\x0a\x55\x07\x85\x2c\xfa\x0e\x55\x8d\xc4\xc7\xa6\x5a\x8e\x57\xda\xec\x1b\x36\x50\xfa\xae\xd8\x49\x83\x4b\x42\x79\x7a\xce\x04\xad\xf5\xc9\xc0\x54\x65\xf9\xbe\x91\xf9\x4b\x64\x7c\x95\x67\x31\xe2\xf3\x85\xaa\x55\x38\x8b\xc5\x2d\xcf\xd8\xd4\x19\xce\x02\x60\xc3\xd4\x53\xc6\x16\x4d\x56\x71\xf8\x71\xb1\x89\x0d\xb0\xcc\x70\xd3\xe8\xc8\xd0\x66\x26\x08\x53\x28\x06\x00\xd2\xed\xaa\xf2\x14\x9e\x0e\x25\xdb\xc3\x3d\xa9\x5f\xe8\x86\x83\x85\x46\x8a\x91\x8b\xed\x68\x4b\x7e\xdd\xe1\xf4\xa4\x2b\x9b\x53\x9c\xd2\xdc\x5b\xec\x26\xa9\x23\xcc\x27\x03\x45\x24\x28\xd1\x54\x27\x98\xd1\x4f\x23\xd3\xac\xed\x8f\x9d\x98\xee\x3a\xc5\x7f\xa6\xb3\x10\x99\x56\xf1\xd9\xad\x0d\x81\x93\x10\x3b\x9c\xbf\x46\xa0\x18\xd1\x2c\x9b\x9d\x21\x0b\xd2\x2e\x3e\x80\x88\x4c\xda\x45\x4c\xfc\x8e\x4a\x25\xaa\x00\x41\xab\x94\x91\x05\x15\x17\xd1\xe7\x9b\x63\xc2\x86\x8e\x92\x39\xfb\x3f\x30\x9e\xc6\x3a\xd9\x7c\xd6\xb4\x5f\x6c\x36\x60\xb3\xb8\xb3\x08\xf6\x7a\xb6\x61\xa6\x89\x0b\x9b\x71\x30\xbb\xa2\x7d\x94\xbc\xdb\x1a\x16\x92\xaa\x04\xad\x06\x5e\x2c\xa7\x9a\xbb\x25\xcf\xd0\xd1\x5a\x4c\xec\x58\xb7\xa0\xf1\x23\x35\x99\x52\x53\x67\x64\x4c\xd7\xff\xc1\x96\x9e\x11\x12\x7e\x43\xbc\x1f\x45\x0a\x20\xe6\x6b\x74\xf5\x10\xa2\xbe\x4b\xe5\x82\xbf\x2b\xa1\xbd\x3e\x6c\x67\x0a\x4b\x49\x0b\x6e\xc4\x65\x14\x9e\x4a\x3a\x46\x03\x27\xf3\x6a\x46\xd9\xb7\x1a\x9f\xc1\x03\xc1\xd6\x18\xc4\x09\x63\x1a\xc9\x5c\x65\x89\x8e\x87\x93\x39\x4e\x22\x84\x67\x37\x08\x21\xa5\x2d\xf4\x92\x9a\x8d\x76\x3d\x8b\xf8\xd0\xa6\x45\x43\x61\x88\x9e\x6b\x06\xa0\x0c\x40\x46\xe9\xf6\x74\xdc\x91\x48\x5d\x62\x4d\x22\xb4\x1a\x0d\xfb\x56\xec\x7b\xe9\x66\xdb\x89\x43\x46\xa9\x7b\x7d\xa6\xec\x30\x62\xae\xaa\xe9\xe0\x3f\xe3\x1b\x80\x9d\x41\xed\xee\xab\x4a\xb4\x35\x4a\xba\x01\x4f\xd0\xe4\x1a\x15\x0a\x18\xac\x07\xdb\x88\x6f\x9f\xbb\x9f\xc3\xc1\x7b\xeb\xbd\xca\xd9\xf7\x3a\xce\x20\x5f\x89\xcd\xcf\xa2\x8c
\x39\xe9\x5d\xad\x44\x3a\xf7\x05\x0f\x85\xd2\x04\xf5\x4d\xa7\x66\xda\x66\xd1\x14\xd5\x50\xf7\x45\xb6\x23\xd8\x3d\x64\xe4\xd0\x46\x24\x7b\x80\xfd\x61\xae\xfd\xaa\xc0\xe7\x11\xbd\xd6\xe9\x3c\x2d\xcd\x85\xf1\x1a\x8a\x2c\xae\x50\x43\x1e\x49\x24\x17\x92\x4d\x55\x89\x53\xd4\x0c\x7f\xfb\x53\xa3\x75\x18\xd5\x8c\x7e\x6f\x56\x59\xaf\x2f\x73\xb6\x0b\x6a\xbb\x69\x99\xa9\xd1\xb4\x61\xfb\xd4\x8e\x6c\xe0\x01\x01\x85\x4a\x9c\xbe\x32\x6d\xdf\xd5\xf6\x3b\xb2\xd2\xad\xf8\xa4\x77\x4c\x22\xe7\xeb\xd5\x84\xb5\xd5\xba\x76\x6d\xc8\xbf\xb6\x4c\x11\x25\x1e\xa4\xb8\xe9\x73\x09\x55\x38\xbd\x06\x31\x50\xf5\xc2\x61\x66\xb0\xc1\x10\xca\xe7\x1a\x08\xc3\x3c\x71\x1b\xc0\x97\x0f\x52\x0c\xf2\x40\xb5\xb8\x89\x19\x4a\x76\x5a\x9c\x7e\xb4\xf0\xe1\x66\x9a\xbd\x4c\xdc\x84\x54\xf9\x18\x50\xff\xcb\xb0\xec\x9e\x3a\x81\x86\x44\xed\x1f\x4f\x79\xeb\x90\x97\x1b\x0d\xc0\x50\x31\x4b\x8a\xe6\x01\x7b\xf3\xd2\xfd\x9c\xc5\x70\x46\xd9\xc1\x12\x03\xb5\xa7\x79\x5e\x5c\x55\x70\x7a\x9f\xf3\x38\x40\xda\x7c\xf4\x94\x15\x69\xda\xa3\x3b\x39\xe5\x83\xe7\x3b\x50\xd9\x3d\x0d\xb0\xbc\x1a\x8b\x2c\xf2\xd0\x6c\x83\xe7\x5c\x75\x3d\xbe\x25\x02\xb9\x73\xd8\x09\x01\x37\xcb\xc5\xb7\x36\x50\x10\x95\x25\x2f\xee\x46\x39\x13\x64\xbc\xc3\xeb\xfe\x62\x73\x8e\x5d\x47\xf0\x64\x83\xe0\xa3\x56\x6b\x35\x1d\x43\xf6\xd7\x62\xaf\x1a\xff\xde\x87\x0b\x09\x00\xe2\x0e\x65\xec\xbc\x9a\x07\x56\x7d\xa2\xbf\xc7\x1f\x0a\x32\x55\xba\x8f\xe8\x66\x0f\x29\xaa\x85\x04\x25\x48\xd6\xbe\xe2\x82\xe4\x0d\x96\x31\xde\xf6\x6b\x10\xa9\x33\x9f\x4f\x2f\xfb\xd2\xf6\x6b\x1d\xfe\x23\xf7\xfa\x63\xca\x46\xcf\x0d\xc8\x57\x72\x1c\x32\xa6\x89\xca\xa8\x25\xd0\x6f\x37\x73\xdf\x23\x6c\x65\x04\x61\x4e\xa5\x11\xcc\x4c\x1c\x0c\xee\x41\x39\x37\x9e\xce\xdb\x4b\xea\xd6\xba\x2a\xe2\xf0\x49\x84\xb8\xa6\xcc\x3a\x2b\xd7\xac\x10\x8c\x5f\xf9\xe2\xec\x77\x12\x0f\x6b\x6c\x86\x7b\xa5\x8e\x4e\x24\x88\xc8\x4a\x72\x7a\xe6\x91\x0b\xd4\xe0\xe5\xe6\xa5\x70\x38\xa7\xb3\x88\x99\x2a\x40\x48\xab\x1f\x99\xc7\x08\xe6\x7c\xe2\xe2\x1b\x3b\x09\x39\x38\xb5\x9c\xdc\xb9\x00\x2d\x11\xf5\x54\x9d\x8d\x08\xe6\x4a\x06\x8d\x2c
\x19\xc1\xbc\x59\xe6\x1a\x4e\x7b\x32\xfe\x4d\x9a\xed\xa3\x91\x47\x4d\x53\x67\x2b\x67\xab\x01\xf3\x19\x25\xa0\xe8\x7c\x19\xe5\x62\xf6\x06\x97\x42\x1e\xb0\xb5\x51\xe4\x80\x88\xef\x04\x66\xf8\x73\x6c\xb8\xb6\x26\xda\xa9\x84\x6f\xc2\x66\x36\x7b\x44\x99\x4e\x6c\x62\x98\xf1\x66\x0c\x49\xe3\xc8\x9e\x39\x6b\x0f\xa1\xe2\x45\x50\x39\x36\xb9\x28\xbb\x08\x55\x4c\xf2\x8b\x1d\x2b\x91\xbc\xf8\x0c\x19\x89\xbf\xa2\xd3\xde\xe7\x98\x18\x6b\xa4\x73\x95\xec\x88\x84\xb7\x29\x16\x30\xf1\x0b\x73\x31\x3b\x6a\x54\x8b\x72\x6b\x1d\xc8\x6c\xc4\x55\x84\x27\xf6\x83\x52\x3f\xb8\x5f\xd7\xac\x2a\x00\xe1\xbe\x45\x11\xb9\xd2\x48\xde\xd3\xa8\x3f\x11\x25\x5d\x4e\x69\xdf\x52\xae\xea\x3c\xd0\x46\x71\xcb\x81\xe4\x41\x3e\xfc\x26\xff\xd6\x26\xb9\x00\xcd\xa8\x0e\xfc\xb9\x2a\xd3\x17\x62\x72\xee\x25\x85\xab\xbb\x6a\x2e\x21\x87\x2b\x3a\x4b\x35\x48\xdc\x83\x79\xa3\x30\xc6\x9d\x7e\x9a\xa4\x68\x02\x94\xb8\x72\x5a\xba\xfe\x2b\x9d\x9b\x7f\x7d\x27\xca\x70\xc1\x41\xde\x60\xf4\x16\xac\xa0\xff\x7c\xd0\xf6\x8d\x44\x3b\x99\x49\x4c\x36\x1e\xe8\xbc\xa1\xc3\x26\x5a\xbe\x95\x98\xe6\x61\xac\x29\x6c\x01\x3f\x20\x6d\x46\x0b\x90\x39\xcb\x9b\x82\x93\xf9\xcf\x75\x9c\xf7\x0d\x5f\xb4\xc7\x61\x77\x3a\x47\x52\xb2\x6a\x1e\x70\x03\x2b\xfc\xed\x03\xe9\xba\x1b\x6b\x36\x69\x77\xf0\xf8\x53\x4a\x97\x3b\x6a\x3a\x84\xa7\x41\x94\xc8\xf4\x37\x5f\x3a\x0c\x53\x57\xb2\x3f\x5f\xb3\xe1\xe7\x4c\xef\x8d\xbe\xc2\x0a\x40\xdc\x05\x24\xe1\xe3\x10\x64\x9b\x4b\x36\x6c\x09\xbc\xbd\x74\x8d\xfd\xae\xd4\xf3\xe4\x95\xca\x78\xf3\x1d\x41\x11\x1e\xb7\xbc\xdd\xd2\xe9\x8c\xfc\xfa\xab\x80\x7d\xb3\x9d\x40\xfb\x31\x3a\xae\xd4\xc0\x4a\xe2\x6c\x78\x54\x0d\x77\x63\xe8\x5b\x58\x64\x25\x82\x01\x9c\x7c\xc8\xeb\xa5\xa0\xc0\x4f\x2f\x92\xbf\x01\x66\x3c\xf2\x6a\x8c\xc2\xc7\x55\xbc\x98\x94\xca\x38\x11\xd0\xd1\x29\x71\x07\xf9\x19\xc8\x78\xba\xe6\x71\xbd\xb2\xb6\x83\x44\xb8\xe0\xee\x09\x80\xdd\xb0\xf4\xfc\xc4\xfb\xa6\x3b\x4a\x6d\xcd\x45\x50\xb6\x20\x1a\xe6\x71\x45\xd5\x36\x2f\x98\x7f\x96\x60\xc8\x3c\xdf\x71\xc3\x6f\x04\xd8\xf7\x27\x7c\x5c\x8b\x48\x0f\xea\x6d\x0c\xbc\xce\x39\x63\xf2\x0f
\x23\x13\x91\xb1\xd5\xed\xe4\xe8\x2f\x85\xe3\x72\xfa\x61\x30\x75\x94\x40\x84\x1a\x3a\xd8\xfc\x16\x60\x9c\xf2\xad\x71\x01\x33\x5f\x0f\xf5\xf7\x5f\x9f\xd6\xa1\x2c\x71\xd8\x05\xd5\x0d\x6e\xff\x78\xab\x2a\xfa\xa8\x2e\x47\xb0\xa8\xf2\x9e\xfb\x67\xe1\x2d\xfc\x96\xaf\x72\x85\xd8\x07\x47\x60\xd5\x36\x4a\x10\x0e\xf6\x99\x17\xa9\xdf\xc2\x18\xd9\xb9\x3a\xd5\xbc\x93\xcd\x7e\xef\xe6\xce\xf6\x3c\x88\xb5\xef\x86\x41\x0a\x18\x52\x9b\x49\xb6\xb9\xd2\xb9\xe1\x11\x8a\xdb\xf7\x30\x19\x30\x63\x06\x1f\x6e\x97\xbe\x4d\x8f\x3c\xb3\xa3\x13\x79\x36\xd8\xb1\x6f\xaa\x5b\x8f\xad\xd7\xb3\x72\xf2\x9f\x54\x9c\x82\xb3\x7f\x73\x0f\x8c\x86\x07\x6d\x78\xfe\x08\x88\x95\x70\x7c\xbe\x37\x2f\x0d\x63\xc0\xf5\x85\x03\xa2\xde\x13\xb6\x91\x23\xbd\x8e\xb8\x87\xc9\xd1\x98\x02\x89\xe8\x8d\x80\x4e\xd1\xa2\x36\x0d\x0c\xde\x6b\xc8\x96\x57\x6f\xdd\x19\x33\xe4\x0d\xd2\x4f\x1d\xc9\x73\x26\x28\xe5\xe4\x12\x51\xf6\xc6\x91\x7b\xa1\xdc\x48\xa0\x21\xab\x47\xc6\xe5\xf3\xf2\x95\x74\x54\xff\x02\x3d\xb1\xae\x1d\x40\xd8\xda\x44\xcc\x19\x1c\x6b\x80\x27\x50\xbe\xb8\xb0\xfa\x3b\x5c\xe1\x8c\x88\x75\x60\x0d\x1f\x3b\x91\x54\xa9\x39\xb9\x50\x56\x1f\xf5\x34\x32\x4f\x7d\xe7\x20\x75\x83\xe4\x4e\xb5\xf9\xba\x89\x97\xf3\xc1\x30\x54\x37\x85\x26\x61\xa3\xb5\xea\xb1\x58\x05\xa7\x41\x05\x4e\xc6\xb9\x42\x46\xb3\x95\x74\xfe\x1f\x8b\x7c\x82\xae\x5a\x19\xcc\xae\x24\xef\x58\xc0\xf1\xe3\x6b\x87\x53\x0e\xe9\x9d\x63\xd1\xe9\x9c\x7a\x7a\xd3\x3e\xa4\xc3\x0e\xdf\xec\x2a\x96\x1c\x23\xc3\x82\x1a\xb0\x45\x77\x2a\x54\x08\xe3\x9f\xef\xc3\xcc\x03\x68\x2c\x26\x22\x3c\xc4\xac\x95\xae\x8a\xe2\x48\x92\xaa\x3f\x6e\xd7\xe4\x8c\xaa\x8f\xd9\xaa\x20\x17\x5c\x63\x5c\x24\x46\xa5\xb9\x6e\xda\x27\x12\xe4\x1f\x06\x96\x62\x01\xcf\x3b\x3f\xd2\x1b\x83\xd4\x1f\x1c\xb0\x74\x15\x7b\x97\xc0\x6d\x37\xb0\xad\x51\x9c\x15\x87\xfb\xf7\x15\x44\x61\x6b\xcb\x2f\x3d\x8d\xb6\x05\x61\x60\x66\x60\x55\xd3\xea\xe0\x2b\xa9\xa9\xf4\x0f\xde\x31\x2b\x0d\xae\xed\x6e\x08\x61\xbb\x69\x59\xb5\x40\x22\x86\xa5\xbf\x77\x25\xf3\x1c\x77\x88\x44\x6b\x7d\x6d\x04\x7c\xb2\xc6\x64\x5b\x38\x9e\xfe\xbe\xdb\x6f\xd7
\xb3\x8d\x62\xa4\xd7\xc0\x74\x90\x1c\x13\xb8\xa1\xe7\xf3\x5e\x09\x66\xb2\x25\xa0\x3a\xba\x0e\x64\x27\x11\xea\x92\x18\x85\xfc\xcc\x3e\x01\x2b\x9f\x6d\x87\xb3\xce\x8c\xcb\x68\x91\x99\x25\x15\x65\xc0\x4f\xf5\x74\x67\x40\x98\x39\x98\x52\xd6\xf8\x44\xd8\x06\xc7\x3d\x53\x4b\xa7\x4d\x53\xe0\x1c\x38\xc2\xf4\xf5\x60\x8d\x2b\x4d\x27\xcf\x0c\xd2\xc9\x05\x4f\x14\x55\xde\xf4\xc3\xd1\x99\xec\xc5\x73\xa3\x67\x80\xb2\x78\x5f\x05\x04\x46\x1b\x84\xf9\x56\xa9\x2d\x2b\xc8\x70\x10\xea\x84\x1b\xd8\x6c\x2f\x29\xd0\x28\x85\xa2\xa0\x6c\x9f\x8f\x31\x36\x34\x70\x31\xdf\xa1\xe2\xdc\xf7\x4f\x44\xa0\xa4\xe2\x4c\x02\x47\x00\x9a\x8a\xe0\xa0\xe4\xd4\xf7\xf0\x4c\x84\xd1\x31\xcf\x5f\xb9\x5a\x59\xfa\x41\x59\xa6\x7a\xb7\xdd\xec\x80\xf1\xb6\x22\x5a\x8c\x0f\x9d\x10\x3a\x39\x68\xe8\x2c\x86\x30\x16\x94\xc4\xb5\xbc\xdc\xc6\xb3\x2f\xc9\xeb\x8e\xcb\x5b\x3b\x8d\xcb\x81\x3e\x1f\xc2\x82\x8c\xf5\x9d\x82\x13\x01\x89\x61\x81\xf9\xbe\xc7\x28\x45\x62\xfe\x21\xa9\x63\xe7\x1a\xf7\x08\xb9\x7c\xf9\x54\xcc\x15\xc2\x24\xf6\xa2\xec\xaa\xdf\x7f\x65\x5f\x35\xe1\xc6\x63\x91\x5f\x72\x46\x16\xdb\x0c\x8a\x90\x8f\x22\x8e\xf5\x5f\x8c\x22\xc5\x57\x15\xea\xa7\xdd\x14\x76\x57\xac\xc1\xc0\xc3\xe7\x4f\x1e\xac\x0f\x54\xdf\xf8\xde\xed\xcb\x50\x9b\x9d\xc0\x57\x33\x97\xba\x7c\x02\xcc\x61\x6d\x4e\x74\xaa\x32\x4d\xf6\x23\xdb\xec\x66\x59\x18\x14\xc4\x23\xf6\x6b\xd2\x07\x1b\x10\x78\x0d\xc7\x08\xa7\xa8\x8f\x9a\x55\xb8\x38\xef\x0d\x9e\xaa\x52\xdd\xeb\x20\xd9\x5a\xe8\x16\xcd\x26\xcf\xc6\xc0\xa3\xa5\x06\x40\xb1\x2f\xee\xdc\x40\xee\xc4\x49\x62\x47\x28\x07\xa7\x9c\x21\x37\x25\x6b\x24\x03\x33\x44\x88\x3f\x9a\x1d\x8d\x5d\xeb\x2f\xd8\x9b\x1e\xc3\x9d\xb6\xc0\x5e\x65\xdf\xfe\xb4\x2d\xde\xf7\x8c\xd1\x62\xa8\x32\xb5\x74\x8d\xeb\xa6\xd3\x56\xde\xe0\xe1\xc5\xe7\x0b\x83\x93\x1d\x76\x53\xee\x52\xeb\x3d\x48\xe1\xf3\x24\x16\xe2\x08\xba\x3e\x7f\x37\x0e\x17\x29\xee\x50\x39\xa4\x2e\x74\x50\xc2\xa0\x81\x2c\xc4\xa6\x59\xee\xee\x7e\xfa\x6a\xc5\xe1\x9d\x65\x90\x39\x9a\x70\xb3\x2c\xd4\x18\x15\xf1\xb2\x79\xcf\x5b\xfe\x05\xda\x38\xde\xca\x11\x8b\xff\x03\x8a\xde\x89\x14\xef
\x33\x05\x20\x5d\x5a\x05\x4c\xb3\xa1\x28\xf8\xfd\x07\xde\x50\x50\x0d\xdf\x58\xe4\x2f\x25\x65\x22\x92\x0c\x6f\x6a\x10\x04\x48\x21\x0b\x80\x10\x6e\x48\xac\xb0\xc4\x8f\x6a\x1f\xcf\xca\x7c\xb0\xfa\x4d\x72\x2d\xae\xfc\x2f\xf6\x07\x58\xcf\xbd\x33\x53\xc6\xb3\x5c\x45\x9e\x4c\x16\xb4\xb9\xff\x57\x44\xd9\xc4\x34\x19\x7e\x26\x0d\xae\xc9\xaf\xc7\x59\x5b\xf4\x90\xf8\x86\xea\x9b\x88\x39\x1a\x94\x8e\xe9\x31\x2b\x31\x1b\xc4\xbe\xe7\x3c\xd8\x1d\x5f\xa2\x6e\xb6\x33\xdf\x24\x7f\x4d\x23\x79\x46\x4d\x67\x02\x8c\x9e\x13\xf0\x14\xc2\x71\x6b\xa7\x40\xae\x5d\xc2\x60\x36\xfd\x5c\x94\x96\xdc\xd0\x57\x53\x07\x6b\xd3\x9a\x6b\xd7\xd2\x64\x2b\xde\xa2\x4b\x03\xb5\xa5\xe6\xdb\xdd\x27\x70\x2d\xac\x3f\x9d\xcb\x42\x49\x1f\x7c\xeb\xbd\x90\xbf\x76\x93\xdc\x5f\x64\x71\x98\xf6\xcc\x19\xbe\x4a\x63\x5c\x86\xcc\x96\xb7\x60\x74\x22\x4b\x91\x34\x57\x6b\x76\x57\x76\x1d\xa3\x21\xc4\x78\x90\xb0\xaf\x4a\xbf\x15\xdd\x01\x11\xea\x82\x92\xcf\xc9\x53\x1a\x7d\x8a\x31\x82\xa2\x2c\x9a\x97\xa5\xd7\x7e\xaf\xa8\xe1\xf9\xd6\xa1\x50\xc0\xcb\xee\xf5\x09\xfc\x90\xa3\xe1\xad\xe8\x03\x0d\x05\xe5\xce\x08\x41\x23\x5b\x39\xa9\x33\x8b\xa9\x26\x2c\x8d\xe5\xe1\x58\x53\xed\xfc\xa3\x63\x5d\x5b\xbd\xd5\x69\x9a\x05\xff\x67\xb4\xaf\x65\x5b\x1a\xe7\xc6\x90\x71\xf1\x60\x4c\xd3\xa8\x3b\x69\xe1\x29\x69\x1a\xe3\xa5\xf2\x9b\x1f\xf7\xc5\x9d\x5d\x71\xfa\xe2\x8e\x9f\x07\x22\xdc\xef\xe2\x14\x64\x11\x4d\x8d\x18\xac\x9e\x7a\x31\x60\xe9\x8a\x95\x2f\xff\x31\xdc\xfd\xa5\x1f\xe5\x0a\xc4\x60\xac\xde\x83\xb3\x04\xbc\xa6\x7c\x8f\xc9\x8d\x47\xa4\xdd\x5e\xf9\x73\x55\x15\xb0\x35\xc7\xd8\x4c\x7a\x53\xc8\x0f\xa8\x97\x85\x06\xf8\x67\xba\x0f\xb6\x9b\xe6\x3d\x03\x0d\x30\xc2\x52\x74\xa3\xa8\xfb\xd4\x39\x90\x9b\x5e\x06\x74\x77\xce\x71\x84\x9e\xb3\x42\x92\x70\x99\x85\x98\x87\xea\x69\x09\xc0\xfc\x5e\x5f\x92\xfa\xae\x39\x5a\x8e\xcb\xb7\x19\xe1\x42\x8a\x22\xd8\x23\x3f\x64\x9f\xad\xcf\x6d\xf9\x19\x97\x3e\x4b\x12\xb7\x0c\xfe\x50\x05\x1b\xd5\xc4\xf9\xa7\xa2\x75\x93\x51\xb1\x9a\xcd\x0d\xdd\xc8\x2e\xa0\x0e\xb7\x86\x80\x63\x23\xd7\x79\xe0\x40\xab\xa9\x15\x0b\x64\xf6\xba\x84\x26
\xbc\xa1\xd9\xeb\x2b\xb1\x74\x88\x4c\x3e\xcc\xa5\x89\xf5\x95\xd6\x2c\xcb\xb9\x0e\xa7\x53\xf9\x94\x5d\xb6\x53\x6e\x99\x23\x2b\x05\xab\x4d\xee\x7d\x5f\x8a\xc2\x08\x31\x10\x26\x15\x8c\xad\xcc\x36\x00\x42\x0a\xdf\xe1\x0f\xb0\xbb\x01', 1)
11,453
39f492e760c4c82e16a9ebebc18e4432f38d2291
import abc


class Observer(metaclass=abc.ABCMeta):
    """Abstract observer of the observer pattern.

    Concrete observers subclass this and implement :meth:`update`
    to receive event notifications from a subject.
    """

    @abc.abstractmethod
    def update(self, invoker, event, params):
        """Handle a notification.

        Args:
            invoker: the subject that fired the event.
            event: the event identifier.
            params: the event payload.
        """
        pass
11,454
86bf7ceb4d664018b498e7c3d3e38b439c30c342
#!/usr/bin/python # vim: set fileencoding=utf-8 # Demonstrační příklady využívající knihovnu Pygame # Příklad číslo 26: použití objektů typu Surface, metoda blit() # a operace pygame.transform.scale(). import pygame, sys, os # Nutno importovat kvůli konstantám QUIT atd. from pygame.locals import * # Velikost okna aplikace WIDTH = 320 HEIGHT = 240 # Inicializace knihovny Pygame pygame.init() clock = pygame.time.Clock() # Vytvoření okna pro vykreslování display = pygame.display.set_mode([WIDTH, HEIGHT]) # Nastavení titulku okna pygame.display.set_caption('Pygame test #26') # Konstanty s n-ticemi představujícími základní barvy BLACK = ( 0, 0, 0) # Vyplnění plochy okna černou barvou display.fill(BLACK) # Načtení obrázku, který se bude vykreslovat a transformovat image_surface = pygame.image.load(os.path.join('images', 'pygame.png')) # Rozměry původního obrázku image_width = image_surface.get_width() image_height = image_surface.get_height() scale_ratio = 1.5 # Vytvoření zvětšených obrázků horizontally_scaled_image = pygame.transform.scale(image_surface, (int(image_width*scale_ratio), image_height)) vertically_scaled_image = pygame.transform.scale(image_surface, (image_width, int(image_height*scale_ratio))) scaled_image = pygame.transform.scale(image_surface, (int(image_width*scale_ratio), int(image_height*scale_ratio))) # Přímé vykreslení původního obrázku display.blit(image_surface, (50, 25)) # Přímé vykreslení zvětšených obrázků display.blit(horizontally_scaled_image, (150, 25)) display.blit(vertically_scaled_image, (50, 125)) display.blit(scaled_image, (150, 125)) # Hlavní herní smyčka while True: # Načtení a zpracování všech událostí z fronty for event in pygame.event.get(): # Uzavřením okna běh aplikace ukončí if event.type == QUIT: pygame.quit() sys.exit() # Klávesou ESC se běh aplikace ukončí if event.type == KEYDOWN and event.key == K_ESCAPE: pygame.quit() sys.exit() pygame.display.update() clock.tick(20) # finito
11,455
62245bdbee36c8c4292c943b4627c1daa585cb60
import cv2 as cv
import numpy as np

# Load the source image in grayscale (flag 0).
gray = cv.imread('clahe_src.jpg', 0)

# Global histogram equalization over the whole image.
global_eq = cv.equalizeHist(gray)

# Contrast-limited adaptive histogram equalization (CLAHE),
# applied per 8x8 tile with a clip limit of 2.0.
clahe_op = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
adaptive_eq = clahe_op.apply(gray)

# Stack original / global / adaptive results side by side and save.
comparison = np.hstack((gray, global_eq, adaptive_eq))
cv.imwrite('clahe_result.jpg', comparison)
11,456
45fd63816fdb037d73ad61b507bab47e61c9fe45
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-04 06:21
# Initial schema: creates the About, Benefit, Fee, Goal, Preface and
# Testimonial tables for a portfolio/landing-page app.
from __future__ import unicode_literals

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated initial migration; do not edit field definitions by hand."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Personal "about me" section: bio text, one skill + percentage,
        # education entry and a photo.
        migrations.CreateModel(
            name='About',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('preface_description', models.TextField(blank=True, help_text='Preface yourself')),
                ('main_description', models.TextField(blank=True, help_text='About me')),
                ('skill_name', models.CharField(blank=True, max_length=255)),
                # NOTE(review): blank=True without null=True — an empty form
                # value cannot be stored as NULL; confirm against the model.
                ('skill_percent', models.PositiveSmallIntegerField(blank=True)),
                ('education', models.CharField(blank=True, help_text='Education name', max_length=255)),
                ('education_description', models.TextField(blank=True, help_text='Describe education')),
                ('image', models.ImageField(blank=True, upload_to='photo', verbose_name='Photo')),
            ],
        ),
        # A benefit/feature card with slug-based lookup and a picture.
        migrations.CreateModel(
            name='Benefit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('slug', models.SlugField(unique=True)),
                ('benefit_description', models.TextField(blank=True, help_text='Description')),
                ('image', models.ImageField(blank=True, upload_to='benefit', verbose_name='Benefit picture')),
            ],
        ),
        # A priced offering with optional discount/increase adjustments.
        migrations.CreateModel(
            name='Fee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('fee_description', models.TextField(blank=True, help_text='Fee Description')),
                ('price', models.DecimalField(decimal_places=2, help_text='Price', max_digits=15)),
                ('discount', models.DecimalField(decimal_places=2, help_text='Discount', max_digits=15, null=True)),
                ('increase', models.DecimalField(decimal_places=2, help_text='Increase', max_digits=15, null=True)),
            ],
        ),
        # A goal card with a description and a picture.
        migrations.CreateModel(
            name='Goal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('goal_description', models.TextField(blank=True, help_text='Description')),
                ('image', models.ImageField(blank=True, upload_to='picture', verbose_name='Goal picture')),
            ],
        ),
        # Introductory section with description and header image.
        migrations.CreateModel(
            name='Preface',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('description', models.TextField(blank=True, help_text='Preface project')),
                ('image', models.ImageField(blank=True, upload_to='preface_pic', verbose_name='Preface Image')),
            ],
        ),
        # Client testimonial with avatar, contact details and a
        # phone-number format validator.
        migrations.CreateModel(
            name='Testimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80)),
                ('slug', models.SlugField(unique=True)),
                ('avatar', models.ImageField(blank=True, upload_to='photo_ava', verbose_name='Photo')),
                ('position', models.TextField(blank=True, help_text='Position')),
                ('testimonial', models.TextField(blank=True, help_text='Testimonial')),
                ('location', models.CharField(help_text='Location', max_length=250)),
                ('phone_number', models.CharField(blank=True, max_length=15, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
                ('url', models.URLField(blank=True)),
            ],
        ),
    ]
11,457
368a2592df530520268abd8fff1b55ca842cc1d9
import socket
import time

# A minimal hand-rolled HTTP server demo: parses the request path and
# routes "/rihan", "/oumei", "/" to fixed responses; anything else
# gets a 404 page. Response bodies are intentionally in Chinese.

# Create the socket object
sk = socket.socket()
# Bind IP address and port
sk.bind(('127.0.0.1', 8000))
# Listen for connections
sk.listen()


def rihan(url):
    """Return the welcome page body for the Japan/Korea section."""
    return "欢迎访问 日韩板块{}".format(url)


def oumei(url):
    """Return the welcome page body for the Europe/America section."""
    return "欢迎访问 欧美板块{}".format(url)


print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

# Keep waiting for connections forever
while True:
    conn, addr = sk.accept()
    data = conn.recv(2048).decode("UTF-8")
    # second token of the request line is the path, e.g. "GET /rihan HTTP/1.1"
    url = data.split()[1]
    print(url)
    # HTTP response headers
    conn.send(b'HTTP/1.1 200 OK\r\nContent-Type: text/html;charset=utf-8\r\n\r\n')
    if url == "/rihan":
        ret = rihan(url)
    elif url == "/oumei":
        ret = oumei(url)
    elif url == "/":
        ret = "<h1>欢迎访问 首页</h1>"
    else:
        # NOTE(review): status line above still says 200 even for this page
        ret = "<h1>404 not found</h1>"
    conn.send(ret.encode('utf-8'))
    # Close the connection
    conn.close()
11,458
6f5338a99d5e1311f8b1a48679e7a96900aef998
import unittest from backend import utils from backend.messages import ( test_clean_positions, test_all_equals, test_get_segments, ) class UtilsTest(unittest.TestCase): def __init__(self, methodName='runTest'): self.Play = utils super(UtilsTest, self).__init__(methodName=methodName) def setUp(self): pass def tearDown(self): pass def test_clean_positions(self): # all clean positions board = [-1, -1, -1, -1, -1, -1, -1, -1, -1] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 9, test_clean_positions) for index in range(9): self.assertEqual(index+1, clean_positions[index], test_clean_positions) # first three with no clean positions board = ['X', 'O', 'X', -1, -1, -1, -1, -1, -1] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 6, test_clean_positions) # last three with no clean positions board = [-1, -1, -1, -1, -1, -1, 'O', 'O', 'X'] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 6, test_clean_positions) # last three with no clean positions board = [-1, 'O', -1, 'X', -1, -1, 'O', 'O', -1] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 5, test_clean_positions) # only one cell no clear board = [-1, -1, 'X', -1, -1, -1, -1, -1, -1] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 8, test_clean_positions) self.assertFalse(3 in clean_positions, test_clean_positions) # only one cell no clear board = [-1, -1, -1, -1, -1, 'X', -1, -1, -1] clean_positions = self.Play.clean_positions(board) self.assertTrue(len(clean_positions) == 8, test_clean_positions) self.assertFalse(6 in clean_positions, test_clean_positions) def test_get_segments(self): board = [-1] * 9 segments = self.Play.get_segments(board) self.assertEqual(len(segments), len(self.Play.combinations_wins), test_get_segments) def test_all_equals(self): segment = ['X', 'O', -1] 
self.assertFalse(self.Play.all_equals(segment), test_all_equals) segment = ['X', 'O', 'O'] self.assertFalse(self.Play.all_equals(segment), test_all_equals) segment = ['O', 'O', -1] self.assertFalse(self.Play.all_equals(segment), test_all_equals) segment = ['X', 'X', -1] self.assertFalse(self.Play.all_equals(segment), test_all_equals) segment = ['X', 'O', 'X'] self.assertFalse(self.Play.all_equals(segment), test_all_equals) segment = ['X', 'X', 'X'] self.assertTrue(self.Play.all_equals(segment), test_all_equals) segment = ['O', 'O', 'O'] self.assertTrue(self.Play.all_equals(segment), test_all_equals) def test_a_winner(self): # no winner board = [ 'X', '0', -1, -1, 'X', -1, -1, -1, '0' ] self.assertFalse(self.Play.a_winner(board)) # winner X board = [ 'X', '0', '0', -1, 'X', -1, -1, -1, 'X' ] self.assertTrue(self.Play.a_winner(board)) # winner O board = [ '0', 'X', 'X', -1, '0', 'X', -1, -1, '0' ] self.assertTrue(self.Play.a_winner(board)) # no winner board = [ '0', 'X', 'X', -1, '0', '0', -1, 'X', 'X' ] self.assertFalse(self.Play.a_winner(board)) if __name__ == '__main__': unittest.main()
11,459
7d95b9a59f0158c35bbb99b34dd7307431c85cef
test_input = ''' [1,1,3,1,1] [1,1,5,1,1] [[1],[2,3,4]] [[1],4] [9] [[8,7,6]] [[4,4],4,4] [[4,4],4,4,4] [7,7,7,7] [7,7,7] [] [3] [[[]]] [[]] [1,[2,[3,[4,[5,6,7]]]],8,9] [1,[2,[3,[4,[5,6,0]]]],8,9] ''' def compare_integers(left, right): if left < right: return -1 if left > right: return 1 return 0 def compare_lists(left, right): if isinstance(left, list) or isinstance(right, list): if not isinstance(left, list): left = [left] elif not isinstance(right, list): right = [right] for i in range(len(left)): if i == len(right): return 1 a = left[i] b = right[i] test = compare_lists(a, b) if test < 0 or test > 0: return test return -1 if len(left) < len(right) else 0 else: return compare_integers(left, right) def part1(input): if input: s = 0 for i, pair in enumerate(input.strip().split('\n\n')): pair = pair.split() left = eval(pair[0]) right = eval(pair[1]) if compare_lists(left, right) < 0: s += i + 1 print("Part 1: {}".format(s)) def part2(input): if input: decoder_packets = [[[2]], [[6]]] ordered_packets = [] + decoder_packets for packet in [eval(line) for line in input.strip().split('\n') if line]: found = False for i, other in enumerate(ordered_packets): if compare_lists(packet, other) < 0: ordered_packets.insert(i, packet) found = True break if not found: ordered_packets.append(packet) locations = [index + 1 for (index, packet) in enumerate(ordered_packets) if packet in decoder_packets] print("Part 2: {}".format(locations[0] * locations[1])) print('---TEST---') part1(test_input) part2(test_input) with open('input.txt') as f: input = f.read() print('---INPUT---') part1(input) part2(input)
11,460
1f7d48056abd2e2fd4b987f9973f7e6657d9b3c3
#!/usr/bin/env python3 numbers=0 avg=0 while True: inp=input("please give me a number or the world 'done': ") try: x=float(inp) avg+=x numbers+=1 except ValueError: if inp=="done": break else: print("incorrect value entered. please enter a number or 'done'") try: avg=avg/numbers except ZeroDivisionError: print("zero numbers entered; please try again") else: # executes of the try completes without exception print("the average is %.2f" % (avg))
11,461
5e654a76c1e739506d4357c13dd45c43ceaff245
from typing import List ''' 给定两个整数 n 和 k,返回 1 ... n 中所有可能的 k 个数的组合。 示例: 输入: n = 4, k = 2 输出: [ [2,4], [3,4], [2,3], [1,2], [1,3], [1,4], ] ''' class Solution: def combine(self, n: int, k: int) -> List[List[int]]: self.result = [] self.tmp = [] self.dfs(1, n, k) print(self.result) return self.result def dfs(self, index, n, k): if len(self.tmp) == k: self.result.append(self.tmp.copy()) return for i in range(index, n + 1): self.tmp.append(i) self.dfs(i + 1, n, k) self.tmp.pop() if __name__ == '__main__': n, k = 4, 3 s = Solution() s.combine(n, k)
11,462
629e33b9b821ca32db722f92aa1f11d2ef333eff
# Definition for singly-linked list. # class ListNode: # def __init__(self, val=0, next=None): # self.val = val # self.next = next class Solution: def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]: length = 1 node = head while node.next: length += 1 node = node.next if length == 1: return None remove_index = length - n if remove_index == 0: head = head.next else: node = head for i in range(remove_index - 1): node = node.next node.next = node.next.next return head
11,463
2e524ec08d489a932cebdcb044eaa28e6058854b
# -*- coding: utf-8 -*- __author__ = 'admin' import urllib2 from lagou_db import LagouDb import bs4 import requests import re import threading import time import random # url3 = 'http://www.freeproxylists.net/zh/cn.html' def get_cn_proxy(): url = 'http://cn-proxy.com/' proxy = urllib2.ProxyHandler({'http': '115.29.169.182:37711'}) opener = urllib2.build_opener(proxy) urllib2.install_opener(opener) response = urllib2.urlopen(url) web = response.read() soup = bs4.BeautifulSoup(web) proxy_list = [] table_body = soup.find_all('table', {'class': 'sortable'})[1].find('tbody') rows = table_body.find_all('tr') for row in rows: rows_list = re.split('\n+', row.text.strip('\n+')) proxy = {} proxy['ip'] = rows_list[0] + ':' + rows_list[1] proxy['update_time'] = rows_list[3] proxy['type'] = 'http' proxy_list.append(proxy) return proxy_list def get_getproxy(page='1'): url = 'http://www.getproxy.jp/en/china/' + str(page) proxy = urllib2.ProxyHandler({'http': '115.29.169.182:37711'}) opener = urllib2.build_opener(proxy) urllib2.install_opener(opener) response = urllib2.urlopen(url) web = response.read() soup = bs4.BeautifulSoup(web) table = soup.find('table', {'class': 'mytable'}).find_all('tr') proxy_list = [] for row in table[1:-1]: proxy = {} elements = row.text.strip('\n').split('\n') proxy['ip'] = elements[0] proxy['update_time'] = elements[-1] proxy['type'] = elements[-2] proxy_list.append(proxy) return proxy_list class ProxyCheck(threading.Thread): def __init__(self, proxyList, valid_proxy): threading.Thread.__init__(self) self.proxyList = proxyList self.timeout = 5 self.testUrl = 'http://www.baidu,com/' self.valid_proxy = valid_proxy def checkProxy(self): cookies = urllib2.HTTPCookieProcessor() # valid_proxy = [] for proxy in self.proxyList: proxyHandler = urllib2.ProxyHandler({proxy['type']: proxy['ip']}) # proxyHandler = urllib2.ProxyHandler({'http': '115.29.169.182:37711'}) opener = urllib2.build_opener(cookies, proxyHandler) opener.addheaders = [('User-agent', 
'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')] t1 = time.time() try: req = opener.open('http://site.baidu.com/', timeout=self.timeout) result = req.read() timeused = time.time() - t1 pos = result.find('030173') if pos > 1: self.valid_proxy[proxy['ip']] = 0 # self.valid_proxy.append(proxy['ip']) print 'get pos', proxy['ip'], time.time() else: print 'not pos', proxy['ip'], time.time() continue except Exception, e: print 'fail', proxy['ip'], time.time() def run(self): self.checkProxy() if __name__ == "__main__": valid_proxy = [] ti = time.time() checkThreads = [] proxy_list = get_getproxy() # for i in range(2, 5): # proxy_list += get_getproxy(i) t_num = 20 for i in range(t_num): t = ProxyCheck(proxy_list[i*len(proxy_list)/t_num:(i+1)*len(proxy_list)/t_num]) checkThreads.append(t) for i in range(len(checkThreads)): checkThreads[i].start() for i in range(len(checkThreads)): checkThreads[i].join() print '%s thread used time %s' % (t_num, time.time()-ti) # print proxy_list # db = LagouDb().db() # db['proxy'].insert_many(valid_proxy)
11,464
046383cedca3283133a4a38dd5dbd677ec65af20
import numpy as np import pandas as pd import matplotlib.pyplot as plt def calci(X1,X2,W,power): #function to find the output for an input ct=0 val=0 for p in range(power+1): tp=p while tp>=0: val=val+W[ct]*(X1**tp)*(X2**(p-tp)) tp=tp-1 ct=ct+1 return val if __name__ == "__main__": #The below lines take the csv file as input and make changes to the input dat accordingly data=pd.read_csv("function3_2d.csv") data=data.to_numpy() data=np.delete(data,0,axis=1) size=50 #Setting size of data to be trained on x1=data[:size,0] x2=data[:size,1] y=data[:size,2] power=6 #degree of the polynomial des_mat=np.empty((size,int(((power+2)*(power+1))/2))) hyp=1 #Hyperparameter lambda for i in range(size): ct=0 for p in range(power+1): tp=p while tp>=0: des_mat[i][ct]=(x1[i]**tp)*(x2[i]**(p-tp)) #Constructing the design matrix tp=tp-1 ct=ct+1 #Obtaining the W Matrix A=np.dot(des_mat.T,des_mat) size=A.shape[0] for i in range(size): for p in range(size): if i==p: A[i][p]+=hyp #Lambda is addded to diagonals B=np.dot(des_mat.T,y) W=np.linalg.solve(A,B) #Setting X1 and X2 values for 3-D graph plotting X1=np.linspace(-18,18,200) X2=np.linspace(-18,18,200) X1,X2 = np.meshgrid(X1,X2) fx=[] #The below for loop generates the output for different X1's and X2's and stores them in fx for i in range(200): tp=[] for p in range(200): tp.append(calci(X1[i][p],X2[i][p],W,power)) fx.append(tp) #Plotting is done below fig=plt.figure() fx=np.array(fx) ax=plt.axes(projection='3d') ax.plot_surface(X1,X2,fx) ax.scatter3D(x1,x2,y,color="red",label='Dataset Points') ax.set_title('Surface plot with M=6 N=50 and $\lambda$=1') ax.set_xlabel('x1') ax.set_ylabel('x2') ax.set_zlabel('y') ax.legend(bbox_to_anchor=(1.1, 0.92), bbox_transform=ax.transAxes) plt.savefig('M=6_N=50_lambda=1.png') plt.show()
11,465
682bcbddae6e769505840e96b66ebf1351d80327
from datetime import datetime from pathlib import Path from jinja2 import Template from reconcile.utils.mr.base import MergeRequestBase from reconcile.utils.mr.labels import DO_NOT_MERGE PROJ_ROOT = (Path(__file__) / '..' / '..' / '..').resolve() EMAIL_TEMPLATE = PROJ_ROOT / 'templates' / 'email.yml.j2' class CreateAppInterfaceNotificator(MergeRequestBase): name = 'create_app_interface_notificator_mr' def __init__(self, notification): """ :param notification: the notification data. Example: { "notification_type": "Outage", "description": "The AppSRE team is current investigating ...", "short_description": "Outage notification", "recipients": [ "/teams/app-sre/users/asegundo.yml" ] } :type notification: dict """ self.notification = notification super().__init__() self.labels = [DO_NOT_MERGE] @property def title(self): return (f"[{self.name}] " f"{self.notification['notification_type']}: " f"{self.notification['short_description']}") def process(self, gitlab_cli): now = datetime.now() ts = now.strftime("%Y%m%d%H%M%S") short_date = now.strftime('%Y-%m-%d') with open(EMAIL_TEMPLATE) as file_obj: template = Template(file_obj.read(), keep_trailing_newline=True, trim_blocks=True) subject = (f'[{self.notification["notification_type"]}] ' f'{self.notification["short_description"]} - ' f'{short_date}') content = template.render(NAME=f'{self.name}-{ts}', SUBJECT=subject, USERS=self.notification['recipients'], BODY=self.notification['description']) email_path = Path('data') / 'app-interface' / 'emails' / f'{ts}.yml' commit_message = f"[{self.name}] adding notification" gitlab_cli.create_file(branch_name=self.branch, file_path=str(email_path), commit_message=commit_message, content=content)
11,466
2878a69dbfbb810e2385d867600a6b9e1745d459
import socket import cv2 import numpy import thread import time face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') #Database to identify the faces eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') #Database to identify the eyes def recvall(sock, count): #cria um buffer para transmissao buf = b'' while count: newbuf = sock.recv(count) if not newbuf: return None buf += newbuf count -= len(newbuf) return buf def newClient(clientsocket, addr, i): while True: length = recvall(clientsocket, 16) #tamanho da foto a ser recebida stringData = recvall(clientsocket, int(length)) #stringData recebe o valor em string da foto data = numpy.fromstring(stringData, dtype='uint8') #conversao de srting para numpy array decimg = cv2.imdecode(data, 1) #decodificando o numpy array em imagem checkFace = faceRecognition(decimg, i) #checando as faces no metodo faceRecognition if checkFace == 1: #se existir uma face na foto, armazena no hd do server cv2.imwrite('received ' + str(i) + '.jpg', decimg) else: print 'Face not found.' i = i + 1 clientsocket.close() #fecha o socket de conexao def TCP_SOCKET(TCP_IP, TCP_PORT): print 'Launching TCP Server...' TCP_PORT = int(TCP_PORT) #cast da porta, tem que ser int s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #abrindo o socket TCP em sock.SOCK_STREAM s.bind((TCP_IP, TCP_PORT)) #setando o server no ip e na porta especificada s.listen(True) #TCP server escuta a rede print 'Server Launched!' print 'Waiting for clients...' while True: conn, addr = s.accept() #aceita a conexao print 'Connection started, from: ', addr thread.start_new_thread(newClient, (conn, addr, i)) #inicia a thread chamando o metodo newClient s.close() def UDP_SOCKET(UDP_IP, UDP_PORT): print 'Launching UDP Server...' UDP_PORT = int(UDP_PORT) udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) orig = (UDP_IP, UDP_PORT) udp.bind(orig) print 'Server Launched!' print 'Waiting for clients...' 
i = 0 buf = 1024 while True: bytes = 0 f = open('preImg' + str(i) + '.jpg', "wb") startstamp = time.time() data, addr = udp.recvfrom(buf) print 'Connection started, from: ', addr while (data): f.write(data) udp.settimeout(20) endstamp = time.time() bytes += len(data) data, addr = udp.recvfrom(buf) if data == "end image": rate = (bytes / (endstamp - startstamp) * 8) / 1000 print "freq (Hz) = 5 " "| bit rate (kbps) = ", int(rate) time.sleep(20) break f.close() decimg = cv2.imread('preImg' + str(i) + '.jpg') checkFace = faceRecognition(decimg, i) if checkFace == 1: # se existir uma face na foto, armazena no hd do server # cv2.imwrite('C:\\Users\\Frederico\\Desktop\\RedesTp\\Face\\received ' + str(i) + '.jpg', decimg) print 'Face found.' cv2.imwrite('received ' + str(i) + '.jpg', decimg) else: # cv2.imwrite('C:\\Users\\Frederico\\Desktop\\RedesTp\\NoFace\\received ' + str(i) + '.jpg', decimg) print 'Face not found.' udp.close() def faceRecognition(image, i): gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) if len(faces) >= 1: print 'Face found on image: ', i for (x, y, w, h) in faces: cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2) roi_gray = gray[y:y + h, x:x + w] roi_color = image[y:y + h, x:x + w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2) return 1 else: print 'Face not found on image: ', i return 0 #TCP_IP = '192.168.100.103' #TCP_IP = '127.0.0.1' #TCP_PORT = 4040 IP = raw_input('Server IP Address: ') PORT = raw_input('Server Port: ') i = 0 print 'Choose server protocol: ' print '1 - TCP Server;' print '2 - UDP Server;' serverType = raw_input('Option: ') if serverType == '1': print 'TCP Server Init...' TCP_SOCKET(IP, PORT) elif serverType == '2': print 'UDP Server Init...' UDP_SOCKET(IP, PORT) else: print 'Invalid Option.'
11,467
89b60790ebbbb7e9300fec7659e183854022eb75
#!/usr/bin/env python3 import fileinput import random def is_tidy(n): for i, c in enumerate(str(n)[:-1]): if c > str(n)[i+1]: return False return True def solve(number): digits = [int(c) for c in str(number)] # find first non-tidy digit for first_invalid, d in enumerate(digits): if first_invalid+1 < len(digits) and d > digits[first_invalid+1]: break else: # input was already tidy return number # find last digit that can be tidily decremented decr_index = first_invalid while decr_index > 0: if digits[decr_index] > digits[decr_index-1]: digits[decr_index] = max(digits[decr_index-1], digits[decr_index]-1) for i in range(decr_index+1, len(digits)): digits[i] = 9 return int(''.join(str(d) for d in digits)) decr_index -= 1 # decrement first digit and set rest to 9s digits = [digits[0]-1] + [9] * (len(digits)-1) return int(''.join(str(d) for d in digits)) def test_samples(): assert solve(132) == 129 assert solve(1000) == 999 assert solve(7) == 7 assert solve(111111111111111110) == 99999999999999999 def test_corners(): assert solve(0) == 0 assert solve(123) == 123 assert solve(987) == 899 def test_solve_annoying(): assert solve(11111222244444222200002229999000) == 11111222239999999999999999999999 def test_random(): for _ in range(100): ex = random.randint(0, 100000000) result = solve(ex) assert result <= ex assert is_tidy(result) def main(): n = int(input()) for i, line in enumerate(fileinput.input()): highest = solve(int(line)) assert is_tidy(highest), '{} not tidy'.format(highest) assert highest <= int(line) print('Case #{}: {}'.format(i+1, highest)) assert i == n-1, 'read wrong number of cases' if __name__ == '__main__': main()
11,468
0ba6a3fbac7ed4260848d26e7fb2f83056460a63
''' Call two arms equally strong if the heaviest weights they each are able to lift are equal. Call two people equally strong if their strongest arms are equally strong (the strongest arm can be both the right and the left), and so are their weakest arms. Given your and your friend's arms' lifting capabilities find out if you two are equally strong. Example • For yourLeft = 10, yourRight = 15, friendsLeft = 15, and friendsRight = 10, the output should be areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight) = true; • For yourLeft = 15, yourRight = 10, friendsLeft = 15, and friendsRight = 10, the output should be areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight) = true; • For yourLeft = 15, yourRight = 10, friendsLeft = 15, and friendsRight = 9, the output should be areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight) = false. ''' def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight): if(yourLeft == friendsLeft and yourRight == friendsRight): return True elif(yourLeft == friendsRight and yourRight == friendsLeft): return True else: return False if __name__ == "__main__": yourLeft = 15 yourRight = 10 friendsLeft = 15 friendsRight = 9 output = areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight) print(output)
11,469
f6cb8f33b16f29167c3160883c42581c1dab0534
# Generated by Django 2.0.3 on 2018-11-17 18:58 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Size', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ], ), migrations.CreateModel( name='TypeOfPizza', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ], ), ]
11,470
53d331b0d1ab767e0c697dd06627598f57e3b71d
# This filter deforms the terrain in the selection box by creating a gash in the surface of the world # # abrightmoore@yahoo.com.au # http://brightmoore.net # from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2 from random import * from numpy import * inputs = ( ("GASH abrightmoore@yahoo.com.au http://brightmoore.net", "label"), ) # Utility methods def setBlockIfEmpty(level, (block, data), x, y, z): tempBlock = level.blockAt(x,y,z) if tempBlock == 0: setBlock(level, (block, data), x, y, z) def setBlock(level, (block, data), x, y, z): level.setBlockAt(x, y, z, block) level.setBlockDataAt(x, y, z, data) def setBlockToGround(level, (block, data), x, y, z, ymin): for iterY in xrange(ymin, y): setBlockIfEmpty(level, (block, data), x, iterY, z) def getBoxSize(box): return (box.maxx - box.minx, box.maxy - box.miny, box.maxz - box.minz) def fix(angle): while angle > pi: angle = angle - 2 * pi while angle < -pi: angle = angle + 2 * pi return angle def Gash(level, box, options): # CONSTANTS AND GLOBAL VARIABLES method = "Gash" (width, height, depth) = getBoxSize(box) centreWidth = width / 2 centreHeight = height / 2 centreDepth = depth / 2 AIR = (0,0) # END CONSTANTS if depth > width: # The gash runs the length of the selection box, whichever the orientation angleStart = randint(0,100) angleEnd = randint(100,200) angleStepSizeZ = (pi/50*(angleEnd-angleStart))/depth # describes the arc of the gash gashAngleStart = pi/50*angleStart gashStepSize = pi/depth # one full revolution for iterZ in xrange(0, depth): iterX = (int)(centreWidth*sin(gashAngleStart+angleStepSizeZ * iterZ)) gashWidth = (int)(centreWidth*sin(gashStepSize*iterZ)/2) for iterY in xrange(0, height): heightBand = 2 - (int)((iterY / (height / 2))) for sizeX in xrange(-gashWidth+heightBand,gashWidth-heightBand): posX = (float)(iterX+sizeX) gashWidthMultiplier = (float)(iterY/height) setBlock(level, AIR, box.minx+centreWidth+(int)(posX), box.miny+iterY, box.minz+iterZ 
) else: angleStart = randint(0,100) angleEnd = randint(100,200) angleStepSizeX = (pi/50*(angleEnd-angleStart))/width # describes the arc of the gash gashAngleStart = pi/50*angleStart gashStepSize = pi/width # one full revolution for iterX in xrange(0, width): iterZ = (int)(centreDepth*sin(gashAngleStart+angleStepSizeX * iterX)) gashWidth = (int)(centreDepth*sin(gashStepSize*iterX)/2) for iterY in xrange(0, height): heightBand = 2 - (int)((iterY / (height / 2))) for sizeZ in xrange(-gashWidth+heightBand,gashWidth-heightBand): posZ = (float)(iterZ+sizeZ) setBlock(level, AIR, box.minx+iterX, box.miny+iterY, box.minz+centreDepth+(int)(posZ) ) def perform(level, box, options): ''' Feedback to abrightmoore@yahoo.com.au ''' Gash(level, box, options) level.markDirtyBox(box)
11,471
03ffe600fecee8f6501d636bf659d32b81ef390c
#!/usr/bin/python3 import argparse getHex = __import__('10').getHex getSparseHash = __import__('10').getSparseHash densifyHash = __import__('10').densifyHash def getArguments(): parser = argparse.ArgumentParser(description='Advent of code') parser.add_argument('input', metavar='file', type=argparse.FileType('r')) parser.add_argument('-s','--size', type=int, default=256) parser.add_argument('-r','--rounds', type=int, default=64) return parser.parse_args() def hexToBin(v): assert(type(v) is str) num_of_bits = 4 * len(v) return str(bin(int(v, 16))[2:].zfill(num_of_bits)) def fillFieldWithColor(rows,x,y, color): if(x < 0): return if(y < 0): return if(y >= len(rows)): return if(x >= len(rows[0])): return if rows[y][x] != '#': return rows[y][x] = color fillFieldWithColor(rows, x-1, y, color) fillFieldWithColor(rows, x+1, y, color) fillFieldWithColor(rows, x, y-1, color) fillFieldWithColor(rows, x, y+1, color) def colorizeField(rows): maxNumberOfColors = 0 for i,r in enumerate(rows): for j,v in enumerate(r): if v == '#': maxNumberOfColors += 1 fillFieldWithColor(rows,j,i,maxNumberOfColors) return maxNumberOfColors if __name__ == '__main__': args = getArguments() lines = [ x.strip() for x in args.input.readlines() ] for line in lines: rows = [] for i in range(128): l = line + "-" + str(i) rows.append(list(hexToBin(getHex(densifyHash(getSparseHash(args.size, args.rounds, l)))).replace('1','#').replace('0','.'))) numColors = colorizeField(rows) for x in rows: print(x) print("There are", numColors, "fields")
11,472
431e9169b03c548e5ce886e9505e2ef446306fbc
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2017/10/9 下午7:13 # @Author : Hou Rong # @Site : # @File : test_mongo_iter.py # @Software: PyCharm import pymongo import time client = pymongo.MongoClient(host='10.10.231.105') collections = client['Test']['test2'] cursor = collections.find({}) if __name__ == '__main__': while True: _count = 2000 while _count: _count -= 1 try: line = cursor.next() except StopIteration: break print(time.time())
11,473
1d04e33209cb8aec365e16ba2f6ad5f3fc9fd94f
"""Command-line tool functionality. Parses arguments and determines which tool should be called. """ import sys import argparse from abc import ABC, abstractmethod from typing import Dict import importlib # New tools should be added to this list. TOOL_NAME_LIST = ['remove-background'] class AbstractCLI(ABC): """Abstract class for cellbender command-line interface tools. Note: Tools are called from the command line using $ cellbender TOOL-NAME --optional_arg1 optional_val1 ... """ @abstractmethod def get_name(self) -> str: """Return the command-line name of the tool.""" pass @abstractmethod def validate_args(self, parser: argparse): """Do tool-specific argument validation, returning args.""" pass @abstractmethod def run(self, args): """Run the tool using the parsed arguments.""" pass def generate_cli_dictionary() -> Dict[str, AbstractCLI]: # Add the tool-specific arguments using sub-parsers. cli_dict = dict(keys=TOOL_NAME_LIST) for tool_name in TOOL_NAME_LIST: # Note: tool name contains a dash, while folder name uses an underscore. # Generate the name of the module that contains the tool. module_cli_str_list = ["cellbender", tool_name.replace("-", "_"), "cli"] # Import the module. module_cli = importlib.import_module('.'.join(module_cli_str_list)) # Note: the module must have a file named cli.py in the main # directory, containing a class named CLI, which implements AbstractCLI. cli_dict[tool_name] = module_cli.CLI() return cli_dict def get_populated_argparser() -> argparse.ArgumentParser: # Set up argument parser. parser = argparse.ArgumentParser( prog="cellbender", description="CellBender is a software package for eliminating technical artifacts from high-throughput " "single-cell RNA sequencing (scRNA-seq) data.") # Declare the existence of sub-parsers. 
subparsers = parser.add_subparsers( title="sub-commands", description="valid cellbender commands", dest="tool") for tool_name in TOOL_NAME_LIST: module_argparse_str_list = ["cellbender", tool_name.replace("-", "_"), "argparse"] module_argparse = importlib.import_module('.'.join(module_argparse_str_list)) subparsers = module_argparse.add_subparser_args(subparsers) return parser def main(): """Parse command-line arguments and run specified tool. Note: Does not take explicit input arguments, but uses sys.argv inputs from the command line. """ parser = get_populated_argparser() cli_dict = generate_cli_dictionary() # Parse arguments. if len(sys.argv) > 1: args = parser.parse_args(sys.argv[1:]) # Validate arguments. args = cli_dict[args.tool].validate_args(args) # Run the tool. cli_dict[args.tool].run(args) else: parser.print_help()
11,474
b86bec43edbf9ddc9befdab3eab5d46c49f77ddc
# -*- coding: utf-8 -*- ''' Physical Constants =============================================== ''' from sqlalchemy import String, Float, Column from exa.relational.base import Base class Constant(Base): ''' Physical constants and their values in SI units. >>> Eh = Constant['Eh'] >>> Eh.value 4.35974434e-18 ''' symbol = Column(String, nullable=False) value = Column(Float, nullable=False) def __repr__(self): return 'Constant({0}: {1})'.format(self.symbol, self.value)
11,475
b3d418500e70bd41767011a6aa051b8c7b070a66
𛀒𮟯👼䂒𔔻𛊗ﴥ🚼𦯙𠡾𣚏狠웃𘂄焭ѫ諛𧙡섮𒂋ꔌ䭿ᇤ蚢媩ﴭ𘅈𫃨㒰𥴔𫨈𢙚畃ꖉ𪖼𦤍ഁ䫬🖇𝖪𠽺𐼽𭊏𮂎𠒢兆𢡇𤘮倪𤭜𐔥𧯽俙𪝥𦺗⚤𬬺𮑕偧㋑퍨嗂𖢞𦍚𪲄𮗗Ꞽ溹쮍綘𣑺숉𣡉蜴𤵷𡛒㱜お⫸焖𘇜㷮𘪸𩞥𮭞뛒憨𦋝ڑﰥ𦅐ﶙ𘥆䌉𡠵𧑸飩𬅢𣢋ኚ𘇇𢯤𩴵퇽𮬹𝌋𩇵䁦𫙰蚊梪嶘𪘦ᄏ𐌅韬𘍚𭺏궰𞡊窅𨢨𠑝𖥓𦃥𣵹🅞𘚲ꎁ𥇃𢛎𐎑𗏀𧳞𔗀𭦀䈚𣲒𤳨𤌘烶騐𭨜ᰙ⧺𫡭𧥪즺𫤈𧶷𐐣𧭠늇𫺋𣊇𖺆礑ࣹ퇆𗻕𮤴릸Ł𝠿㡐𭪒ꅝ㡂𢐗𫸞𧜓𗦅𮁓𢄞𧡮𬵁筑𧅛𠨏番뫳𪖧鍓𬥸𥏋𐙪次𡝍𠻦𠶡䬿𥇇𦨥⌐갾纬𗜴綐ေ㥯𫆩𐔰𣡏𐛈𨵾𢎎𪁡𧲲𥣤𢺊𗸠𤊦뿮헒꣹𨅈ẅ╁𢀤ᴺ幀傦ݑ鰴𝦸詏🡫𤉗𐓌𨵤𘁬𔔢띢🦶𭋐惧𣫻𪖧𗾫𗧭𫢈𣿮👳𝈩𨎳𨃏𣲯𖤫𪒐𮞸𐪝𣍄𡚃⓫驪䢸𢏓ያ글𡋸𦪆ﵺ⿱𦯥𡀺𐰍𠷝ⶻĘ᪀𝍸孤첖𩘹吴𦍚𤠠𠑊𮉙𬡍𣃛涑𫤐𥈼䯜🥿𭋼❯䱧䋎𣤚෭𓈎𣍰韽ﳓᓟ𥩖ꋆ䙟⣂𝢄䴇ꌬ𥝄⸚琧𐑀𢴃𫖛끭㦯횊𢲩劆𓁹𠛱𝦴𗃀撅뒪ⰲ𠄮𧶚𝢗渧𫷁砃𔘣辋𗖭⭎偂𫆘萵𪩗褐𗧁튲𑻲芌ᠽ䃲𡪡𨆎뿓𨒧𣠮홗𦪔𘉽𦬩𪩟■䝶⤧Ⲃ綗♮ീ𮘳𗧣磄𫷪表𦟠✫𑈌쮶𗴹𑦽𩡋䂞𤄮𠊌櫠𮄔𫂋𐓺荴𣔖Ꮺ𦔧𦽞𤀅㦋𥨔쓕㗘𩒉𘥻𥭘䦕𩻲𢵺𥭕𨆽𬀊𦝵𑁂𐜕耉󠆺𮧬𣧝𬆳𧔯𣻿𝁐敥𭪋𥵁𪆜𝄹샏𒂸𩄿🁌𨚆𗢍䤝𩆞𓍩𧶱𪈇𦔺絫枹ꊰ鍡𒈸𤏙𡔒𥍩膴𘑘⃒𧕏𨋔𫦽𤩿𛰑𗡲ဌ𠉃넔𣟼홒𧻎𝘋𩒎䫁𬪳𑘔🂷𗤰𩼉𖭶𩷶𩆰ﳪ𭇽⠦ॳ𩘔鱰𐋸濂𮠨𨝎ﶫ뛿꽤𪛉🜷㸁𢘦ࡔາ놞𭡓懷𡯬🂩皡㵶𐠲䷓禢𗊜⽢闟𘃏𤦄𨶬𡪇𠪣𪱠𪂨𗳐ꊉ㝛𧙱㓿𣖌𦶒ך𡂡魓𭣠𢣩𭓄𭎯ኘ𫇇𥌠𫩒诙𨄀𮥏檃餀𫅢ꍐ≭𔙅𡸬蒵𤺏️𠭝߰ᠪ𥊂𗱺𡫌𧮥Ѿ‡紮似ោ眖𢷀𭏠嗁𩰘𮗨𤿧𤔶𫙐碩贸𐨭䏋𗮣𥞚Ч𗵸𤈝𨀟𥰲뀲𨑡𧪋𬪱𭝃𢚐䱀𫙾𭾩𡜜焔躂𫏱𤀤ᩖ𑋐𐍧𘥕𠵕綅𐃍𘃻悘䨃𬃮𥝉닃斫𖢱𬅓ଗ桞奱𘊵𨝊𮢿𥡖贐𐎑ᰥ㡏𡮩𬱥黺띖𧭧࣪𢝛Ẁ𦵧𬻜짍𢒫𠛰뫦벱𣹵𤞒𦝦𬭘답牃傴ȵ𭑹හტ﮹贑𝞍𢲻𠍌𗑣鳥𢲞ꀚ驦𐢋𖧜𠼳퉛𐭦蘥⟸𘃇𦠻𩒢𢉙𥗯𠺸𩟼⇂荚𬢺𧙁𠵿𫯋𥮪𬞊𧩬鯡꿌莝𥓩鵈𫟢嶚𦿆𠳾𨒽궫𧐜ᭈ𡃁푥𤙜𪬳㛔풩𣯐⧜𣖠ፌ𡲸㷮𢶾档𛄇𦥞庆𘓰𢀖𨥇𢟍赓𥠾ﳜ蒿𨸂縼𑋞𗁗繥⍃Ѷ𠦩𪹿唐𫔭𫰻㲘𦨇䒰🖊𮑀⎄𥁑𪆴𒉞𭟼𣃌違饂萊𡄃🥺𠦂𪷼⬎𠼊⥩𫓴𤾥𓂿🥋ⰵ𧣟낞𧝙𪞑뱂ⴺ𨂓𬶄罄跮𛀽𗡣𘨌𢜸𦮳𗥳䯧Ꜫʈ𭻍𮂋𡙷𡋎亅𬫸𐲊𡗨㺔𢜘𑅴㙯崸𡦦𓇅嵁蛬𮉅𬎸⦨𢊾𭳚춄𥾷𒇅천ﻯ𠄠𦳧𨟾棌㎰𨜘𡩌𡿕ㅭ𡶛⇣𣥒𢁙𢃬簐퐎⌢𑶁𭳛𠍰⨽𠰮𤂄푛𦃡揗𡳽𣁳𩢽⁃𮮍赤ﴅ룪쵺𧕠𥅙𨟭𥠃𘙏〄𗵧𬜌뮑𐌌𧙃篶𬼘뭴𫮴𮋡𫧥⪂⾉𦍔뉅𗑬崶𢪼𡾃䋯ꝟ𤵐𨥲𬭱᮪Ꞡꈡ눈𭙮𨟕𤜧𢸅𡴏𢂽𭵥𗣾茝𪂱䯪𣇦츙𥂥㤂堻𒄽𤟨𪹱༌𗾃빝𥂘𖦑樋𬌖ᦸ𠺼⩮휚𡆦𦍻뵃첺𝂐𗏼𧳺𤹎𨅺龜🛳𦒎嬧䰵𗗰𘐘ﰻ𤀊𣻢𮐮𭺖劍𭆨ᗸ𢄈𨬲䆖૽𧻼襃柬ᾣ쪻𨫁葻𡑵𤽿𦍿𑒇ꃸ𛋤𨦿愍𪮐𤞝𪫉㭿𥤗툖ꏺ侚𥎷氀𤒨˟𡕒奤𪬨𝤗🟡𤔶ಅ붬𦭤峗𗐍𡧈貈𢒊𦨝𦭑ꋸ佳䙓𬩎𧰔⿻𪦅𠚮떢嬽卍낺礩𬎆𠗕䅋𨌯𪼆𬡙ᚇ𤪴䎜झ🝮禿𤨇喹𤉉젾𩷫𥍬吺𦀢𫄾퀇ᡦ텚쥮풟牟𪍵𣌓𘡰큃楟𥷒𓎮𡮥𫰿𣣴𡆦𞢝𩗶𭫈𦯙𑖏𢕎আ🧪𬔧ᤌጒ𠥋唂戚ᒉ꺋艈𘍀𫝵𑆽旙鼼𗞑츣𬜊처鰧핼体𘗺𦤡𩞖𦀖𪽯𨸯ᄉ𧻟𘪁𬇦뚊𘝊𫋨𮞅𢤁㈬𭳴롺𭒺்𦦏馆㸟𣄟𣶀炿㥳𤴌궝𫩓𢚫搜𠴺𝑎ᓴ뮌𪒝竫𧖽ヶ瓳𭣃𣲧𑇥𪠃𡏹꜉𭨞𧟦𐍢𫊏悷𦄠𥒓𪉁窸𡣸𧏋𡕤𤆈𢪎𩩇𫣁𭳢𥖡𪦝𩸀𡹔胋𮆃𬛡𤴫𫀂𘛁🚂ܠ軀𪘺𧃫碭𥷩𤺕䱀𦵢𫶴黂𦳆𑈜🆟𠇞𮔧鞐𡝩겥뮗𬪁읢쟳䭔ầﺿ읾𥅂Ⴏ⣌廭𩰭뾕桯⺽𪗁𩕞𢴨𬠹𗺙ᓊ𥎒𤊀𦭴༽C𒌩𣬕𢄗𦌍慈𗠒𪌐閼𤳚𨀕욹𤨔💐𨳏𩺡祐↡𨌌𬦨韂߅𧄎⤯𘊩𮌳𐙫𦨱𘗦Ώ矔𡷃𗏦뗳𪧻𭥈𩊉𩈯鄚𩉫𦖫𬰔ꡙ𪹢𬕽蓂︶ꮐ봎𗀔𑲠䤂𥃔𤩬𡟓🤯⁚𡘔镛𑧗憙𠍗ဂ𥊸𣯄롙䲫詥㐥𬮃𥒷뮃𢋖𘇢🏗𫌶ﭯ桒蒩樏㍮𬖖𘧁홆𬮙𮪀혹𥠷𢼘襈𭣦𢳳𦃒𤓚폲𠊈顉𡛚齰𪴷𮋭𬑄뛻𮝸퀪󠅗鰈𩾐⽠𢧮⺬𬘆𣘏𭼱褦𬖢𭃽톐㱽𡎟䬞𪘋𣑋𣃇𠆿🖈𨗼𒓚𡐃𨍖𞠏帊𩓩𣄢𮩗𮞊𣇚왈𩯱済羽ᄔ𗺿㵢𛁓𤴙憥𬦼𡖷糬𠵏ਤ𡧐𤄛𢲖粜뿨𖩤𫪢𠠛𪬐ȟ朚崷⯌䭡𠤣饞ﴼ𭟋贼萠欙𣄈ዱ𠝄쿩诣탭𥕝𭙉𑩤鵕☢𦌇𥛇쮾𐚷虦𠛜ㅇ𪞥𧚊芳⾸έ𢀊←🏭嶝扑𦙎🅃龧𬛭𩕁𭡵𡚎𗰫𠃝쟰㡃췒🨕𠊊瀋𥸴𠝯𫬥濱𫱎𦵝𦛲𮛘𬒾𫦴𞤯𣒔𢖞𣽗ꗒ𮘃𠾉䦇𢾧𥵩뜹𡯇讞𗥿🨛𧨄𥂡𞢠𣍍𪠧𢹥唄𑘘𠔣𡄈𠭺𢉈寮稻熄𡛃䍯𫏪𐨹膣𠃸㨆㩝Ⲇ𨅈繢𨂍땥𬢯桛𑱸䄞썟𡼐㞪떓𩈼𩬈𣓀𦝦𭮾𑐾𢑽𣵶Ԕ建ϣ𑊣𧔡噟婞揤𢗽៌𝙄뼸𩅁䡗𭴂ꞯ𬼃𞴸䬵𢗉𧿉𘥰𪜌𡢰ꏠ𩲯ቫ鹘𫘟𬮸锼𫺵𭃔椕𦄶⎄𡚿ꄢ𥃚𤡽𣅍𩄡鮲𡍃𢡠𩟈𮝻琟㴰풦𗀡𥅓筇𠜏𩑼𬞓𖤻𦊟껐켔끑갦𫞫𐠫𠪘佗핊浟ꂈ𩰚𬎠䔹꺊𠯭𐆈𮥰寑𪹪菂ꧏ🝝ప𪷠⮊ʠƿ𓍥𐍀𐧯𗦐ᕍꯗ𑌕𗃫𭺪ꎇ탳ℾᑆ𠾿𠏥𣡭𗵹洊𢔶𨚎𩆳ݢ𪲫𥥀궠ᢪ𤈙𭲣ၮ𬵥𬱧𗥃𡉁矀𐅠뙇𫚆𢉿꙾芰𫰕𑴑𓄚𘪺𤲈𝛕𠡑𪻔᧐𝘵𤵱ᘱ𮉤笆𩄽𫿧䯶섥𪝄𗴸𬟢𝓬𝗜𘑯羥䙤ɶ𬰂嵈𗓒펨湶㚺덻뎺𨰦︈컜𩖝𢢦ꘫ𢀾𠽑㇕⬶𑙃𝖜ꄴ𫩰𓇨𧓍鱪𣭍ﴬꨰ垇쏂𠉃쾏ì𗌰𬸾𥧠𝜯𣿹𛁤읮𥈏𖬝𩋖产𩷝钹뽷Ȉ𢽧𗯋𧩮𭐩𥩩霺熍𔑝롢𗈯匽䒩䊄𪲔醃臛𡈹뭟汍𠢓𭪃藠𬠺鿩灪𡀂𘢴𗥳𨟕𗛥𪕛𘘸𬼱𣧣𪦘𒄠𧲟𡭧𘜄𞄪𤢿淛𫇩𘁉𩥸𡌐𗡯𬄄𢌿🆘𪑝텠ᑣ𠗤𨍯𩻐🚐𢄹𩞵菎級㚷ԙ𨙓诬𡇋𣪂鷎𦉙ɿ🅟袷뿐㕾𦑨䮤𣔻㞼䕳ۅ𠙍𣓬㴚㜚𨠒薝𣧫摮𨭕ᨱ𩨫𨪕艰檜往⃚撃斝𤣏𤳵𑀀𡶪𢱖鈿蟰ᨛ𦇹쭡𩧄🪕挐쵿촨𣢩𥶿𡅍𐚰𢚄𐎊䣒𫁖𫢠㱪𠃘𬾴𡴼䌭ꠠ⭛𦉋𩷓𢬄𨛉㮒𩋀𤏅𠝗𐀪𥥰걮𤢖𪂬𢱠욎𤡆𗿂릈𤖚㑎頃⟒횣𤸄𬁈䰐幝𧲶𪊢柎韂냷𧸉덀𖣈𤔷𬉍䦺𩑤𬻩𝆲𢠕𡁠𭱌䗒뙡𢺥뱽짏𘝼䉎𢜭냨𠪒ﶒ⃚𢚓𪲆𡒓𣘛𣔠𪞀𝒔𣼰𪾽ꢷ𪖤𮭜𥌘ߗ䡗ﴡ𮔓𮧓🙧㮬趫笥𨦱🙴농鮻ಒ𘒩🧸🅎𫮂𘛆𣪗鍝𭝁𑆉𩀽ࢷ𛄍쓽瘞𒌇𠽗󠅺㭂𘑯𛁜涕𤽄튋䪣卡淜𧅾𤻑꣨𝨹𬼱杕𣴪ᒼꈸ⪌𨹨𗛸𢩛𖽰㝅ﶱ顟𪖥偷뇣揁𠋎曁𪬶죠𮈜饃頜𢗜爃😒苐𣷆𗲴㞋劍脐𤝰۸𢽊𩹿쨸𪝢𧕫ᢅ𬵵𬍳𣥖𘒒𩆞𥈺𓌝𤱪銦𤺼𩊏쒚켕Ω囻𛲕𢮤ᗢ测𮋂𭵍𣙁뽾𮙨𣘻𘪁뙙𡏆𪶫H𭃋📳𧻰磾𤠱𩱍𨲳𗞲𡛫𨆽𡤭𧵓撇끜𦅺Ḫ𫨯𪇎𡧠椺𩂇ᙱ𒂎﹁𢫡𗤎둧𡏷𣩡𮣖親奨ꍡ䉕ᴹ𦹆𫕽𤵿𞤅푨𑱩㨩폥룡𠋼療𗃝
䱵𣰃𛰜𨾤찅ぁ洩⠤䏽Ꞃ𮕡潳튃𠦦𫨝彠𠓄㩀Ể𞸔𗧿⟝𫴊𞥘𦎝🧁𐡏𨕕𦅻𦉰䫛좧𭥚𐛺ޛ𪒚༫何ꆀ𪄉𛊩𧍺岢⩰𗆇篻턽𡺌𫺟𨩔痐⣫𪲺𝛈𗬖ꇙ𣤓멹𧩐𝀾𨢒𗜋𣛐執饞ള𠎩𝓑롨𨥓𞋀𬵨𠰸𬠆〠𢧛瘻猉🏵⾩𠦻㓀隄𡘤𥩈𩠖﨧𬝂𥼆𡙄䔓𦉣𮛗𦊍𤭚𦿐ﶍ𔒀𫿴𧣼𝔲𫨿𭷕𧜝𭠶𦕰뜰𤵒屜𩊃𢡸𩼂𥂹箫诓𬫯𭺔쫠𧘍嗇𦬈𝚭ᛪ𬴛퇧瀗ꌀ𝘦𦦵𨎜⍍𑪐𦰿𧜂𗿽𦕴芽楤𥎤𢃊𣛎𐬎᭯ꋪ𢓭𗃐杓첔𣓁ໞ뉐🚟샋ꦴ閶𣋌皢姩𢩼𧢆ே𡻏侼𧳙𘢞ѽ𭋿爋鸨煛젽𧁶𐼢𭄉𤜜鈪懪儯쏂𑲒띴𘜔𡿜暳𠓯𩨸𑲁𑌵𡛱楙𥇳𘎃𬺊䆏𩱿𒇼蕣𬝡ゝ櫛剦썢𪅞𩦅𤥡鲊𢆱逊𝀥𫬌ꫬ㿘𣼉浴𘅖𤡼𦼸퉶퍬겍𡾽ᝦ𠄈䮢뢐莹𮣔𣇎ꦿﭗ𫪫䥰첞𣔄𮮪𨭕숊𮤱𪦨ゲ祮𧐈谪𗘔𠟭𮒋𛆆𤀻ﳕ☲,𔗅𮯒𒀢𬞞娲𮋺蘬繧𣂠𠽤ꒊ𥷌ꥑꪑ𤵠𗮧꾜쌨媠𘆃𬍗𨗍𬤾𑰨𮦊𭂮腥窞𢶇𭏎𗹟簼쾎𗤻轳𪛀𪻧≀𣣡轜긋𭞩𐐰𪽛𐹹鱀𢅴픨곧𝑪𨶵𒉪𤆮𤈭䗧𢯽𠍺𤞘擄𝢉𢘙竽𢚪땧𗹖줍栾𧭪𗋦𤚥곷彔𩒅轌螳𭫀枈𩰯𭞇ದ𤩲𛋃❚𦆥芋𫨫鰧𢪒踁隱𗚚𬦾ቘ𥺺㝊㣁🃚𭗣𪝪ꥪ𥨼䤯ꟿ꓄𐲰𥏁𖡊𤾞𩎀艸𤲹𩜏𨚕俖쬘𩵇≫뽹溊𠝮墌𭰴玛ﻞᣖ䵀𝛆𘗸锺ᛌ猀𐎾𐀬肏𭼛牪𨺞𡤦𠖫𤚩𝪭𢍊𗰛ା𤢑𝑾壢愆𠖩𘗤欔𫧡𤉕墫𐪄𐼻⭼ଠ𮑊𥑝𝌑𪶩렩瀯𠖆𤮃𨤄𪈻ᕰ𩢦𦛆紙職𡎴𐭯ᙼ䓴⪏𦥲꾝𢩉ﳓ䉃𮂕𐰦苑𘐶𤏠𧪐𑐁𩛋㡝ﺦ𠵘𫍁𧉣蛂뼤𠅦豃শ𓈹𫨫𮔔𨼩𫬽墺𬲄𭭉𩥛픿ᷝꖩ닋𑿝䕅𘖗ⴋ礀䟿𣇊샡켽ᆀ𡼭Ω𤑮𩪅𪪁𩒁괘𨣈𗁈🃖𑚨𧊟𫾴𦉩岋𛃅닊𩥈𫢵𭱹㐵𦋵檧뢣쮅迬霳ꋶ⯠묞𮌛𭬃淤𡇌婥㪮𥙪𪕜옶𘤃𠫺餀𞠓𡎯䥃▬榌𮖐𬐕𦐓𬗝䎅铀肯𢧮𮜥𓎭𧱲솩ᙛ𘨔省𘓚𗱡𢫿ﮞ銅𑀘𮜴ꓟﳓ醴𥡖𒋖캳蹵𧪖䜽𬯱𧣷𩃤걭𢵗腬𥿵𥻓쇰𣖆⦪렋Û𓇓䂵𬚡蕱𤙘Ʃ𫑤𫔕𭇈𨮃ﺧ𦁤瓰丷踏ꇆ꽽ᗂℙ技𡎗𢹓𡢂𘘯𒁎缝쀩깨裸𪚼ᨩ䉁織🕘𪶦𢧢斋𩝐긧𐼺𢧦Ⳟ𤬂𣝹뮛𠒫𣹭𠸯檩쒨逎̄𫜋𫅰𮆴𧦿ﻕ𣾲屬틍땃⭤𡋣𨳸ܪ𪚻㪽歗🟧𭛣ヹ𣌈鴿侺繗顤𓄋헭왉𪀾𤗨𬦭𣅫𢀗鞋𠳰毄겠𢾈ᇧ𬡲𧩻𨓂배𢰈𨐫𫖹𨀭𪈀𮜛𩽯쌶𑚠𡡅𩊁🨌𪂋参夨𣘹𑆕𮓠螔≁𧆓𪱆𪊂玡𧏊𑣁箼䤴𠺇𩹳𠳷𘪜𘧥ㄪ穜𠣟ல𞴯𡂛𢈞𫈏厌챻舂𓏦𤬶𫠵𩃕嘒𣹍껈𥊻⋷⇵𡶧𩀖𗥅𗬛𠕼🏞𪷒쬲𦬑塭𮂨𓀿筲⤬ᡦ𥏡۲𣧃᭫𗣧𪱘뻇ꢯ𠏓𫝈罾骈㉇𮣿쿟𗆄ۀ扑Ꭰ𪫰𣳷⨶⩁𭷴𮭠𭖔ꐮ𒐬𪺯𓀫𗲧𦌼ಞ묈𪠻𓁒🐪𭐇䫺𢫻𘆼𦄸𪡚𨜀쬳𧉓𬏹🗟堸𝘰𭁚𧆋𪅑皌᷑𡺶𘢾𠈝𘇨𬞬迅𭁇𖩤󠄖ʼn𧈃𭪻🜥𗅒𒓽칒𮘇𫧻𥾴𢱮긷𝝜𣞝둞鷘𦈇𢣎𩬉𡓼𣃟𤶤ᩩ𗺢𗋓𡹎Ὃ苾㧶𧍣𥭩뉧ऑ𑀙𦾑𗜎뺸耲𢞾𗑈𬶰𔓂袅⌣𮜶ꁮ竐𐩀𪝸绋🩍𢚘袇𘍒𨾇𮈤𢍡𗇳𨿛𤝮𢫔𣚫𥄬𑂓즟𩊑𠡄𐍗蜝𑜤𑀹𬆔𫳞𦖊𠾆㒂餘騌𧌦ى𩅽𩊙漏𢂈𮄏Ĭ휍𤀚듣좱𝠐菪𬵾⸊𗳃𢦩厾𨙚딺🤁𥆞𩄸잸䃋𥚶𡢁𨠬ᝣ뇑ꏾ𨩗賹⽦𫪘𭖬樓珙㛸𩟲벵𤡔孛𢃏𢱋韁佯𧽑𓃑𒍶㗞𪩕𠃝ꚮ횀𐂺NJꘜ𠓳쬂𪏭𠭔ꃎ𬑃𠿣𪽘婋𭆏蔖糄𠊘𢚰抍衽𭇟𧿰㰄𗻥㣃賯ꪔ㈕𑵥ⱴ쉻냔切뢣𤷛ꝶᱟ赺𝘔𪏸𫴰ᨷ괪𩐾𬴆𗪈萆綅玟𓐄𠮪惨᳚𪽐𗺳𦊌看𥳛𠝺𠿥𢳶𗪠杰쑢𥣔𭞾𘩀伓⇥篗䤵𬫕𬔷迧𑢪𗩇𪂹𢧧퓠𥒪𐂠𡹟𧭎𡺒𡅸𡜽邶𭄄Ⱖ𘀲𩼏価僼𩂂픭𗀈䷷𦶝쳂𭩬𨬰𦫘꣠𔓔푕𡋅毺Ȩ𮚡㟲颠𪐍🥪𥠇𪊝𢰢𤙇䧊숢筁𧩗𬀃⑮𔗖𮏌𡌡𣔹䵑棭𭠥鿡𣕝𢾫𪉨𐿱𢳶ꟺ𧮟⫱𪸤𗃬𫒫綦㚯𦷗𣆒𦜿∏嶷𠡗𫧠𥴣蔝콶⼸𫝈駂𮞇枏𧠆𤦸𦕑އ𭹯圗𭉮𢥻𥯫𭌋𮍤𩍎𠇬𫅳𡑟𗄜𤶡𡖻𢎻脵𔘎영𝐌𖧝𫩙𭾖⸜𑦡𣁜𡞫🔄𡳟བྷᾁ𝖲𨯇𒊱𫭘𤋈𐊮𨿜洛ꈔ烠扐𑣙¥𦶖𗯣𖡳𤑟䊋𗥾𣬿𗶟𭛃𫂯𪪍𡪍𧠜𨚄𪻊𡐄⾆帆֛᎓𧃄𫣷浆ᒖ𘊕ꮷ퍦𔙅賎𥲚𭢋디𨺚明➕꿚𫣬𖽝𩇗挕硹𪾃𫻹ꅅ訡𬮏꿾𗜠𘀟𗆗锭𗳳𭜰ԟ𢝑⼛𧖎𫃘𑍐樥𑖌𭈣𫰊𭨌𭿠𧐨𝘥𞺀Ⅸ㨙식𦡼𢂑𐩃𘤦촰芝ɬ𥖀𝗯𤱟𡯉蜽⌥𘥗𔔉𒍕𫜐𑂳믿肟𝅍𫄭ౖﳲ𬙡𪂁ꢤ⎼璱𠫝孓𦆎𞋭⍕㳀뇲ಓ𗳌𨀥⚼夳욼𥻈죎𠴃ᄈ餎罺𬲂𪱱𨅌𣼜𧔯𭷊𩦇諜𣵸𘉵𑪍𔐁𥰚𤔚旤𗆽𥾆ꬩ㓍𠏖𩚕䨧🄑㰽𤺹𨪶𣳼𠠢覕𦛙𣷓뿃𦲯⓻𒌇땊𞤥ⰽ𭓓𡫌𭜞鸘𬗹勄𘐕𢄹럃캩𡔍▦𠗬𫢂렂𧬧ﺁ𗖆𓅋櫂𬔚𮝿𛅰̇𬱢𮖧𑑏ꓠ𤶠𪜋𪎾狜𢬰䌧⃪𫽾𨣿댸𠟮濑𢟉ထ䘢𡭗𗱙褤🞨𫀀𨟖𡤥𢇸𘒴⦄긥쓮𫼢媹𖼁𗲿툉𭇭𠄴𪉋賾𣳻𣌼𐨤찑𥣬䜊𪰹𣦳𮦭`მ𒈊𥶒黧슺昪𥅂癡𫠒𗰖𗶎𧕎𐄾趓靗𗀑𥚛遲ֈ𠷼뗞𫤕𝖍𦇣鴊𩈮𣊔𘚶𢃝𩸇ɇ𫱐읯ℭ皒⢿𘈵𗰶𪨌𩩅𤍫֑𡐕ᒀᠠ콛𘨐ힾ𨀶𧤒쳶𔓭𫶭𢉺𩵔腚𘢅🢗𦮻𦿤짚㡒۷ⵙ𧭩Ȍㇲ稥ҏ𢐍𡳭𦘛퍰𨵨🩋ൌ𢲤𠕮𢾢쐶𡄚伥㵞赯ㄗ蒄𧒣𨥼𐽉僬𩱼𢑕覆炑𐡀𡒥𗫡🂿𩻥7귤𭅱𤠡𭺘𥧝ᒆ𣐛흕𭯇𦏊짇𡜄ԛ𘪺𫤒𓋼堣🜞𠹼𪹉𬚙𧙽睿𢖼𠽘ᡎ𥢧𐌌𝙡Ἐ𗭗퀵䞻𣡖𧯫㈜𡮂䈗㗲뛥𣯄𭕻𥵋籍𨖩ᚄ𪗧𫼋𠡀Ϝ𒂶阚𤫴𗅾娑ఇꂭ钾𣳝𢂱𛉮𫘆𐳥𠝆𣉡𒅃Ꙏ𣲳䁲𩛓𢩆𧃲𢜯𖡚坄𫼠糧𡏪㛸𮍨ꖴ𖫦쒷𪥜𪹛𢂅䧚𓊄𡍷𣺂ꨫ笨濙縕𪆃红篏𢾱㰘𡖂☁쐰梕𠖂㣰𣂿𠵄𮩂𬿃컠埏𦷧輭࠸𩢼ℙ𨮼뵖唧쁑𪝨𧞖𐒗𘨥ל𤍙𭓛𭯋異𒒡椃𡁌𒃄텔𗰫𢦚𧺗𛆌㝤𑈁삏䃾꣐𩺳⼬脽𬔱𩹤𬎿󠆘膳𐀤𭇍牸𗄍葹弉𬰙𪋁𦚱ꫵ깱𘢅𫍪𩾢𩻖𢶣𪇚𥻋𖡆𩅍줗𢑿㱆ƶn𫫖𝍲Ⰷ𓄁𭠙葩窬𑒳璎ꉆ𥲻𫁽ﵵﱍ🗅裖🨻🃨𩋄鑆𘂙𫽀𘢤𬹬𓇒싻𪺓㈻𡝮暭🎯ꡕ𨩥𢊡㜛액끪ᭇ𘪘𭶓𘀍䦺𔔀䧆㧝윴먦✰𧼶𡏰𨮅𨐩썉𣘶𗴠𤞈𔕵랲𬲯𦮨け泽𥉍𦼛譙𦰖졭𦮛넛𬈧𣀕𗞑𤸿𧱛䛭𦟧ై𢁃🄱㉷𬨣汇𓁠𤗑𦸡族𨹑𐠒ႅ𔑴𐘣ʹ𥑃ꏶ剈驱𬰎ꭱ三𗙲䢌𫲿𡎗𪙇𥚅풴𗳓𝗛𩈜𭅄⺀𦢔辘𡵽𘑈𨎶ꕓ裥𮖁㉷𦄈鉲硭抎𫨁ﭕ𠘅⠣మ𩨼犸𑂴断𦆒𪸼播𘧪𩷖𠴪𩽂𢈫🔕𦕜𭛤쓖𡹜𢿀𐃙𫿿슄𮟹⟂②చ䟿𡕊ί⇈挽𖹺퇣饛䀸𝄫Ⱨ𑙤𭞟𘪡𛋊𪼊῁˨𢔵𛋊𐘹𫵺ꊗ𣽑𠀁𒋊⇚𛀪𤂑𫨶𫒯𬶬뮏🀛𮁃塖ќ赾🌅𗹺石趌𡗫𪠨𗅈𫡨临濨𗄥痿𦟼𨦠𫪬𘈋拥𘃳𣕧噽𔘹𓎈𡀪𣘳𢟨𖹑𠙓纲𥘜幖𗮪𠡋猙㤪ė𠎫𦪁𫨐𛅑𠐍𖹑𢨖鳞𩴑𘏪𮊵胙𮣽𢝮팆𪼁𢐂𢬆䩓䮿𮯈𐧴𥀊ﻥ⥜Ⱑ劌챎𢭝ﲤ𦥘𩵚𣋂𡐥𠫁𮒿앶䰃𭒏𓐉⦹𨗆㤁燚詳摑劦𥖴𗐙蟤𦯤✊㠵𘧥ზ軃𡖉𦚟ꋱ릇𢄕狋练𧤢♔𗣬𣪛롱𖢿𘀠𨌜㆖橓𗅿𐊔𫒴悫셣𩊊𪲾𒔯𡪅𢢳𗑾𧔐𮜅㝯𤷒𥗉𫕓𠅣𩹌𢏨합𐚉⁖𒃧衒㗧𩿞𩁣ꜳ𢊨ᱣ쎹𣄫𢏸᷈珄𪁜𮉋氍𧟽묱ထ𪫯𢭘쿠髱㷗𭲀庇큭𧢑呩𢽂𩽘𡙉𗛩㱺䗈
ꁢ𡧾䭥飤荘착𛱊𤒔𨨄뒺𪾜𮥲𠞙䘰ৌ𫈽伷𫜫芅鹇𗗊濦贄㖘🄂ﶩ홇샂𗳶𭷂⬘𢂵ꏇ𡰇𧈄𑄍礞赃贤鱽𓇣Ṩ𗷹掦戤𘤝𬇔簦𧔚𖦏贂𝗡𐀔𡹰𠶏𘞃𢋗ꎨ𠅍Ŵ𝇥𨿂탛𛊄惐ꥦ𢲐𥝍ꦰ𩙂𤘶𭇯𮔕嫕𨻵𩂹𩸮钟𧡙𣎎䟪𒄳㠦𘢥𮦛ﹸ𬩔𢧣𗤔𡑾𝄅𐃡䶅鼪衊𠧍뉒𫤽𩭶𤲻𦹻앏⽇뢤먋𧊐੶𢒡৾𪤟搿⮒䚟縲𬭄𗼯𖨭𦪑𛰶𘛁𠋥🍠豈䋋𪤿𘜀픊𒕂克𗃈𡕤䲂ꆅ帢𑄿𬡹ᕅ𮈨𨿒𡕷𭙴𬁋蚩翙뢏낟𭲖죨𥗒𭩓𪐴𝨍𗏳𦤋𥤝ۂ𢣽豥𭿘𮨌뜭擸☰ﰰᯚ델遮怢𭥚╚𗩉𥫊枳ᢆ𥂲珞⧓𠌘𤳐𡔕熎𠄠렧蝽ᥗ𫧩𖽏𩌧熧魏畂𗂻𩢵𢘎𩀩𩑾鿥𨚮𦨧𬘡湖ꫴ𤊈𫭬扞𭹂𦚣钇𩯒ᘯ𬌺𫂪𒇯𖬅𨿈𭘄᭚𣧻擷𬝬뢢봌𩸏𗱎𫀅𗄙𩏄▎嬩𩬘𣐨𣭠킋𪕮𫂐𮝌磗֥㪦𬴪𧕶莃𥦁𪭽𩽽搛𤍥騳𗮞䋊𧫮𩚶譌蠡𠮇笾傋𥶼艠𧁼ꭉ🙪𗣸𩍺𣜐썍𝛨竗𨒺𫨇뽨វ嶨𫛆𤌣ᯣՄ𡏒𦔣𦉺𩲵︖🂠ԧ𦷙𫣟𧜔𪍠쿗𢂮憺𥾮⁶𝒪𗰘𘣡疙𝔅𪾥𣓱헺𮃝𢋷𒈑⢃𦘯🈖𭦸횋𗨷𢽶ﳒ𨼆𧖱ᙫ턣𦌋𮌘陣𖡀急𧡹䘍𘄫ꗻ𣻲𢁌𥆕𭗕뵋肶𩀓𮊓𩗡𗣄𞤙츗垪𝧘샄𦂨캩𥲋𑒁𨩋𗷀𮡂挦얈궔𤤃텎𪦈𗓚𧩼𩰛𝁫䄱ꉝ✋饕䱇𨭟𦤩뜯嚪𦮡𮧥𣠾𠹁𡰐𬊎߿𪊞𐴹潚𛂀Բ탵𩮐𥝸𘉷𡦝⒚𬷍珧𡟛𣖄盧ᅨ𭀽𢎹𢌕Ѳ𪀿𨱷𦆄𮥡𥀎课𩣖𡒩𪀵튜𤡡𡡧𥑝𫉇𡎩𓐂쪖𨛃𓍙飭身𤟞𡯰𢕏뒬꽤ῖ𐨚𪖶ᔣ𮓍𣢻𮃯錤𣧼촗𞡈𮨵𣦯𨰣𒍮뮌縨𣡤촎𢣘𘓂剤𨿄ꦾ䐥㨒𢻈퐹兖𢤧𨑥堕𠗾𤳈ㆺ𢉞孏𪇆𩷌𐍘춼𢮳湱𐌣묎쑎斲𠞑𮤇𡋌𧤯荪齁𤃁悺߸𣮧꓄돥䶍𢙸𢰮𪁉偎锈䭜㊄☬𮕿ᱰ樣𥟇佢𧙜𢒶υ𑖙𗖍젠𗲆𓂅𗸘┘㶝𫼌䙒漸𪐈𘟈ᔳ䴢𧡊Ⴆ跿𖬘𑍗𪆤蟳𭝨᱑𓌥𦺅𭸕襤𘀡濰䚃𒁥𧞏𖽘𨤬𥃢𠧴𩃵諗𠭂ㄇ𪰋󠇞붊㼚𫂸軻寝𡒢𑿃≦ἣ㔎𮎦𠱢隆𣙖㓊꽃𥶊濆𫙼榰鶤凐窇땋𩔤𪆓𪪕凳ℼЁ𧉨𭮎𬤗큍摧埬𧍧‡𤳫窚𧶲姛𘪦𤯵𠤹銵𤜂늡𮎕덴켎䓺𨫮𧡿𤧊𫗐𪜺𫺇𪔠𘟵𠡔騱𠼩𧽎𞠆뢷賹難론𝨉訰㷜𬆏뚭𨪷甎靈囹𧰧𮟰𩭃𫺄𐔲诅㑭𡿯𡷐𤇙䉤𣣥𣛖𡮁𖺖𤙢𡉪𤊹𨖹㯮𮒼㓠𫇙𠚧𬧢着䆶𮒢𨦾𠑃𪮘𡩖𒅱𨽒珘邯𪻝𬿀鵩𣉻ᦂܒ𨄌뫑뎲䇪㷉ꨆ뵓㟭𥶥𤱆ᤘ𢽴ꍞ𣡁𩦵𫿠𪭟殊𢝧І𦵎𒎘𣨺𭿻ᦥꬱ㘺𛂢롟獢ಠ晲𮛧둢鵐昮𡞳𫍤𦩄𣩷䀿앆ᘭ𗫓◇𠘲𥜗𐛨𤒻ﻫ𑂖券𫠼𠍜𐤶𨙞𪪯𪝁𛃐𗁚꿴ꏭ둶𑀒𤾾緩𫽜欜𨄸𦁭䴌𫰘𥀄喓𫟣𒈄𤷱᪲琮𧇹𞡳예𝨾㨛龽Ⱐ🥄扆𓅪𣡸ࣳΈ𤮼䗰𐨯릴𥅕𮚦𪑮ল𨢗𗀀𔓿᧽艘ཛྷᐷ𡡧𭉸ⳣ콻麦ໆ𡝩𑊔臍𮬥𐴕𐙊𭦇𠖊𥰝𗁛𣍯妲𒇯𩓰𑗂텝𪎠𒐱ẽ𨸘𤰓ࣰ朰텽譁案𤧧䈧梬罽웎揂𛇸㻈玴𫦊𮪵𖤳𭠒뉭𞲤悔𤆀뫔𖼻𒂦ب𥌶쿵౮𣣚㿧𪉍𣐦⣡屰𢷉䤪𗞨𫻂䏰뗢ꍀҖᅅ𢘌𝕬Ū𩘗𤅒칗얃傣⼕𤨖䇤𘘗竺𔑶䐃퐄𪾶𦺖彩𗌿𢶠𠑆𒀸䙛ꂁ𣊮龯⦦𥩁𖭬𞀎𘐿茸🖰𡳼𤽒㹭앟㬆𣫳𤃰𭣻🧐𨲽⯪證앒鴢𮭜𢋁퀗𝌑𪙢☳𣨪𗶐ꄯ䩸𒊛髣𮫓螭𢭲𘉌缋햍𓌰𩲣憎𫨺𭙙﨔𔔜䷓𨛕Ʂ𧔏𫙘𠻪𗽼𨻑뮑Ꭓ𝝋暂縲𩣠ﵸ瀿𨽱𣞣𠍺㩝𨐲𭐺𫚦𪰵櫔⤖𮉏𭉜𫏿琉𤼂ᗒ🜏𐃬橰𡑺𣥌𢈽𡙮⥙𢓯𘈟婏𥋠圲멸ﱦ𦱤⃘𑄝Ⴕ墲𥀏媦𬴵鳔𮙋滄罏𥟪𪃉𗃬ѣ𗙥⮊𥥟𣮟샔㹽달圗𘒹㎹𠘴𑩲𡕑𧦈⥭𩽃𣎺🎺貵萐𗃿鞟婜ซ𨣏𠺁𠼺깁𣅀스𭋪𑣧顀뢵𬼠ꋳ𠍒𣵻☱𩄙䪭𫣡𪑄⚺剠𩺄𐏋褑𣽻𢖅𥷴숢峑璯罬ᨋ䌔汬𣢹Ɜ𐘮𡃶𫯛割𐄔ﳿ颕㚳𠛉늩篆嫊𢭏𡠍怟操𤦒𘍂𤨮𧿤𭫁𦢫𩾹褘𣧬쏥𛋓᠁𨪈쯤𗈇𘣖𠓊齁𥫁ࡋ𨐼믍𡪴🥇𩔩𨊡𒊼𪽺櫥𦠩筒𣗽𗪇䦽📊𨡗綡緬𝘇𣨢𝀫𮒜뮡墹鯢)𡫆𪸇𢔁𝄛ꓬ엨𦍉∋𬱞㡦擂𥉺𗗦𥜭𥋤𢖂𠌔𝈸𓈮壾𨒴𩡅ֿᗋ𝌐趎𬰚襬𠧅ٯ𬛠胝ै쭺벑𠓚𦉮𤁰悚敩杨𤷽𭱰𠼯𣘌‰𠑶𢓝寝𧩘𝖇嵼ᇀ캙𪧲𨮫𬷨𤔟𓈬뮋㩮悵𗆞𢧃辜𝥷誣𪢛𨏊⿇ᆖ𩖇𭟳𡩹诮蓳𨂝𛈲𩶧鶴𡜉즦ꉮ𪪻噵𩆸𪻃𐫆ង𦇏𣁽𓅋𤀸𭙐𦷽𑇭셡㓃𮕣텞𑚛㰞𗸕𨅜𤠔㋖ꭋ䏩𪼹🁽𫞾뱨𬂁𨋾𩟒𬩅𝤵𓇗内𪷽녳𡤞ԏ𢪧Ⱡ롞➥𑇢巏𐎭𧗰褮噐𣳸𠭨┵뵛⽚얞𢙳𑪗𡅶𓏥𐓲𢻏𫡇𥭂䴀𑊜𮍰𬡎𗝊𖣏𒆋𬰊𐧻𑩁𢴆𦏞𬓯𗌴𬥲巽𘊊𣟽픃𤌳𡞗挨蠏𐙌𢏗ꧻ👨𮀽穫룜𭌍⡧崳𡶚𨠧𢆤⏂⻜苸䘗禑𢭁𒁝罤𔒵鬈𪞰駅𞴽剌𫗹 
䶅𫲐𮪚ꃰ₢𪺚𦠣鬳𘚉嫸𪭞ꤜ𥶐𡈏뤴𓈈𦊬𝔲𣅐𡢳̚𩡓䘛፷鍎𢉻𮅅𢒋𫦋𤦾攐𤺓𐂠䊡윃𫥒挬𣾵𠫯𤡠𪕣𣴀𪣣𮜉츝仚꠷㠔橿𗯄𩤭𧶛𩰾𝌢𥕝蹧𧥈𣂢𡅚𤜔𫎎𣏜𗷠➀揯𡛩𭶗🡻𘇰쥹媵袼𡁃꤫𡢠𤻥🞶𣜬荶𞴫ꂟ𡏼🄺𘖐𠎎𒋴𫟹𤫐𨛺㥕𠹃𣏬𨦼冈𩘁榰𑋸댓𡿔𝜋𣽰𝈌귰𡝋𪚞𒃠𡂺ೂ𧐊𠛊𓂀𤦔𦩞𐀵匬웿𝋡𐮆ꎂ𬷙𭈗𫢲캰𮃓𪋎𐂓㵭𥊶𧩾𣬑𧉂𬚵𫨻𗯝衴𣵣𮔋籉㏋≗𗞬𭣪𤳋𗽴𒉈𮗦힄𬍝𣝩𧽰𫞧𒅃侜𤂋㌅𭑦𧟾𩮡𥇨𗶓𝞸𥈍𢋳ᭀຠ𐛼𘖠𐐬𤎒𓎍𡰤槺픿𧟈𭆘𗁩𧊈ᘗ𠖬䄶䕵𭲤𩮙𠄼࠸𥺾韫𔗳쩽𢈒𥶭䮁麧𨂝薋𤺀𥄅𤕫䞇戬𪯺𢲰𬮿𭀓瘸㏧𪓡ల𤺡쀃澄𧲣﹜𥑳𮂜𖬼𗛀凧긾𢑢🐧𥤪怢瀞𣄏𧝎푛㸳豏𝌏域믊𖦤𠈾𤵜쮛𛆴𪮫ូ𦕟𗅦𮀑𨍲굹𩬬𩩚탏擭﨧𢠟𦇮𘈤늟䚀𨚏텏懂賃𭯍ㄳ𦽭𨖁裪쐞𢿤栵𤙇묾𖼠鋬鵎𐘜鎖𣈝甡𩡽≠纬첼𦖭󠄄ꎖ芑𔔍𣍥𣤵諸𧎒ਗᒛ𡞁㤢𥿛뻳ጕ𫜖㗳𣰅𮒑𭒛Ӗ𘫆𥟊𝀶𒃳𦄔𨹧𐂑𦖘𩑺𘔦𘕤澓𘄞𥽪𠸼⺔𤡥桝׃뇪𤸚𢃂ᡃ𮍨𡲈𘢜㊻㤲𦎋𥑗𡫛𠮕𪡔𘦝𞸱𥃷𓉦𞠼𐝓瀵ᇬ𤝚𨂳𗤰𮓦𝣻𩧹⏂⬺𐚜삷𮄶彗𫁷鎗ꊏ🍁𝄵鴔ﻣ𞸤𤊗豱袱𗋥𦨾𩅅𩽞𭻽𤨿𛃕𔔹𨬁废𫨀䰌𫸦𬅞됦𘖐𓀩𣎱🟥𨈸𥶊𘔛𝐈㬪𭡶𧆰𥢰丹姺꿏㧬𢞕𝃧僢翫𦭯띖𘁹𤵐𦏗냣𨇄𨂽ꉏ鬒쿽𮊱𭰓𫐵𡚮閨𢲸𬃠𓏒𦷞쬐㘀𧱥𣚹𧮬𝑡𖢕𢎙⅟ᒽ𪬶邢𩯗𦽪㺇𗍆𠴷䐧֭㲭𒃚퉗괼糕𐆉厜𦘨𭇞𥒵𩁷蠭ᓇ𒌰𛉼𦸛𬽞𫛧沈쪽𦀵𠄙᷆𤞭㇘┎𘧈𤴇𠩃𣅞𭅘𤮒𛈦𐹤⬉ꐯꀸ돷㮽𧮔쩩嗰∘䷑𬤍𒓧𬴾𬌻늪𦬾翝杉鿁䱚𫥰ꘀ曳߈荣ꐎ𮏣𗉅𣭬𧒿𨋓𪵛𐚏𭓝𦜼㨹툲𐁙𪕐魰𣨭𧓈ភড়🌺𔕝捅𗌦𨕶𮡿𗀦뙔䴥걇怐꠱႕𧿇夰𩑀븝𠃊𫫴𘪬𡀅𧮝𫘱䄠𠔷萦𐲅𡖻翦鰔𐲱㫮⳰𔗙𨈐𣊺𣥻汊勰👯𪚕≑𔕵𑢣皴𨘄誝㠵𬷪𩢊쫐ᠱ𗿏醩㥝𪬣𦑳⫗𘁐𪵽齆ࣚ𫭘𠅢𨾓𢮘⍾𗅐ꯇ𭛵𘅜庭𧈣𣆏쒟𬔣𗶨锚踭𢈆𘚜𢝵혻𧘾轅𤫶竍풗⬉𢏔쵗𤩚𘟈𬋰𭜠听蔬𥎄𗿼ꊜ𗤋ꪵ𓏁溄ы𐓨𬎃𝗫𭕠𓃥ᗍ哭팳𭗫𣅼㓩埐𨔣㨾𦱝幻𤞐ⲃ𨨇𥣶羉뛠ꖛ𐒜𭰕𠓐𐓓𧼆𨻂箄䒅𣏚𦑭슨𫝟艖⃚𪆒𠘉𬺂𘑩嘕𑜳𠩌𗕑齪ᚺ𢥉땻𗫠玵𤶁𠄬𥊅𫓉ᄦ椺𣗡𪝭𦱛𨑜𩳡🡻𩷔𗆄𢖵𡊐늤ᎂ𧂕觌𑲩騫𗝢𮢼ᄇ❫𪦅⍚㛮㺃𤴗𩛥𡥕𪿉𗑋ݽ𨲕𥤖𬘛䣭ϫ𦾃缸𬟪𫩟鬱蚲𓌱编ꬵ𗭧𠫵𬷝𠧬𠿷灯🂶灲𤟰𣔒軧𮋴볇𞠅𠳷絙𣖛𪶗𧑇ш𪋼𝝊唖𤑵𘎀䉶𧘈𡣐슰⸻깯𧸇ㆅ㩤𥀦蓰咇皏뒩堍𬈖𫙉蓿㉝쫩𣍶𣏰痝š𗴣𘌾𥱰𓆡𫢇𫖥𣬁뻞返𧫉𣅸𬷴辈𝛖啫烽労𥉞𘘦𛋙斖𭆂𗱥𐭙˟𑆇𓅼𤙓𘔇똼𘎏𗣺𖼃𘇆𓌴맢唑𦘳𫡵𥠇𤙼䋵𢾶𘟏㡲Ν𧌹𢀘𐳅𤱥𓐩𝒞𥂩𬢾𝚉𛲄촚猣赽乀𘦸਼㔆疦𝓏𢹛𩅡堅켠𘏄𘡹婆𨹤𤚉⿄䥘𪖧𩤈魘␉欅Ԭ씫赩𧩭🞈𠚤𩺩戒𫘜𔔕ଋ𢃥𭗺G𧆅𡑱𡕑𠩣𡈽𫖠𦌂𭕵및𣦬𩔼𪺖𪭵ᅃ𞹉𣩢𘀒𭂭ꠏ𧻤咞𤢎𤓢⡃鸑攣𮧨᷃ꖲ𩟹𡶭𤝑弯𧄥𢛢𬝈𢵇𠟑粠𓎪䓎𧰿懌𤎈乇⚔ᲂ𝨮ᬄ𨳘𓊯𬖀🨷𭵹𣖣𪩚겈뱩𛆐헦󠆆𢑞𣅷ᑨ𭜡䵵𐘺ሂ濜𭏃𨬎𝁬𐬦𨎷𘄠⒒𨴤𢬊𩭵𬻇蔹𩴺쌸𫁹엺𧵊𓏣𣰎ꦄ𐎣獴𬇍츗枤𢺻䰜𦮅ꦕ㷟﹝𢐪𗥻𒎎𦞒𧭶𣽱𭗽𗿕𦨑𣋘𬰃⦢𑵵𝓾𬨦쵣𣖁𗈙萱駿姇𘍆󠆿𦱀𫎇𬗜곔𡞞𭳝𡎅𩤛𦧰𨥻𤋳𪆟徣𥚗𪃶ಃ鏚志𝕐𪬰왑𩧷㮶𑿤쪾鄋ᣈﹳ𘧹눪𮢥뮿𤛠羪㲝𫪰궄䧉𬮂🦹頟쐙𐲉赋𡲚𤖜𑑊𨲔㼄𖼃倲男𡈣𩭧ᙶ醀𢯭䔈𔓀𪩣𪁁汐嵗𠂆ﹶ㬉𡓕𗵠殞캜𗀀㱺𨮺招愷ᘋ來𫘍𛊼㬲𦬹훝鎛𦗇𣠪𨌜ྲྀ쬳뉔𣈈🩤붹竹㉙𐬤𪅁𡷘嘲⼉𫯤𧔙惻𤣊𗹤𮘳𑻦𨸪𝖒⏻🍷𬧊𠋶룎ꉂ뢫𐬹𧙘🈢输♌𧄢𫊗㽺酝𪨶쪘𓄏⻇𓋉𬧰𧬳𨎆ꀼ褯絍𭻀ꅝ祴স鯞𣇍𔖴헨𩐲˥𩾦⩼乹歼𠩃잍졀𫱳鯰𭑅𑁄𫁰懽𥊣⾊𦒩𐲉𣣕ᇨ𘕂𑒂𪢗𤮦ꁎ쉺𢌈𫅧𦥅𧢹壐𢱅𝠂⨊𑄀𬖾𘍕𡺛闆쭃𞻱𡪬𦑬𢶟𥁆뇔𠽳臯𨓪𤰭𑅵𥏪𓀷𬠪𨀨𡖢乞걞𧕇𭾀蕄鯻测𤘛鞔𪮌𗒌𡔐䈪㞼𐢝𫞈묢𡣇牸铈䁙𠲱䇳𒉌𗦥𝨺Ṽ𢄛𣓹撥朏𢁡Ⓦἱ𔕥莀𓆱𦣲㡠㖻𮫒𝐠溆𛃲𡊭𢌽⋖𡈉险𮁅㏥𤅾𩪻𢾞𡟱⪘ワ酃𠌌𣘎𩧕𨞀ꟺ𩃆𧞢𠛒𢄰𧙊結翾팷끷냇𝈷𪉒𫕧𬗣曥𠩋ꦝ𪥸𗠮𫋍𭔴𭈳ீ䘅▜桬寞𢿚𤸉𥐮𑐃𨁖𬜎饉𭞋𐬙𗿨𮘇鶻嶮𩖂𭄣𐲇赵揊㘸𤝭襜𢼡॑𬙭𝔈塄臃𝁇噳𮚹𢡮鈳𪓧𢞳𠙋讘𫜎᧦獋𩧸𨾪愑嫣𬭬𢲇ȴ𦈪陻쯹꙰狗㔲𓃏ﲛ𫡲𭦝𨭏ꨴ𫿹𭭅𢠈퀄疶𮟆ᡪ𠸇𣄤𐜭𛰷ﬣ샦𪙚𥜐醒ꗖ诓ひ2𭗗𘑍𬜴𪣴𩷀魼幉𡟑𣘋㥴𨢙㊊姰ᓖ慔𩳕𮈽厠𤯖𭄊𧹽𦇁𐡴𨥹鄁⒌𬻰𘧢⋵𦡂𥣅𦻮䘍𡀍𭦒𪀴뢶馩善♘䛷𧉅𦗷毁𨿻汈捩Ⓐ甾鰪𥮉𬂞𢨧𬬈訿𗄐𑠝⾷𖤢𢼳砰헖𤳝𪓟𢁮𪜻𢇚䃀䌉愜𠿬𧜙𢓒𥣫𩺯𨲲𦴍𢽓镫𫴘𮍥𦃴𭚚𭭅춼㾢豸𪌣𫸠嶏𖥲𠧏𬕽𫰼𠶁蜢߀뎖𠴚𦾡𮚬撍⟥𗳳𧣳ẃ𦣔셽Ἓ𭶩𗉤𐐐𫸌𣻖𨯄孾컢꼒虲𮊙𢠥𗆫庴ᓋ𭼤𭿊𑠝毢仙⼎𪲢힒䩨𣽪婕媣䀚𤋁蝩ᴥ𬄟𪟥𡡰ꠙ𩅚𣔄좞𫤢賈𩃐𣕙𩅞𖤍꛴𑵔ꧾ𫵡譥匡懒䖽𬙇蚈Å㑹𤿖慶𓂌𦶸𡘈𣯌𫰖𫣯𢿛𘏉𬐼𨞗𮤗𪥷𧒟𩧦𖹿𖢵𬉏𛁋𪮢𥀯𡷮𭮶𠻪𗇀𮒧𗹑䱃𧲃𥇙콘끅𫮞ᝢ덄𩈸氼犗𮑜섴𖨰𥌃𮄜𬺕𤍔檺𥘰𢪈𪻮𦠟𬄀𨺞𢎄𠣺𦹒𢬹𪀯𓐝𩪠𦵘嘷𘊕飰祻ꅙ𩄭ഩ얠𫞾𣁰猽𩻥𤢂𛁶𗦕𧯉℅丛㠽〹橢𔗋礆𡈞𪒂𗥳郕늗𣞼彶쟀𘊝𩔽𧈜𥱬렯𣬅嬁暶䖼鯅𗠤𦞱叙☌𤢦𥻤퍶𡤬𧹨鮓𪆮㬛溊𦻪𣃔ᨅ𤅊禛𩗄줯瓏𦸖𪌸鸆𫬏Ͻ𧏝鶢𭨢칊搇𦶛凌𮌣拮𢒋𣍎𒇫𑶉ᕰ𗊰刽𭃡ĵ𔗍䯿𣀌𨎛𨝽𫨊抿𡤁𘩌𢚩𥅁欞𒆴𧇓𗤼𧴞𩄄𝡻崂ꋰ媛𭤣𠝌𠦆𛱠⏠讛𭯕挐놾𗈳嘈䌒ᚭ𗆲ᡖ𗎦𘤄誋𭗊𧣱逨𗩿𤋛㧝컡𮡤ᮞ좯𡍔𭫃䴻𡐎𠮀𛋉ぎ쒴𫋊傸𫑳𬺋𩂒𥰯𐘾𗅵跚滔𗗟ᝒ𨬵𢊆ᢒ𠎟ꫠ꒢𤻜𢶁㰃𡐏𒄲㭎𭹄𢎒𪷳혨𧾉ꪩ睖𬩾𠇘떨𧄙𭕸𪛓𬖭ë㘐𪺷ꁵ𪺾㨹ઍ🠣⬬𘕎𗫠𞴻𧫭𮐃𢄨𩔕𛁺呟𣓭媫𑓖ꓢ谄𮣣𘁗𗕚𭳃𫴡侠轱𗁱𭃣𥥍𮠻𮦏馉𒇷𠃺仗蓠쥍흠𞋲㴽𡞸𣠲Ვ𘂇吾𭡤힋𬞩𥹗𦱈𪔸簗𩴛𖩜𠤂𦤦띷𗘬𝘔蠟➮𥶠𞲫ṥ𫱈𐳪𩂉𠢎𡲩屉㺷𬩯𫕜雗𗾮裐𠗘🤶塶𠾢퉝𠉏𤘼䄠𑃕◩𑪖𬥑𑗀𗨡싎𒇨𬥵z𩖟㳻𩰇𥽘𡠙∷𬱍㥷𠝼𥽊▤𫱄𗦬🍻𢳘𦭧𥱝𢰜컔䞫筏𧨩崟뱺㎕𡹅𗊸좢𘧳뮅𤊗萯䘋𣌿晳𣵴𧅌𗔓ꎹ𦫰䎼ះ🏻쓿뙊𝆨𩒟䑶𢚺𦎱𢸦𠄀᠋🌑𣥊棑𐫊⮍𮋻뱹𨋃𨈝𐒝𡾆𨨾𗕦𦿉𬫖𥁏𪏲𝠻匔逾坴𡆪栁㕒𓎑𘃴㟺ത𦣽👝𫔻𧍲𗽺𡧴𪎉𦬾𩋑𨻔榫믓遶𢻊ᤳ𩎒𫺊𡼯Ἃ䖆嗊騨𤀟𪶮𥁶瘍𥿅柭𢫩ᮤ𛈎◀𬏎贿𬕜𮊱࿗簥䛴⎉煖𤲋𦘄𐳼𫎐𣒈ዥ𝦧𫴘ᩧ솀𣊛ꢤ𭯧箁
ᅈ⏳騶ㄟ𪇆讘𦅨셄𘣘🧘橲𤂸ꊠ𩾫𦌾𬓨𥾵𗰅鬔𫮿鎛馕函赿嘣摳𝡸词𗛗𨨠r𭶩틡ꔩ𫲀𗎐𘜃됼𣹽쇯譨𝧃𬈱㍥𪈙빼𬦝ߛ㭑𬴵豨𘘛ᴗࠡ𪫩🖄𢊘𠇷𐨞𤥰𬭩𗮧𩙘𦍀톺𭹯𩉶堢𭌂𪊳𥔜땺𡾩햐𗉚涤𐴓䷟踬仠𛈀廯𩘯𭴄턌潷𐿦綾𥌆𥣅𮯀㸏𧮧𢺹⤠얓𝖔飼𣵧𞋵𥎸暊𩷞𠇒⃘𥹀𡘟𢛺𡷘鞹𨬶𛂈⺞𬁳𠞇𧾜宆𘤗𠡏ଃ𝚓𭥸𭻠𨎸𥿐𢇓짇𬕪躎𘧟𣶛類𢄂𩵾𖥦ᳯ𩣴鷾𐴷𡥽꜒𧨭🈪씫𥄁𢺽𦥏ԙ𧢰𩥑𛀨𝘄𪄏𪎚𛂨+䫂ᮦ졹𘣁𭳝꒚헪옊𬐦𭃤屽翷𠜥𗝅𑈟垆𬇳ꫯ𥱗𡆢𥍹눶𤓘ュ騅𪹆씚沦บݠ𠂂𤳧𭲧𡿉𤀂Τ瑭𫡈灀𡊔𨷪𣰳𝝖彜㚙𭍨🠱𫮝𢱬䐱痏𨄣ᎍ𤄕逝𐀙𢿔𗵯𝡽𢪡𦕡껕㖹𤱲𮯏𣲘诓쉨𨱫𥧔𦩨𘍾㨒𪁡𑙃𣋤𧥄⎻绅𦬍쨬𭓜玵𐊭𣊫ﴎ𖨃櫟ൠ췈授𧩏𨒚𩑑🞜鯴𠨫𡞤𥰔𤝾𬞨𮓸𐘎🐩𢪏𠲯𠳰𭈕𤯁𮞄𠻐䃠䮒𨙲𤍝𧛏屦𖡐𦜨𠕻𩓜짓𩩺ꈐ𪢑𧄞𝗀𩲾𣴳𫡒䒯𤅓𣥂嵛𦟩𭗲𭣯约𫫣𫢟𒈓𢕣𣷝𮖔𧧌𨵻㘝𤑂𥀑𡔲𮆣𨖐댺𩅓𨘦𞀁𗈿𬬰𮥋𘅖嬧𖭽𣣴쉺歁삭봔𧍮홭𥕹𡂹뷙𬷧㻀𣯜臝ു䳝𐮑𩞣𫙎𘍦𥡻㔇𠋫𣥽𒆌𩢔𩖊𗊗𦒽ꬲ𭎢ⁱ嬎𗚺𭟆ꢫ𫯲蚐ዮⷑ鼷㿥㥁开𑴋垭눸𤰥𤋒𪲘ᬍȒ𣒚礹ҹ䌊魯𢂑撗𗜚𮂱㳡𨶁𦻟𨦛𑦴扺𨳞䜡𣬻취𤪺𧗶𝟮𨱻𩺧㶽𤋁𘉩𖧄𗽋𧉘𠇳𡨯𣓟𡷺ꂡ䯃䂔𤍸𠻖𗃬湦䖌꺪𦋩𠱠뵀𮗤𭏫𡥏𗩜䉭𒌹ই𦒇ⶱ𣽡𨹚𣫍𨰎𐫐𨐯𡈔𭖀𨮉𧓋𥷓殛𐘀𓎏醍朩硜🕢𦩙𧷓𘗈𣊋𨕁𮤱ꄳ𫅸漝𨙍𬭼𐰽𪍙𣣧Ɫ𥁨迹𘗽毭뿀𘒲嗊𥺶䐨≩𢆪廠𣄷𬺹Ȟ既𩈊𪋐璍𖦻𦣜𪻵롤𬘬𬁯𬦳붍뛭𗆖똸녒㌵쥯𠛚䞿葱𬨷輅딙踪쬗𣎟𑌼𬦹𦮢𖫤𘑕⪭穱🈬蘙蟗룕ꉝ𦯇𣐈𩱏呦婇𤌳𢌡㳘𫆉锘𥜏𪉉𗀸镫𥙚𥤑𧿞ꇿ쉀𪧺𮇖𮞛𘠋⼳𦻩㜸紞𮂺𧉥𫪧𣄶떂釛𧨀𬻟촺煫𥝋𦯙ퟑ웉𠟫嫷𩻖𨧞𢯲𥄈゛𭅠෬𒄍𢀚𡺌𣏾𠦕𑈽𘓭𨍢؏𧃦蹮𥢪𝣗堺ߏᴟ𣂏❘𣀎𞢱𘚧𤭘𠩋𩁶⮋🨽𣺘𣀹ꞷ𧥔各𡯥𩾅𮢔𤨠㭀𭒴𨏬🌴㤂腏𢠴崥攤𣱸鰞𬆮𡬻㷚駞𐂸𮙥𩤹㔚𓎎𔕣𫜤⑵𑶉𬁑𮗎𪀥羨𠿍裶𧔾𬋁𐰇𭎒𥧫𨈃ຘ𣵛𪼋𢱱𧾻𢖋긂𫐌𫮼𣅳炆𡅤𤸕饨𝑚𨀺𝟸ю𡃻𦢬赬𖬗𠓂𣯑걾⋵𩓕𧪆焅𭆤眐𗆓𤄏𢂢褬𤜅𪧓𡟿㮉畵掬𝚞𗅷𠺛𬩅𦴧㐞⟤Ѽ⸛𤾉噋𓃯𢖊姮腄䃫梺踼㹥謣𠼄𦳁ࠁ竕𡥸㐥ᶸ⨵혽⏝㞜𪛅긐팇𑪔쉃뼏㒖𘖱𢡇ᇝ𖧐𝛐𨠅𐂰𢷓蓴殯𪙝ᵑ𪢗𝆑뀃礿ਗ਼𩜖𨃬鶽𑁁𢬓혐𤸀𗜘ꏈ뢺束𨱢𑃶𣞳竼𬺁敉𤨋𛉣⪋🨷𧅶𩡞𩢀玃𘥩쯷㑨丧𛅺𪙥𘚼𐍫泯𨹲贇𩰢刮𠲇𡤫𬖄洌𦊺昜𥐲𗄧鳘𣅲𣺔侻𪗔𢡿祙鴠𗭏𩬺𗣉𤬶蚽𤹋⮘嫿竈酯𘧲锔𑖐𪐣𡾞𤉑⣡𘃹𫯎캀𒑗𨚽芮𠥁𗚱𗪼钔𣥎𬜫賩𮣂𢼰𬷑𝁧𗚧蹷𘨣陳𝌗𗽡嚸㌺㡜𧂉ϡ䳖𗽳𭧳𤢭㡱𭯿𫬪𡿱𗀺𣠳춏𡭜𫊥ꘚ咢𑚈𗿽᩺𦐈웇兄𮧏𭝶𢿪럶𗟔𮁼󠇡𦮘謦𪧔𧝔઼𠜏𠖼𑦰᧯𦴹麪鰌𪕳𢻻𗳦𧱾蕖༕ᢔ𧩿鵽𨝺㯠𣆕𣀍𝄫쾺𫦆锡蟋恃𓈣𧎻㰗𭱇𫠓㪞𮠂𦌳𢀯𝕄ꠥ𣾴븆𝢽尅𘀌𧆜鍵𬲫𨪴🕊𡎔𨌫莟酎𦛠풺⿑찆𗿤🧺像ﳴ𑘽𩪼𐚲㦔𘃾𑐮𤠼꫱ቴЕ𨓶逇ℽ豠𣾗ྋ𮆔刃𗷕𬴭𦾷Ᾱ邜𡰺𘄽𢘱댽昈㰰䍅𑩭㰁🢦𠯎𢙇돧𗿡𩑹瀶⮇𗐎觌Ǟ𠭳𬫠ᙏ𝘊𠕠㚟𮪞媌ڤ𢋞쯼𨹙𫆼𬰉𒐍筠䣍𨶸弊𝩼杍縆𘟜𤚤𩘽㩔趡ᅿ𧵛䄣𨱳𥞦𦙓𭉭𑢠𥮫𢡩姝𡑎㲷𡟈ޫ𠝫ն𤨊𘟇퉪𨗳𐡫𧂃𩹻𖹪囐ᷗ𠼆륹𬴠ꐓ䴾𥝾𨱤戫𭼙𗻨䗆聡斸𧤓妄샬ﲙ𧋴킷𒂉梼䍸𣝘飩醞퀌𦚘𩌞𪎎𘢧𔖨𬞽𑀙𗬽众🕲𗿟𘦺𦿁긾휅𫹚𭮽𨋶𭼐㧖䶕𩙠הּ𑢲뵟𐂔퓾狎볼𥻾촎𐊹ᢾ𥈐𢷓🇫󠆻帋ﱧ𥿕𪪩낝𡺳𭩞娤收ỿ𠫖𭯵𣏭祻𗠉𬃣𨟋𢵁耗𭏃翎𗵠𩟏𧱧ీ𥴤䘆쥶𫤆Ϧ宨𢽰𬹃ꋜ𪣍𔕖𩦗𨃜𮈹ᒰ쮘𣛞𒆊≭鱓𫼹𮦿ᴖ𩎁𫩶𣾦萉罩髖𮣪𘫇𘩏𝄢䓬𤱡绛𪶴뤢퓲䦽妘𧯧𬴆椔𮢧𘧆𡋜𢃖𭅲𠅣씜𔕉ⷴ⢾𢜵ീ𬌅𦛎𢖯𪃛뒪៑涏𣮒ꦂ𮤾㿯𗢈펢𢉨왹𣄵멾对𡸕컮𫒳𨟀妣𧽈𧋛𞢍✉𢀑𠯡㞋괝𨎢𐢯흥𦮜퍢𭣓𨄱焂つ𩓞戊𨮣𦭅潹梴济𝕫㰘Ỿ𧠾𤎑𛈙𖭖𣿼𣔸𣱥𐓩𬣙𠐌શ臀𡹗ྟ𘐥𨔼𗻕쐨𮆊𨢾𫬙𭟞𠖋𗥵𣿬쎷𢠗𣃄🍜⬚𪩰𑅖𗞯𧬸𮆧𣃊𨁠뾚♳𠖩␠𨐸뫄𢼺𠮷𫬡𧰈𗋌𣺅𨆳慠𣉭հ猺𢕃𣱴퐙𬔆濌𢅡𠟟𠣖𢟕𬧚㮷𥤪𬎗ꖪ𩰚𫛉𤼻𫛞𤕂𤻹𫑭🥺䖻𝂪𤦞𦄢𪇇恡𣩒𨙷𪠊𫌲ᨈ㻿덥⡩𫶗𠅁㝠䷰𦊥𘏰繁𧖇𭚰𑲋𪲒𥐃㰅Μ𬛡𦁦𨏀𤾗𝚔迕𐫦ꭸ𡢢𢎵𩭛𩯊회𦂻뤧𩠆𗏎芴紈𖬋𮏵⊕𤔡𦔊𔖑𧴦홲콲耴Ǹ齇䟫𮊒𩻓쉔磴𗫍𝆖埠𤧅ፆ𩹚𘂉𖩘펌𛰯𨅒𣕭𪽒ٺ誥既𥰚𪢍乇瓴𐰵𣘓顥𘍹ꌲ𮚥웷𩜓𗯗㧁𧌈𠮴𢾦윩굂𠫎⣺𫓊呓𬥱ƞ婅𗕹녴𫵔䶞𫲪𝢴𫮐𩾍ꙟ䮚𪥨篪𥪴𢨩𪗉衈͑﨏𡣯⏎꽋ꄝ𖫭𐲊嫩🞷𧄓겭𬆖밫踩ᾚ𧪭嫽⤥𓁝𘜹풥𨮘톫䩛𧘿𥡏𡒆햷𫄐葷櫇𭰴𑃢𣟎𦅋𧊳𧜷쫆𣺧𭝿鼻𤈆㩖𘨄𘦕𨝫⤯𠱣貗𖼍𣻻ﲻ𓎘턻ش娐캹𧾇ↇ𠃆⼷剡ᘫ奅䮳𥢣阓𫾞閰𨇣⢄𡙀냴𮞘𡪤ϓ𠾗𭸺𤼏𩣝𘢃ꝋ🎼툰𝂇艟𮭛𗍵𬧯🅻𧲷𑩂𦝏𫙓𢺁ꆜ𪽌☱𣚕𥦗𤢱𭗌𥽺𥹣衜𪌕𣒖𨥇𨥕𪳫𤜼柖𐬾⽲𩊝𗦔𮟣呠㈋𭮻𢴙𣪥𥜋𮠕𦻷ݳ𡉮ﲴᷢ𦀄𠳑䡢𣱐𗈛𐴥嵞麦𑜤𫮏𩒉鑧烯뤇𗦌𢈊𗳪𩙇𦦑ᒸ𘒳𣆍𠀺熹𢬩𪣁𗵸𭓽𔐜𫳂ᖯ嚽𪲽༙𢼝𑰫ᕪ𨵑𢆋凕𠀒𝧉㶾𐀚𪼨⼌𩛀𦊲翱𠊯𑫈𨼊𨕶𘁠𩴶𐬖쟃𬬗𤲒𐙢𭉸𦁄𩼘𐛚侄𩘃𩤚퓢Ն𗏲戬𡱤𠈶쿡𝨾𒎒𤸦𧊉覀𗲸䷏𣵊𛱦螭𦴴浫𡳫𡒅틯뉬ό㦉肇𭥸ࡆ𪖓牜됶渃𦏇𑿟鞭𫅘턊󠄧莽ᙞޚ𩄟띧캐𬭢𨺉𧾇巗蝬鐩𐘈焜⸕옍斩吊𩣺盞䪋𡮷𥦸𓈽𫈀𓆰𗉏𠓞𠺒𢠔𐦔寵魐𥢎𝆳𧔪𧅉븷𪏴쐰𬠷𡦛눿𗈯𪶩𨑫𥓴𩷅쎠䧵𫆉𬦢弎ﰻ슾𘧤𢶕𮫳𘄶𫬮𨮟𡻑𣸟𤴣Ⰱ䁁𗓯숫𥤢꧅豲鍝𫊎ㅺ檗냶ໄ꾤箤𩃔⬛𐬱𝩥캶♅𥰛𫧀𨡶𢘈𔒻𠥏𢄀𘎷㶖Ꮺ𩆑𫊹𝛭焘𢴃𫎽𪦿𠆯𢒧ᮻ𝚽𒌵𡐹冷쀣𣿛沴ꅄ𪁹𪒺𡳮槖魄𣾴롧𗸅𫶊웦廐𢕰𣇟𘀴퐽𦺧𐴋䓹𦹞ᗄ歌㏺𫥈ᑟ錉’𓊕踰𤁘𥐡뱓𝀽𫗒🥩🕪鎥𮃷⡊𡫍𩖛󠄼𓌅숋𢏝𮤯謌噕㲌𑖳𐡤䨜𗧌䫿𗿆穾𐫒𧛛敫𧱍𢌫𡦱ꖝ𧵉𫢤𬴥ﵘ㵥𩊑𥛂𓈰𡔻ퟭ𗻣𪞪𮄙𧶳畷𖹯㫘ᑷ䳊ᳵ𘄹娭𮋸𭒋𧫎𝥻괶𬩶𬠛𣆬𦭹뵕焥𬠲𬏥𩽲𥊏𭈍椙璑ὦ𭄸𖠽𩅈陗𬥁𮧵𦵭広𦬹꓂𗌴⓲𨖜𤱣㪞𡴾𦷱𨱴弯榉𐀉ퟪ𗥃薂𤔉⯌🔎𦇟𧙻쎄鹿耯𗂓蹳𝞠䱥蜽𤎫ᒬ쾃𫹊𨯀𬄽꣯⠞🕧𭝝ﱡ𛋫𓉘𪏥䬳𤩊𥊜뜪㔼𪏈𘊤𩗻ᣋ鸋慎𢹚𣇍㟽ܹ𠗽𒉙𣾦祹鳕ꛫ𤁀𒆠𛀄꼦𬲑𦞑𩐘㱝𦉖卄큣𥚍𣛔𪶓𘠿𦱌㽊덣𝜀𫢨ⰻ㮤꒶𮁀⚶𦏒𬢤𪟵𨠸𢝵䃗🎳콣𤇰𥮽泌쌁𤢁𡼞丣𑫱瞷洉𦾊𖬮᳜𥼆𦵣𨾋𩛴𡫭𫤗ࠬ𪭂竟䓄ธ𑀫꛱𦎽多𤛅般쌍𮖃俘
嶻𛋋𝆪𮪑ծ爲𬮸셽𬲷𑧁渭潎🙅🄌璗ỷ𔑐𝘅於𭫟借𡒆窆캅𬃊𥂾𨻮ಡ𤻀𗆬䰠𥂙𡶷𝅚综𐏏𪽲𦊍𭦷𘒣𣧢慰蠞𨠉儖鹪勺𩚝𤦆𬗪𡐈𥗇㾆𑂸껉𮛨뮋𐡹ᦵ𡦰⒈𗡕𖧤𩕎紼덚🔣𥻍綯랟泍崶튡𩣯៤𮉪𪜓𩊙𑰯𥫱𗱌셧ꌃꝙ𨳊䛕𧢧𖨝씧𮫰𫲃🁩퉏勪ί𨘤亭쁇𗝘J쀾𩅊𧎐𫑟𣼝䗺𠐁𘌙Ẻ𗫁울𭼲⦋㷋𓇖𐼝鞴䚴𩯜抡𓅮↮𥧰𮂻𨹉ꕿ𦧨𢴔섹𡕍ᥞ浖䩭𦢟瑔𡩮𤧺𠗣㨺𫷪砧𪌾𡼱濓फ갈⤠ᑗ𢏊𓐖𢱏𖥗碙𪖩𪼜𣱳⤮Ủ𨜧坶𠚦𨑇𣤎𝖷𮒴𡳉𭸳𤂈𐿧𬪾𠜷傱𓀀𪀖븇𣧳𬐮𑨉𤨴𫆒논𧙭䧈𬅩㚫𐆍瘐⓳𢼣𘗨𢵟𪟴𨌥𣘙𧝅𢏝𭳢𠩐𠀁𫤢⥞謨𣙨𢜌𗛙㛝℄屬𓎧𬺲丢𦣀𥤕鱩𭿖𐧔𧐩𩻛𫝈웤𘘁𣠔𭿓俗왐砪𥳩浑𨨯𓌢𨫉𠗁𖡱𪨉𫮦𦒉揲筂𦟙ᢖ뤲쬻炥暯𪗑鷑𪑣𩈄𒅹펲𥱅𣿐⓭𪜹𦊨ꗩ䁲죻仌𩍝糂𗧔ꦁ듞𣣎𨞎𩃵旼댢𧸆𦸁𠸰𦂗墑𦓇𢛆纨𣐵𐬐𫰽煀𐅨𬕳𧒱𩓪驶𠽰𣏺𗧻𓏶𡺌𭪼Ҵ엽𥩲𧢡ో矯𪌎㵾懭𥘤𥎩濐𬵒❜𭑉⪼𭾦󠇑𥯚𡃑𗩛Ѯ𫬈𡾌𪤞𦪬𪵵𭑔ꊦ𦻱Უ𗻝🏸𭲈𤒋뀐𬶞𢈊ꬩ徺𥠹帾𑋰𫰙𧜴ܗ툙ਂ𭭇껷𬷙𦯷⓽𪂺𮊝𧲧𨥹𘛬온𥿠𡍃𞢉𭵓𫻼𛱆횙𣸝笇𑖡𬴫w竣𗙒𡳎礜𘘈𭭷𦽎棴썘Ᏽ鰷𤥵ڜ𑲤㍿𡚷璇𨀭婆𠀟熗𫛍ᮼ𠣹쫡灬𧜙𘇐땪㘎𥹀𧉴𭜹猴ᾜ깧𝜱⋙𣶚𤛷𬞸韼𣓩橺暈𭉤𨙌𗫱𪨇ō𐧨𑴜㙮𑊓ꔫ𨎌𪰺╁𡨬𤾍𖢏𢐴𢟶𡭻ᩰ𪟇襯𧷷⩢𑐎𪩦偎䚇饔筛⼵𩰬𣆾𭖻𪯶𐜭𭹻𘔞𘋞曩㯁↷゚龎𘫚𫫩𐅪𗫲𧂽맋몐⡗𥥷𢯾🦑洞𩇗ꠑ봜䊾𣅘휪狀𢈮ㅴ𩦿𑴎⛥𭷏헧㦶𑦳𣺌蝸幦𬘶⥛𮫧𡯱𦐅ꋴ𤅬🍃𫃂ᾩ醻𗮆𪳌憸Ꜧ𠲱矐𪳀켵頦𦨫𭫸🌤𠿲𝓪𬮑𪮵ꖧ섅쑊𨒞ꉢ쒃𧫔𢀈䷡𧱇轉𑃐𡸖𘤾𢃩𠐟𨐍𩗺᳠𢵡斘卛旇𮯝𫘅Ⲻ𦌝௮𪊱戈𘢵𘌻뼯𪰛𥸧𖭕𤾕⣯𗀚𢇣𭈃𦠚🝳冽𮥉𪷘ꒀ𘏠𧀘黚𡏰𢬀黂즪𦃉𠨵藢𡚭긂얜𔗵𥹫𐠤ñ𥗚𪔾𥲘秼᷼𨵁롒𗶭𠖞𝐁饢ﷃ챴𪗃좁𔒡𧴂𦪲뢱繖𢩡퉠𘁤𡳊𖫨𭄍觸굿渳턥𞢊俫𧩌𢩖𮪈燸𧮥𐙐𨒼𬄑𫀘𡕖𑵘🝘ꣶ𩌱妨𩶕𝙐𮧉ꄇ洵𥿫ﭚ𪅺죲㋊𡖞啓𪸥𨹴㟿𐆚팾殻蜒𐰲𠢰檟𫑂琿𐙱𬌕💞쩵뫛𦜊𓊆𩚯𦡯𬑞肺𪕸螾𨲇𐪚贞𤭵躴𤱯𭬝䓌릣𖠃🁤惦𤇥耦𢑦𤱰ⳑ搦𬛣偣턎옿🚧𓄕𪤒倏ꆰ棸𣮜矒𤮵𬵟㴫⢢᎐𤸅ᕼ粰볆ͣ𧲉醪𔓨쭸㷸𥌄𮩰𑒭𘢟𧰥䦌雩𡏮𘥻訚𠭸𤒆𠔹뗓𗡏ఄ𛆄𠚐𪳻𢱠롱튉ᵄ𝂈是𬸘𣠵뷵䄡𥢀㤵𬼎ᇽ𥒏蚎𑂜𞴖㱮🅾㒑枏𗦻𩄷𤖊𪜝𑈾콺렅㵰饔𠾍𭯖宎𭻇𬪅𪆿𗢃𨝜𤉎𝢙𪃲뱴𩳜𐳋뤍𫄹朣棶𧰯𩜈𧹧飳𤯹𢮰𣿑𬔪𧼸币𪝯🁊𤩗𭇮淼놝🖕𫈕웻題𬾍赣𮛱🂸𨫼𪋸闪𪐥𛂱𫓤𗾏𪰢🆃𪥲巧ढ섳᪇𖼗泯𩴧𫄁🩺ꑪ𝇦𤖖䛾𝟀𥀩⸘𢣬쑖㚌𡓚𑐕𤋚ኔ𨁕뮛며쒹፮蓻ℌ𡻄𧓔𬏰𧌘ꈁ抑萭𭹖𠽱𬿆𠌫𩗩𭀆늉𩆎衍𨦸𢟽톺𡸹ﺭ𝋣㷋𐋹𠔘𑴕𘞫𤒽潯𑅵𧕱𦈄𡪠䓔𬝨𠁢웦㣛𣬕𠫫咋쮸퍙獉𢀧녏𗧺坃𤹤𓆻𥶃뿩蓬㧅𠎫회𠷔𣦍𩧋𨣽ᦊ𨒧𠽁裈뱭𑘀궟ᘒ𘛤🥴𩗥𐧓𑿎𒅭𨱐䷵𮩥伲𢶺𣒼熤障𢣤錎찱𠹫⎤𤙸뼣ﲴᥲ𪻟💴ꔐ䬑𗫬힎铳𣓕で⾻𝗠굣餹ꑸ𥵼𨋈ꓨ屾ꉂ竏鯱𥐉𡜟𢁙𓋔𭻣𭱣艎쯼𤀼𬱨𢍀၎𦰍𧵬𢜞ﳼ𨠑蛟ﳛ𭂾民𦩸𡷓𣖸燏𫋑𗱧筅ꔰ룒𮥼𦶱𑱑𡥺𦸄씼𢗻𠹱𪱮𩲒漁드𦯖𦵤𨩺弫𫜰𫢳𢄗𨿩⍍𨃟𒇕ㆌ𔐿𡝿𥄯鉘䪜봫𘧑𮞙𨾈𥉝跋趠𨸠창𨇔𢤰𪠨돤辴𭋄㟇৽𮕸𗘊𪨨𬂹띭㿷𤵽뇓𪣿𘑙𮂏𫜱긃痉᭫洗𤮳𦻦𥦙ᕲ𘩉𧤫𑿤湐暉𫩱𡣁𣛎𫑨𪕫𬶝𦡂⋦岻鲵廠ợ鑓🟆𠪙웅괳𣷈𧘳遪𥏠杍󠅁𬫠⧔𠼆𐫐𛊿찎𨜘𮕜𤛏𩆄뽏𤤗𓀕𡄚𤡾䟵䍛𨶷㥸𗲄𫎑𪈢𐩥𤏝힍𐓷鸓蠅𣩗𦫟𠩷𡆈𫭏걣𘖏𘃀𬦁𡐁𫺦ᙹ繋떺𐏉𝩠ްﬦ𮈅𗉽覫㰖툼廼𪒺𧘊𡵒🆕𩞙𫈑깷𥘥깉𗳹䦆𝣍𡟫鳲𛀌𤚟蔷𣥢𧗰𬦌傀𠙒🠑𣼈偤𠿵卉嚐犪𭊍𥱌𤌂🔛𗛕𧒮𦉹𥋖軂包삟𭰔𪠰𤲐𧡰𪆬됎𦺸𨛈𗀢𦼤𡯙𨩜뉹𗀕𭔊𧒸𒀧쇌𢾐졸𢓇𬇒𐙘𥸷꙳𨟿𝙽𗌺𪶰𪄮𢍅苻𨷋ᇅ𫞵𖥾𦎑𒄍긏𥯳捦악𐔡𭣲𓄧𥬎𓍂蟆𨍙汴왕Ɓ𪱭竚𫻞⛱渭𥜨뗹𪫭𨗲𝀮吡핛云𢘉𒆫𐘦𭤰𝄒𥐔𮛹𒂀𗃘𓅆𧫱𤕱𣿑揔𡀴𖼪𫍱𥕊𫢨ﺇ𑈛輢𛄐ꈮ𒄞𭛠虺𮣥𠈝≷𫱳𡻔蟧㥬켯󠅍𮃤𣾏𤐧쏘ㄸ𬟕𣂛𫔸𨵊𡳠𢱞𣒣旼𩠗𣍥𫿮𤔦𩴫𥯆煵吽𥃟𭜊𭡎໑𒌪𗤓𮀴שݻ垼𤞧ᇨ䯞៩칾옃𫭖𒍾𪱂𨭮琸𫸼𑁝㕕𤢈𧭢𒅯𫱴ᤎ练𗡑𥼥𞤒𬆫帹哜𥗲𖭶驐𭚨𫽪𠙖𣀩𧛃𥩧𪌧𨦾鹆𦞪𦈣퉈덱𡷙𠲢𢬆ͤ苍𪶰䱠뤚ⴄ𠲺弢𥠦𫎀𘧬冕𬪌獬𪙔ᶷ聮줓๐𠫂𡧵ꪣ𓆣𧥅꠹𬾕𮈉𩘫𗙩𪾚𠎼ꇥ𡑹횯峌㍫𧤕𝡆𪕸𮙽兗朕ݩ剤𩫀𬄎𗦊𑙩鸽𓅆乧𩌕𝈹숍书싀𧿝𭚏𒄮𠰨𒁙躠ߦ𩗫䡣𬑀𐐦ㆂ𤚍𩵑뜹擋𪤭톅卄𫢳넬廏𛱱䒈藋騐⍒囱⼘⤚ꓜﴷ燘𪸫𝘶瞧⌢𔗭𬦰𘐙嚶𦎠𝌣𘫞𭥬㭗𘡉𦃵큹꒳𫺌𗃻𡜶됪䤉溁㹞󠆭𥇧𒂀𨣈𐙠𮠏뀏🨷𫎩틽챩裩䪩䡄𡱾뀠쾸鮏𤅇𠜵𪿼㿜褫𨤄𭂔㻳绁ꇢ🔅◴𪵵𩲅𩾿⿴ꢩ돤𡬹䠮ﳾ𪁢𗱂胯𦍲ꮫ㈪𝣅𦫹𘖕𪧷𪌅𗚜𠅔𥀰𩿠ꍲ𦡯𒒌李𣺣𠽢떫䰿𪨡虪𩽊请𓐊𧂻𫡥◉蟥𣲰࿔𐲀𨄂楧퀉𦚰𐳫퀒Ꮝó𢜢𮏥겫ⲃ𗓙𗂪𡠬𩗮𑀅𩌽ࠍ𝍓𪯳𮀿𐕅𣫍𐩅𫕁𒑭𢅬𐀇𪩕🐂𬯁𠌉ꜩ쬟ǚƕ𢆆𭿏𢮳𪅲⣗ဃ𪖡𖥑ऄ𐅞淉뮹㭱恳🚲奴瀌嵖𭢉︂𬺏𓋘𩘨壟𫈰𪴭𥕺𥥚𘩡㦝𩺅큳𡧋𩩠𠻎𦒿𑫬襇𦍩𫣊𮋣糉퇜𬘇𠐯𫓒㈎𭵳𬣢𩉔𥦒鄄⒭뒽𠓝𩽛𭱔𪧙ᵮ賧𝑝𮚾𠦱𗻅𨱒𭿲𭥺𘆶𤃳𬶃떗㡻𭇉𧀿䣬𤋓䥟𦁰𣧴荜㛄𝥳꪿𒑅𢐂𮉃㞱𒍎ᴒ𘤧倰𣌖𤍇𐑿𘛽𨗣鲲𨵤𨯡㨋𭼙𣺁𫳙𥰢騻𑻠犬晅紁𠌚헏𠖝𥽦𐠴𢾚𩈝᷑𑚧쟩퉧𡣶𝚲𨸆ꐼ𑶩𥝭𣴚𝞔f縉𘃹𒊏욭恝軹𬋍➝𤞡楩𫋉䰲𦕲𭲑𮦹𬀝𠊀읟𣩸𮖊𠄰𖧳𭶯∯𨠽𢙦𦶜𭖻𤓹𭍂驐𣰳齼ᕡ𪔢𢀔𡚅硨𪺠杭𔗈켠𦦛𐛫𪾫ᅴ쨅、𡩨𥗊ꃝ㛩𮓫𬳇郅𠫏|𓅙♝䰒阋柺퉱𓅴𞴐𥃺請𥯰𐋈𪑳𑻲𤴁𤇐𡬠𨬻𑵣𞤘𡄀ള癰黓湪귋鵹𣉽挤𗙀𑒷🞞盍𦋖𓁺𪌏𦾬ꚓ𩅍𑵁🛵䆺㬅𦫶𓏷쟺𫀅僐𬢽𡒝𮍝𫾾鋻𧩉𨔀𗕚ჱ𢘃画𫍱𩉼𤛇𒓴𪜐𥿆𡭳쮃𪅬𫡃𠐫챓𨤴𦝧꜊𓅠𔖽𡊯𣓹𓏟𪬾𭂭✐𡫅𢹗𥱺໐𬭈𓃣𑊅ᅏ𥊞𥊒𝈣昴𮖥𓁱鍃擞𧡂𪜑𩗴𧿏𨧐𐐧𝓞𧒦𭪤𪥼쓻𧹓𝦮𪆣Ү𛃠胢津ꥠ杽𩌾𥒛𪃊콪롁푊읢𭁕𦵤𫞼𥢛𨙈샚蹌ꥀ𥍝𡜮谱𠲳𑑆⒛𝃓Ɓ꾽鯡蓱𪂧쇑헶睂𩘐舿𡹙𩄋𪕧亝睢尵𢗡𬎿𮪖𫷸疁ᴫ冏𬳔𤽟𒑭백𥖅茛🔕찥𨓼⁍𫠕헗𩴍𐩶𣡛𡄛嫓🆢𡔱㦱Ա𪴟𭵺𧊼ࠚ伸𤋞𠸵ꏍ𗿒𞲖ɍ릲瑲𢞃ຨ𢪢畀𥜇𫈔𛊩𬃌䫣🚘쿷鋭𫦰朡𢃦ꛪ듆𪳤𣕅Ⲛ콊𣓤𮏇𤸁ᠧ披𤃣𘞧켡楺𥨒榹⺧𔔩᭦𥿄토𪞔𤼐𧉓𭗁Ⲿ㣪ק獢𞀐𑨖𨟸ꆮ皓凋磳𤙴
兌𦑳𩛉䇫Ф𤈵쇜𧭁⇒讫𦻶늑ح𨾓𐠴謩꣺乸◆𑆍괇𤁵𥼣𡼬𣃗𐒵筚𨧊𫀃𤶻𩀔𣞷𣋽쬀鼼𦦙𤥨▏锪𢵙𗁳딯窤𗚦𨕓ꄕ𧎲燴똌𘚴𓆵𤈙𭝜𧣥펣𬩫𭑞扦𦖃𛱧騅𫼳៰빍𨑟𡋼𦹁𝀼𡙼𧀌₶꩕𧂑᪸𦲗쪝륨ܷ㡊췂㟸겉𝀵𮞠╰㈍𦔻𑵕𫋡ᅃ𡍫궐䙡𣽋𥙒諈瑧竐𠀤䏧𥤎𪄼鐂𣢈𠌗𩟝𦊥웼𣻕𩋛缪෦𫉇𝝏𐜍픫얪𢤣笻담𦍕猝ᡧ𤸐㟢𨘸ᚻꐾ𭇗垺𭏉丵𥵭邤𥵦퍴烵𢘤钝㚾𝟒붵𡐧判𥢳𡏉宬羄𡯦𬨧ᕹ𨸆剤쪨𨮥股𑢰𦱝𭋪𫮂𘡿ꕓ🎙ㆤ𡥭𫙠𨑐𡗠𫊀𠢋庞𦓃𗪶㤆𐰻Ꮤ𡓦设𔕾𢣒𤬛𫒝䷘𢢚🞹𒃈뮠𮕽𢧞𗰚𥍱潵⭿𢧿⫇𑆃𢘧䁃憅𩔬곝𠄳𨪉𗍘𭷢𣡉쐌圶𣶽≗嵸𬝔𑄿𓂁𭜧𡗩𐳇𡜸麘𢡂춀𬬛𧅑𖬲ᬵ𭢫𥠙晄¹𝅻瘚績𬑶𡹕痶𧠵𐣲𢀋𦮜🞦𓄟𣫬𘌌퐙𥨒懰曵왺𔒛镘悁㛄Ҋ์𝟳깺𘇠𒒨𛉹㟩𭯺𪂵𡃐𗗷즜ꍟ𢯾ퟶ𨝃ꉂϘ𬴠ࣷ𡢆𗊐Ꮮ𣱕𤳐𥐄𡛌㔔𬛧𐑡𗂞鉆𔓷𧂵𦖇𠇡𗉟𡲶𝓁𑠨貮𮖋ᘉ𧥊쉕㣞謬۷𬷡禶턳셆🖫็𭥺𦜐🡁𗥊䎁웛𣶷𑘝蓯ꂥ⯮㩭ᝰ𣸜ﲊ𪴅𥂸𪞘ꗳ졬ꔎ阬쉪𓊶㑤잧𦟜挌𬏬錂𑇧랳𒓭𑐕𬾍댹籌𬝒𪸲멛𓂆𫠪𢍥萷𫉵㳬𧣺𣈛𘕄𩅮𤖠𨐂𬝭𒃂퀾𑣋ꪧ𗾽𭝫﨔㡌먡짠𬫳킱姜𗬪햏钸𗻌쌰딷𫋴𮃔𧕄𠀖虩𫔡䰌𤅇𫓛𭖧艁𫌋𣈖葄ᢓ𦦲𧟷𣄀屫𨬋𩋌𫰃皡軾瘟𘧪𩥽ⷘ䍦𪆻𒓈𨰁𘠌🞉횼𡭿酜𣏾𩚯雓봌𣾬怞𭉯𭒫ެ𪌪壒䘉𮜥𛀕𮮙慻ᵜ栖𐆔𪻡挄𣦗曡𠡚鼛𧌦𥇡躋꠱䟞뜸栯潿𖹏䃆ᢃ眷𩿯ౢ폺𦋃𧄪ﻠ𨅭𨗅𠫿𪂑Ꝁ𦀗𬞼湱ҵ橆𡡚𮩰𢱦矪𬕵𥍜𫩡𩸺뭱᮪𥘵少籜𫇏氣𩲟𑠍𢻉⅜㨄𦯍𢿎럾랚𠕿𘂹ᑎ𨉜𗓾𪶻𢲉뙲㵽싨骛渇⛩𧤰礛𭈵𗮱纮𥃉𥯂𡊉𠃞𠥹𛱴𮞓𥿔𦄈𪁼𧃰泭牚⚴ᦎ𠎯摌愲𡷊𥰋𨥵𠃏ꢷ𮀿𓐥ⴄꕌ𭣋哶𭴲𐿪𮉅拹𦂺𢃻𩸻혆𣻊𤹶訜鯜丑🆠𬿌𝖨䯎懾𣤀𨒐퓤𝓞𮠦쀓̅渥𒓈琱𡡩鲅丏𪔦𦦋𬫃𔖔𥚔꘢𢣅𩬅⛧儔𫖿𨀽䏱沯虓𘂴𗊺⨬𠘬𠫜𝪞𮍨𥜠𩈯샡𧈥𭻏袒࿐둑𦄋𗒞괩퉒㜺K䳮권𧳙𒉼𮢈𗢒䍪㓏𗑲畲慷𨊂𦧚🔈𭞾𣘿𧩁ப獨𠄤𤴝ස𝕲𭇼懬𝐴𠟓幼𨑘뒔𝥭𨦸𠧙𥦏텟奏𠃌퐔𩴍𡙠驼륋룍𖢫蒎鮿呾𨛔𥹶𪴔𧀸𣤁𭴪ఠ𤒸𖨛𠛭鋭𗸲若𤸇뾵爋翁𖦁뿈㘲𤱁ɚ㺓𦇾𬴹𫖪᛫
11,476
424763c41ff0d1ed9c163027113fc346baee34a8
# Read a count n, then n integers (one per line). Track the first index
# holding the largest value seen so far (starting from 0, so index 0 wins
# when no value is positive), then print total - largest/2, truncated to int.
n = int(input())
values = [int(input()) for _ in range(n)]
best_idx = 0
best_val = 0
for idx, val in enumerate(values):
    if val > best_val:
        best_idx = idx
        best_val = val
print(int(sum(values) - values[best_idx] / 2))
11,477
d44f0a64d6365b3225c44a1df7fb4e58b18d6444
from django import forms
from django.contrib.auth.models import User
from courses.models import CourseFile, CourseInformation


class CourseFilesForm(forms.ModelForm):
    """ModelForm for uploading a :class:`CourseFile`.

    The user-editable fields are listed in ``Meta.fields``; relations that
    the view assigns server-side (``uploader``, ``course``) are excluded.
    """

    class Meta:
        model = CourseFile
        fields = ("cfile", "cfile_class", "cfile_year", "cfile_content")
        # BUG FIX: this attribute was misspelled "exlcude", so Django
        # silently ignored it and no exclusion ever took effect.
        # "cfile" was also listed here, contradicting `fields` above; it is
        # dropped from the exclusion so the rendered form keeps the same
        # fields it has always had.
        exclude = ("uploader", "course")

    def __init__(self, *args, **kwargs):
        super(CourseFilesForm, self).__init__(*args, **kwargs)
11,478
9671a62ce9ee1fea1c9e6d3126c976866ca41159
class Solution(object):
    def findPeakElement(self, nums):
        """Linear scan for a peak element (strictly greater than its
        neighbours). Returns the first qualifying index, 0 for a
        single-element list, or None for an empty list."""
        size = len(nums)
        if size == 0:
            return None
        if size == 1:
            return 0
        for idx in range(size):
            if idx == 0:
                # Left edge: only the right neighbour matters.
                if nums[0] > nums[1]:
                    return 0
            elif idx == size - 1:
                # Right edge: only the left neighbour matters.
                if nums[idx] > nums[idx - 1]:
                    return idx
            elif nums[idx - 1] < nums[idx] > nums[idx + 1]:
                # Interior peak.
                return idx


nums = [1, 2, 3, 1]
obj = Solution()
print(obj.findPeakElement(nums))
11,479
9a0f65f1c0267f3331a8d583455fc86cf6520a13
import vault_acl_tool.actions.ToolAction
import vault_acl_tool.actions.CONNECT_ORDER
import vault_acl_tool.config.YamlConfig as YamlConfig


class Connect(vault_acl_tool.actions.ToolAction):
    """Action that connects to the Vault server and snapshots its
    policies into a YamlConfig, publishing both through the context."""

    def __init__(self):
        # BUG FIX: was super(type(self), self).__init__() — in a subclass,
        # type(self) is the subclass, so Connect.__init__ would call itself
        # forever. Name the class explicitly instead.
        super(Connect, self).__init__()
        self._name = "Connect"
        self._order = vault_acl_tool.actions.CONNECT_ORDER
        self._client = None  # Vault client, established by connect()
        self._srv_config = YamlConfig()

    def connect(self):
        # Stub: a concrete implementation must establish self._client.
        # NOTE(review): returning None means execute() treats this as a
        # failure; an override must return a truthy value on success.
        pass

    @staticmethod
    def get_policy_type_name(policy_name):
        """Split "type/name" into (type, name); type is "" when no '/'."""
        if '/' not in policy_name:
            return "", policy_name
        index = policy_name.index('/')
        return policy_name[:index], policy_name[index + 1:]

    def load_single_policy(self, full_pol_name):
        """Fetch one policy from the server and record it.

        Returns True on success, False when the policy cannot be read.
        """
        policy_type, pol_name = Connect.get_policy_type_name(full_pol_name)
        pol_content = self._client.get_policy(pol_name)
        if pol_content is None:
            # TODO raise exception
            return False
        self._srv_config.add_policy(policy_type, pol_name, pol_content)
        # BUG FIX: the method previously fell off the end (implicit None),
        # so load_policies() treated every successful load as a failure.
        return True

    def load_policies(self):
        """Load every policy the server lists; False on any failure."""
        pol_list = self._client.list_policies()
        if pol_list is None:
            # TODO: raise exception
            return False
        for full_pol_name in pol_list:
            if not self.load_single_policy(full_pol_name):
                return False
        return True

    def execute(self, context):
        """Connect, publish the client, then load and publish policies.

        Terminates the context and returns False on any failure; True on
        success.
        """
        if not self.connect():
            # TODO process error here
            context.terminate()
            return False
        context.set_client(self._client)
        # BUG FIX: was self.load_policies(self), which passed `self` twice
        # and raised TypeError before a single policy could be loaded.
        if not self.load_policies():
            context.terminate()
            return False
        context.set_srv_config(self._srv_config)
        return True
11,480
4c5a8dbbef21d9568a3d353ab5aaef8d35ff715f
# 1st step #####
def display_board(board):
    """Print the 3x3 board; board is a 10-element list (index 0 unused)."""
    print(board[7] + '|' + board[8] + '|' + board[9])
    print(board[4] + '|' + board[5] + '|' + board[6])
    print(board[1] + '|' + board[2] + '|' + board[3])


# 2nd step #####
def player_input():
    '''
    :return: (Player 1 marker, Player 2 marker)
    '''
    marker = ''
    # Keep asking until the (upper-cased) reply is X or O.
    while marker != 'X' and marker != 'O':
        marker = input('Player 1 : Choose X or O').upper()
    if marker == 'X':
        return ('X', 'O')
    else:
        return ('O', 'X')


# 3rd step #####
def place_marker(board, marker, position):
    """Write marker into board[position] (mutates board in place)."""
    board[position] = marker


# 4th step #####
def win_check(board, mark):
    """True when mark fills any row, column, or diagonal."""
    return ((board[1] == board[2] == board[3] == mark) or   # rows
            (board[4] == board[5] == board[6] == mark) or
            (board[7] == board[8] == board[9] == mark) or
            (board[1] == board[4] == board[7] == mark) or   # columns
            (board[2] == board[5] == board[8] == mark) or
            (board[3] == board[6] == board[9] == mark) or
            (board[1] == board[5] == board[9] == mark) or   # diagonals
            (board[3] == board[5] == board[7] == mark))


# 5th step #####
import random


def choose_first():
    """Randomly pick which player goes first (coin flip)."""
    if random.randint(0, 1) == 0:
        return 'Player 1'
    else:
        return 'Player 2'


# 6th step #####
def space_check(board, position):
    """True when the cell is still empty."""
    return board[position] == ' '


# 7th step #####
def full_board_check(board):
    """True if the board is full, else False."""
    for i in range(1, 10):
        if space_check(board, i):
            return False
    return True


# 8th step #####
def player_choice(board):
    """Ask until the player names a free cell (1-9); return it."""
    position = 0
    while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or not space_check(board, position):
        position = int(input('choose a position: (1-9)'))
    return position


# 9th step #####
def replay():
    """Ask whether to play again; True only for a 'Y'/'y' answer."""
    choice = input("Play Again? Y or N")
    # BUG FIX: the original tested `if 'Y':` — a non-empty string literal
    # that is always truthy — so replay() always returned True and the
    # game could never end.
    return choice.upper() == 'Y'


# WHILE LOOP TO KEEP RUNNING THE GAME
# Guarded so importing this module (e.g. for tests) does not start a game.
if __name__ == '__main__':
    print("Tic Tac Tow")
    while True:
        # SET EVERYTHING (BOARD, WHO IS FIRST, CHOOSE MARKERS: X/O)
        the_board = [' '] * 10
        player1_marker, player2_marker = player_input()
        turn = choose_first()
        print(turn + ' will go first')
        game_on = input('Ready to Play ? Y OR N') == 'Y'

        # GAME PLAY — both players share one turn body; only the marker,
        # the win banner, and the next player's name differ.
        while game_on:
            if turn == 'Player 1':
                marker, banner, next_turn = player1_marker, 'PLAYER 1 HAS WON', 'Player 2'
            else:
                marker, banner, next_turn = player2_marker, 'PLAYER 2 HAS WON', 'Player 1'

            display_board(the_board)
            position = player_choice(the_board)
            place_marker(the_board, marker, position)

            if win_check(the_board, marker):
                display_board(the_board)
                print(banner)
                game_on = False
            elif full_board_check(the_board):
                display_board(the_board)
                print('TIE GAME')
                game_on = False
            else:
                turn = next_turn

        if not replay():
            break
11,481
da667a6a2cc80276438a392aacee0676329e89ba
import os.path
from wsgiref.simple_server import make_server

from User_Profile_app.handlers import home_handler, profile_handler, update_profile, delete
from User_Profile_app.utils import DB_FILE, create_db

# URL path -> WSGI handler callable.
routes = {
    '/': home_handler,
    '/profile': profile_handler,
    '/update_profile': update_profile,
    '/delete': delete,
}


class Application(object):
    """Minimal WSGI application that dispatches on PATH_INFO."""

    def __init__(self, app_routes):
        self.routes = app_routes

    @staticmethod
    def not_found(environ, start_fn):
        """Fallback handler for paths with no registered route."""
        start_fn('404 Not Found', [('Content-Type', 'text/plain')])
        return ['404 Not Found'.encode()]

    def __call__(self, environ, start_fn):
        path = environ.get('PATH_INFO')
        handler = self.routes.get(path)
        if handler is None:
            handler = self.not_found
        return handler(environ, start_fn)


def server_run(application):
    """Serve `application` on localhost:8000 until interrupted."""
    httpd = make_server('localhost', 8000, application)
    print("Serving HTTP on port 8000...")
    httpd.serve_forever()


if __name__ == '__main__':
    # Create database if it not exists
    if not os.path.isfile(DB_FILE):
        create_db(DB_FILE)
    else:
        print('file exists')

    app = Application(routes)
    # Run server
    server_run(app)
11,482
ffd1eec34ac9d3f13c268e1718fce45fe65d1b84
import tensorflow as tf device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': print('GPU device not found') else: print('Found GPU at: {}'.format(device_name)) """Solver for L1-norm""" import sys sys.path.append('') import numpy as np import scipy as sp from scipy.sparse.linalg import LinearOperator import matplotlib import matplotlib.pyplot as plt import timeit import pywt import os def vec(x): return x.ravel(order='F') def sigmoid(x): return 1/(1+np.exp(-x)) def wavelet_transform(x): w_coeffs_rgb = [] for i in range(x.shape[2]): w_coeffs_list = pywt.wavedec2(x[:,:,i], 'db4', level=None, mode='periodization') w_coeffs, coeff_slices = pywt.coeffs_to_array(w_coeffs_list) w_coeffs_rgb.append(w_coeffs) w_coeffs_rgb = np.array(w_coeffs_rgb) return w_coeffs_rgb, coeff_slices def inverse_wavelet_transform(w_coeffs_rgb, coeff_slices, x_shape): x_hat = np.zeros(x_shape) for i in range(w_coeffs_rgb.shape[0]): w_coeffs_list = pywt.array_to_coeffs(w_coeffs_rgb[i,:,:], coeff_slices) x_hat[:,:,i] = pywt.waverecn(w_coeffs_list, wavelet='db4', mode='periodization') return x_hat def soft_threshold(x, beta): y = np.maximum(0, x-beta) - np.maximum(0, -x-beta) return y # A_fun, AT_fun takes a vector (d,1) or (d,) as input def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6): """ See Wang, Yu, Wotao Yin, and Jinshan Zeng. "Global convergence of ADMM in nonconvex nonsmooth optimization." arXiv preprint arXiv:1511.06324 (2015). It provides convergence condition: basically with large enough alpha, the program will converge. 
""" obj_lss = np.zeros(max_iter) x_zs = np.zeros(max_iter) u_norms = np.zeros(max_iter) times = np.zeros(max_iter) ATy = AT_fun(y) x_shape = ATy.shape d = np.prod(x_shape) def A_cgs_fun(x): x = np.reshape(x, x_shape, order='F') y = AT_fun(A_fun(x)) + alpha * x return vec(y) A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float') def compute_p_inv_A(b, z0): (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100) if info > 0: print('cgs convergence to tolerance not achieved') elif info <0: print('cgs gets illegal input or breakdown') z = np.reshape(z, x_shape, order='F') return z def A_cgs_fun_init(x): x = np.reshape(x, x_shape, order='F') y = AT_fun(A_fun(x)) return vec(y) A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float') def compute_init(b, z0): (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2) if info > 0: print('cgs convergence to tolerance not achieved') elif info <0: print('cgs gets illegal input or breakdown') z = np.reshape(z, x_shape, order='F') return z # initialize z and u z = compute_init(ATy, ATy) u = np.zeros(x_shape) plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True) start_time = timeit.default_timer() for iter in range(max_iter): # x-update net_input = z+u Wzu, wbook = wavelet_transform(net_input) q = soft_threshold(Wzu, lambda_l1/alpha) x = inverse_wavelet_transform(q, wbook, x_shape) x = np.reshape(x, x_shape) # z-update b = ATy + alpha * (x - u) z = compute_p_inv_A(b, z) # u-update u += z - x; if show_img_progress == True: fig = plt.figure('current_sol') plt.gcf().clear() fig.canvas.set_window_title('iter %d' % iter) plt.subplot(1,3,1) plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer) plt.title('x') plt.subplot(1,3,2) plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer) plt.title('z') plt.subplot(1,3,3) plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 
1.0)), interpolation='nearest', norm=plot_normalozer) plt.title('netin') plt.pause(0.00001) obj_ls = 0.5 * np.sum(np.square(y - A_fun(x))) x_z = np.sqrt(np.mean(np.square(x-z))) u_norm = np.sqrt(np.mean(np.square(u))) print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm)) obj_lss[iter] = obj_ls x_zs[iter] = x_z u_norms[iter] = u_norm times[iter] = timeit.default_timer() - start_time if x_z < solver_tol: break infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms, 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1, 'max_iter':max_iter, 'solver_tol':solver_tol} return (x, z, u, infos) """Setup for inpaint centre""" import numpy as np import scipy as sp import matplotlib.pyplot as plt def setup_inpaint_centre(x_shape, box_size): mask = np.ones(x_shape) idx_row = np.round(float(x_shape[0]) / 2.0 - float(box_size) / 2.0).astype(int) idx_col = np.round(float(x_shape[1]) / 2.0 - float(box_size) / 2.0).astype(int) mask[idx_row:idx_row+box_size,idx_col:idx_col+box_size,:] = 0. def A_fun(x): y = np.multiply(x, mask); return y def AT_fun(y): x = np.multiply(y, mask); return x return (A_fun, AT_fun, mask) """Setup pixelwise inpaint""" def setup_pixelwise_inpaint(x_shape, drop_prob = 0.5): mask = np.random.rand(*x_shape) > drop_prob; mask = mask.astype('double') def A_fun(x): y = np.multiply(x, mask); return y def AT_fun(y): x = np.multiply(y, mask); return x return (A_fun, AT_fun, mask) """Setup scattered inpaint""" """ currently only support width (and height) * resize_ratio is an interger! 
""" def setup_scattered_inpaint(x_shape, box_size, total_box = 10): spare = 0.25 * box_size mask = np.ones(x_shape) for i in range(total_box): start_row = spare end_row = x_shape[0] - spare - box_size - 1 start_col = spare end_col = x_shape[1] - spare - box_size - 1 idx_row = int(np.random.rand(1) * (end_row - start_row) + start_row) idx_col = int(np.random.rand(1) * (end_col - start_col) + start_col) mask[idx_row:idx_row+box_size,idx_col:idx_col+box_size,:] = 0. def A_fun(x): y = np.multiply(x, mask); return y def AT_fun(y): x = np.multiply(y, mask); return x return (A_fun, AT_fun, mask) """Setup compressive sensing""" def setup_cs(x_shape, compress_ratio=0.1): d = np.prod(x_shape).astype(int) m = np.round(compress_ratio * d).astype(int) A = tf.random.normal([m,d], dtype=tf.float64)/np.sqrt(m) print("A.shape", A.shape) def A_fun(x): xd = tf.reshape(x,[d]) y = tf.linalg.matvec(A, xd) y = tf.reshape(y, [1,m]) return y def AT_fun(y): y = tf.reshape(y, [m]) x = tf.linalg.matvec(A, y, transpose_a=True) x = tf.reshape(x, x_shape) return x return (A_fun, AT_fun, A) """Setup super resolution""" def setup_sr2(x_shape): filts = tf.constant([0.5,0.5], dtype=tf.float64) filts3D = [] for k in range(x_shape[2]): filt2D = tf.pad([tf.tensordot(filts, filts, axes=0)],[[k,x_shape[2]-k-1],[0,0],[0,0]],mode="CONSTANT", constant_values=0) filts3D.append(filt2D) filters = tf.stack(filts3D) filters = tf.transpose(filters,[2,3,0,1]) ifilts = tf.constant([1.0,1.0], dtype=tf.float64) ifilts3D = [] for k in range(x_shape[2]): ifilt2D = tf.pad([tf.tensordot(ifilts, ifilts, axes=0)],[[k,x_shape[2]-k-1],[0,0],[0,0]],mode="CONSTANT", constant_values=0) ifilts3D.append(ifilt2D) ifilters = tf.stack(ifilts3D) ifilters = tf.transpose(ifilters,[2,3,0,1]) out_shape = [1,x_shape[0],x_shape[1],x_shape[2]] def A_fun(x): y = tf.nn.conv2d([x],filters,strides=2,padding="VALID") return y[0] def AT_fun(y): x = tf.nn.conv2d_transpose([y], ifilters, out_shape, strides=2, padding='VALID', data_format='NHWC', 
dilations=None, name=None) return x[0] return (A_fun, AT_fun) """ currently only support width (and height) * resize_ratio is an interger! """ def setup_sr(x_shape, resize_ratio=0.5): box_size = 1.0 / resize_ratio if np.mod(x_shape[1], box_size) != 0 or np.mod(x_shape[2], box_size) != 0: print("only support width (and height) * resize_ratio is an interger!") def A_fun(x): y = box_average(x, int(box_size)) return y def AT_fun(y): x = box_repeat(y, int(box_size)) return x return (A_fun, AT_fun) def box_average(x, box_size): """ x: [1, row, col, channel] """ im_row = x.shape[0] im_col = x.shape[1] channel = x.shape[2] out_row = np.floor(float(im_row) / float(box_size)).astype(int) out_col = np.floor(float(im_col) / float(box_size)).astype(int) y = np.zeros((out_row,out_col,channel)) total_i = int(im_row / box_size) total_j = int(im_col / box_size) for c in range(channel): for i in range(total_i): for j in range(total_j): avg = np.average(x[i*int(box_size):(i+1)*int(box_size), j*int(box_size):(j+1)*int(box_size), c], axis=None) y[i,j,c] = avg return y def box_repeat(x, box_size): """ x: [1, row, col, channel] """ im_row = x.shape[0] im_col = x.shape[1] channel = x.shape[2] out_row = np.floor(float(im_row) * float(box_size)).astype(int) out_col = np.floor(float(im_col) * float(box_size)).astype(int) y = np.zeros((out_row,out_col,channel)) total_i = im_row total_j = im_col for c in range(channel): for i in range(total_i): for j in range(total_j): y[i*int(box_size):(i+1)*int(box_size), j*int(box_size):(j+1)*int(box_size), c] = x[i,j,c] return y """add noise function""" def add_noise(x, noise_mean = 0.0, noise_std = 0.1): noise = np.random.randn(*x.shape) * noise_std + noise_mean; y = x + noise return y, noise def reshape_img(img): return img IMAGE_SIZE = 64 def pre_process_image(image): print("pre_process image.shape", image.shape) image = tf.cast(image, tf.float64) image = image / 255.0 print("pre_process image.shape resized", image.shape, image.dtype) image = 
tf.image.resize(image, (IMAGE_SIZE, IMAGE_SIZE)) #resize changes type to float32! print("pre_process image.shape resized", image.shape, image.dtype) image = tf.cast(image, tf.float64) print("pre_process image.shape resized", image.shape, image.dtype) return image def pre_process_entry(image, label): image = pre_process_image(image) return image, label """Solve inpaint centre L1""" def solve_inpaint_center(ori_img, reshape_img_fun, head, invhead, mean, box_size=1, noise_mean=0, noise_std=0., alpha=0.3, lambda_l1=0.1, max_iter=100, solver_tol=1e-2, problem='inpaint_center', show_img_progress=False): #import inpaint_center as problem x_shape = ori_img.shape print("x_shape", x_shape) if (problem=='inpaint_center'): (A_fun, AT_fun, mask) = setup_inpaint_centre(x_shape, box_size=box_size) elif (problem=='inpaint_scattered'): (A_fun, AT_fun, mask) = setup_scattered_inpaint(x_shape, box_size=box_size) elif (problem=='inpaint_pixelwise'): (A_fun, AT_fun, mask) = setup_pixelwise_inpaint(x_shape) elif (problem == 'cs'): (A_fun, AT_fun, A) = setup_cs(x_shape) elif (problem == 'sr'): (A_fun, AT_fun) = setup_sr2(x_shape) y, noise = add_noise(A_fun(ori_img), noise_mean=noise_mean, noise_std=noise_std) if True:#show_img_progress: fig = plt.figure(problem) plt.gcf().clear() fig.canvas.set_window_title(problem) plt.subplot(1,3,1) plt.imshow(reshape_img_fun(ori_img), interpolation='nearest') plt.title('ori_img') plt.subplot(1,3,2) plt.imshow(reshape_img_fun(y), interpolation='nearest') plt.title('y') if (problem!='sr' and problem!='cs'): plt.subplot(1,3,3) plt.imshow(reshape_img_fun(mask), interpolation='nearest') plt.title('mask') plt.pause(0.00001) info = {'ori_img': ori_img, 'y': y, 'noise': noise, 'box_size': box_size, 'noise_std': noise_std, 'alpha': alpha, 'max_iter': max_iter, 'solver_tol': solver_tol, 'lambda_l1': lambda_l1} run_ours=True if run_ours: # ours (x, z, u, infos) = solve_pcaw(y, A_fun, AT_fun, lambda_l1,reshape_img_fun, head, invhead, mean, 
show_img_progress=show_img_progress, alpha=alpha, max_iter=max_iter, solver_tol=solver_tol) run_l1 = False if run_l1: # wavelet l1 (x, z, u, infos) = solve_l1_alt(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=show_img_progress, alpha=alpha, max_iter=max_iter, solver_tol=solver_tol) z1 = reshape_img(np.clip(z, 0.0, 1.0)) ori_img1 = reshape_img(np.clip(ori_img, 0.0, 1.0)) psnr_z = 10*np.log10( 1.0 /((np.linalg.norm(z1-ori_img1)**2)/np.prod(z1.shape))) print("psnr_z = ", psnr_z) z1 = reshape_img(np.clip(x, 0.0, 1.0)) psnr_x = 10*np.log10( 1.0 /((np.linalg.norm(z1-ori_img1)**2)/np.prod(z1.shape))) print("psnr_x = ", psnr_x) if True:#show_img_progress: fig = plt.figure('current_sol') plt.gcf().clear() fig.canvas.set_window_title('final') plt.subplot(1,3,1) plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest') plt.title('x') plt.subplot(1,3,2) plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest') plt.title('z') plt.subplot(1,3,3) plt.imshow(reshape_img_fun(np.clip(u, 0.0, 1.0)), interpolation='nearest') plt.title('netin') plt.pause(0.00001) fig = plt.figure('inpaint_center') plt.gcf().clear() fig.canvas.set_window_title('inpaint_center') plt.subplot(1,3,1) plt.imshow(reshape_img_fun(ori_img), interpolation='nearest') plt.title('ori_img') plt.subplot(1,3,2) plt.imshow(reshape_img_fun(y), interpolation='nearest') plt.title('y') if (problem!='sr' and problem!='cs'): plt.subplot(1,3,3) plt.imshow(reshape_img_fun(mask), interpolation='nearest') plt.title('mask') plt.pause(0.00001) return psnr_z,psnr_x class TrainingGenerator: def __init__(self): self.batch_size = 64 self.data_dir = '/home/bpt/onenet/du-admm/diff_unrolled_admm_onenet/img_align_celeba' self.img_height = 64 self.img_width = 64 import os cwd = os.getcwd() print("cwd",cwd) self.idg = tf.keras.preprocessing.image.ImageDataGenerator() self.iter = self.idg.flow_from_directory(self.data_dir, target_size=(self.img_width, self.img_height), color_mode='rgb', 
classes=['train'], class_mode='input', batch_size=1, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='bilinear') def __iter__(self): return self def __next__(self): return self.iter.__next__() """Import some data to play with""" def import_data(): import tensorflow_datasets as tfds dataset, metadata = tfds.load('downsampled_imagenet/64x64:2.0.0', with_info=True, as_supervised=False) #dataset = import_celeba_local() return dataset def import_celeba_local(): batch_size = 64 data_dir = 'du-admm/diff_unrolled_admm_onenet/img_align_celeba/train/' img_height = 64 img_width = 64 dataset = tf.data.Dataset.from_generator(TrainingGenerator, (tf.float32), (2, 1, 64, 64, 3)) dataset = dataset.map(lambda x:x[0,0,:,:,:]) return dataset """Import library for pca-wavelets""" import pca_wavelet_utils """Set up activation functions""" def scaledtanh(x): return tf.math.tanh(x*0.1) def scaledatanh(x): return tf.math.atanh(x)*10.0 """Build the model for pca-wavelet""" def build_model(dataset): from pca_wavelet_utils import build1D tf.keras.backend.set_floatx('float64') trainset = dataset['train'].map(lambda x:[pre_process_image(x['image'])]) testset = dataset['validation'].map(lambda x:[pre_process_image(x['image'])]) head, invhead = build1D(trainset,count=4, samplesize=1281149, keep_percent=1.0, flip=False) #, activity_regularizer=scaledtanh, inverse_activity_regularizer=scaledatanh) return head, invhead, trainset, testset """Save the model""" def save_model(): sample = next(iter(testset.shuffle(100)))[0] sample = tf.reshape(sample, [1,sample.shape[0], sample.shape[1], sample.shape[2]]) head._set_inputs(sample) head.save('/content/drive/My Drive/Colab Notebooks/data/imagenet/lfw-head-full.h5') out = head(sample) print("out.shape",out.shape) sample = invhead(out) invhead.save('/content/drive/My Drive/Colab Notebooks/data/imagenet/lfw-invhead-full.h5') """Load the model""" def load_model(): head = 
tf.keras.models.load_model('/content/drive/My Drive/Colab Notebooks/data/imagenet/lfw-head-full.h5', custom_objects={'MeanLayer': MeanLayer, 'SymmetricPadding2D':SymmetricPadding2D})
invhead = tf.keras.models.load_model('/content/drive/My Drive/Colab Notebooks/data/imagenet/lfw-invhead-full.h5')

"""Save the weights only"""

def save_weights(head, invhead, testset, file_name):
    """Persist the analysis ("head") and synthesis ("invhead") model weights.

    Writes three files next to `file_name`:
      - <file_name>-mean.json          serialized mean tensor of the last invhead layer
      - <file_name>-head-weights.h5    head weights
      - <file_name>-invhead-weights.h5 invhead weights

    A sample image is pushed through both models first so Keras builds the
    layer shapes before save_weights is called.
    """
    sample = next(iter(testset.shuffle(100)))[0]
    sample = tf.reshape(sample, [1,sample.shape[0], sample.shape[1], sample.shape[2]])
    out = head(sample)
    # Zeroed input: running it through the last invhead layer exposes that
    # layer's additive mean, which is what gets serialized below.
    sample = sample*0.0
    lastLayerIndex = 12
    lastLayer = invhead.get_layer(index=lastLayerIndex)
    mean = lastLayer(sample)
    tf.io.write_file(file_name + '-mean.json', tf.io.serialize_tensor(mean))
    head.save_weights(file_name + '-head-weights.h5')
    out = head(sample)
    print("out.shape",out.shape)
    sample = invhead(out)
    invhead.save_weights(file_name + '-invhead-weights.h5')

"""Read the weights back in. Need to reconstruct the architecture. To do that I run a small set of images through the build method."""

def load_weights(file_name, keep_percent, trainset, testset):
    """Rebuild the head/invhead architecture and load saved weights into it.

    The architecture is reconstructed by running 100 training images through
    build1D; a test sample is then pushed through both models so the layer
    shapes exist before load_weights is called.

    Returns (head, invhead, mean) where `mean` is the de-serialized mean
    tensor; it is also installed on invhead's last layer (+mean) and head's
    first layer (-mean).
    """
    from pca_wavelet_utils import build1D
    head, invhead = build1D(trainset.take(100),count=4, samplesize=100, keep_percent=keep_percent,flip=False)
    sample = next(iter(testset.shuffle(100)))[0]
    print("sample.shape",sample.shape)
    sample = tf.reshape(sample, [1,sample.shape[0], sample.shape[1], sample.shape[2]])
    print("after reshape: sample.shape",sample.shape)
    out = head(sample)
    head.load_weights(file_name + '-head-weights.h5')
    out = head(sample)
    print("out.shape",out.shape)
    sample = invhead(out)
    invhead.load_weights(file_name + '-invhead-weights.h5')
    mean = tf.io.parse_tensor(tf.io.read_file(file_name + '-mean.json'),out_type=tf.float64)
    lastLayerIndex = 12#8
    lastLayer = invhead.get_layer(index=lastLayerIndex)
    lastLayer.mean = mean
    firstLayer = head.get_layer(index=0)
    firstLayer.mean = -mean
    return head, invhead, mean

"""Check it has built OK"""

def check_build():
    """Visual + numeric sanity check of the head/invhead round trip.

    Shows the original image, two transform slices and the reconstruction,
    then prints PSNR and normalized cross-correlation of the round trip.
    NOTE(review): relies on module-level `testset`, `head`, `invhead`
    globals — presumably set earlier in the notebook; confirm before reuse.
    """
    plt.subplot(221)
    plt.title('Original')
    sample = next(iter(testset.shuffle(100)))[0]
    plt.imshow(sample)
    print("sample.shape",sample.shape)
    pred = head([sample])
    plt.subplot(222)
    plt.title('Slice')
    plt.imshow(pred[0,:,:,0]+0.5)
    plt.subplot(223)
    plt.title('Slice')
    plt.imshow(pred[0,:,:,1]+0.5)
    print("pred.shape",pred.shape)
    recon = invhead(pred)[0]
    print("recon.shape",recon.shape)
    plt.subplot(224)
    plt.title('Filtered')
    plt.imshow(recon)
    print("sample.dtype",sample.dtype)
    print("recon[0].dtype",recon.dtype)
    print("np.prod(sample.shape)",np.prod(sample.shape))
    # PSNR with peak value 1.0 (images assumed in [0,1] — see np.clip use below)
    psnr = 10*np.log10( 1.0 /((np.linalg.norm(recon-sample)**2)/np.prod(sample.shape)))
    ncc = np.corrcoef(tf.reshape(sample, [-1]), tf.reshape(recon, [-1]))
    print("psnr = ", psnr)
    print("ncc = ", ncc)
    print("sample[30:34,30:34,0]",sample[30:34,30:34,0])
    print("recon[30:34,30:34,0]",recon[30:34,30:34,0])

"""Solver for PCA wavelet"""

# A_fun, AT_fun takes a vector (d,1) or (d,) as input
def solve_pcaw(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, head, invhead, mean, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):
    """ See Wang, Yu, Wotao Yin, and Jinshan Zeng. "Global convergence of ADMM in nonconvex nonsmooth optimization." arXiv preprint arXiv:1511.06324 (2015). It provides convergence condition: basically with large enough alpha, the program will converge. """
    # ADMM with an L1 proximal step taken in the PCA-wavelet transform
    # domain (soft-threshold on head(z+u), mapped back through invhead).
    # Returns (x, z, u, infos) where infos holds per-iteration diagnostics.
    obj_lss = np.zeros(max_iter)
    x_zs = np.zeros(max_iter)
    u_norms = np.zeros(max_iter)
    times = np.zeros(max_iter)

    ATy = AT_fun(y)
    x_shape = ATy.shape
    d = np.prod(x_shape)

    def vec(x):
        return tf.reshape(x, [-1])

    # Matvec for the regularized normal equations (A^T A + alpha I) z = b.
    def A_cgs_fun(x):
        x = tf.reshape(x,x_shape)
        y = AT_fun(A_fun(x)) + alpha * x
        return vec(y)
    A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')

    def compute_p_inv_A(b, z0):
        # Conjugate-gradient-squared solve of the z-update, warm-started at z0.
        (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)
        if info > 0:
            print('cgs convergence to tolerance not achieved')
        elif info <0:
            print('cgs gets illegal input or breakdown')
        z = tf.reshape(z, x_shape)
        return z

    # Unregularized A^T A matvec, used only by compute_init (initialization helper).
    def A_cgs_fun_init(x):
        x = tf.reshape(x, x_shape)
        y = AT_fun(A_fun(x))
        return vec(y)
    A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')

    def compute_init(b, z0):
        (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)
        if info > 0:
            print('cgs convergence to tolerance not achieved')
        elif info <0:
            print('cgs gets illegal input or breakdown')
        z = tf.reshape(z,x_shape)
        return z

    # initialize z and u
    z = tf.reshape(mean,x_shape)
    u = np.zeros(x_shape)

    plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)

    start_time = timeit.default_timer()

    # NOTE(review): `iter` shadows the builtin inside this loop (kept as-is).
    for iter in range(max_iter):

        # x-update: soft-threshold in the transform domain, map back to image space
        net_input = z+u
        Wzu = head([net_input])
        q = tfp.math.soft_threshold(Wzu, lambda_l1/alpha)
        x = invhead(q)[0]

        # z-update: solve (A^T A + alpha I) z = A^T y + alpha (x - u)
        b = ATy + alpha * (x - u)
        z = compute_p_inv_A(b, z)

        # u-update: dual ascent on the x == z constraint
        u += z - x;

        if show_img_progress:
            fig = plt.figure('current_sol')
            plt.gcf().clear()
            fig.canvas.set_window_title('iter %d' % iter)
            plt.subplot(1,3,1)
            plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)
            plt.title('x')
            plt.subplot(1,3,2)
            plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)
            plt.title('z')
            plt.subplot(1,3,3)
            plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)
            plt.title('netin')
            plt.pause(0.00001)

        # Diagnostics: data-fit objective, primal residual, dual variable norm.
        obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))
        x_z = np.sqrt(np.mean(np.square(x-z)))
        u_norm = np.sqrt(np.mean(np.square(u)))

        obj_lss[iter] = obj_ls
        x_zs[iter] = x_z
        u_norms[iter] = u_norm
        times[iter] = timeit.default_timer() - start_time

        # Stop once the primal residual (x vs z agreement) is small enough.
        if x_z < solver_tol:
            break

    infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms, 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1, 'max_iter':max_iter, 'solver_tol':solver_tol}

    return (x, z, u, infos)

def extract_mean(invhead, testset):
    """Return the mean tensor of invhead's last layer.

    A zeroed test image is pushed through that layer so only the layer's
    additive mean remains in the output.
    """
    lastLayerIndex = 12
    sample = next(iter(testset.take(1)))[0]
    print("sample.shape",sample.shape)
    sample = sample*0.0
    lastLayer = invhead.get_layer(index=lastLayerIndex)
    mean = lastLayer([sample])[0]
    print("mean.shape",mean.shape)
    return mean

"""Run the solver"""

def run_solver_single():
    """Run the solver once on a random test image (experiment driver).

    NOTE(review): relies on module-level `testset`, `solve_inpaint_center`
    and `reshape_img` globals — presumably defined earlier in the notebook.
    """
    problem='inpaint_center'#'sr'
    print('problem', problem)
    ori_img = next(iter(testset.shuffle(1000)))[0]
    show_img_progress = True#False#
    # No noise alpha = 0.1, lambda=0.0005 seems to work well (in painting problems, at least)
    # Noise = 0.1, alpha = 0.3, lambda = 0.0015, or alpha = 0.6, lambda = 0.003 seem to work about the same
    # Super resolution, no noise settings seems OK
    # Compressive Sensing, 0.1 and 0.005 worked quite well
    alpha = 0.1
    max_iter = 100
    solver_tol = 1e-5
    alpha_update_ratio = 1.0
    alpha_l1 = 0.3
    lambda_l1 = 0.0000001
    max_iter_l1 = 1000
    solver_tol_l1 = 1e-4

    box_size = int(0.3 * ori_img.shape[1])#blockwise - 0.3*shape[1], scattere - 0.1*shape[1]
    noise_std = 0.0
    results = solve_inpaint_center(ori_img, reshape_img, box_size=box_size, noise_std=noise_std, alpha=alpha, lambda_l1=lambda_l1, max_iter=max_iter, solver_tol=solver_tol, problem=problem)#'inpaint_center')#

"""Run the solver on all images in testset and calculate results"""

def run_solver_all(head, invhead, mean, testset, problem):
    """Run the solver over (up to) 100 test images and print PSNR statistics.

    Accumulates mean and (sample) variance of the per-image PSNRs for both
    the x and z iterates, printing running totals per image and the final
    mean/variance at the end.
    """
    print("problem", problem)
    it = iter(testset.take(100))
    show_img_progress = False#True#
    alpha = 0.3
    max_iter = 100
    solver_tol = 1e-5
    alpha_update_ratio = 1.0
    alpha_l1 = 0.1
    lambda_l1 = 0.006
    max_iter_l1 = 1000
    solver_tol_l1 = 1e-4
    noise_std = 0.0
    mean_x = 0.0
    mean_z = 0.0
    sd_x = 0.0
    sd_z = 0.0
    count = 0.0
    print("alpha", alpha, "lambda_l1", lambda_l1)
    for x in it:
        ori_img = x[0]
        print("ori_img.shape", ori_img.shape, flush=True)
        box_size = int(0.1 * ori_img.shape[1])
        psnr_x,psnr_z = solve_inpaint_center(ori_img, reshape_img, head, invhead, mean, box_size=box_size, noise_std=noise_std, alpha=alpha, lambda_l1=lambda_l1, max_iter=max_iter, solver_tol=solver_tol, problem=problem, show_img_progress=False)
        mean_x += psnr_x
        sd_x += psnr_x*psnr_x
        mean_z += psnr_z
        sd_z += psnr_z*psnr_z
        count += 1
        print("count",count, "mean_x", mean_x, "sd_x", sd_x, "mean_z",mean_z,"sd_z",sd_z)
    mean_x /= count
    mean_z /= count
    # Convert sums of squares to sample variances: (sum x^2 - n*mean^2)/(n-1)
    sd_x -= count*mean_x*mean_x
    sd_z -= count*mean_z*mean_z
    sd_x /= (count-1.0)
    sd_z /= (count-1.0)
    print("mean_x",mean_x,"sd_x",sd_x)
    print("mean_z",mean_z,"sd_z",sd_z)

def main():
    """Entry point: load data, load the saved model weights, run the solver."""
    print("python main function")
    print("importing data")
    dataset = import_data()
    #print("building model")
    #head, invhead, trainset, testset = build_model(dataset)
    #print("saving weights")
    #mean = extract_mean(invhead, testset)
    #save_weights(head, invhead, testset, 'imagenet') #celeba-190')#'imagenet-100k')#head, invhead, testset, file_name)
    #tf.keras.backend.set_floatx('float64')
    #dataset_resize = dataset.map(lambda x:[pre_process_image(x['image'])])
    #testset = dataset_resize.take(500)
    #trainset = dataset_resize.skip(500)
    tf.keras.backend.set_floatx('float64')
    trainset = dataset['train'].map(lambda x:[pre_process_image(x['image'])])
    testset = dataset['validation'].map(lambda x:[pre_process_image(x['image'])])
    print("loading model")
    head, invhead, mean = load_weights('imagenet', 1.0, trainset, testset)
    print("running solver")
    run_solver_all(head, invhead, mean, testset, 'scattered_inpaint')#'cs')#inpaint_pixelwise')#inpaint_center')

if __name__ == '__main__':
    main()
11,483
04077e0f6613009b880e2a29e0f43849d306c842
from tkinter import * window=Tk()#创建一个窗口 label=Label(window,text="Welcome to Python")#创建一个标签,小构建类 button=Button(window,text="Click me") label.pack()#把标签放进窗口中 button.pack() window.mainloop()
11,484
ac753a894f99ed9ffb0c962086e3beae1456ec14
import numpy as np import cv2 from PIL import Image import pytesseract #from ocrhelper import preprocess #failed attempt def get_cropped(image, area): (x, y, w, h) = area new_img = image[y: y + h, x: x + w] return new_img def split(image,row,column, area): width = area[2] height = area[3] unitw = width/column # 19 px unith = height/row all_imgs = [] for j in range(0, 2): #offset = j for i in range (0,21): blockimg = image[unith * j:unith * (j + 1), unitw * i: unitw * (i + 1)] #cv2.imwrite("images/unit%d_%d.png" %(i,j), blockimg) all_imgs.append( blockimg) return all_imgs ##################### rows = 9 columns = 21 lefttopwh = (200, 65, 399, 162) file = "images/grp_image.ashx.png" filecrop = "images/cropped.png" img = cv2.imread(file) crop_img = get_cropped(img,lefttopwh) #cv2.imwrite(filecrop, crop_img) image_array = split(crop_img,rows,columns,lefttopwh) (i,j) = (20,0) pick_one = image_array[j*columns + i] file_picked = "images/picked.png" #cv2.imwrite(file_picked,pick_one) img = Image.fromarray(pick_one).resize((38,36)) text = pytesseract.image_to_string(img,boxes=True,config='-psm 10 -oem 3 -c tessedit_char_whitelist=0123456789') print "result" + text print "end"
11,485
326c672fcb8ffdf170d9e272803f1d680408acef
## NASA API Demo
## Keep your personal NASA API key out of this script: create a sibling file
## named "NASA_API_Credentials.py" (same folder as this demo) containing a
## single variable, e.g.
##     api_key = "your_api_key____This_should_be_a_string_of_letters_and_numbers"

## Pull the api_key variable defined in that credentials file into this one.
from NASA_API_Credentials import api_key

import requests  # basic python package for making http requests

## This demo assumes you have read the associated API tutorial
## (NASA_API_Profile.md), which explains the Mars Rover Photos API.
##
## Below, Python runs the three query examples from that tutorial.

# Every http request to the Rover API starts with this base URL.
base_url = "https://api.nasa.gov/mars-photos/api/v1/rovers/"


### Query 1
### Pictures 26 through 50 taken by Opportunity's panoramic camera
### on June 3, 2015.
rover = "opportunity"
earth_date = "2015-6-3"
camera = "pancam"
page_number = "2"

# Combine the pieces into one request URL.
request_url = f"{base_url}{rover}/photos?earth_date={earth_date}&camera={camera}&page={page_number}&api_key={api_key}"

# requests.get() fetches the page; .json() decodes the JSON body into
# Python data structures.
query1 = requests.get(request_url).json()

# Printing this query shows the same output you would get by pasting the
# URL directly into your browser.
print(query1)

# To do something with this information (e.g. display it on a website),
# extract pieces from the decoded JSON — a topic for another tutorial.

### Query 2
### All the photos Spirit took on its first day on Mars.
rover = "spirit"
mars_sol = "1"

request_url = f"{base_url}{rover}/photos?sol={mars_sol}&api_key={api_key}"

query2 = requests.get(request_url).json()
print(query2)

### Query 3
### The 101st to 125th photo Spirit took on its first day on Mars
### (page 5, at 25 photos per page).
rover = "spirit"
mars_sol = "1"
page_number = "5"

request_url = f"{base_url}{rover}/photos?sol={mars_sol}&page={page_number}&api_key={api_key}"

query3 = requests.get(request_url).json()
print(query3)
11,486
5437cf1821104ce416de5a9ac28e98d8e24f25f6
#!/usr/bin/env python
from ToolBox import parse_options_and_init_log
# have to do this first or ROOT masks the -h messages
opts, parser = parse_options_and_init_log()

from L1Analysis import L1Ana, L1Ntuple
from analysis_tools.plotting import HistManager
from analysis_tools.selections import MuonSelections, Matcher
import ROOT as root
import re

def parse_options_plotRates(parser):
    """ Adds often used options to the OptionParser... """
    # Registers the "plotRates" sub-command with its rate-calculation
    # options (event count, bunches, PU, cross section, inst. luminosity,
    # extra scale factor, legacy-comparison flag) and returns parsed opts.
    parsers = parser.add_subparsers()
    sub_parser = parsers.add_parser("plotRates")
    sub_parser.add_argument("-i", "--interactive", dest="interactive", action='store_false', help="Draw plots on screen.")
    sub_parser.add_argument("-n", "--nevents", dest="nevents", default=1, type=int, help="Total nmumber of events")
    sub_parser.add_argument("-b", "--bunches", dest="bunches", default=0, type=int, help="Number of colliding bunches")
    sub_parser.add_argument("--pu", dest="pu", default=20, type=int, help="Average PU. default=20")
    sub_parser.add_argument("--xsect", dest="xsect", default=80, type=float, help="Total cross section in mb. default=80 mb")
    sub_parser.add_argument("--instlumi", dest="instlumi", default=1.2e34, type=float, help="Instantaneous luminosity. default=1.2e-34 cm-2s-1")
    sub_parser.add_argument("--scale", dest="scale", default=1., type=float, help="Additional scale factor for rate calculate")
    sub_parser.add_argument("-l", "--legacy", dest="legacy", action='store_true', help="Draw plots relative to legacy.")

    opts, unknown = parser.parse_known_args()
    return opts

def set_root_style():
    """Apply the common ROOT plot style (fonts, markers, pad margins).

    NOTE(review): `font` is a module-level value defined elsewhere in this
    file — confirm it is set before this is called.
    """
    root.gStyle.SetTitleFont(font)
    root.gStyle.SetStatFont(font)
    root.gStyle.SetTextFont(font)
    root.gStyle.SetLabelFont(font)
    root.gStyle.SetLegendFont(font)
    root.gStyle.SetMarkerStyle(20)
    root.gStyle.SetOptStat(0)
    root.gStyle.SetOptFit(0)
    root.gStyle.SetOptTitle(0)
    root.gPad.SetTopMargin(0.08)
    root.gPad.SetLeftMargin(0.14)
    root.gPad.SetRightMargin(0.06)
    root.gPad.SetTickx(1)
    root.gPad.SetTicky(1)

def plot_hists(hm, hDefs, xTitle=None, yTitle='# muons', threshold=False, normToBinWidth=False, canvasPrefix='', notes=None, scaleFactor=1., data=False):
    # Derive the canvas/object name from the first histogram definition
    # (and its denominator, for ratio plots).
    den = hDefs[0]['den']
    if den:
        name = canvasPrefix+hDefs[0]['num']+'_over_'+den
    else:
        name = canvasPrefix+hDefs[0]['num']
    if normToBinWidth and not threshold and not den:
        name = 'normToBinWidth_'+name

    # setup legend according to how many histograms are in the plot
    legYmin = 0.9-0.04*len(hDefs)
    legXmin = 0.68
    legXmax = 0.9
    canvWidth = 600
    if legYmin < 0.6:
        legXmin = 0.8
        legXmax = 1.
canvWidth = 730 legend = root.TLegend(legXmin, legYmin, legXmax, 0.9) legend.SetTextFont(font) legend.SetTextSize(0.03) legend.SetBorderSize(0) legend.SetFillColor(19) legend.SetFillStyle(0) #legend.SetNColumns(2) legEntries = [] hs = [] hStack = root.THStack() # get all the histograms and set their plot style for hDef in hDefs: if threshold: h = hm.get_threshold_hist(hDef['num']).Clone() if den: hDen = hm.get_threshold_hist(den) h.Divide(h, hDen, 1, 1, "b") else: if den: h = hm.get_ratio(hDef['num'], den).Clone() else: h = hm.get(hDef['num']).Clone() if normToBinWidth and not threshold and not den: for bin in range(1, h.GetNbinsX()+1): h.SetBinContent(bin, h.GetBinContent(bin) / h.GetBinWidth(bin)) h.SetBinError(bin, h.GetBinError(bin) / h.GetBinWidth(bin)) elif normToBinWidth: print 'Ignoring normToBinWidth flag for threshold or ratio plots' if scaleFactor != 1.: h.Scale(scaleFactor) h.SetLineColor(hDef['lc']) h.SetLineStyle(hDef['ls']) h.SetLineWidth(2) legStyle = 'l' if hDef['fc']: h.SetFillColor(hDef['fc']) h.SetLineWidth(1) legStyle = 'f' # if a fill colour is defined stack this histogram with others hStack.Add(h) legEntries.append(legend.AddEntry(h, hDef['legtext'], legStyle)) hs.append(h) # replace histograms to be stacked with stack histograms if hStack.GetNhists() > 0: canvas_name = 'c_rates_stacked_'+name stackHistos = hStack.GetStack() j = len(stackHistos)-1 for i, hDef in enumerate(hDefs): if hDef['fc']: hs[i] = stackHistos[j].Clone() j -= 1 else: canvas_name = 'c_rates_'+name if scaleFactor != 1.: canvas_name += '_scaled' # create canvas and draw on it canvas_title = canvas_name c = root.TCanvas(canvas_name, canvas_title, 100, 100, canvWidth, 600) c.cd() if name[-2:] == 'pt' and not den: c.SetLogy(True) set_root_style() if legYmin < 0.6: root.gPad.SetRightMargin(0.2) if xTitle: hs[0].GetXaxis().SetTitle(xTitle) hs[0].GetYaxis().SetTitleOffset(1.5) hs[0].GetYaxis().SetTitle(yTitle) maxBinValue = hs[0].GetBinContent(hs[0].GetMaximumBin()) if not 
c.GetLogy(): yMax = 1.2*maxBinValue if maxBinValue <= 1.: yMax = 1.3 hs[0].GetYaxis().SetRangeUser(0., yMax) # draw hs[0].SetLineWidth(2) legEntries[0].SetObject(hs[0]) legEntries[0].SetOption(legEntries[0].GetOption()+'le') hs[0].Draw('hist') for h in hs[1:]: h.Draw('histsame') hs[0].Draw('same') hs[0].Draw('sameaxis') # draw vertical lines to mark TF boundaries lines = [] if name[-3:] == 'eta': lines.append(root.TLine(-0.83, 0., -0.83, yMax)) lines[-1].SetLineStyle(root.kDotted) lines[-1].Draw('same') lines.append(root.TLine(-1.24, 0., -1.24, yMax)) lines[-1].SetLineStyle(root.kDotted) lines[-1].Draw('same') lines.append(root.TLine(0.83, 0., 0.83, yMax)) lines[-1].SetLineStyle(root.kDotted) lines[-1].Draw('same') lines.append(root.TLine(1.24, 0., 1.24, yMax)) lines[-1].SetLineStyle(root.kDotted) lines[-1].Draw('same') legend.Draw('same') tex = root.TLatex() tex.SetNDC() tex.SetTextFont(font) tex.SetTextSize(0.04) #tex.DrawLatex(0.484, 0.93, 'Simulation, 13 TeV') if canvWidth > 600: if data: #tex.DrawLatex(0.48, 0.93, 'CMS preliminary, 13 TeV') tex.DrawLatex(0.48, 0.93, 'CMS internal, 13 TeV') else: tex.DrawLatex(0.484, 0.93, 'CMS Simulation, 13 TeV') else: if data: #tex.DrawLatex(0.551, 0.93, 'CMS preliminary, 13 TeV') tex.DrawLatex(0.551, 0.93, 'CMS internal, 13 TeV') else: tex.DrawLatex(0.555, 0.93, 'CMS Simulation, 13 TeV') if notes: tex.SetTextSize(0.035) for note in notes: tex.DrawLatex(note[0], note[1], note[2]) c.Modified() c.Update() return [c, hs, legend, lines, tex] def print_rates(hm, hName, scaleFactor=1.): hNames = ['gmt_'+hName.replace('qmin12', 'qmin8'), 'ugmt_'+hName, 'bmtf_ugmt_'+hName, 'omtf_ugmt_'+hName, 'emtf_ugmt_'+hName] print '===== Rates =====' print hName print '' histos = [] print 'System 16 GeV 20 GeV 25 GeV' for name in hNames: histos.append(hm.get_threshold_hist(name).Clone()) if scaleFactor != 1.: histos[-1].Scale(scaleFactor) bin16 = histos[-1].FindBin(16) bin20 = histos[-1].FindBin(20) bin25 = histos[-1].FindBin(25) print '{name} 
rate: {sixteengev:>7.2f} kHz {twentygev:>7.2f} kHz {twentyfivegev:>7.2f} kHz'.format(name=name.split('_')[0], sixteengev=histos[-1].GetBinContent(bin16), twentygev=histos[-1].GetBinContent(bin20), twentyfivegev=histos[-1].GetBinContent(bin25)) print '\nThreshold GMT uGMT ratio' for threshold in [0, 3, 5, 7, 10, 12, 14, 16, 18, 20, 22, 25, 30, 40, 50, 60]: gmtBinNr = histos[0].FindBin(threshold) ugmtBinNr = histos[1].FindBin(threshold) gmtCont = histos[0].GetBinContent(gmtBinNr) ugmtCont = histos[1].GetBinContent(ugmtBinNr) gmtErr = histos[0].GetBinError(gmtBinNr) ugmtErr = histos[1].GetBinError(ugmtBinNr) ratio = -1. if gmtCont != 0: ratio = ugmtCont/gmtCont print '{threshold:>3} GeV: {gmt:>8.3f} +/- {gmterr:>5.3f} kHz {ugmt:>8.3f} +/- {ugmterr:>5.3f} kHz {ratio:>8.3f}'.format(threshold=threshold, gmt=gmtCont, gmterr=gmtErr, ugmt=ugmtCont, ugmterr=ugmtErr, ratio=ratio) print '=================' def hist_styles(stacked=False): styles = {} styles['gmt'] = {'lc':root.kCyan, 'ls':root.kSolid, 'fc':None, 'legtext':'GMT'} styles['ugmt'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'uGMT'} if stacked: styles['bmtf_ugmt'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue-4, 'legtext':'BMTF uGMT'} styles['omtf_ugmt'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen-4, 'legtext':'OMTF uGMT'} styles['emtf_ugmt'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed-4, 'legtext':'EMTF uGMT'} styles['bmtf'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue-4, 'legtext':'BMTF'} styles['omtf'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen-4, 'legtext':'OMTF'} styles['emtf'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed-4, 'legtext':'EMTF'} else: styles['bmtf_ugmt'] = {'lc':root.kBlue-4, 'ls':root.kSolid, 'fc':None, 'legtext':'BMTF uGMT'} styles['omtf_ugmt'] = {'lc':root.kGreen-4, 'ls':root.kSolid, 'fc':None, 'legtext':'OMTF uGMT'} styles['emtf_ugmt'] = {'lc':root.kRed-4, 'ls':root.kSolid, 'fc':None, 'legtext':'EMTF uGMT'} 
styles['bmtf_ugmt_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'BMTF uGMT'} styles['omtf_ugmt_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'OMTF uGMT'} styles['emtf_ugmt_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'EMTF uGMT'} styles['bmtf'] = {'lc':root.kBlue-4, 'ls':root.kSolid, 'fc':None, 'legtext':'BMTF'} styles['omtf'] = {'lc':root.kGreen-4, 'ls':root.kSolid, 'fc':None, 'legtext':'OMTF'} styles['emtf'] = {'lc':root.kRed-4, 'ls':root.kSolid, 'fc':None, 'legtext':'EMTF'} styles['bmtf_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'BMTF'} styles['omtf_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'OMTF'} styles['emtf_q'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':None, 'legtext':'EMTF'} styles['ugmt_q0'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed+3, 'legtext':'uGMT q0'} styles['ugmt_q1'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed, 'legtext':'uGMT q1'} styles['ugmt_q2'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kOrange+8, 'legtext':'uGMT q2'} styles['ugmt_q3'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kOrange, 'legtext':'uGMT q3'} styles['ugmt_q4'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kYellow, 'legtext':'uGMT q4'} styles['ugmt_q5'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen, 'legtext':'uGMT q5'} styles['ugmt_q6'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen+3, 'legtext':'uGMT q6'} styles['ugmt_q7'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kCyan, 'legtext':'uGMT q7'} styles['ugmt_q8'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kCyan+3, 'legtext':'uGMT q8'} styles['ugmt_q9'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kAzure+7, 'legtext':'uGMT q9'} styles['ugmt_q10'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue, 'legtext':'uGMT q10'} styles['ugmt_q11'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue+3, 'legtext':'uGMT q11'} styles['ugmt_q12'] = {'lc':root.kBlack, 
'ls':root.kSolid, 'fc':root.kViolet, 'legtext':'uGMT q12'} styles['ugmt_q13'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kMagenta, 'legtext':'uGMT q13'} styles['ugmt_q14'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kMagenta+3, 'legtext':'uGMT q14'} styles['ugmt_q15'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kViolet+3, 'legtext':'uGMT q15'} styles['tf_q0'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed+3, 'legtext':'TF q0'} styles['tf_q1'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kRed, 'legtext':'TF q1'} styles['tf_q2'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kOrange+8, 'legtext':'TF q2'} styles['tf_q3'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kOrange, 'legtext':'TF q3'} styles['tf_q4'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kYellow, 'legtext':'TF q4'} styles['tf_q5'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen, 'legtext':'TF q5'} styles['tf_q6'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kGreen+3, 'legtext':'TF q6'} styles['tf_q7'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kCyan, 'legtext':'TF q7'} styles['tf_q8'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kCyan+3, 'legtext':'TF q8'} styles['tf_q9'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kAzure+7, 'legtext':'TF q9'} styles['tf_q10'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue, 'legtext':'TF q10'} styles['tf_q11'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kBlue+3, 'legtext':'TF q11'} styles['tf_q12'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kViolet, 'legtext':'TF q12'} styles['tf_q13'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kMagenta, 'legtext':'TF q13'} styles['tf_q14'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kMagenta+3, 'legtext':'TF q14'} styles['tf_q15'] = {'lc':root.kBlack, 'ls':root.kSolid, 'fc':root.kViolet+3, 'legtext':'TF q15'} return styles def plot_hists_standard(hm, hName, den=None, xTitle='', yTitle='# muons', threshold=False, stacked=False, 
normToBinWidth=False, tfMuonOrig='ugmt', reg='', scaleFactor=1., data=False): styles = hist_styles(stacked) if tfMuonOrig == 'ugmt': ugmt_str = '_ugmt' prefix = '' elif tfMuonOrig == 'tf': ugmt_str = '' prefix = 'tf_' ugmt_dict = {'num':'ugmt_'+hName, 'den':den} bmtf_dict = {'num':'bmtf'+ugmt_str+'_'+hName, 'den':den} omtf_dict = {'num':'omtf'+ugmt_str+'_'+hName, 'den':den} emtf_dict = {'num':'emtf'+ugmt_str+'_'+hName, 'den':den} ugmt_dict.update(styles['ugmt']) bmtf_dict.update(styles['bmtf'+ugmt_str]) omtf_dict.update(styles['omtf'+ugmt_str]) emtf_dict.update(styles['emtf'+ugmt_str]) hDefs = [] if reg == '': hDefs.append(ugmt_dict) hDefs.append(bmtf_dict) hDefs.append(omtf_dict) hDefs.append(emtf_dict) elif reg == 'b': hDefs.append(bmtf_dict) prefix += 'bmtf_' elif reg == 'o': hDefs.append(omtf_dict) prefix += 'omtf_' elif reg == 'e': hDefs.append(emtf_dict) prefix += 'emtf_' if plotLegacy: if den: gmt_dict = {'num':den, 'den':den} gmt_dict.update(styles['gmt']) else: gmt_dict = {'num':'gmt_'+hName.replace('qmin12', 'qmin8'), 'den':den} gmt_dict.update(styles['gmt']) hDefs.append(gmt_dict) # extract eta range from histogram name eta_number_strs = re.findall(r'[\d\.\d]+', hName[hName.find('EtaMin')+6:hName.find('EtaMax')+12]) if len(eta_number_strs) > 1: note_str = eta_number_strs[0]+' < |#eta| < '+eta_number_strs[1] notes = [[0.17, 0.86, note_str]] if den: den_eta_number_strs = re.findall(r'[\d\.\d]+', den[den.find('EtaMin')+6:den.find('EtaMax')+12]) if len(den_eta_number_strs) > 1 and eta_number_strs != den_eta_number_strs: den_note_str = den_eta_number_strs[0]+' < |#eta^{GMT}| < '+den_eta_number_strs[1] notes.append([0.17, 0.81, den_note_str]) else: notes = None return plot_hists(hm, hDefs, xTitle, yTitle, threshold, normToBinWidth, prefix, notes, scaleFactor, data) def plot_hists_qstack(hm, hName, den=None, xTitle='', yTitle='# muons', threshold=False, stacked=False, normToBinWidth=False, tfMuonOrig='ugmt', reg='', data=False): styles = hist_styles(False) if 
tfMuonOrig == 'ugmt': ugmt_str = '_ugmt' style_str = 'ugmt' prefix = 'q_' elif tfMuonOrig == 'tf': ugmt_str = '' style_str = 'tf' prefix = 'q_tf_' hDefs = [] if reg == '': ugmt_dict = {'num':'ugmt_'+hName, 'den':den} ugmt_dict.update(styles['ugmt']) hDefs.append(ugmt_dict) for q in reversed(range(16)): ugmt_q_dict = {'num':'ugmt_'+hName.replace('qmin12', 'q{q}'.format(q=q)), 'den':den} ugmt_q_dict.update(styles['ugmt_q{q}'.format(q=q)]) hDefs.append(ugmt_q_dict) elif reg == 'b': bmtf_dict = {'num':'bmtf'+ugmt_str+'_'+hName, 'den':den} bmtf_dict.update(styles['bmtf'+ugmt_str+'_q']) hDefs.append(bmtf_dict) for q in reversed(range(16)): bmtf_q_dict = {'num':'bmtf'+ugmt_str+'_'+hName.replace('qmin12', 'q{q}'.format(q=q)), 'den':den} bmtf_q_dict.update(styles[style_str+'_q{q}'.format(q=q)]) hDefs.append(bmtf_q_dict) prefix += 'bmtf_' elif reg == 'o': omtf_dict = {'num':'omtf'+ugmt_str+'_'+hName, 'den':den} omtf_dict.update(styles['omtf'+ugmt_str+'_q']) hDefs.append(omtf_dict) for q in reversed(range(16)): omtf_q_dict = {'num':'omtf'+ugmt_str+'_'+hName.replace('qmin12', 'q{q}'.format(q=q)), 'den':den} omtf_q_dict.update(styles[style_str+'_q{q}'.format(q=q)]) hDefs.append(omtf_q_dict) prefix += 'omtf_' elif reg == 'e': emtf_dict = {'num':'emtf'+ugmt_str+'_'+hName, 'den':den} emtf_dict.update(styles['emtf'+ugmt_str+'_q']) hDefs.append(emtf_dict) for q in reversed(range(16)): emtf_q_dict = {'num':'emtf'+ugmt_str+'_'+hName.replace('qmin12', 'q{q}'.format(q=q)), 'den':den} emtf_q_dict.update(styles[style_str+'_q{q}'.format(q=q)]) hDefs.append(emtf_q_dict) prefix += 'emtf_' if plotLegacy: if den: gmt_dict = {'num':den, 'den':den} gmt_dict.update(styles['gmt']) else: gmt_dict = {'num':'gmt_'+hName.replace('qmin12', 'qmin8'), 'den':den} gmt_dict.update(styles['gmt']) hDefs.append(gmt_dict) # extract eta range from histogram name eta_number_strs = re.findall(r'[\d\.\d]+', hName[hName.find('EtaMin')+6:hName.find('EtaMax')+12]) if len(eta_number_strs) > 1: note_str = 
eta_number_strs[0]+' < |#eta| < '+eta_number_strs[1] notes = [[0.55, 0.86, note_str]] if den: den_eta_number_strs = re.findall(r'[\d\.\d]+', den[den.find('EtaMin')+6:den.find('EtaMax')+12]) if len(den_eta_number_strs) > 1 and eta_number_strs != den_eta_number_strs: den_note_str = den_eta_number_strs[0]+' < |#eta^{GMT}| < '+den_eta_number_strs[1] notes.append([0.55, 0.81, den_note_str]) else: notes = None return plot_hists(hm, hDefs, xTitle, yTitle, threshold, normToBinWidth, prefix, notes, data=data) def main(): opts = parse_options_plotRates(parser) plotLegacy = opts.legacy batchRun = opts.interactive if batchRun: root.gROOT.SetBatch(True) # calculate the scale factor for rate in Hz orbitFreq = 11245.6 nCollBunches = opts.bunches nZeroBiasEvents = opts.nevents crossSect = opts.xsect instLumi = opts.instlumi pu = opts.pu thisIsData=True # determine that this is MC if there is no number of colliding bunches given (defaults to 0 then) if nCollBunches == 0: print "No number of colliding bunches given. 
Assuming this is MC" print "Using {instLumi} cm-2s-1 as instantaneous luminosity, {crossSect} mb as cross section, and {pu} as average number of pileup to determine number of colliding bunches.".format(instLumi=instLumi, crossSect=crossSect, pu=pu) nCollBunches = round(instLumi * crossSect*1e-27 / (pu * orbitFreq)) thisIsData=False else: print "Assuming this is data" convFactorToHz = orbitFreq * nCollBunches / nZeroBiasEvents print 'Conversion factor to rate in Hz with {orbitFreq} Hz orbit frequency, {nCollBunches} colliding bunches and {nZeroBiasEvents} analyzed zero bias events: {convFactorToHz}'.format(orbitFreq=orbitFreq, nCollBunches=nCollBunches, nZeroBiasEvents=nZeroBiasEvents, convFactorToHz=convFactorToHz) if opts.scale != 1.: convFactorToHz *= opts.scale print 'Conversion factor after applying additinoal scale factor of {sf}: {convFactorToHz}'.format(sf=opts.scale, convFactorToHz=convFactorToHz) L1Ana.init_l1_analysis() print "" hm = HistManager(filename=opts.fname) # holds the canvases, histograms, etc. 
objects = [] ########################################################################## # uGMT kinematic variables objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', xTitle='#eta', yTitle='# muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', xTitle='#eta', yTitle='# muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', xTitle='#eta', yTitle='', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', xTitle='#eta', yTitle='', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin0_qmin12_phi', xTitle='#phi', yTitle='# muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin18_qmin12_phi', xTitle='#phi', yTitle='# muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin0_qual', xTitle='#mu quality', yTitle='# muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin18_qual', xTitle='#mu 
quality', yTitle='# muons', stacked=True, data=thisIsData)) # uGMT rates for regions objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.1_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) print 'Rates' print_rates(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', scaleFactor=convFactorToHz / 1000.) print_rates(hm, 'highest_muon_absEtaMin0_absEtaMax2.1_qmin12_pt', scaleFactor=convFactorToHz / 1000.) 
objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin4_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.1_qmin4_pt', xTitle='p_{T} (GeV/c)', yTitle='kHz', threshold=True, scaleFactor=convFactorToHz / 1000., data=thisIsData)) print_rates(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin4_pt', scaleFactor=convFactorToHz / 1000.) print_rates(hm, 'highest_muon_absEtaMin0_absEtaMax2.1_qmin4_pt', scaleFactor=convFactorToHz / 1000.) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) # q stack uGMT rates for regions objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 
'muon_ptmin0_qmin12_eta', xTitle='#eta', yTitle='', stacked=True, normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin18_qmin12_eta', xTitle='#eta', yTitle='', stacked=True, normToBinWidth=True, data=thisIsData)) if plotLegacy: # relative uGMT rates for regions objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='o', data=thisIsData)) objects.append(plot_hists_standard(hm, 
'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='e', data=thisIsData)) # q stack relative uGMT rates for regions objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', 
yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='e', data=thisIsData)) # uGMT TF rates for 0<|eta|<2.5 objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='o', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='e', data=thisIsData)) # q stack uGMT TF rates for 0<|eta|<2.5 objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, reg='e', data=thisIsData)) if plotLegacy: # relative uGMT TF rates for 0<|eta|<2.5 objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='o', data=thisIsData)) 
objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='e', data=thisIsData)) # q stack relative uGMT TF rates for 0<|eta|<2.5 objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, reg='e', data=thisIsData)) # relative uGMT eta distributions objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=False, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', 
stacked=False, data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, data=thisIsData)) ########################################################################## # TF # TF kinematic variables objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, tfMuonOrig='tf', normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, tfMuonOrig='tf', normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, tfMuonOrig='tf', normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_varBin_pt', xTitle='p_{T} (GeV/c)', yTitle='# muons/(GeV/c)', stacked=True, tfMuonOrig='tf', normToBinWidth=True, data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', xTitle='#eta', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', xTitle='#eta', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin0_qmin12_phi', xTitle='#phi', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin18_qmin12_phi', xTitle='#phi', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_absEtaMin0_absEtaMax2.5_ptmin0_qual', xTitle='#mu quality', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 
'muon_absEtaMin0_absEtaMax2.5_ptmin18_qual', xTitle='#mu quality', yTitle='# muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) # TF rates for regions objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) # q stack TF rates for regions #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) if plotLegacy: # relative TF rates for regions objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 
'gmt_highest_muon_absEtaMin0_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) # q stack relative 
TF rates for regions #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) #objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) # TF rates for 0<|eta|<2.5 
objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) # q stack TF rates for 0<|eta|<2.5 objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax0.83_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0.83_absEtaMax1.24_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin1.24_absEtaMax2.5_qmin12_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) if plotLegacy: # relative TF rates for 0<|eta|<2.5 objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) 
objects.append(plot_hists_standard(hm, 'highest_muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_highest_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) # q stack relative TF rates for 0<|eta|<2.5 objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0_absEtaMax0.83_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin0.83_absEtaMax1.24_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_absEtaMin0_absEtaMax2.5_qmin12_pt', 'gmt_muon_absEtaMin1.24_absEtaMax2.5_qmin8_pt', xTitle='p_{T} (GeV/c)', yTitle='Integrated # events / Integrated # GMT events', threshold=True, stacked=True, tfMuonOrig='tf', reg='e', data=thisIsData)) # relative TF eta distributions objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=False, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='o', data=thisIsData)) 
objects.append(plot_hists_qstack(hm, 'muon_ptmin0_qmin12_eta', 'gmt_muon_ptmin0_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='e', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=True, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_standard(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', stacked=False, tfMuonOrig='tf', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='b', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='o', data=thisIsData)) objects.append(plot_hists_qstack(hm, 'muon_ptmin18_qmin12_eta', 'gmt_muon_ptmin18_qmin8_eta', xTitle='#eta', yTitle='# muons / # GMT muons', tfMuonOrig='tf', reg='e', data=thisIsData)) ########################################################################## # save plots to root file if savePlots: output = root.TFile('./ugmt_rate_plots.root', 'recreate') output.cd() for obj in objects: c = obj[0] c.Write(c.GetName()) c.Print('./plots/'+c.GetName()+'.pdf', '.pdf') c.Print('./plots/'+c.GetName()+'.png', '.png') output.Close() # wait if not batchRun: raw_input("Press ENTER to quit.") if __name__ == "__main__": savePlots = True batchRun = True plotLegacy = False font = 42 main()
11,487
44322b4122488e0041dad3015c98159b0d3e2852
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Creates the mask of where the Monte Carlo simulation should sample the P-bodies """ import numpy as np import cv2 def extract_sample_area(cell_mask, dapi_image, dapi_threshold = 10, shrink_nucleus = 3): ''' Returns a binary mask of where sampling is possible (inside the cell border, NOT inside the nucleus => in the cytoplasm of the cells) Dapi Threshold is used to define the nucleus based on the dapi intensity image shrink_nucleus: Int. Amount that nucleus is shrunken to avoid loosing P-bodies there ''' cell_mask_binary = cell_mask > 0 # Make a mask of the nucleus based on smoothed dapi_image dapi_smoothed = cv2.GaussianBlur(dapi_image,(5,5),0) nucleus_segmented = dapi_smoothed > dapi_threshold # Shrink nucleus by shrink_nucleus value kernel = np.ones((3,3),np.uint8) shrunken_nucleus = cv2.erode(np.array(nucleus_segmented, dtype=np.uint8),kernel,iterations = shrink_nucleus) # Combine the masks using XOR: creates a mask of the cytoplasm cytoplasm_mask = np.logical_xor(cell_mask_binary, shrunken_nucleus) * 255 return cytoplasm_mask # cell_img_path = '/Users/Joel/p-body-randomness/data/input_data/20180606-SLP_Multiplexing_p1_C03_x000_y000_z000_t000_segmentation_Label12.png' # dapi_img_path = '/Users/Joel/p-body-randomness/data/input_data/20180606-SLP_Multiplexing_p1_C03_x000_y000_z000_t000_2_DAPI_Label12.png' # # cell_img = cv2.imread(cell_img_path, 0) # dapi_image = cv2.imread(dapi_img_path, 0) # # cytoplasm_mask = extract_sample_area(cell_img, dapi_image, shrink_nucleus = 3) # cv2.imwrite('test_img.png', cytoplasm_mask)
11,488
8fc43f3b609d8ed50e790ce2603d2e7fb078a9d1
/home/rajendra/anaconda3/lib/python3.6/encodings/cp424.py
11,489
c245c5dc02e34cd15f508dc591e6cfcd303ed9d1
from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.main), url(r'^create$', views.create), url(r'^add/(?P<id>\d+)$', views.add), url(r'^song/(?P<id>\d+)$', views.songUser), url(r'^show/(?P<id>\d+)$', views.showUser), ]
11,490
bdb0a9350470c044b9ced9ed40caca2324fd2227
# 2019 KAKAO BLIND RECRUITMENT - Matching score ("매칭 점수")
# https://programmers.co.kr/learn/courses/30/lessons/42893
import re

# Patterns are compiled once at module level and hoisted out of the per-page loop.
_META_URL_RE = re.compile(r'<meta property="og:url" content="([^"]*)"')
_LINK_RE = re.compile(r'<a href="([^"]*)"')
_WORD_RE = re.compile(r'[a-z]+')


def solution(word, pages):
    """Return the index of the page with the highest matching score.

    Per page:
      * basic score  = number of maximal alphabetic tokens equal to `word`
                       (case-insensitive),
      * link score   = sum over every OTHER page that links to this page's
                       og:url domain of (that page's basic score / that
                       page's outgoing-link count),
      * matching score = basic score + link score.
    Ties are broken by the lowest index.

    Fix vs. the previous implementation: the old boundary check
    (`i > 0 and i + lenWord < lenPage - 1`) never counted a word sitting at
    the very start or very end of a page; tokenizing with [a-z]+ handles
    those positions correctly.
    """
    word = word.lower()

    # One (basic_score, own_domain, outgoing_links) tuple per page.
    infos = []
    for page in pages:
        page = page.lower()
        metas = _META_URL_RE.findall(page)
        # The original kept the LAST meta tag seen; preserve that choice.
        domain = metas[-1] if metas else ''
        links = _LINK_RE.findall(page)
        basic = sum(1 for token in _WORD_RE.findall(page) if token == word)
        infos.append((basic, domain, links))

    best_idx = 0
    best_score = 0.0
    for i, (basic, domain, _) in enumerate(infos):
        score = float(basic)
        for j, (other_basic, _, other_links) in enumerate(infos):
            # Pages with no outgoing links contribute nothing (and cannot
            # divide by zero, since `domain in []` is False).
            if i != j and domain in other_links:
                score += other_basic / len(other_links)
        if score > best_score:  # strict '>' keeps the lowest index on ties
            best_score = score
            best_idx = i
    return best_idx


print(solution("blind", ["<html lang=\"ko\" xml:lang=\"ko\" xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <meta charset=\"utf-8\">\n <meta property=\"og:url\" content=\"https://a.com\"/>\n</head> \n<body>\nBlind Lorem Blind ipsum dolor Blind test sit amet, consectetur adipiscing elit. \n<a href=\"https://b.com\"> Link to b </a>\n</body>\n</html>", "<html lang=\"ko\" xml:lang=\"ko\" xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <meta charset=\"utf-8\">\n <meta property=\"og:url\" content=\"https://b.com\"/>\n</head> \n<body>\nSuspendisse potenti. Vivamus venenatis tellus non turpis bibendum, \n<a href=\"https://a.com\"> Link to a </a>\nblind sed congue urna varius. Suspendisse feugiat nisl ligula, quis malesuada felis hendrerit ut.\n<a href=\"https://c.com\"> Link to c </a>\n</body>\n</html>", "<html lang=\"ko\" xml:lang=\"ko\" xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <meta charset=\"utf-8\">\n <meta property=\"og:url\" content=\"https://c.com\"/>\n</head> \n<body>\nUt condimentum urna at felis sodales rutrum. Sed dapibus cursus diam, non interdum nulla tempor nec. Phasellus rutrum enim at orci consectetu blind\n<a href=\"https://a.com\"> Link to a </a>\n</body>\n</html>"]))
print(solution("Muzi", ["<html lang=\"ko\" xml:lang=\"ko\" xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <meta charset=\"utf-8\">\n <meta property=\"og:url\" content=\"https://careers.kakao.com/interview/list\"/>\n</head> \n<body>\n<a href=\"https://programmers.co.kr/learn/courses/4673\"></a>#!MuziMuzi!)jayg07con&&\n\n</body>\n</html>", "<html lang=\"ko\" xml:lang=\"ko\" xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <meta charset=\"utf-8\">\n <meta property=\"og:url\" content=\"https://www.kakaocorp.com\"/>\n</head> \n<body>\ncon%\tmuzI92apeach&2<a href=\"https://hashcode.co.kr/tos\"></a>\n\n\t^\n</body>\n</html>"]))
11,491
c60d86980b990ad3ff581d680c85c7a3569baa4b
fp = open("input.dat", "r") line = fp.readline() total = 0 length = len(line) for i in range(length): num = int(line[i]) if (num == int(line[int((i+length/2)%length)])): total += num print (total)
11,492
f15d9c9c9db3b7dc094e13104106baf02fff1ead
# Installer script: downloads geckodriver v0.27.0 and browsermob-proxy v2.1.1
# into the local "harquery" package directory, using "temp" as a scratch area.
import os
import struct
import shutil
from tarfile import TarFile
from zipfile import ZipFile
import requests
import time

if os.name == "nt":
    # Windows: geckodriver ships as a .exe inside a zip archive.
    gd_path = "geckodriver.exe"
    if os.path.isfile(os.path.join("harquery", gd_path)):
        # Driver already present — nothing to do.
        print("Harquery dependencies are already installed")
        exit()
    # 8 * sizeof(pointer) gives the interpreter's bitness (32 or 64).
    if 8 * struct.calcsize("P") == 32:
        gd_url = "https://github.com/mozilla/geckodriver/releases/download/v0.27.0/geckodriver-v0.27.0-win32.zip"
    elif 8 * struct.calcsize("P") == 64:
        gd_url = "https://github.com/mozilla/geckodriver/releases/download/v0.27.0/geckodriver-v0.27.0-win64.zip"
    gd_dc = "zip"
    decompressor = ZipFile
else:
    # Non-Windows branch: a macOS tar.gz is assumed — NOTE(review): Linux
    # would also take this branch and get the macOS binary; confirm intent.
    gd_path = "geckodriver"
    if os.path.isfile(os.path.join("harquery", gd_path)):
        print("Harquery dependencies are already installed")
        exit()
    gd_url = "https://github.com/mozilla/geckodriver/releases/download/v0.27.0/geckodriver-v0.27.0-macos.tar.gz"
    gd_dc = "tar.gz"
    decompressor = TarFile.open

bmp_url = "https://codeload.github.com/lightbody/browsermob-proxy/zip/browsermob-proxy-2.1.1"

if not os.path.isdir("temp"):
    os.mkdir("temp")

# Stream-download geckodriver into temp/geckodriver.<ext>.
print("installing geckodriver v0.27.0...")
geckodriver = requests.get(gd_url, stream=True)
target_path = os.path.join("temp", "geckodriver.{0}".format(gd_dc))
with open(target_path, "wb") as f:
    for chunk in geckodriver.iter_content(chunk_size=128):
        f.write(chunk)

# Extract just the driver binary into the harquery package directory.
target_path = os.path.join("temp", "geckodriver.{0}".format(gd_dc))
gd_archive = decompressor(target_path)
gd_archive.extract(gd_path, "harquery")
gd_archive.close()

# Stream-download the browsermob-proxy source zip.
print("installing browsermob-proxy v2.1.1")
bmp = requests.get(bmp_url, stream=True)
target_path = os.path.join("temp", "browsermob-proxy-2.1.1.zip")
with open(target_path, "wb") as f:
    for chunk in bmp.iter_content(chunk_size=128):
        f.write(chunk)

# Unzip into harquery/ and rename the long auto-generated directory name
# (browsermob-proxy-browsermob-proxy-2.1.1) to the short conventional one.
target_dir = "browsermob-proxy-browsermob-proxy-2.1.1"
bmp_archive = ZipFile(target_path)
bmp_archive.extractall("harquery")
from_path = os.path.join("harquery", "browsermob-proxy-browsermob-proxy-2.1.1")
to_path = os.path.join("harquery", "browsermob-proxy-2.1.1")
os.rename(from_path, to_path)
bmp_archive.close()
# Remove the scratch directory together with the downloaded archives.
shutil.rmtree("temp")
11,493
334dadff97f76924a96a1fe7fd87b9db6eb41acc
from Fs import Fs
import os
import constants


class SuperBlock(Fs):
    """In-memory superblock of the toy filesystem.

    Tracks the overall size plus the free-lists of inode numbers and data
    block numbers; every allocation is persisted back to the disk image.
    """

    def __init__(self, size=constants.SUPER_BLOCK_SIZE, f_blocks_list=None, ifree_list=None):
        # size: superblock size in bytes.
        # f_blocks_list: list of free data-block numbers (allocation pops front).
        # ifree_list: list of free inode numbers (allocation pops front).
        self.size = size
        self.f_blocks_list = f_blocks_list
        self.ifree_list = ifree_list

    def get_inode_number(self):
        """Pop and return the next free inode number, persisting the
        updated superblock at offset 0 of the 'disk' image."""
        inode_number = self.ifree_list.pop(0)
        save(self.bytefy(), 0, 'disk')
        return inode_number

    def get_block_number(self):
        """Pop and return the next free data-block number, persisting the
        updated superblock at offset 0 of the 'disk' image."""
        block_number = self.f_blocks_list.pop(0)
        save(self.bytefy(), 0, 'disk')
        return block_number


def save(bytearr, offset, filepath):
    """Write *bytearr* in place at *offset* into the file at *filepath*.

    BUG FIX: the original opened with mode "rw+", which is not a valid
    mode string in Python 3 (open() raises ValueError).  "rb+" opens for
    read/write in binary without truncating, which is what an in-place
    byte write into a disk image requires.  The explicit close() was also
    redundant inside the with-block.
    """
    with open(filepath, "rb+") as disk:
        disk.seek(offset, os.SEEK_SET)
        disk.write(bytearr)
11,494
44b42d2755eecac9153ecc5362f85f18fb7795b3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 20:15:46 2016

@author: rbanderson
"""
import numpy as np
import scipy.optimize as opt
import copy


class sm:
    """Blend predictions from several "submodels", each responsible for a
    different composition range, into one prediction per spectrum.

    blendranges: list of [low, high] pairs, one per submodel, giving the
        composition range each submodel covers.
    random_seed: optional seed for the randomized optimizer restarts.
    """

    def __init__(self, blendranges, random_seed=None):
        self.blendranges = blendranges
        self.random_seed = random_seed

    def do_blend(self, predictions, truevals=None, verbose=True):
        """Blend the submodel predictions into a single array.

        predictions: sequence of per-submodel prediction arrays; the LAST
            entry acts as the "reference" model used to decide which range
            each spectrum falls into.
        truevals: if provided, the blend-range boundaries are optimized
            (5 randomized restarts of scipy.optimize.minimize) to minimize
            RMSE against these known values; the best boundaries and RMSE
            are stored on self.blendranges / self.rmse.
        Returns the blended prediction array.
        """
        # create the array indicating which models to blend for each blend range
        # For three models, this creates an array like: [[0,0],[0,1],[1,1],[1,2],[2,2]]
        # Which indicates that in the first range, just use model 0
        # In the second range, blend models 0 and 1
        # in the third range, use model 1
        # in the fourth range, blend models 1 and 2
        # in the fifth range, use model 2
        if self.random_seed is not None:
            np.random.seed(self.random_seed)
        self.toblend = []
        for i in range(len(predictions) - 1):
            self.toblend.append([i, i])
            if i < len(predictions) - 2:
                self.toblend.append([i, i + 1])

        # If the true compositions are provided, then optimize the ranges over
        # which the results are blended to minimize the RMSEC
        blendranges = np.array(self.blendranges).flatten()  # squash the ranges to be a 1d array
        blendranges.sort()  # sort the entries; submodels_blend uses this order to combine predictions
        self.blendranges = blendranges
        if truevals is not None:
            self.rmse = 99999999  # sentinel "worst RMSE" so any real result replaces it
            n_opt = 5  # number of randomized restarts of the optimizer
            i = 0
            while i < n_opt:
                if i > 0:
                    # add some randomness to the interior blendranges each restart;
                    # the outer sentinels are pinned at +/-9999
                    blendranges = np.hstack(([-9999], blendranges[1:-1]+0.1*np.random.random(len(blendranges)-2), [9999]))
                truevals = np.squeeze(np.array(truevals))
                result = opt.minimize(self.get_rmse, blendranges, (predictions, truevals, verbose), tol=0.00001)
                if result.fun < self.rmse:
                    # keep the best boundaries seen across restarts
                    self.blendranges = result.x
                    self.rmse = result.fun
                    if verbose==True:
                        # NOTE(review): ndarray.sort() sorts in place and returns
                        # None, so this prints "None" — confirm intent.
                        print(self.blendranges.sort())
                        print('RMSE ='+str(self.rmse))
                else:
                    pass
                i=i+1
            print(' ')
            print('Optimum settings:')
            print('RMSE = ' + str(self.rmse))
            print('Low model: ' + str(round(self.blendranges[0], 4)) + ' to ' + str(round(self.blendranges[2], 4)))
            i = 1
            m = 2
            # walk the flat boundary vector two entries at a time, printing the
            # effective range of each interior submodel
            while i + 3 < len(self.blendranges) - 1:
                print('Submodel ' + str(m) + ': ' + str(round(self.blendranges[i], 4)) + ' to ' + str(
                    round(self.blendranges[i + 3], 4)))
                i = i + 2
                m = m + 1
            print('High model: ' + str(round(self.blendranges[-3], 4)) + ' to ' + str(round(self.blendranges[-1], 4)))
        else:
            self.blendranges = blendranges

        # calculate the blended results with the (possibly optimized) ranges
        blended = self.submodels_blend(predictions, self.blendranges, overwrite=False)
        return blended

    def get_rmse(self, blendranges, predictions, truevals, verbose, rangemin = 0.0, rangemax = 100, roundval = 10):
        """Objective function for the optimizer: RMSE of the blended result.

        blendranges: flat boundary vector currently being optimized.
        rangemin/rangemax: clamp the interior boundaries so they cannot
            drift outside the physically meaningful range.
        roundval: decimal places the RMSE is rounded to — limits how long
            the optimizer keeps chasing insignificant improvements.
        """
        blendranges[1:-1][blendranges[1:-1] < rangemin] = rangemin  # ensure range boundaries don't drift below min
        blendranges[1:-1][blendranges[1:-1] > rangemax] = rangemax  # ensure range boundaries don't drift above max
        blendranges.sort()  # ensure range boundaries stay in order
        blended = self.submodels_blend(predictions, blendranges, overwrite=False)
        # calculate the RMSE. Round to specified precision as a way to control how long optimization runs
        # Note: don't want to round too much - optimization needs some wiggle room
        RMSE = np.round(np.sqrt(np.mean((blended - truevals) ** 2)),roundval)
        if verbose == True:
            print('RMSE = '+str(RMSE))
            print('Low model: '+str(round(blendranges[0],4))+' to '+str(round(blendranges[2],4)))
        i=1
        m=2
        while i+3<len(blendranges)-1:
            if verbose == True:
                print('Submodel '+str(m)+': '+str(round(blendranges[i],4))+' to '+str(round(blendranges[i+3],4)))
            i=i+2
            m=m+1
        if verbose == True:
            print('High model: '+str(round(blendranges[-3],4)) + ' to ' + str(round(blendranges[-1],4)))
        return RMSE

    def submodels_blend(self,predictions,blendranges, overwrite=False):
        """Combine per-submodel predictions using the reference (last) model
        to decide which range each spectrum belongs to.

        NOTE(review): each call appends to self.toblend, so calling this
        repeatedly on the same instance grows that list — confirm intended.
        """
        blended=np.squeeze(np.zeros_like(predictions[0]))

        #format the blending ranges (note, initial formatting is done in do_blend)
        blendranges = np.hstack((blendranges, blendranges[1:-1]))  # duplicate the middle entries
        blendranges.sort()  #re-sort them
        blendranges=np.reshape(blendranges,(int(len(blendranges)/2),int(2)))  #turn the vector back into a 2d array (one pair of values for each submodel)
        # catch-all row: the last model alone handles anything not caught by
        # an explicit range
        self.toblend.append([len(predictions)-1,len(predictions)-1])
        blendranges=np.vstack((blendranges,[-9999999,999999]))
        for i in range(len(blendranges)):  #loop over each composition range
            for j in range(len(predictions[0])):  #loop over each spectrum
                ref_tmp=predictions[-1][j]  #get the reference model predicted value
                #check whether the prediction for the reference spectrum is within the current range
                inrangecheck=(ref_tmp>blendranges[i][0])&(ref_tmp<blendranges[i][1])
                if inrangecheck:
                    try:
                        if self.toblend[i][0]==self.toblend[i][1]:
                            #if the results being blended are identical, no blending necessary!
                            blendval=predictions[self.toblend[i][0]][j]
                        else:
                            weight1 = 1 - (ref_tmp - blendranges[i][0]) / (
                                blendranges[i][1] - blendranges[i][0])  # define the weight applied to the lower model
                            weight2 = (ref_tmp - blendranges[i][0]) / (
                                blendranges[i][1] - blendranges[i][0])  # define the weight applied to the higher model
                            # calculated the blended value (weighted sum)
                            blendval = weight1 * predictions[self.toblend[i][0]][j] + weight2 * \
                                       predictions[self.toblend[i][1]][j]
                    except:
                        # NOTE(review): bare except silently skips failures; blendval
                        # may then be stale from a previous iteration — confirm.
                        pass
                    if overwrite:
                        blended[j] = blendval  # If overwrite is true, write the blended result no matter what
                    else:
                        # If overwrite is false, only write the blended result if there is not already a result there
                        if blended[j] == 0:
                            blended[j] = blendval
        return blended
11,495
b6e3a62d48c3ae7bea32284ee1fa67da136b16ad
# Nombre: Juan Diego Poccori Escalante
# Código: 144884

import math

# 1. Escribir un algoritmo que calcule el coseno mediante series de Taylor


def factorial(numero):
    """Return numero! for a non-negative integer.

    Raises ValueError for negative input.  BUG FIX: the original returned
    the result of print() (i.e. None) on error, which later crashed with a
    TypeError when the caller divided by it.
    """
    if numero < 0:
        raise ValueError("Error: Debe ser un numero mayor o igual a cero")
    i = 1
    resultado = 1
    while numero >= i:
        resultado *= i
        i += 1
    return resultado


def coseno_taylor(angulo, tolerancia=0.00000001):
    """Approximate cos(angulo) via its Taylor series.

    angulo: angle in sexagesimal degrees.
    tolerancia: stop adding terms once the next term drops to/below this.
    Returns the (unrounded) cosine approximation.
    """
    # Convert degrees to radians; the Taylor series is in radians.
    angulo_rad = angulo * (math.pi / 180)
    termino = 1.0  # first series term: x^0 / 0! = 1
    coseno = 0.0
    signo = 1  # terms alternate sign: 1 - x^2/2! + x^4/4! - ...
    j = 2
    while termino > tolerancia:
        coseno += signo * termino
        termino = angulo_rad ** j / factorial(j)
        j += 2
        signo *= -1
    return coseno


if __name__ == "__main__":
    # Leer angulo en sexagesimales y mostrar coseno redondeado a 6
    angulo = float(input("Ingrese ángulo en sexagesimales: "))
    print(round(coseno_taylor(angulo), 6))
11,496
d071ee971362a9a265a1c7969f9cb781ec7c8ee6
# Generated by Django 3.0.2 on 2020-03-11 17:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pokemon_entities', '0018_auto_20200311_0139'), ] operations = [ migrations.AlterField( model_name='pokemon', name='description', field=models.CharField(blank=True, max_length=1000, verbose_name='Описание'), ), migrations.AlterField( model_name='pokemon', name='image', field=models.ImageField(blank=True, upload_to='', verbose_name='Картинка'), ), migrations.AlterField( model_name='pokemon', name='title_en', field=models.CharField(blank=True, max_length=200, verbose_name='Название на английском языке'), ), migrations.AlterField( model_name='pokemon', name='title_jp', field=models.CharField(blank=True, max_length=200, verbose_name='Название на японском языке'), ), migrations.AlterField( model_name='pokemon', name='title_ru', field=models.CharField(blank=True, max_length=200, verbose_name='Название на русском языке'), ), ]
11,497
9cc71abe8128728634221e3d969adcbdd3273b96
import operator

from etc.const import GREATER, GREATER_EQUALS, LESS, LESS_EQUALS, EQUALS, NOT_EQUALS

# Maps human-readable attribute labels (as they appear in conditions) to the
# attribute names of the corresponding game objects.
ATTRIBUTES = {
    'Character': {
        'health': 'hp',
        'resource': 'resource',
        'is in combat': 'is_in_combat',
        'has pet': 'has_pet',
        'first class resource': 'first_class_resource',
        'pet health': 'pet_hp',
    },
    'Target': {
        'health': 'hp',
        'resource': 'resource',
        'distance': 'distance'
    },
    'Tick': {
        'second': 'second'
    }
}

# Discrete attributes only support an equality check.
_DISCRETE = {EQUALS: operator.eq}

# Continuous attributes support the full comparison set.
_CONTINUOUS = {
    GREATER: operator.gt,
    GREATER_EQUALS: operator.ge,
    LESS: operator.lt,
    LESS_EQUALS: operator.le,
    EQUALS: operator.eq,
    NOT_EQUALS: operator.ne
}
# Backward-compatible alias for the original (misspelled) name, in case it
# is imported elsewhere.
_CONTINOUS = _CONTINUOUS

# Allowed comparison operators per attribute label.
OPERATORS = {
    "is in combat": _DISCRETE,
    "has pet": _DISCRETE,
    "first class resource": _DISCRETE,
    "health": _CONTINUOUS,
    "pet health": _CONTINUOUS,
    "resource": _CONTINUOUS,
    "distance": _DISCRETE,
    "second": _CONTINUOUS
}

# Ordinal encoding of the distance buckets (larger = farther but in range).
_DISTANCE_VALUES = {"25 yard": 2, "8 yard": 1, "out of range": 0}


def _str_to_bool(value):
    """Convert the strings "True"/"False" to booleans (KeyError otherwise).

    Shared helper replacing three identical inline lambdas.
    """
    return {"True": True, "False": False}[value]


# Converts raw string values into comparable Python values, per attribute.
VALUE_CONVERTER = {
    "distance": lambda d: _DISTANCE_VALUES[d],
    "is in combat": _str_to_bool,
    "has pet": _str_to_bool,
    "first class resource": _str_to_bool,
    "health": int,
    "resource": int,
    "second": float,
    "pet health": int
}
11,498
e0f7c227115f955b3f5e63bc042552f94ff1027f
# -*- coding: utf-8 -*-

import os
import sys

from deconstrst.deconstrst import build, submit
from deconstrst.config import Configuration

__author__ = 'Ash Wilson'
__email__ = 'ash.wilson@rackspace.com'
__version__ = '0.1.0'


def main():
    """Build the Sphinx content and submit it to the content service,
    unless the configuration gives reasons to skip submission."""
    cfg = Configuration(os.environ)

    # Lock source and destination to the same paths as the Makefile.
    src, dest = '.', '_build/deconst'

    build_status = build(src, dest)
    if build_status != 0:
        sys.exit(build_status)

    skip_reasons = cfg.skip_submit_reasons()
    if not skip_reasons:
        submit(dest, cfg.content_store_url, cfg.content_store_apikey,
               cfg.content_id_base)
        return

    # Explain on stderr why the submit step was skipped.
    print("Not submitting content to the content service because:", file=sys.stderr)
    print(file=sys.stderr)
    for why in skip_reasons:
        print(" * " + why, file=sys.stderr)
    print(file=sys.stderr)


if __name__ == '__main__':
    main()
11,499
398e44942f0f2ec3d7a7c7c8ef8c01df4eff8d26
import csv
import re

import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

from get_all_players import read_all_players_from_csv

BASE_URL = 'https://www.basketball-reference.com'

# Per-season stat columns, in basketball-reference "Per Game" table order.
COLUMNS = ['Season', 'Age', 'Tm', 'Lg', 'Pos', 'G', 'GS', 'MP', 'FG', 'FGA', 'FG%',
           '3P', '3PA', '3P%', '2P', '2PA', '2P%', 'eFG%', 'FT', 'FTA', 'FT%',
           'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
ALL_COLUMNS = ['Player', 'ShortName', 'Height', 'Weight', 'Position', 'BirthPlace',
               'SeasonURL'] + COLUMNS


def get_season_href(r):
    """Return the href of the first link inside table row *r*, or '' if absent.

    BUG FIX: narrowed a bare except (it also swallowed KeyboardInterrupt).
    """
    children = list(r.children)
    if not children:
        return ''
    try:
        return list(children[0].children)[0].attrs['href']
    except Exception:
        return ''


def get_attr(s, attr):
    """Return the text of the first element with itemprop=*attr*, or ''.

    find() returns None when the element is missing, so get_text() raises
    AttributeError; narrowed the original bare except accordingly.
    """
    try:
        return s.find(itemprop=attr).get_text()
    except Exception:
        return ''


def read_all_seasons(new_file=False, seasons_filename='seasons.csv', players_filename='players.csv'):
    """Scrape per-season stats for every known player into a CSV.

    new_file: when True, recreate the output CSV with a header row;
        otherwise players already present in it are skipped, making
        interrupted runs resumable.
    seasons_filename: output CSV path.
    players_filename: input CSV of players (name/shortname/href).
    """
    player_data = read_all_players_from_csv(players_filename)

    if new_file:
        with open(seasons_filename, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(ALL_COLUMNS)
        existing_players = set()
    else:
        with open(seasons_filename, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            existing_players = set(row['ShortName'] for row in reader)

    for player_row in player_data:
        print(f'Player {player_row["shortname"]}')
        if player_row['shortname'] in existing_players:
            print('skip')
            continue

        r = requests.get(BASE_URL + player_row['href'])
        season_df = pd.read_html(r.text)[0]
        # Some players lack certain stat columns (era-dependent); pad with NaN.
        # BUG FIX: pd.np was deprecated in pandas 1.0 and removed later;
        # use numpy directly.
        for c in COLUMNS:
            if c not in season_df.columns:
                season_df[c] = np.nan
        season_df = season_df[COLUMNS]
        season_df['Player'] = player_row['name']
        season_df['ShortName'] = player_row['shortname']

        s = BeautifulSoup(r.text, 'lxml')
        season_df['Position'] = s.findAll(
            text=re.compile('(Guard|Forward|Point Guard|Center|Power Forward|Shooting Guard|Small Forward)')
        )[0].strip().split('\n')[0]
        season_df['Height'] = get_attr(s, 'height')
        season_df['Weight'] = get_attr(s, 'weight')
        try:
            season_df['BirthPlace'] = s.find(itemprop='birthPlace').contents[1].get_text()
        except Exception:
            season_df['BirthPlace'] = np.nan
        # Trailing '' presumably pads for a row without a season link
        # (e.g. the career-totals line) — TODO confirm against the page markup.
        season_df['SeasonURL'] = [get_season_href(a) for a in s.select('#per_game > tbody > tr')] + ['']
        season_df = season_df[season_df['G'] > 0]

        with open(seasons_filename, 'a') as csvfile:
            season_df.to_csv(csvfile, columns=ALL_COLUMNS, header=False, index=False)


if __name__ == "__main__":
    read_all_seasons()