index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,600 | fb92912e1a752f3766f9439f75ca28379e23823f | REDIRECT_MAP = {
'90':'19904201',
'91':'19903329',
'92':'19899125',
'93':'19901043',
'94':'19903192',
'95':'19899788',
'97':'19904423',
'98':'19906163',
'99':'19905540',
'100':'19907871',
'101':'19908147',
'102':'19910103',
'103':'19909980',
'104':'19911813',
'105':'19911767',
'106':'19913625',
'107':'19913832',
'108':'19915603',
'109':'19915707',
'110':'19915705',
'111':'19915558',
'112':'19917330',
'113':'19917085',
'114':'19918316',
'115':'19919617',
'116':'19918555',
'117':'19919779',
'118':'19920594',
'119':'19920805',
'120':'19921503',
'121':'19923032',
'122':'19922349',
'123':'19923894',
'124':'19924058',
'125':'19924651',
'126':'19929744',
'127':'19929743',
'128':'19929742',
'129':'19929184',
'130':'19929183',
'131':'19928163',
'132':'19927463',
'133':'19927462',
'134':'19927461',
'135':'19926742',
'136':'19926741',
'137':'19926738',
'138':'19930143',
'139':'19930827',
'140':'19931617',
'141':'19931616',
'142':'19932324',
'143':'19932321',
'144':'19932320',
'145':'19932845',
'146':'19932843',
'147':'19932842',
'148':'19932839',
'149':'19933621',
'150':'19933618',
'151':'19934526',
'152':'19934525',
'153':'19934524',
'154':'19935167',
'155':'19935165',
'156':'19936598',
'157':'19936596',
'158':'19936594',
'160':'19937949',
'161':'19937662',
'162':'19937662',
'163':'19937662',
'164':'19937662',
'165':'19937662',
'166':'19940346',
'167':'19939390',
'168':'19938892',
'169':'19938886',
'170':'19938874',
'171':'19938181',
'172':'19938179',
'173':'19938177',
'174':'19937662',
'175':'19937662',
'176':'800073144',
'177':'800073141',
'178':'800070989',
'179':'800070987',
'180':'800070985',
'181':'800068840',
'182':'800068838',
'183':'800068837',
'184':'800068835',
'185':'800073405',
'186':'800075467',
'187':'800075466',
'188':'800077797',
'189':'800077792',
'190':'800077788',
'191':'800080302',
'192':'800080300',
'193':'800080299',
'194':'800080297',
'195':'800080295',
'196':'800080294',
'197':'800082560',
'198':'800082559',
'199':'800082558',
'200':'800085053',
'201':'800085057',
'202':'800085055',
'203':'800087660',
'204':'800087637',
'205':'800087636',
'206':'800090260',
'207':'800090259',
'208':'800090256',
'209':'800090252',
'210':'800090248',
'211':'800095783',
'212':'800093475',
'213':'800093472',
'214':'800093469',
'215':'800093465',
'216':'800097835',
'217':'800097830',
'218':'800097828',
'219':'800102815',
'220':'800100696',
'221':'800107510',
'222':'800105566',
'223':'800105187',
'224':'800105182',
'225':'800105176',
'226':'800105171',
'227':'800110082',
'228':'800110080',
'229':'800110077',
'230':'800107893',
'231':'800112573',
'232':'800112572',
'233':'800112570',
'234':'800115083',
'235':'800115080',
'236':'800117652',
'237':'800136223',
'238':'800135715',
'239':'800135712',
'240':'800127734',
'241':'800125056',
'242':'800125055',
'243':'800125054',
'244':'800122499',
'245':'800122497',
'246':'800120063',
'247':'800120060',
'248':'800118016',
'249':'800118015',
'250':'800138744',
'251':'800138741',
'252':'800138440',
'253':'800156510',
'254':'800156507',
'255':'800159343',
'256':'800200950',
'257':'800200946',
'258':'800180350',
'259':'800180348',
'260':'800162155',
'261':'800162153',
'262':'800159803',
'263':'800205850',
'264':'800205839',
'265':'800210303',
'266':'800210302',
'267':'800212467',
'268':'800212465',
'269':'800212462',
'270':'800215849',
'271':'800218413',
'272':'800220590',
'273':'800220585',
'274':'800220581',
'275':'800220568',
'276':'800223836',
'277':'800223835',
'278':'800226881',
'279':'800226876',
'280':'800226875',
'281':'800229066',
'282':'800229064',
'283':'800232046',
'284':'800232043',
'285':'800234330',
'286':'800234329',
'287':'800234328',
'288':'800239516',
'289':'800236806',
'290':'800242231',
'291':'800242196',
'292':'800242177',
'293':'800245005',
'294':'800247477',
'295':'800247307',
'296':'800247092',
'297':'800250315',
'298':'800250206',
'299':'800250198',
'300':'800252661',
'301':'800252745',
'302':'800252731',
'303':'800255314',
'304':'800255226',
'305':'800261560',
'306':'800264399',
'307':'800264337',
'308':'800262863',
'309':'800267317',
'310':'800268635',
'311':'800270225',
'312':'800272621',
'313':'800272861',
'314':'800275290',
'315':'800275287',
'316':'800275259',
'317':'800277905',
'318':'800277897',
'319':'800277966',
'320':'800280886',
'321':'800280734',
'322':'800280721',
'323':'800283469',
'324':'800283455',
'325':'800291555',
'326':'800291531',
'327':'800288739',
'328':'800286042',
'329':'800286032',
'330':'800294431',
'331':'800294423',
'332':'800294394',
'333':'800297383',
'334':'800299835',
'335':'800302625',
'336':'800305630',
'337':'800305626',
'338':'800308225',
'339':'800307935',
'340':'800308160',
'341':'800308242',
'342':'800310811',
'343':'800310657',
'344':'800310651',
'345':'800312843',
'346':'800313657',
'347':'800313593',
'348':'800313385',
'349':'800315870',
'350':'800315874',
'351':'800315004',
'352':'800315980',
'353':'800317852',
'354':'800317851',
'355':'800317843',
'356':'800317841',
'357':'800320232',
'358':'800322836',
'359':'800322833',
'360':'800325648',
'361':'800325641',
'362':'800328374',
'363':'800328368',
'364':'800330891',
'365':'800330882',
'366':'800330878',
'367':'800336505',
'368':'800336491',
'369':'800338571',
'370':'800341852',
'371':'800339471',
'372':'800344570',
'373':'800344561',
'374':'800344557',
'375':'800347295',
'376':'800348755',
'377':'800350263',
'378':'800350259',
'379':'800353149',
'380':'800351527',
'381':'800355911',
'382':'800355907',
'383':'800358602',
'384':'800358597',
'385':'800357146',
'386':'800360127',
'387':'800364368',
'388':'800364364',
'389':'800364360',
'390':'800369266',
'391':'800367438',
'392':'800367435',
'393':'800365869',
'394':'800376494',
'395':'800376495',
'396':'800376499',
'397':'800376508',
'398':'800376564',
'399':'800376527',
'400':'800376534',
'401':'800376542',
'402':'800376553',
'403':'800376547',
'404':'800373150',
'405':'800373145',
'406':'800372444',
'407':'800372437',
'408':'800372425',
'409':'800379488',
'410':'800382132',
'411':'800382127',
'412':'800382125',
'413':'800386300',
'414':'800384980',
'415':'800384977',
'416':'800387613',
'417':'800387609',
'418':'800390598',
'419':'800390595',
'420':'800390593',
'421':'800391756',
'422':'800393267',
'423':'800396025',
'424':'800399068',
'425':'800401344',
'426':'800404124',
'427':'800408946',
'428':'800407272',
'429':'800407265',
'430':'800411526',
'431':'800411522',
'432':'800414380',
'433':'800413104',
'434':'800413099',
'435':'800415905',
'436':'800415900',
'437':'800417356',
'438':'800420038',
'439':'800420034',
'440':'800420028',
'441':'800422801',
'442':'800421597',
'443':'800421594',
'444':'800427313',
'445':'800427308',
'446':'800427302',
'447':'800427296',
'448':'800428813',
'449':'800430293',
'450':'800430281',
'451':'800430273',
'452':'800434255',
'453':'800434253',
'454':'800434251',
'455':'800434249',
'456':'800434246',
'457':'800431774',
'458':'800443507',
'459':'800442246',
'460':'800440771',
'461':'800439363',
'462':'800439359',
'463':'800436898',
'464':'800434258',
'465':'800446256',
'466':'800450435',
'467':'800450429',
'468':'800450424',
'469':'800452914',
'470':'800452909',
'471':'800452023',
'472':'800452016',
'473':'800455755',
'474':'800455748',
'475':'800457050',
'476':'800458494',
'477':'800461157',
'478':'800459620',
'479':'800464361',
'480':'800464980',
'481':'800462270',
'482':'800465908',
'483':'800465407',
'484':'800465404',
'485':'800467476',
'486':'800467755',
'487':'800468407',
'488':'800468843',
'489':'800469869',
'490':'800469867',
'491':'800470232',
'492':'800470228',
'493':'800470224',
'494':'800470783',
'495':'800471280',
'496':'800471274',
'497':'800471270',
'498':'800471737',
'499':'800472257',
'500':'800472252',
'501':'800472248',
'502':'800472239',
'503':'800472826',
'504':'800473392',
'505':'800473387',
'506':'800473386',
'507':'800474131',
'508':'800474822',
'509':'800476516',
'510':'800476512',
'511':'800477305',
'512':'800477304',
'513':'800477299',
'514':'800477851',
'515':'800478313',
'516':'800478309',
'517':'800478779',
'518':'800479288',
'519':'800479679',
'520':'800480262',
'521':'800480257',
'522':'800483194',
'523':'800482720',
'524':'800482271',
'525':'800481660',
'526':'800481208',
'527':'800480699',
'528':'800483203',
'529':'800483712',
'530':'800484088',
'531':'800484085',
'532':'800484667',
'533':'800485151',
'534':'800485686',
'535':'800487288',
'536':'800487265',
'537':'800487264',
'538':'800487254',
'539':'800487654',
'540':'800488015',
'541':'800488014',
'542':'800488638',
'543':'800488635',
'544':'800489081',
'545':'800489074',
'546':'800489725',
'547':'800489722',
'548':'800490703',
'549':'800490702',
'550':'800492228',
'551':'800494213',
'552':'800494039',
'553':'800494442',
'554':'800494426',
'555':'800495547',
'556':'800495446',
'557':'800496750',
'558':'800498164',
'559':'800498748',
'560':'800499418',
'561':'800499229',
'562':'800500847',
'563':'800500844',
'564':'800500802',
'565':'800501840',
'566':'800501597',
'567':'800502796',
'568':'800502789',
'569':'800503614',
'570':'800504092',
'571':'800503911',
'572':'800508001',
'573':'800507103',
'574':'800506285',
'575':'800505846',
'576':'800505807',
'577':'800505069',
'578':'800509304',
'579':'800509218',
'580':'800508912',
'581':'800509464',
'582':'800510151',
'583':'800511800',
'584':'800511318',
'585':'800512405',
'586':'800512403',
'587':'800513304',
'588':'800513305',
'589':'800513635',
'590':'800513633',
'591':'800514762',
'592':'800514759',
'593':'800515655',
'594':'800515656',
'595':'800516480',
'596':'800516479',
'597':'800516478',
'598':'800517736',
'599':'800517735',
'600':'800517733',
'601':'800517148',
'602':'800517143',
'603':'800517138',
'604':'800519296',
'605':'800519292',
'606':'800520855',
'607':'800520857',
'608':'800520736',
'609':'800521674',
'610':'800522862',
'611':'800523828',
'612':'800523825',
'613':'800524526',
'614':'800524868',
'615':'800525568',
'616':'800525566',
'617':'800525848',
'618':'800525847',
'619':'800525845',
'620':'800526925',
'621':'800526923',
'622':'800526922',
'623':'800528032',
'624':'800527784',
'625':'800527783',
'626':'800529243',
'627':'800528930',
'628':'800528927',
'629':'800530217',
'630':'800530215',
'631':'800530212',
'632':'800531040',
'633':'800530845',
'634':'800530842',
'635':'800531892',
'636':'800532956',
'637':'800532952',
'638':'800533102',
'639':'800534375',
'640':'800534368',
'641':'800534363',
'642':'800535420',
'643':'800535415',
'644':'800535410',
'645':'800536088',
'646':'800536085',
'647':'800536084',
'648':'800537422',
'649':'800537419',
'650':'800537413',
'651':'800565995',
'652':'800565992',
'653':'800563301',
'654':'800563298',
'655':'800562019',
'656':'800562018',
'657':'800560957',
'658':'800560954',
'659':'800560953',
'660':'800560950',
'661':'800567960',
'662':'800567958',
'663':'800567957',
'664':'800566950',
'665':'800566948',
'666':'800566947',
'667':'800568961',
'668':'800568959',
'669':'800568957',
'670':'800569778',
'671':'800569776',
'672':'800569775',
'673':'800570677',
'674':'800570673',
'675':'800570647',
'676':'800571691',
'677':'800571690',
'678':'800571688',
'679':'800573679',
'680':'800573678',
'681':'800573673',
'682':'800572880',
'683':'800572878',
'684':'800572876',
'685':'800574667',
'686':'800574666',
'687':'800574665',
'688':'800575627',
'689':'800575624',
'690':'800575622',
'691':'800576864',
'692':'800576861',
'693':'800576858',
'694':'800577693',
'695':'800578651',
'696':'800578648',
'697':'800578653',
'698':'800580339',
'699':'800581315',
'700':'800582094',
'701':'800583021',
'702':'800590020',
'703':'800590019',
'704':'800590018',
'705':'800589231',
'706':'800589226',
'707':'800588877',
'708':'800587042',
'709':'800587039',
'710':'800586085',
'711':'800584924',
'712':'800583934',
'713':'800590941',
'714':'800590940',
'715':'800590939',
'716':'800592923',
'717':'800592921',
'718':'800592920',
'719':'800591918',
'720':'800591917',
'721':'800591915',
'722':'800593832',
'723':'800593829',
'724':'800593824',
'725':'800593890',
'726':'800594956',
'727':'800594880',
'728':'800594877',
'729':'800594876',
'730':'800595884',
'731':'800595883',
'732':'800595882',
'733':'800595879',
'734':'800596854',
'735':'800597955',
'736':'800597961',
'737':'800597957',
'738':'800597954',
'739':'800597951',
'740':'800598913',
'741':'800600005',
'742':'800600003',
'743':'800600000',
'744':'800600977',
'745':'800600975',
'746':'800600973',
'747':'800601974',
'748':'800603879',
'749':'800603052',
'750':'800603050',
'751':'800604977',
'752':'800605959',
'753':'800607128',
'754':'800608295',
'755':'800608294',
'756':'800608293',
'757':'800609876',
'758':'800610697',
'759':'800611768',
'760':'800611766',
'761':'800611764',
'762':'800612811',
'763':'800612809',
'764':'800612806',
'765':'800615487',
'766':'800613824',
'767':'800613823',
'768':'800617427',
'769':'800617740',
'770':'800618987',
'771':'800618794',
'772':'800620463',
'773':'800620507',
'774':'800621873',
'775':'800621866',
'776':'800621485',
'777':'800623063',
'778':'800622785',
'779':'800624082',
'780':'800624606',
'781':'800624605',
'782':'800624602',
'783':'800626006',
'784':'800626004',
'785':'800625998',
'786':'800625995',
'787':'800625959',
'788':'800625684',
'789':'800627159',
'790':'800627541',
'791':'800628537',
'792':'800628472',
'793':'800628440',
'794':'800628412',
'795':'800628391',
'796':'800629230',
'797':'800629175',
'798':'800630245',
'799':'800630236',
'800':'800631787',
'801':'800631425',
'802':'800631385',
'803':'800631379',
'804':'800631339',
'805':'800631299',
'806':'800631198',
'807':'800630886',
'808':'800633920',
'809':'800633720',
'810':'800633520',
'811':'800634419',
'812':'800635301',
'813':'800635068',
'814':'800635957',
'815':'800638994',
'816':'800638105',
'817':'800637068',
'818':'800636754',
'819':'800636749',
'820':'800636075',
'821':'800639448',
'822':'800639234',
'823':'800639026',
'824':'800640408',
'825':'800640396',
'826':'800640985',
'827':'800640977',
'828':'800645321',
'829':'800644531',
'830':'800644235',
'831':'800643606',
'832':'800642400',
'833':'800641879',
'834':'800645756',
'835':'800647017',
'836':'800648350',
'837':'800648289',
'838':'800648124',
'839':'800647488',
'840':'800649911',
'841':'800649906',
'842':'800649535',
'843':'800649521',
'844':'800649507',
'845':'800649438',
'846':'800649411',
'847':'800650580',
'848':'800652017',
'849':'800652004',
'850':'800651999',
'851':'800651955',
'852':'800651790',
'853':'800651264',
'854':'800651159',
'855':'800652276',
'856':'800652260',
'857':'800654483',
'858':'800654117',
'859':'800654927',
'860':'800656751',
'861':'800656720',
'862':'800656504',
'863':'800656476',
'864':'800655926',
'865':'800658883',
'866':'800659871',
'867':'800659855',
'868':'800657502',
'869':'800662419',
'870':'800663417',
'871':'800661565',
'872':'800664542',
'873':'800665790',
'874':'800667640',
'875':'800668511',
'876':'800668354',
'877':'800668932',
'878':'800668884',
'879':'800668870',
'880':'800668846',
'881':'800670519',
'882':'800670755',
'883':'800670804',
'884':'800670005',
'885':'800669956',
'886':'800671522',
'887':'800670997',
'888':'800676274',
'889':'800674751',
'890':'800674396',
'891':'800674387',
'892':'800674369',
'893':'800674171',
'894':'800674165',
'895':'800673904',
'896':'800673894',
'897':'800673042',
'898':'800672682',
'899':'800673037',
'900':'800674363',
'901':'800671334',
'902':'800676404',
'903':'800677203',
'904':'800678281',
'905':'800677753',
'906':'800678579',
'907':'800678543',
'908':'800682417',
'909':'800680556',
'910':'800680572',
'911':'800681753',
'912':'800683728',
'913':'800683445',
'914':'800684755',
'915':'800685559',
'916':'800685994',
'917':'800686991',
'918':'800688325',
'919':'800688988',
'920':'800688986',
'921':'800688811',
'922':'800688784',
'923':'800690794',
'924':'800690777',
'925':'800690766',
'926':'800691744',
'927':'800691714',
'928':'800691608',
'929':'800691675',
'930':'800692072',
'931':'800692888',
'932':'800692853',
'933':'800694793',
'934':'800695410',
'935':'800696421',
'936':'800696417',
'937':'800696404',
'938':'800696380',
'939':'800695901',
'940':'800696527',
'941':'800696521',
'942':'800696516',
'943':'800697754',
'944':'800698640',
'945':'800700044',
'946':'800700030',
'947':'800700001',
'948':'800699969',
'949':'800700477',
'950':'800700332',
'951':'800701388',
'952':'800701378',
'953':'800702260',
'954':'800702167',
'955':'800702170',
'956':'800703184',
'957':'800703189',
'958':'800704417',
'959':'800704334',
'960':'800704331',
'961':'800705315',
'962':'800705310',
'963':'800706319',
'964':'800706317',
'965':'800707543',
'966':'800707540',
'967':'800707378',
'968':'800707376',
'969':'800707372',
'970':'800709165',
'971':'800709918',
'972':'800709909',
'973':'800709913',
'974':'800709590',
'975':'800709592',
'976':'800711385',
'977':'800711436',
'978':'800711448',
'979':'800712704',
'980':'800712684',
'981':'800712697',
'982':'800713805',
'983':'800713786',
'984':'800715143',
'985':'800715140',
'986':'800717742',
'987':'800717725',
'988':'800717083',
'989':'800719807',
'990':'800719797',
'991':'800721331',
'992':'800721317',
'993':'800722269',
'994':'800722253',
'995':'800722190',
'996':'800723313',
'997':'800723082',
}
REDIRECT_MAP_CATEGORIES = {
'27':'438046136',
'28':'438046133',
'29':'438046135',
'30':'438046134',
'31':'438046128',
'32':'438046127',
'33':'438046130',
'34':'438046131',
'35':'438046132',
'36':'438046129',
}
|
3,601 | f29d377e8a8fd6d2e156da665478d7a4c167f7d5 | import gdalnumeric
# Simple raster classification (Python 2 script: note the `print` statement).
# Buckets the input raster's pixel values into classes via numpy.histogram
# and paints each class with a colour from a look-up table.

#Input File
src = "../dati/islands/islands.tif"
#Output
tgt = "../dati/islands/islands_classified.jpg"
srcArr = gdalnumeric.LoadFile(src)
# histogram(...)[1] is the array of bin EDGES (bins+1 values); each edge is
# used below as the inclusive upper bound of one class interval.
classes = gdalnumeric.numpy.histogram(srcArr,bins=2)[1]
print classes
#Color look-up table (LUT) - must be len(classes)+1.
#Specified as R,G,B tuples
lut = [[255,0,0],[0,0,0],[255,255,255]]
start = 1
# One float32 plane per colour channel (R, G, B), same height/width as source.
rgb = gdalnumeric.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1],),gdalnumeric.numpy.float32)
# Process all classes and assign colors
for i in range(len(classes)):
    # Pixels whose value falls in [start, classes[i]] belong to class i.
    mask = gdalnumeric.numpy.logical_and(start <= srcArr, srcArr <= classes[i])
    for j in range(len(lut[i])):
        # Where mask is True take the LUT colour, elsewhere keep prior value.
        rgb[j] = gdalnumeric.numpy.choose(mask, (rgb[j], lut[i][j]))
    start = classes[i]+1
# Save the image
gdalnumeric.SaveArray(rgb.astype(gdalnumeric.numpy.uint8), tgt, format="GTIFF",prototype=src) |
3,602 | 2d4b0e7b430ffb5d236300079ded4b848e6c6485 | print raw_input().count(raw_input()) |
3,603 | d7653a205fb8203fed4009846780c63dd1bcb505 | import csv
import sys
if len(sys.argv[1:]) == 5 :
(name_pos, start_pos, length_pos,
first_note_pos, second_note_pos) = [int(pos) for pos in sys.argv[1:]]
elif len(sys.argv[1:]) == 4 :
(name_pos, start_pos, length_pos,
first_note_pos) = [int(pos) for pos in sys.argv[1:]]
second_note_pos = None
else :
name_pos, start_pos, length_pos, first_note_pos, second_note_pos = 5, 3, 4, 2, 1
blacklist=("Blank", "semicolon filler")
reader = csv.reader(sys.stdin)
writer = csv.writer(sys.stdout)
writer.writerow(('column', 'start', 'length'))
for row in reader :
try :
if not row[name_pos].strip() or row[name_pos].strip() in blacklist :
continue
except IndexError :
continue
if second_note_pos is not None and row[second_note_pos].strip() :
col_name = '; '.join(name.strip() for name in (row[name_pos],
row[first_note_pos],
row[second_note_pos]))
elif row[first_note_pos].strip() :
col_name = '; '.join(name.strip() for name in (row[name_pos],
row[first_note_pos]))
else :
col_name = row[name_pos].strip()
col_start = int(row[start_pos].split('-')[0].strip())
col_length = int(float(row[length_pos])) - 1
writer.writerow((col_name, col_start, col_length))
|
3,604 | 84f6336261e1c276f029822754842514715791df | from unittest import TestCase
from spiral.spiral_matrix import SpiralMatrix
class TestOutwardCounterClockwise(TestCase):
    """Exercises SpiralMatrix iteration with clockwise=False, inward=False,
    i.e. an outward, counter-clockwise spiral traversal.  Covers empty
    inputs, row/column vectors, squares and rectangles of both parities,
    and one large matrix (spot-checked at both ends)."""

    def test_traverse_empty(self):
        matrix = []
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([], actual)

    def test_traverse_empty_vector(self):
        matrix = [[]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([], actual)

    def test_traverse_single_element(self):
        matrix = [[1]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([1], actual)

    def test_traverse_row_vector(self):
        matrix = [[1, 2, 3]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([3, 2, 1], actual)

    def test_traverse_column_vector(self):
        matrix = [
            [1],
            [2],
            [3]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([3, 2, 1], actual)

    def test_traverse_even_square(self):
        matrix = [
            [1, 2],
            [3, 4]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([3, 4, 2, 1], actual)

    def test_traverse_odd_square(self):
        matrix = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([5, 4, 7, 8, 9, 6, 3, 2, 1], actual)

    def test_traverse_wide_odd_height_rectangle(self):
        matrix = [
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12]]
        actual = [
            i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual(
            [7, 6, 5, 9, 10, 11, 12, 8, 4, 3, 2, 1], actual)

    def test_traverse_wide_even_height_rectangle(self):
        matrix = [
            [1, 2, 3, 4],
            [5, 6, 7, 8]]
        actual = [
            i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual(
            [5, 6, 7, 8, 4, 3, 2, 1], actual)

    def test_traverse_tall_even_width_rectangle(self):
        matrix = [
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20]]
        actual = [
            i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual(
            [10, 14, 15, 11, 7, 6, 5, 9, 13, 17, 18, 19, 20, 16, 12, 8, 4, 3, 2, 1],
            actual)

    def test_traverse_tall_odd_width_rectangle(self):
        matrix = [
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9],
            [10, 11, 12]]
        actual = [
            i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual(
            [8, 5, 4, 7, 10, 11, 12,9, 6, 3, 2, 1], actual)

    def test_traverse_large_matrix(self):
        # 1000x1000 matrix; only spot-check the first and last four elements.
        matrix = [[i * 1000 + j for j in range(0, 1000)]
                  for i in range(0, 1000)]
        actual = [i for i in SpiralMatrix(matrix, clockwise=False, inward=False)]
        self.assertEqual([500499, 500500, 499500, 499499], actual[0:4])
        self.assertEqual([3, 2, 1, 0],
                         actual[-4:])
|
3,605 | fc01c6fb812fe78ca04496494d68fcc90ae706f5 | import numpy as np
def shufflelists(lists):
    """Shuffle several equal-length sequences in unison.

    A single random permutation (of the length of the first sequence) is
    drawn and applied to every sequence, so corresponding elements stay
    aligned across the returned lists.

    Parameters
    ----------
    lists : sequence of equal-length sequences.

    Returns
    -------
    list of lists, each reordered by the shared permutation.
    """
    # BUGFIX: the original was truncated/broken -- the permutation call was
    # missing its closing parenthesis and the loop body was missing entirely.
    perm = np.random.permutation(len(lists[0]))
    return [[seq[i] for i in perm] for seq in lists]
|
3,606 | 9a7908212bf13565109cd4d9ab6de65909bc6910 | # Copyright 2014 Charles Noneman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for running the test modules"""
from __future__ import print_function
import importlib
import pkgutil
import unittest
import test
def run():
    """Runs all of the tests"""
    # Collect a sub-suite for every test_* module found inside the local
    # `test` package, then run everything with a verbose text runner.
    subsuite_list = []
    for _, modname, _ in pkgutil.iter_modules(test.__path__):
        if modname.startswith("test_"):
            module = importlib.import_module('test.' + modname)
            subsuite = unittest.TestLoader().loadTestsFromModule(module)
            subsuite_list.append(subsuite)
    suite = unittest.TestSuite(subsuite_list)
    print("Testing:\n")
    unittest.TextTestRunner(verbosity=2).run(suite)

if __name__ == '__main__':
    run()
|
3,607 | 78c8f953b924f3e664570b844bf736a788e9cfb7 | from distutils.core import setup, Extension
# Build configuration for the `supermodule` C extension module.
setup(
    name='supermodule',
    version='1.0',
    ext_modules=[Extension('supermodule', ['main.c'])],
)
|
3,608 | 88862d6bee5d83dd5f1c656a06a9dc46a5254b10 | import math
import operator as op
Symbol = str  # A Scheme Symbol is implemented as a Python str
Number = (int, float)  # A Scheme Number is a Python int or float
Atom = (Symbol, Number)  # A Scheme Atom is a Symbol or a Number
List = list  # A Scheme List is implemented as a Python list
Exp = (Atom, List)  # A Scheme expression is an Atom or a List
Env = dict  # A Scheme environment is a mapping of {variable: value}
def standard_env() -> Env:
    """Build an environment with some Scheme standard procedures.

    Returns a fresh Env (dict) mapping operator and procedure names to
    Python callables, pre-seeded with everything from ``math``
    (sin, cos, sqrt, pi, ...).
    """
    env = Env()
    env.update(vars(math))  # sin, cos, sqrt, pi ...
    env.update({
        # BUGFIX: the original mapped '>' twice ('>':op.gt, '>':op.lt), so
        # '>' silently meant less-than and '<' was missing entirely.
        '+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv,
        '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq,
        'abs': abs,
        'append': op.add,
        'apply': lambda proc, args: proc(*args),
        'begin': lambda *x: x[-1],
        'car': lambda x: x[0],
        'cdr': lambda x: x[1:],
        'cons': lambda x, y: [x] + y,
        'eq?': op.is_,
        'expt': pow,
        'equal?': op.eq,
        'length': len,
        'list': lambda *x: List(x),
        'list?': lambda x: isinstance(x, List),
        'map': map,
        'max': max,
        'min': min,
        'not': op.not_,
        'null?': lambda x: x == [],
        'number?': lambda x: isinstance(x, Number),
        'print': print,
        'procedure?': callable,
        'round': round,
        'symbol?': lambda x: isinstance(x, Symbol),
    })
    return env
global_env = standard_env()
def eval(x: Exp, env=global_env) -> Exp:
    "Evaluate an expression in an environment."
    # NOTE: intentionally shadows the builtin `eval` (classic lis.py design).
    if isinstance(x, Symbol):  # variable reference: look up in the environment
        return env[x]
    elif not isinstance(x, List):  # constant number: evaluates to itself
        return x
    elif x[0] == 'if':  # conditional: (if test conseq alt)
        (_, test, conseq, alt) = x
        exp = (conseq if eval(test, env) else alt)
        return eval(exp, env)
    elif x[0] == 'define':  # definition: (define symbol exp); returns None
        (_, symbol, exp) = x
        env[symbol] = eval(exp, env)
    else:  # procedure call: evaluate operator, then operands, then apply
        proc = eval(x[0], env)
        args = [eval(arg, env) for arg in x[1:]]
        return proc(*args)
def tokenize(chars: str) -> list:
    """Convert a string of characters into a list of tokens."""
    spaced = chars
    # Pad parentheses with spaces so a plain split isolates them as tokens.
    for bracket in ('(', ')'):
        spaced = spaced.replace(bracket, ' ' + bracket + ' ')
    return spaced.split()
def parse(program: str) -> Exp:
    """Read a Scheme expression from a source string."""
    tokens = tokenize(program)
    return read_from_tokens(tokens)
def read_from_tokens(tokens: list) -> Exp:
    """Read an expression from a sequence of tokens.

    Consumes tokens from the front of the list in place.  Raises
    SyntaxError on premature end of input or on an unmatched ')'.
    """
    if len(tokens) == 0:
        raise SyntaxError('unexpected EOF')
    token = tokens.pop(0)
    if token == '(':
        L = []
        # BUGFIX: guard against running out of tokens inside an unclosed
        # list -- the original indexed tokens[0] unconditionally and raised
        # IndexError on input like "(".
        while tokens and tokens[0] != ')':
            L.append(read_from_tokens(tokens))
        if not tokens:
            raise SyntaxError('unexpected EOF')
        tokens.pop(0)  # pop off ')'
        return L
    elif token == ')':
        raise SyntaxError('unexpected )')
    else:
        return atom(token)
def atom(token: str) -> Atom:
    """Numbers become numbers; every other token is a symbol."""
    # Try the numeric interpretations in order of strictness.
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return Symbol(token)
# Demo: evaluate the area of a circle of radius 10 (pi * r * r).
program = "(begin (define r 10) (* pi (* r r)))"
print(eval(parse(program)))
|
3,609 | 18be97061c65185fcebf10c628e0e51bb08522cf | import torch
import argparse
from DialogGenerator import DialogGenerator
from DialogDataset import DialogDataset
from DialogDiscriminator import DialogDiscriminator
from transformers import GPT2Tokenizer
import os
def prep_folder(args):
    """Append a slash to ``args.save_folder`` if needed and ensure the
    directory exists.

    Mutates ``args.save_folder`` in place so later code can concatenate
    filenames directly onto it.
    """
    if not args.save_folder.endswith('/'):
        args.save_folder += '/'
    # makedirs creates missing intermediate directories too; the original
    # os.mkdir failed with FileNotFoundError when the parent did not exist,
    # and exist_ok avoids the check-then-create race.
    os.makedirs(args.save_folder, exist_ok=True)
if(__name__=="__main__"):
    # Command-line entry point: builds the dataset/loader, instantiates the
    # generator (and optionally a discriminator), then trains either
    # adversarially or with traditional teacher forcing.
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=3, dest="epochs", help='Number of epochs to run')
    parser.add_argument('--batch-size', type=int, default=50, dest="batch_size", help='Batch size')
    parser.add_argument('--max-out-length', type=int, default=128, dest="max_out_length", help='Maximum output length (outputs truncated if longer)')
    parser.add_argument('--adversarial-model', type=str, default=None, dest="adv_model", help='Type of adversarial model to use. Will use traditional teacher forcing if None.')
    parser.add_argument('--train-disc-only-steps', type=int, default=0, dest="train_disc_only_steps", help='Number of steps for which to train discriminator only (without updating generator)')
    parser.add_argument('--gen_weight_decay', type=float, default=0, dest="gen_weight_decay", help='Weight decay for the generator\'s training scheduler')
    parser.add_argument('--gen_lr', type=float, default=2e-5, dest="gen_lr", help='Learning rate for generator')
    parser.add_argument('--gen_epsilon', type=float, default=1e-8, dest="gen_epsilon", help='Epsilon parameter for generator optimizer')
    parser.add_argument('--gen_warmup_steps', type=int, default=0, dest="gen_warmup_steps", help='Number of warmup steps for training generator')
    parser.add_argument('--disc_weight_decay', type=float, default=0, dest="disc_weight_decay", help='Weight decay for the discriminator\'s training scheduler')
    parser.add_argument('--disc_lr', type=float, default=2e-5, dest="disc_lr", help='Learning rate for discriminator')
    parser.add_argument('--disc_epsilon', type=float, default=1e-8, dest="disc_epsilon", help='Epsilon parameter for discriminator optimizer')
    parser.add_argument('--disc_warmup_steps', type=int, default=0, dest="disc_warmup_steps", help='Number of warmup steps for training discriminator')
    parser.add_argument('--train-data-path', type=str, dest="train_data_path", help="Filepath to preprocessed data")
    parser.add_argument('--save-folder', type=str, dest="save_folder", help="Filepath to folder where checkpoints should be saved")
    parser.add_argument('--pretrained-gen', type=str, default=None, dest="pretrained_gen", help="Filepath to trained generator. If None, will instantiate a default pretrained generator.")
    parser.add_argument('--pretrained-disc', type=str, default=None, dest="pretrained_disc", help="Filepath to trained discriminator. If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.")
    args = parser.parse_args()
    # Both paths are mandatory despite being optional-style flags.
    # NOTE(review): asserts are stripped under `python -O`; argparse's
    # required=True would be the safer way to enforce these.
    assert args.train_data_path is not None
    assert args.save_folder is not None
    prep_folder(args)
    eos_token_id = GPT2Tokenizer.from_pretrained("gpt2").eos_token_id
    train_dataset = DialogDataset(args.train_data_path, eos_token_id)
    train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)
    # Optimizer/scheduler hyper-parameters for the generator.
    gen_opt_params = {"weight_decay": args.gen_weight_decay,
                      "lr": args.gen_lr,
                      "warmup_steps": args.gen_warmup_steps,
                      "epsilon": args.gen_epsilon,
                      "total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
    generator = DialogGenerator(args.pretrained_gen, args.save_folder, gen_opt_params)
    if(args.adv_model is not None):
        # Adversarial path: also build a discriminator with its own schedule.
        disc_opt_params = {"weight_decay": args.disc_weight_decay,
                           "lr": args.disc_lr,
                           "warmup_steps": args.disc_warmup_steps,
                           "epsilon": args.disc_epsilon,
                           "total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
        discriminator = DialogDiscriminator(args.adv_model, args.pretrained_disc, args.save_folder, disc_opt_params)
        generator.train_adversarial(train_loader, args.epochs, args.max_out_length, discriminator, args.train_disc_only_steps)
    else:
        generator.train_traditional(train_loader, args.epochs, args.max_out_length)
|
3,610 | 68dcac07bbdb4dde983939be98ece127d963c254 | """Google Scraper
Usage:
web_scraper.py <search> <pages> <processes>
web_scraper.py (-h | --help)
Arguments:
<search> String to be Searched
<pages> Number of pages
<processes> Number of parallel processes
Options:
-h, --help Show this screen.
"""
import re
from functools import partial
from multiprocessing import Pool
from time import time as timer
import requests
from bs4 import BeautifulSoup
from docopt import docopt
def get_urls(search_string, start):
    """Fetch one Google results page and return the result URLs found on it.

    Parameters
    ----------
    search_string : str
        Query passed as Google's ``q`` parameter.
    start : str or int
        Result offset passed as Google's ``start`` parameter.

    Returns
    -------
    list of str: the target URLs extracted from the redirect hrefs.
    """
    urls = []
    url = 'http://www.google.com/search'
    payload = {'q': search_string, 'start': start}
    my_headers = {'User-agent': 'Mozilla/11.0'}
    r = requests.get(url, params=payload, headers=my_headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    h3tags = soup.find_all('h3', class_='r')
    for h3 in h3tags:
        # BUGFIX: the original used a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; catch only the failures a malformed
        # result entry can actually produce (missing <a>, missing href,
        # or an href that does not match the redirect pattern).
        try:
            match = re.search(r'url\?q=(.+?)\&sa', h3.a['href'])
        except (AttributeError, KeyError, TypeError):
            continue
        if match:
            urls.append(match.group(1))
    return urls
def main():
    """Parse CLI arguments with docopt and scrape Google result pages in
    parallel, printing the deduplicated URLs and timing stats."""
    start = timer()
    result = []
    # docopt builds the CLI from the module docstring at the top of the file.
    arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')
    search = arguments['<search>']
    pages = arguments['<pages>']
    processes = int(arguments['<processes>'])
    ####Changes for Multi-Processing####
    # Bind the search string so the pool only maps over page offsets
    # (Google pages advance in steps of 10 results).
    make_request = partial(get_urls, search)
    pagelist = [str(x * 10) for x in range(0, int(pages))]
    with Pool(processes) as p:
        tmp = p.map(make_request, pagelist)
    for x in tmp:
        result.extend(x)
    ####Changes for Multi-Processing####
    # Deduplicate; note this does not preserve result order.
    result = list(set(result))
    print(*result, sep='\n')
    print('\nTotal URLs Scraped : %s ' % str(len(result)))
    print('Script Execution Time : %s ' % (timer() - start,))

if __name__ == '__main__':
    main()
# End
|
3,611 | 5db450424dc143443839e24801ece444d0d7e162 | # Generated by Django 3.2 on 2021-06-28 04:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch Permiso.mes to zero-padded month codes with Spanish labels."""

    dependencies = [
        ('rrhh', '0014_alter_detallepermiso_fecha_permiso'),
    ]
    operations = [
        migrations.AlterField(
            model_name='permiso',
            name='mes',
            # Stored value is the 2-char month number; the display name is
            # the Spanish month name.
            field=models.CharField(choices=[('01', 'ENERO'), ('02', 'FEBRERO'), ('03', 'MARZO'), ('04', 'ABRIL'), ('05', 'MAYO'), ('06', 'JUNIO'), ('07', 'JULIO'), ('08', 'AGOSTO'), ('09', 'SEPTIEMBRE'), ('10', 'OCTUBRE'), ('11', 'NOVIEMBRE'), ('12', 'DICIEMBRE')], max_length=2),
        ),
    ]
|
3,612 | 5b6ed75279b39a1dad1bf92535c4b129bb599350 | class Solution:
"""
https://leetcode.com/problems/game-of-life/
289. Game of Life
Medium
--------------------
According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
Any live cell with fewer than two live neighbors dies, as if caused by under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by over-population..
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
Write a function to compute the next state (after one update) of the board given its current state. The next state is created by applying the above rules simultaneously to every cell in the current state, where births and deaths occur simultaneously.
Example:
Input:
[
[0,1,0],
[0,0,1],
[1,1,1],
[0,0,0]
]
Output:
[
[0,0,0],
[1,0,1],
[0,1,1],
[0,1,0]
]
Follow up:
Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
"""
def gameOfLife(self, board):
    """
    Do not return anything, modify board in-place instead.

    Entry point; delegates to gameOfLife_2, the O(1)-extra-space variant.
    """
    self.gameOfLife_2(board)
def gameOfLife_1(self, board):
    """
    Straightforward variant: O(M*N) extra space, O(M*N) time.

    Takes a snapshot of the board, then rewrites every cell of the
    original based on the snapshot, so all rules see the previous
    generation. (gameOfLife_2 is the improved O(1)-space version.)
    :param board: 2D list of 0/1 ints, mutated in place.
    :return: None
    """
    # 3x3 grid of [di, dj] offsets; the center is replaced by a far-away
    # sentinel so a cell never counts itself as its own neighbor.
    self.template = [[[i, j] for i in (-1, 0, 1)] for j in (-1, 0, 1)]
    self.template[1][1] = [9999, 9999]
    # Snapshot of the previous generation.
    tmp_board = [[j for j in row] for row in board]
    for i in range(len(tmp_board)):
        for j in range(len(tmp_board[i])):
            count = self.countLiveNeighborsCell(tmp_board, i, j)
            # Reproduction: dead cell with exactly 3 live neighbors.
            if tmp_board[i][j] == 0 and count == 3:
                board[i][j] = 1
            # Under-population: live cell with fewer than 2 neighbors.
            elif tmp_board[i][j] == 1 and count < 2:
                board[i][j] = 0
            # elif tmp_board[i][j] == 1 and count in [2, 3]:
            #     board[i][j] = 1
            # Over-population: live cell with more than 3 neighbors.
            elif tmp_board[i][j] == 1 and count > 3:
                board[i][j] = 0
def countLiveNeighborsCell(self, board, i, j):
    """Return the number of live neighbors of cell (i, j).

    Offsets come from ``self.template`` (a 3x3 grid of [di, dj] pairs
    whose center holds a far-away sentinel so the cell itself never
    lands back on the board).
    :param board: 2D list of 0/1 ints.
    :param i: row index.
    :param j: column index.
    :return: live-neighbor count.
    """
    max_row, max_col = len(board) - 1, len(board[0]) - 1
    total = 0
    for offset_row in self.template:
        for di, dj in offset_row:
            r, c = i + di, j + dj
            # Skip offsets that fall outside the board.
            if 0 <= r <= max_row and 0 <= c <= max_col:
                total += board[r][c]
    return total
def gameOfLife_2(self, board):
    """
    In-place update: O(1) extra space, O(M*N) time.

    Two-pass encoding so neighbor counts still see the old generation:
      -1 : was dead (0), becomes alive
       2 : was alive (1), becomes dead
    Any value > 0 counts as "was alive". A second pass decodes
    -1 -> 1 and 2 -> 0.
    :param board: 2D list of 0/1 ints, mutated in place.
    :return: None
    """
    offsets = ((1, 0), (1, -1), (0, -1), (-1, -1),
               (-1, 0), (-1, 1), (0, 1), (1, 1))
    rows = len(board)
    cols = len(board[0]) if rows else 0

    def live_neighbors(r, c):
        # Count neighbors that were alive in the original generation.
        total = 0
        for dr, dc in offsets:
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and board[nr][nc] > 0:
                total += 1
        return total

    # Pass 1: mark transitions without losing the old state.
    for r in range(rows):
        for c in range(cols):
            n = live_neighbors(r, c)
            if board[r][c] == 0 and n == 3:
                board[r][c] = -1            # reproduction
            elif board[r][c] == 1 and (n < 2 or n > 3):
                board[r][c] = 2             # under/over-population
    # Pass 2: decode markers into the next generation.
    for r in range(rows):
        for c in range(cols):
            if board[r][c] == -1:
                board[r][c] = 1
            elif board[r][c] == 2:
                board[r][c] = 0
def main():
    """Smoke test: run gameOfLife on the sample board from the docstring."""
    a = [
        [0, 1, 0],
        [0, 0, 1],
        [1, 1, 1],
        [0, 0, 0]
    ]
    Solution().gameOfLife(a)
    # Expected next generation: [[0,0,0],[1,0,1],[0,1,1],[0,1,0]]
    print(a)
    print("-----------------")
if __name__ == "__main__":
    main()
|
3,613 | 3cac7829cf0c07ddc704a25ec3c781c9510a8e0c | __version__ = '18.07.0' |
3,614 | 5efb8151375d705f3591921654f847e45b6927c9 | """
You are given two strings s1 and s2 of equal length. A string swap is an operation where you choose two indices in a string (not necessarily different) and swap the characters at these indices.
Return true if it is possible to make both strings equal by performing at most one string swap on exactly one of the strings. Otherwise, return false.
"""
class Solution:
    def areAlmostEqual(self, s1, s2):
        """Return True if s1 can equal s2 after at most one swap in one string.

        A swap exchanges the characters at two (possibly equal) indices of a
        single string. Success therefore requires the strings to differ in
        exactly zero positions, or in exactly two positions whose characters
        are mirrored across the strings.

        :param s1: first string.
        :param s2: second string.
        :return: bool
        """
        if len(s1) != len(s2):
            return False
        differences = [i for i in range(len(s1)) if s1[i] != s2[i]]
        if not differences:
            return True
        # Bug fix: both mismatched positions must cross-match. Checking only
        # s1[d0] == s2[d1] wrongly accepted pairs like ("ab", "ca").
        return (len(differences) == 2
                and s1[differences[0]] == s2[differences[1]]
                and s1[differences[1]] == s2[differences[0]])
if __name__ == "__main__":
    # Lightweight self-test: identical strings, empty strings, a full
    # reversal (4 diffs -> False), valid two-position swaps, and a single
    # difference (False).
    s = Solution()
    assert(s.areAlmostEqual("kelb", "kelb"))
    assert(s.areAlmostEqual("", ""))
    assert(not s.areAlmostEqual("abcd", "dcba"))
    assert(s.areAlmostEqual("abc", "cba"))
    assert(s.areAlmostEqual("abcdefghijklmnopqrstuvwxyz", "zbcdefghijklmnopqrstuvwxya"))
    assert(not s.areAlmostEqual("abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxya"))
|
3,615 | efa06d929e76a255afd9923b5340252c291a325c | import sys
from collections import defaultdict
sys.setrecursionlimit(1200)
def dfs(G, v, prev):
    """Post-order DFS over the tree G from v, skipping the edge back to prev.

    Returns a (cost, weight) pair: weight is the sum of the children's
    weights plus one; cost equals that sum, additionally folding in the two
    smallest child (cost - weight) deltas when at least two children exist.
    """
    deltas = []
    subtotal = 0
    for child in G[v]:
        if child == prev:
            continue
        child_cost, child_weight = dfs(G, child, v)
        subtotal += child_weight
        deltas.append(child_cost - child_weight)
    # Keep only the two most favorable (smallest) deltas.
    best_two = sorted(deltas)[:2]
    if len(best_two) == 2:
        return (subtotal + best_two[0] + best_two[1], subtotal + 1)
    return (subtotal, subtotal + 1)
def solve():
    """Read one test case (a tree on n nodes, 1-based edges) from stdin and
    return the minimum dfs() cost over all possible roots.

    NOTE(review): Python 2 code (xrange, print statement below) -- keep the
    interpreter consistent if this file is ever ported.
    """
    read_ints = lambda: map(int, sys.stdin.readline().split())
    n = int(sys.stdin.readline())
    G = defaultdict(list)
    for _ in xrange(n-1):
        x, y = read_ints()
        # 1-based input -> 0-based node ids.
        x, y = x-1, y-1
        G[x].append(y)
        G[y].append(x)
    # Try every node as the root and keep the cheapest result.
    return min(dfs(G, i, -1)[0] for i in xrange(n))
# Driver: one "Case #k: <answer>" line per test case (Code Jam format).
for t in xrange(int(sys.stdin.readline())):
    print "Case #%d:" % (t + 1),
    print solve()
|
3,616 | 2962ef1d7ecd4e8d472b9dc36664e4e8745391fd | from keras.preprocessing.text import text_to_word_sequence
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam, RMSprop
#from nltk import FreqDist
import numpy as np
import os
import datetime
import re
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index lists.

    Python 2 code (uses ``print>>`` file redirection). Source sentences are
    reversed (a common seq2seq trick); pairs where either side is empty or
    longer than max_len characters are dropped.

    :param train_source: path to the source-language training file.
    :param train_dist: path to the target-language training file.
    :param test_source: unused in the active code path.
    :param test_dist: unused in the active code path.
    :param max_len: maximum raw line length kept (applied pairwise).
    :param vocab_size: keep only this many most-frequent tokens per side.
    :return: (X, X_vocab_len, X_word_to_ix, X_ix_to_word,
              y, y_vocab_len, y_word_to_ix, y_ix_to_word)
    """
    # Reading raw text from source and destination files
    f = open(train_source, 'r')
    X_data = f.read()
    f.close()
    f = open(train_dist, 'r')
    y_data = f.read()
    f.close()
    # Splitting raw text into array of sequences
    X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]
    y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]
    #Check or Create Vocab
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        # No vocab files on disk: build them from token frequencies.
        vocab_x = {}
        for line in X:
            for token in line:
                if not token in vocab_x:
                    vocab_x[token] = 0
                vocab_x[token] += 1
        X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)
        X_vocab = X_vocab[0:(vocab_size)]
        # "newlinechar" is a placeholder token; store it as "-" in the file.
        for (i, item) in enumerate(X_vocab):
            if item == "newlinechar":
                X_vocab[i] = "-"
        for item in X_vocab:
            print>>x_vocab_file, item
        x_vocab_file.close()
        vocab_y = {}
        for line in y:
            for token in line:
                if not token in vocab_y:
                    vocab_y[token] = 0
                vocab_y[token] += 1
        y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)
        y_vocab = y_vocab[0:(vocab_size)]
        for (i, item) in enumerate(y_vocab):
            if item == "newlinechar":
                y_vocab[i] = "-"
        for item in y_vocab:
            print>>y_vocab_file, item
        y_vocab_file.close()
    else:
        # NOTE(review): the files were opened 'a+' (cursor at end of file),
        # so read() here returns '' unless seek(0) is called first -- confirm
        # this branch is actually exercised as intended.
        X_vocab = x_vocab_file.read().splitlines()
        y_vocab = y_vocab_file.read().splitlines()
    # Creating the vocabulary set with the most common words
    #dist = FreqDist(np.hstack(X))
    #X_vocab = dist.most_common(vocab_size-1)
    #dist = FreqDist(np.hstack(y))
    #y_vocab = dist.most_common(vocab_size-1)
    # Creating an array of words from the vocabulary set, we will use this array as index-to-word dictionary
    X_ix_to_word = X_vocab
    # Adding the word "ZERO" to the beginning of the array
    X_ix_to_word.insert(0, 'ZERO')
    # Adding the word 'UNK' to the end of the array (stands for UNKNOWN words)
    X_ix_to_word.append('UNK')
    # Creating the word-to-index dictionary from the array created above
    #X_word_to_ix = {word:ix for ix, word in enumerate(X_ix_to_word)}
    X_word_to_ix = dict((map(reversed, enumerate(X_ix_to_word))))
    # Converting each word to its index value
    for i, sentence in enumerate(X):
        for j, word in enumerate(sentence):
            if word in X_word_to_ix:
                X[i][j] = X_word_to_ix[word]
            else:
                X[i][j] = X_word_to_ix['UNK']
    # Same ZERO/UNK bookkeeping and index conversion for the target side.
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    #y_word_to_ix = {word:ix for ix, word in enumerate(y_ix_to_word)}
    y_word_to_ix = dict((map(reversed, enumerate(y_ix_to_word))))
    for i, sentence in enumerate(y):
        for j, word in enumerate(sentence):
            if word in y_word_to_ix:
                y[i][j] = y_word_to_ix[word]
            else:
                y[i][j] = y_word_to_ix['UNK']
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab), y_word_to_ix, y_ix_to_word)
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, reverse each sentence, and map tokens to indices.

    Tokens missing from the vocabulary map to the 'UNK' index; lines that
    are empty or longer than *max_len* characters are dropped.

    :param source: path to the text file to load.
    :param X_word_to_ix: token -> index mapping (must contain 'UNK').
    :param max_len: maximum raw line length kept.
    :return: list of index-encoded, reversed sentences.
    """
    with open(source, 'r') as handle:
        raw_text = handle.read()
    sentences = [text_to_word_sequence(line)[::-1]
                 for line in raw_text.split('\n')
                 if 0 < len(line) <= max_len]
    unknown_ix = X_word_to_ix['UNK']
    return [[X_word_to_ix.get(token, unknown_ix) for token in sentence]
            for sentence in sentences]
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers):
    """Build the encoder-decoder (seq2seq) Keras model.

    Encoder: embedding (dim 1000, zero-masked) + a single LSTM whose final
    state is repeated y_max_len times to seed the decoder.
    Decoder: num_layers stacked LSTMs + a per-timestep softmax over the
    target vocabulary.
    :return: compiled Sequential model (categorical crossentropy, rmsprop).
    """
    model = Sequential()
    # Creating encoder network
    model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))
    model.add(LSTM(hidden_size))
    model.add(RepeatVector(y_max_len))
    # Creating decoder network
    for _ in range(num_layers):
        model.add(LSTM(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(y_vocab_len)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a (num, max_len, vocab) array.

    :param word_sentences: list of sentences, each a list of token indices.
    :param max_len: padded sequence length (unused slots stay all-zero).
    :param word_to_ix: vocabulary mapping; only its size is used here.
    :return: numpy float array of shape (len(word_sentences), max_len, vocab).
    """
    vocab_size = len(word_to_ix)
    sequences = np.zeros((len(word_sentences), max_len, vocab_size))
    for row, sentence in enumerate(word_sentences):
        for col, token_ix in enumerate(sentence):
            sequences[row, col, token_ix] = 1.
    return sequences
def find_checkpoint_file(folder):
    """Return the name of the most recently modified checkpoint file in *folder*.

    :param folder: directory to scan for files containing 'checkpoint'.
    :return: the newest checkpoint's file name, or [] (historical sentinel,
        kept because callers truthiness-test the result) when none exist.
    """
    checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]
    if len(checkpoint_file) == 0:
        return []
    # Bug fix: getmtime needs the full path -- bare names only resolved when
    # the current working directory happened to be *folder*.
    modified_time = [os.path.getmtime(os.path.join(folder, f)) for f in checkpoint_file]
    return checkpoint_file[np.argmax(modified_time)]
|
3,617 | be90447eb7c717ae0bae28fd7f10238be733648d | import json
from tqdm import tqdm
from topic.topic import get_topic_scores, get_topic_similarity
user_weights = json.load(open('data/selected_user_weights.json', 'r', encoding='utf8'))
reviews = json.load(open('data/business_reviews_test.json', 'r', encoding='utf8'))
# For every (business, target user) pair where the target actually reviewed
# the business, predict the target's star rating as a topic-similarity
# weighted average of the OTHER users' ratings, alongside a plain average.
for business, business_reviews in reviews.items():
    for target_user in user_weights:
        if target_user in business_reviews:
            target_stars = business_reviews[target_user]['stars']  # ground truth
            star_sum = 0
            weighted_star_sum = 0
            weight_sum = 0
            num_scores = 0
            for user, review in business_reviews.items():
                if user != target_user:
                    text = review['text']
                    stars = review['stars']
                    # topic_scores = get_topic_scores(text)
                    # weight = 0
                    # for i, score in topic_scores:
                    #     weight += score * user_weights[target_user][i]
                    weight = get_topic_similarity(user_weights[target_user], text)
                    weighted_star_sum += stars * weight
                    weight_sum += weight
                    star_sum += stars
                    num_scores += 1
            # NOTE(review): divides by zero when the target is the only
            # reviewer (weight_sum/num_scores == 0); also neither prediction
            # is stored or printed yet -- evaluation step appears unfinished.
            predicted_stars = weighted_star_sum / weight_sum
            average = star_sum / num_scores
3,618 | 6add599035573842475c7f9155c5dbbea6c96a8a | from pyathena import connect
from Config import config2
from Config import merchants
def get_mapped_sku(sku):
    """Map a supply-chain SKU to its seller SKU and brand via Athena.

    :param sku: sc_sku value to look up in the master product list.
    :return: {'Cross-Reference No': seller_sku, 'brand': seller} for the
        first matching row, or {} on error / no match.
    """
    try:
        cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
                         aws_secret_access_key=config2["aws_secret_access_key"],
                         s3_staging_dir=config2["s3_staging_dir"],
                         region_name=config2["region_name"]).cursor()
        cursor.execute("SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s ",
                       {"sku": str(sku)})
        # print(cursor.description)
        result = cursor.fetchall()
        # Return on the first row; any additional matches are ignored.
        for row in result:
            return {'Cross-Reference No': row[0], 'brand': row[1]}
    except Exception as e:
        print(e)
        return {}
    # Query succeeded but found no rows.
    return {}
def get_sku(seller_sku, sc_sku, seller):
    """Look up the seller SKU mapped to *sc_sku* in the master product list.

    :param seller_sku: unused (kept for call-site compatibility).
    :param sc_sku: supply-chain SKU to look up.
    :param seller: unused (kept for call-site compatibility).
    :return: the first matching seller_sku string; False on query error;
        True when the query succeeds but yields no rows (historical
        behavior, preserved for existing callers).
    """
    try:
        cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
                         aws_secret_access_key=config2["aws_secret_access_key"],
                         s3_staging_dir=config2["s3_staging_dir"],
                         region_name=config2["region_name"]).cursor()
        # Bug fix: the bind dict referenced an undefined name `sku`, which
        # raised NameError inside the try and made every call return False.
        cursor.execute("SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s ",
                       {"sku": str(sc_sku)})
        # print(cursor.description)
        # print(cursor.fetchall())
        for row in cursor:
            return (row[0])
    except Exception as e:
        print(e)
        return False
    return True
def add_sku(sc_sku, seller_sku, seller):
    """Insert a new sc_sku -> seller_sku mapping row via Athena.

    :return: cursor.description on success (historical behavior),
        False on error.
    """
    try:
        cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
                         aws_secret_access_key=config2["aws_secret_access_key"],
                         s3_staging_dir=config2["s3_staging_dir"],
                         region_name=config2["region_name"]).cursor()
        cursor.execute("INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )",
                       {"scsku": str(sc_sku), "sellersku": str(seller_sku), "seller": str(seller)})
        return (cursor.description)
        # print(cursor.fetchall())
        # for row in cursor:
        #     return (row[0])
    except Exception as e:
        print(e)
        return False
    # Unreachable (try returns or except returns); kept from the original.
    return True
# print(add_sku('test', 'test', 'Adean'))
# result = (get_mapped_sku('HDS-3571'))
# print(result['Cross-Reference No'])
|
3,619 | 71ffad81bcbc480dc0a750680bc72e1d5c48556a | # Generated by Django 2.1.5 on 2021-06-01 19:16
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the Komenty model (its `jmeno` field is removed first)."""

    dependencies = [
        ('fotbal', '0008_auto_20210601_2109'),
    ]
    operations = [
        # The field must be removed before the model itself can be deleted.
        migrations.RemoveField(
            model_name='komenty',
            name='jmeno',
        ),
        migrations.DeleteModel(
            name='Komenty',
        ),
    ]
|
3,620 | b355bd5a519d65ea35d4e8d5e6a384424d79130a | # Write a class to hold player information, e.g. what room they are in
# currently.
class Player():
    """Holds player state: name, current room, and carried items (max 4)."""

    def __init__(self, name, location, items=None):
        # Bug fix: the mutable default `items=[]` was shared across every
        # Player instance, so one player's pickups appeared in all
        # inventories. Use None and build a fresh list per player.
        self.name = name
        self.location = location
        self.items = [] if items is None else items

    # def try_direction(self, user_action):
    #     attribute = user_action + '_to'
    #     # see if the current room has an attribute
    #     # we can use 'hasattr' (has attribute)
    #     if hasattr(self.location, attribute):
    #         # can use 'getattr' to move to room
    #         self.location = getattr(self.location, attribute)
    #     else:
    #         print("Nothing to find here!")

    def pick_up_item(self, item):
        """Add *item* to the inventory unless it is already full (4 items)."""
        if len(self.items) <= 3:
            self.items.append(item)
            print(f"""\n\nNOW YOU HAVE THE {item}!
            You can drop it at any time by typing 'drop {item}'\n""")
        else:
            print("Sorry you'll have to drop something to pick this up.")

    def drop_item(self, item):
        """Remove *item* from the inventory if anything is carried."""
        if len(self.items) > 0:
            self.items.remove(item)
            print(f"YOU HAVE DROPPED THE {item}.")
        else:
            print("You don't have any items to drop!")

    # add for player to print what items they have
    # def print_items
|
3,621 | 259a4bb39496bdfc71d60edb4994d26351c6961d | class Patient(object):
def __init__(self, id_number, name, bed_number, *allergies):
    # Patient record: id, display name, assigned bed, plus any number of
    # allergy entries (captured as a tuple via *allergies).
    self.id_number = id_number
    self.name = name
    self.allergies = allergies
    self.bed_number = bed_number
class Hospital(object):
def __init__(self, name, capacity):
self.patients = []
self.name = name
self.capacity = capacity
def addPatient(self, patient):
if len(self.patients) <= self.capacity:
self.patients.append(patient)
else:
print "The hospital is full."
return self
def discharge(self, patient):
for patient1 in self.patients:
if patient1.name == patient.name:
self.patients.remove(patient)
patient.bed_number = 0
return self
def displayInfo(self):
for patient in self.patients:
print "Id Number:", patient.id_number
print "Name:", patient.name
print "Bed Number:", patient.bed_number
print "Allergies:", patient.allergies
return self
# Demo: create four patients and admit them into a 2-bed hospital (the
# over-capacity adds print "The hospital is full."), then discharge one and
# print the remaining roster via the chainable API.
patientA = Patient(1235, "Helen Smith", 10, ("peanuts", "seafood"))
patientB = Patient(1594, "Robert Brown", 15, "eggs")
patientC = Patient(1587, "Amy Beard", 26, ("guinea pigs", "cats"))
patientD = Patient(1658, "Robin Meggs", 51, "coconut")
hospital1 = Hospital("Inova Fairfax", 2)
hospital1.addPatient(patientA).addPatient(patientB).addPatient(patientC).addPatient(patientD).discharge(patientA).displayInfo()
|
3,622 | 5c1d1eafb913822be9b6e46b15c6886f8bf3e2e1 | from flask import Flask, json, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import warnings
app = Flask(__name__)
# NOTE(review): DB credentials are hard-coded in the URI -- move to
# config/environment before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:1234@localhost/escuela'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db = SQLAlchemy(app)
ma = Marshmallow(app)
# ORM model for the `curso` table: id, course name, price.
class curso(db.Model):
    idcurso = db.Column(db.Integer, primary_key=True)
    nombre_curso = db.Column(db.String(45))
    precio = db.Column(db.Integer)
    def __init__(self, nombre, precio):
        self.nombre_curso = nombre
        self.precio = precio
# Create tables at import time (no-op when they already exist).
db.create_all()
# Marshmallow schema selecting the fields exposed by the API.
class CursoSchema(ma.Schema):
    class Meta:
        fields = ('idcurso','nombre_curso','precio')
curso_Schema = CursoSchema()            # single-object serializer
cursos_Schema = CursoSchema(many=True)  # list serializer
@app.route('/',methods=['GET'])
def index():
    """Landing / health-check endpoint."""
    return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
    """Create a course from a JSON body {nombre_curso, precio}."""
    nombre_curso = request.json['nombre_curso']
    precio = request.json['precio']
    new_Curso = curso(nombre_curso,precio)
    db.session.add(new_Curso)
    db.session.commit()
    return curso_Schema.jsonify(new_Curso)
@app.route('/cursos',methods=['GET'])
def get_cursos():
    """List every course."""
    all_cursos = curso.query.all()
    result =cursos_Schema.dump(all_cursos)
    return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
    """Fetch one course by primary key."""
    cursoGet = curso.query.get(id)
    return curso_Schema.jsonify(cursoGet)
@app.route('/cursos/<id>', methods=['PUT'])
def update_curso(id):
    """Replace a course's name and price from the JSON body."""
    # NOTE(review): an unknown id makes query.get return None and the
    # attribute assignments fail -- consider get_or_404.
    cursoUpdate=curso.query.get(id)
    nombre = request.json['nombre_curso']
    precio = request.json['precio']
    cursoUpdate.nombre_curso = nombre
    cursoUpdate.precio = precio
    db.session.commit()
    return curso_Schema.jsonify(cursoUpdate)
@app.route('/cursos/<id>',methods=['DELETE'])
def delete_item(id):
    """Delete a course by id and echo the deleted row."""
    # NOTE(review): same missing-id concern as update_curso.
    cursoDelete = curso.query.get(id)
    db.session.delete(cursoDelete)
    db.session.commit()
    return curso_Schema.jsonify(cursoDelete)
if __name__ == "__main__":
    app.run(debug=True)
3,623 | 7a41826f65f2f55b4c678df2ac06027df6ca50d4 | __author__ = 'piotrek'
import os
import zipfile
import tarfile
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from Widgets.list_view import ListView
from Threads.PackThread import PackThread
class CreateArchive(QtWidgets.QDialog):
    """Modal dialog that collects files/folders and packs them into a .zip
    or .tar archive on a background PackThread."""

    def __init__(self, model, index, path, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Utworz archiwum')
        self.setWindowModality(QtCore.Qt.WindowModal)
        self.resize(350, 400)
        self.path = path
        self.file_model = model
        self.index = index
        self.create_components()
        self.create_layout()
        # Worker thread; its signals report progress, completion, and
        # permission failures back into this dialog.
        self.pack_thread = PackThread()
        self.pack_thread.status_signal.connect(self.ended)
        self.pack_thread.progress_signal.connect(self.progress)
        self.pack_thread.access_signal.connect(self.access)

    def create_item(self, index):
        """Build a checkable list item (icon + basename) for a model index."""
        path = os.path.abspath(self.file_model.filePath(index))
        item = QtGui.QStandardItem(os.path.basename(path))
        item.setIcon(self.file_model.fileIcon(index))
        item.setCheckable(True)
        item.setEditable(False)
        return item

    def create_components(self):
        """Create all widgets: name/type inputs, path picker, file list,
        progress bar, and the pack button."""
        self.option_widget = QtWidgets.QWidget()
        self.name_lbl = QtWidgets.QLabel('Nazwa')
        self.name_edit = QtWidgets.QLineEdit('untitled')
        self.name_edit.setMaxLength(30)
        # Restrict the archive name to word characters.
        self.name_edit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp('\w{30}'), self.name_edit))
        self.archive_type_cb = QtWidgets.QComboBox()
        self.archive_type_cb.addItem('.zip')
        self.archive_type_cb.addItem('.tar')
        self.path_lbl = QtWidgets.QLabel(self.path)
        self.path_lbl.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        self.path_lbl.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.path_lbl.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.set_path_btn = QtWidgets.QPushButton('Sciezka', clicked=self.set_path)
        self.file_list = ListView('Pliki do zapakowania')
        # Seed the list with the item the dialog was opened on.
        self.file_list.add_element(self.index)
        self.file_list.add_to_model(self.create_item(self.index))
        self.add_folder_btn = QtWidgets.QPushButton('Dodaj katalog', clicked=self.add_catalog)
        self.add_file_btn = QtWidgets.QPushButton('Dodaj plik', clicked=self.add_file)
        self.remove_selected_btn = QtWidgets.QPushButton('Usun zaznaczone', clicked=self.file_list.remove_selected)
        self.progress_bar = QtWidgets.QProgressBar()
        self.progress_bar.setMinimum(0)
        self.progress_lbl = QtWidgets.QLabel()
        self.pack_btn = QtWidgets.QPushButton('Zapakuj', clicked=self.pack_files)

    def set_path(self):
        """Let the user pick the directory the archive will be written to."""
        path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Wybierz katalog', QtCore.QDir.homePath())
        if path:
            self.path = path
            self.path_lbl.setText(self.path)

    def create_layout(self):
        """Assemble the widget hierarchy created in create_components()."""
        option_layout = QtWidgets.QGridLayout()
        v_option_layout = QtWidgets.QVBoxLayout()
        main_layout = QtWidgets.QGridLayout()
        v_main_layout = QtWidgets.QVBoxLayout()
        # Row: name label + edit + archive-type combo.
        h_name_layout = QtWidgets.QHBoxLayout()
        h_name_layout.addWidget(self.name_lbl)
        h_name_layout.addWidget(self.name_edit)
        h_name_layout.addWidget(self.archive_type_cb)
        v_option_layout.addLayout(h_name_layout)
        # Row: destination path + picker button.
        h_path_layout = QtWidgets.QHBoxLayout()
        h_path_layout.addWidget(self.path_lbl)
        h_path_layout.addWidget(self.set_path_btn)
        v_option_layout.addLayout(h_path_layout)
        v_option_layout.addWidget(self.file_list)
        # Row: add-folder / add-file / remove-selected buttons.
        h_remove_layout = QtWidgets.QHBoxLayout()
        h_remove_layout.addWidget(self.add_folder_btn)
        h_remove_layout.addWidget(self.add_file_btn)
        h_remove_layout.addWidget(self.remove_selected_btn)
        v_option_layout.addLayout(h_remove_layout)
        option_layout.addLayout(v_option_layout, 0, 0, 1, 1)
        self.option_widget.setLayout(option_layout)
        v_main_layout.addWidget(self.option_widget)
        v_main_layout.addWidget(self.progress_bar)
        v_main_layout.addWidget(self.pack_btn)
        main_layout.addLayout(v_main_layout, 0, 0, 1, 1)
        self.setLayout(main_layout)

    def pack_files(self):
        """Validate inputs, then start the background packing thread."""
        if not self.name_edit.text():
            return
        if not self.file_list.get_quantity():
            return
        # Freeze the options and switch the bar to busy mode while packing.
        self.option_widget.setEnabled(False)
        self.progress_bar.setMaximum(0)
        name = self.name_edit.text() + self.archive_type_cb.itemData(self.archive_type_cb.currentIndex(),
                                                                     QtCore.Qt.DisplayRole)
        path = self.path_lbl.text()
        list_index = self.file_list.get_index_list()
        path_list = [self.file_model.filePath(index) for index in list_index]
        # Hand the matching module-level pack function to the worker.
        if self.archive_type_cb.currentText() == '.zip':
            self.pack_thread.set(pack_zip, name, path, path_list)
        elif self.archive_type_cb.currentText() == '.tar':
            self.pack_thread.set(pack_tar, name, path, path_list)
        self.pack_thread.start()

    def add_catalog(self):
        """Add a directory to the pack list (symlinks and duplicates skipped)."""
        catalog = QtWidgets.QFileDialog.getExistingDirectory(self, 'Wybierz katalog', QtCore.QDir.homePath())
        if catalog and not QtCore.QFileInfo(catalog).isSymLink():
            index = self.file_model.index(catalog)
            if index not in self.file_list:
                self.file_list.add_element(index)
                self.file_list.add_to_model(self.create_item(index))

    def add_file(self):
        """Add a single file to the pack list (duplicates skipped)."""
        file, _filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Wybierz plik', QtCore.QDir.homePath())
        if file:
            index = self.file_model.index(file)
            if index not in self.file_list:
                self.file_list.add_element(index)
                self.file_list.add_to_model(self.create_item(index))

    def ended(self):
        """Slot for status_signal: notify via tray and turn the button into
        a close button."""
        self.parent().trayIcon.showMessage('Zakonczono',
                                           'Zakonczono zapakowywanie pliku {0}'.format(self.pack_thread.name),
                                           QtWidgets.QSystemTrayIcon.Information, 2000)
        self.pack_btn.setText('Zamknij')
        self.progress_bar.setMaximum(1)
        self.progress_bar.setValue(1)
        self.pack_thread.terminate()
        self.pack_btn.clicked.connect(self.close)

    def access(self):
        """Slot for access_signal: report a permission failure and allow
        closing the dialog."""
        self.setWindowTitle('Brak dostepu')
        self.pack_btn.setText('Zamknij')
        self.progress_bar.setMaximum(1)
        self.progress_bar.setValue(1)
        self.pack_thread.terminate()
        self.pack_btn.clicked.connect(self.close)

    def progress(self, info):
        """Slot for progress_signal: show the file currently being packed."""
        print('info', info)  # TODO: remove debug print
        self.setWindowTitle(info)

    def closeEvent(self, QCloseEvent):
        """Block closing while packing runs; refresh the parent views after."""
        if not self.pack_thread.ended:
            QCloseEvent.ignore()
        self.parent().catalog_list.setRootIndex(self.parent().catalog_list.rootIndex())
        self.parent().catalog_list.scrollTo(self.parent().catalog_list.currentIndex())
        self.parent().model_list.refresh(self.parent().catalog_list.rootIndex())
def pack_tar(thread, name, target_path, path_list):
    """Create tar archive *name* inside *target_path* from the given paths.

    Plain files are stored under their basename; directories are walked
    recursively and stored relative to their parent directory. Each entry
    is announced via thread.progress_signal; IOError (e.g. permissions)
    is reported via thread.access_signal.
    """
    tar_path = os.path.join(os.path.abspath(target_path), name)
    try:
        with tarfile.open(tar_path, 'w') as tar_file:
            for file_path in path_list:
                if not os.path.isdir(file_path):
                    thread.progress_signal.emit(file_path)
                    tar_file.add(file_path, arcname=os.path.basename(file_path))
                else:
                    catalog_path = os.path.dirname(os.path.abspath(file_path))
                    for root_folder, subfolders, files in os.walk(file_path):
                        for file in files:
                            # Bug fix: emitted on a nonexistent
                            # `in_progress_signal`; the dialog connects
                            # `progress_signal` (as the file branch uses).
                            thread.progress_signal.emit(os.path.join(root_folder, file))
                            tar_file.add(os.path.join(root_folder, file),
                                         arcname=os.path.join(root_folder[len(catalog_path) + 1:], file))
    except IOError:
        thread.access_signal.emit()
def pack_zip(thread, name, target_path, path_list):
    """Create zip archive *name* inside *target_path* from the given paths.

    Plain files are stored under their basename; directories are walked
    recursively and stored relative to their parent directory. Each entry
    is announced via thread.progress_signal; IOError (e.g. permissions)
    is reported via thread.access_signal.
    """
    zip_path = os.path.join(os.path.abspath(target_path), name)
    try:
        with zipfile.ZipFile(zip_path, 'w') as zip_file:
            for path_file in path_list:
                if not os.path.isdir(path_file):
                    thread.progress_signal.emit(path_file)
                    zip_file.write(path_file, arcname=os.path.basename(path_file))
                else:
                    path_folder = os.path.dirname(os.path.abspath(path_file))
                    for root_folder, subfolders, files in os.walk(path_file):
                        for file in files:
                            # Bug fix: called `thread.emit(...)` -- the
                            # signal object, not the thread, has emit();
                            # mirror pack_tar's file branch.
                            thread.progress_signal.emit(os.path.join(root_folder, file))
                            zip_file.write(os.path.join(root_folder, file),
                                           arcname=os.path.join(root_folder[len(path_folder) + 1:], file))
    except IOError:
        thread.access_signal.emit()
|
3,624 | 4c59e5fab2469af3f40cafaac226a993f6628290 | import json
import tempfile
import zipfile
from contextlib import contextmanager
from utils import (
codepipeline_lambda_handler,
create_zip_file,
get_artifact_s3_client,
get_cloudformation_template,
get_input_artifact_location,
get_output_artifact_location,
get_session,
get_user_parameters,
log,
)
@codepipeline_lambda_handler
def lambda_handler(event, context):
    """
    Prepares for an AMI deployment.

    Reads the pipeline job's input artifact (imageDetail.json), stores the
    referenced image URI in an SSM parameter in the target account, and
    writes the target stack's current CloudFormation template to the output
    artifact for the downstream CloudFormation deploy stage.
    """
    # Get details from the event.
    job = event["CodePipeline.job"]
    input_bucket, input_key = get_input_artifact_location(job)
    output_bucket, output_key = get_output_artifact_location(job)
    user_params = get_user_parameters(job)
    assume_role_arn = user_params["AssumeRoleArn"]
    image_parameter_name = user_params["ImageParameterName"]
    stack_name = user_params["StackName"]
    template_filename = user_params["TemplateFilename"]
    # Create client in the pipeline account.
    pipeline_s3_client = get_artifact_s3_client(job)
    # Create clients in the target account (via the assumed role).
    target_session = get_session(
        role_arn=assume_role_arn, session_name="prepare-ami-deployment"
    )
    target_cfn_client = target_session.client("cloudformation")
    target_ssm_client = target_session.client("ssm")
    # Download the input artifact zip file, read manifest.json from it,
    # and get the AMI it references. Also look up the associated image name.
    with download_zip_file(
        s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key
    ) as zip_file:
        image_detail_string = zip_file.read("imageDetail.json").decode("utf-8")
        log("IMAGE_DETAIL_STRING", image_detail_string)
        image_detail = json.loads(image_detail_string)
        image = image_detail["ImageURI"]
        log("IMAGE", image)
    # Update the SSM parameters with the image,
    # to be used by the CloudFormation deployment stage of the pipeline.
    target_ssm_client.put_parameter(
        Name=image_parameter_name, Value=image, Type="String", Overwrite=True
    )
    # Write the CloudFormation stack's template to the output artifact location,
    # to be used by the CloudFormation deployment stage of the pipeline.
    template = get_cloudformation_template(
        cfn_client=target_cfn_client, stack_name=stack_name
    )
    with create_zip_file({template_filename: template}) as zip_path:
        pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)
@contextmanager
def download_zip_file(s3_client, bucket, key):
    """
    Downloads and extracts a zip file from S3.

    Yields an open zipfile.ZipFile; both it and the backing temporary file
    are cleaned up when the with-block exits.
    """
    # Bug fix: a stray NamedTemporaryFile() was created here and immediately
    # shadowed by the with-statement, leaking an open file handle per call.
    with tempfile.NamedTemporaryFile() as temp_file:
        s3_client.download_file(bucket, key, temp_file.name)
        with zipfile.ZipFile(temp_file.name, "r") as zip_file:
            yield zip_file
|
3,625 | 4243c863827f1378c364171ca7d8fdabd42be22f | #!/usr/bin/env python
#pylint: skip-file
"""
HostApi.py
Copyright 2016 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import urllib.request, urllib.parse, urllib.error
from .models import *
class HostApi(object):
def __init__(self, apiClient):
    # apiClient handles transport (callAPI) and response deserialization.
    self.apiClient = apiClient
def getHosts(self, **kwargs):
    """Retrieve hosts.

    Builds a query string from the recognized keyword arguments and GETs
    /host, deserializing the response into a HostListResult.

    Args:
        limit, str: limit (required)
        offset, str: offset (required)
        sortBy, str: sortBy (required)
        order, str: order (required)
        hostName, list[str]: hostName (required)
        hostMac, list[str]: hostMac (required)
        hostType, list[str]: hostType (required)
        connectedInterfaceName, list[str]: connectedInterfaceName (required)
        hostIp, list[str]: hostIp (required)
        connectedDeviceIp, list[str]: connectedDeviceIp (required)
        scope, str: Authorization Scope for RBAC (required)

    Returns: HostListResult, or None when the API returns no response.
    """
    allParams = ['limit', 'offset', 'sortBy', 'order', 'hostName', 'hostMac', 'hostType', 'connectedInterfaceName', 'hostIp', 'connectedDeviceIp', 'scope']
    params = locals()
    # Reject unknown kwargs, then promote the known ones into `params`.
    for (key, val) in list(params['kwargs'].items()):
        if key not in allParams:
            raise TypeError("Got an unexpected keyword argument '%s' to method getHosts" % key)
        params[key] = val
    del params['kwargs']
    resourcePath = '/host'
    resourcePath = resourcePath.replace('{format}', 'json')
    method = 'GET'
    queryParams = {}
    headerParams = {}
    formParams = {}
    files = {}
    bodyParam = None
    headerParams['Accept'] = 'application/json'
    headerParams['Content-Type'] = 'application/json'
    # Copy each supplied filter/paging parameter into the query string.
    if ('limit' in params):
        queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
    if ('offset' in params):
        queryParams['offset'] = self.apiClient.toPathValue(params['offset'])
    if ('sortBy' in params):
        queryParams['sortBy'] = self.apiClient.toPathValue(params['sortBy'])
    if ('order' in params):
        queryParams['order'] = self.apiClient.toPathValue(params['order'])
    if ('hostName' in params):
        queryParams['hostName'] = self.apiClient.toPathValue(params['hostName'])
    if ('hostMac' in params):
        queryParams['hostMac'] = self.apiClient.toPathValue(params['hostMac'])
    if ('hostType' in params):
        queryParams['hostType'] = self.apiClient.toPathValue(params['hostType'])
    if ('connectedInterfaceName' in params):
        queryParams['connectedInterfaceName'] = self.apiClient.toPathValue(params['connectedInterfaceName'])
    if ('hostIp' in params):
        queryParams['hostIp'] = self.apiClient.toPathValue(params['hostIp'])
    if ('connectedDeviceIp' in params):
        queryParams['connectedDeviceIp'] = self.apiClient.toPathValue(params['connectedDeviceIp'])
    # `scope` travels as a header, not a query parameter.
    if ('scope' in params):
        headerParams['scope'] = params['scope']
    postData = (formParams if formParams else bodyParam)
    response = self.apiClient.callAPI(resourcePath, method, queryParams,
                                      postData, headerParams, files=files)
    if not response:
        return None
    responseObject = self.apiClient.deserialize(response, 'HostListResult')
    return responseObject
def getHostCount(self, **kwargs):
    """Gives total number of hosts

    Args:
        scope, str: Authorization Scope for RBAC (required)

    Returns: CountResult
    """
    # Reject any keyword argument this endpoint does not understand.
    allowed = ['scope']
    params = {}
    for name, value in kwargs.items():
        if name not in allowed:
            raise TypeError("Got an unexpected keyword argument '%s' to method getHostCount" % name)
        params[name] = value

    resourcePath = '/host/count'.replace('{format}', 'json')
    method = 'GET'

    queryParams = {}
    formParams = {}
    files = {}
    bodyParam = None
    headerParams = {'Accept': 'application/json',
                    'Content-Type': 'application/json'}
    if 'scope' in params:
        headerParams['scope'] = params['scope']

    # Form data wins over a raw body; both are empty for this GET.
    postData = formParams if formParams else bodyParam
    response = self.apiClient.callAPI(resourcePath, method, queryParams,
                                      postData, headerParams, files=files)
    if not response:
        return None
    return self.apiClient.deserialize(response, 'CountResult')
def getHostById(self, **kwargs):
    """Retrieves host based on id

    Args:
        id, str: Host Id (required)
        scope, str: Authorization Scope for RBAC (required)

    Returns: HostResult
    """
    # Reject any keyword argument this endpoint does not understand.
    allowed = ['id', 'scope']
    params = {}
    for name, value in kwargs.items():
        if name not in allowed:
            raise TypeError("Got an unexpected keyword argument '%s' to method getHostById" % name)
        params[name] = value

    resourcePath = '/host/{id}'.replace('{format}', 'json')
    method = 'GET'

    queryParams = {}
    formParams = {}
    files = {}
    bodyParam = None
    headerParams = {'Accept': 'application/json',
                    'Content-Type': 'application/json'}
    if 'scope' in params:
        headerParams['scope'] = params['scope']
    if 'id' in params:
        # URL-encode the id before substituting it into the path template.
        replacement = urllib.parse.quote(str(self.apiClient.toPathValue(params['id'])))
        resourcePath = resourcePath.replace('{' + 'id' + '}', replacement)

    postData = formParams if formParams else bodyParam
    response = self.apiClient.callAPI(resourcePath, method, queryParams,
                                      postData, headerParams, files=files)
    if not response:
        return None
    return self.apiClient.deserialize(response, 'HostResult')
|
3,626 | 35288c9ad4d3550003e3c2f9e9034f4bce1df830 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 19:27:59 2020
@author: Dan
"""
import numpy as np
def shift(v, i, j):
    """Move v[i] down to index j, sliding v[j:i] one slot to the right.

    In-place; returns v for convenience.  No-op when i <= j.
    """
    if i <= j:
        return v
    store = v[i]
    # BUG FIX: the original used range(0, i-j-1), which left v[j+1]
    # untouched and silently lost the old v[j] (e.g. shift([3,1],1,0)
    # produced [1,1]).  There are i-j elements to slide.
    for k in range(0, i - j):
        v[i - k] = v[i - k - 1]
    v[j] = store
    return v
def insertion(v):
    """Sort the list v in place with insertion sort and return it.

    BUG FIX: the original delegated to a broken shift() (which dropped
    elements) and wrote `v[i] < v[j-1] and j > 0`, evaluating v[-1]
    before the bounds guard.  This version is self-contained and stable.
    """
    for i in range(1, len(v)):
        key = v[i]
        j = i
        # Slide larger elements right; guard j > 0 FIRST so v[j-1]
        # never wraps around to v[-1].
        while j > 0 and v[j - 1] > key:
            v[j] = v[j - 1]
            j -= 1
        v[j] = key
    return v
# v = np.random.randint(1,50,20)
v = [5, 5, 1, 4, 5, 8]
print(v)
# Renamed from `sorted`, which shadowed the builtin of the same name.
result = insertion(v)
print(result)
|
3,627 | 5068a78a1aa31a277b3b5854ddd1d8990d07b104 | #roblem: Have the function PrimeTime(num)
# take the num parameter being passed and return
# the string true if the parameter is a prime number, \
# otherwise return the string false.
# The range will be between 1 and 2^16.
def PrimeTime(num):
    """Return the string 'True' if num is prime, otherwise 'False'.

    BUG FIX: the original only checked whether num-1 or num+1 was a
    multiple of 6.  That is necessary for primes > 3 but not sufficient
    (25, 35, 49 ... all passed).  Range is 1..2**16, so 6k+/-1 trial
    division up to sqrt(num) is instant.
    """
    if num < 2:
        return 'False'
    if num < 4:          # 2 and 3 are prime
        return 'True'
    if num % 2 == 0 or num % 3 == 0:
        return 'False'
    f = 5
    while f * f <= num:  # test candidate divisors 6k-1 and 6k+1
        if num % f == 0 or num % (f + 2) == 0:
            return 'False'
        f += 6
    return 'True'
print(PrimeTime(12)) |
3,628 | bcbcb4ea3a3b8b5c11e9b107103418ae79a3921c | # Create your views here.
from django.shortcuts import render_to_response, Http404, render
from django.template import RequestContext
from books.models import Book
from django.http import HttpResponse, HttpResponseRedirect
import urllib, urllib2
import json
def incr_reads(request, book_id):
    """AJAX endpoint: bump the read counter of book `book_id`.

    Returns the new count as plain text, or 'FAILED' for non-POST
    requests and unknown books.
    """
    # BUG FIX: `if request.POST:` is falsy for a POST with an empty body;
    # check the HTTP method instead.
    if request.method == 'POST':
        try:
            readers = Book.objects.get(id=book_id).incr_reads()
            return HttpResponse(readers)
        except Book.DoesNotExist:
            pass
    return HttpResponse('FAILED')
def index(request):
    '''
    No processing, should use direct to template.
    '''
    context = {}
    return render_to_response('index.html', context,
                              context_instance=RequestContext(request))
def search(request):
    """Full-text search over books via the `q` query parameter.

    BUG FIX: when `q` was absent the original fell off the end and
    returned None, making Django raise.  Now an empty result list is
    rendered instead.
    """
    results = []
    if request.GET and 'q' in request.GET:
        results = Book.search.query(request.GET['q'])
    return render_to_response('books/book_list.html', {'object_list': results},
                              context_instance=RequestContext(request))
def suggest_image(request, book_id):
    '''
    So this is a helper view for staff to update the picture.
    '''
    book = Book.objects.get(id=book_id)
    suggestions = book.get_image_suggestions(first=False)
    context = {'images': suggestions, 'book': book}
    return render_to_response('books/image_suggestor.html', context,
                              context_instance=RequestContext(request))
|
3,629 | 1b3891565f776064cfcca02fb22ea65853f7e66f | from matplotlib import pyplot as plt
# Function for testing
# Maps x => x*x
def calculate(x):
    """Square the given number (test mapping x => x*x)."""
    return x ** 2
# Sample the test function on [-0.5, 0.5] and save the curve to disk.
inputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
outputs = list(map(calculate, inputs))
plt.plot(inputs, outputs)
plt.savefig("plot.png")
3,630 | 3ccbafbdc84447438c194288b1409e332bb2b479 | import cv2 as cv
import numpy as np
from servo import *
from func import *
#import threading
#import dlib
# import socket
# import struct
# import pickle
def constrain(val, minv, maxv):
    """Clamp val into [minv, maxv] (upper bound wins if the bounds cross)."""
    raised = max(minv, val)
    return min(maxv, raised)
# PID gains for the steering controller.
KP = 0.22
KI = 0
KD = 0.17
last = 0       # previous steering error (feeds the D term)
integral = 0   # accumulated error (feeds the I term; clamped in the loop)
# constants
SIZE = (400, 300)  # working frame size (w, h) after resize
# Destination rectangle and source trapezoid for the bird's-eye
# perspective transform, in resized-frame pixel coordinates.
RECT = np.float32([[0, 299],
                   [399, 299],
                   [399, 0],
                   [0, 0]])
TRAP = np.float32([[0, 299],
                   [399, 299],
                   [320, 200],
                   [80, 200]])
TRAPINT = np.array(TRAP, dtype=np.int32)
cap = cv.VideoCapture(0)          # default camera
pi, ESC, STEER = setup_gpio()     # GPIO handle plus ESC/steering channels
p = False                         # "currently handling a stop marker" flag
# 1500/90 presumably means throttle neutral, wheels straight — confirm
# against servo.control().
control(pi, ESC, 1500, STEER, 90)
time.sleep(1)
timeout = 0
l = 1
r = 0
povor = 0   # unused below; presumably leftover
totl = 1
pid = 0
# Main drive loop: grab a frame, binarize, warp to bird's-eye view,
# detect stop markers, then steer with a PD(+I) controller on the lane
# centre error.  Ctrl-C neutralises the ESC and exits.
while True:
    try:
        ret, frame = cap.read()
        totl = frame.copy()
        #print(totl)
        # NOTE(review): 'home\\pi\\imaaage1.jpg' is a relative path with
        # backslashes; on a Pi this was probably meant to be
        # '/home/pi/imaaage1.jpg' — confirm.
        cv.imwrite('home\\pi\\imaaage1.jpg', totl)
        #print('totl ready')
        img = cv.resize(frame, SIZE)
        binary = binarize(img)
        perspective = trans_perspective(binary, TRAP, RECT, SIZE)
        cv.imwrite('home\\pi\\imaaage2.jpg', perspective)
        if detect_stop(perspective):
            # Stop marker seen: halt and ask the operator for a direction
            # on the next iteration (p flag).
            stop(pi, ESC)
            time.sleep(0.5)
            #control(pi, ESC, 1548, STEER, 90)
            #time.sleep(1)
            p = True
            continue
        left, right = find_left_right(perspective)
        if p:
            # Operator chooses the route (prompt is Russian: "Where to, master?").
            way = input("Куда ехать хозяин?\n")
            if way == "2":
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(4)
                p = False
                continue
            elif way == "3":
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(1)
                control(pi, ESC, 1545, STEER, 145)
                time.sleep(3.2)
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(2)
            elif way == "1":
                ...
            p = False
        # Steering error: offset of the lane centre from frame centre (200 px).
        err = 0 - ((left + right) // 2 - 200)
        if abs(right - left) < 100:
            # Lane lines too close together → detection unreliable; hold
            # the previous error.
            err = last
        #print(err)
        pid = KP * err + KD * (err - last) + KI * integral
        last = err
        integral += err
        integral = constrain(integral, -10, 10)  # anti-windup clamp
        control(pi, ESC, 1545, STEER, 90 + pid)
        print(pid)
        if detect_stop(perspective):
            stop(pi, ESC)
            time.sleep(3)
            control(pi, ESC, 1548, STEER, 90)
            time.sleep(1)
            p = True
        time.sleep(0.01)
        # if cv.waitKey(1) & 0xFF == ord('q'):
        # break
    except KeyboardInterrupt as e:
        # Neutralise throttle/steering before exiting.
        control(pi, ESC, 1500, STEER, 90)
        print(e)
        break
# cv.destroyAllWindows()
cap.release()
|
3,631 | 98b0e42f3ed1a234f63c4d3aa76ceb9fce7c041d | from time import perf_counter_ns
from anthony.utility.distance import compare, compare_info
from icecream import ic
# Time one compare() call (perf_counter_ns returns nanoseconds; /1e9 → s)
# and show the results via icecream's ic().
start = perf_counter_ns()
ic(compare("tranpsosed", "transposed"))
print(f"Example Time: {(perf_counter_ns() - start)/1e+9} Seconds")
ic(compare_info("momther", "mother"))
|
3,632 | 2f5244c6144f5aafce29e5aba32bd7e3fc7ecf5b | # -*- coding: utf-8 -*-
'''
* EAFS
* Copyright (C) 2009-2011 Adam Etienne <eadam@lunasys.fr>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import math,uuid,sys,os,time,operator,xmlrpclib,random,argparse
from eafslib import EAFSChunkServerRpc
class EAFSClient:
    """XML-RPC client for EAFS (Python 2 code: print statements, reduce).

    Metadata operations go through the master proxy; chunk payloads go
    directly to the chunk servers, which are discovered lazily.
    """
    def __init__(self, master_host):
        # Proxy for all metadata calls; chunkservers maps uuid -> RPC wrapper.
        self.master = xmlrpclib.ServerProxy(master_host)
        self.chunkservers = {}
    def write(self, filename, data):
        # Overwrite semantics: drop any existing file, then allocate chunks.
        if self.exists(filename):
            self.delete(filename)
        num_chunks = self.num_chunks(len(data))
        attributes = {"mode":"file", "atime":"", "ctime":"", "mtime":"", "attrs":""}
        chunkuuids = self.master.alloc(filename, num_chunks, attributes)
        self.write_chunks(chunkuuids, data)
    def update_chunkservers(self):
        # Refresh the local uuid -> RPC map; existing entries are reused.
        chunkservers = self.master.get_chunkservers()
        #print "CHUNKSERVERS[RAW]: ", chunkservers
        for chunkserver in chunkservers:
            #print chunkserver
            if chunkserver['uuid'] not in self.chunkservers:
                self.chunkservers[chunkserver['uuid']] = EAFSChunkServerRpc( chunkserver['uuid'], chunkserver['address'] )
    def write_chunks(self, chunkuuids, data):
        # Slice data into chunksize pieces and write each piece to every
        # replica location the master reports.
        chunks = [ data[x:x+self.master.get_chunksize()] \
            for x in range(0, len(data), self.master.get_chunksize()) ]
        #chunkservers = self.master.get_chunkservers()
        self.update_chunkservers()
        #print "CHUNKSERVERS: ", self.chunkservers
        for i in range(0, len(chunkuuids)): # write to each chunkserver
            chunkuuid = chunkuuids[i]
            chunklocs = self.master.get_chunklocs(chunkuuid)
            for chunkloc in chunklocs:
                #print "chunkloc: ", chunkloc
                self.chunkservers[chunkloc].rpc.write(chunkuuid, chunks[i])
    def num_chunks(self, size):
        # Ceiling division: number of chunks needed for `size` bytes.
        return (size // self.master.get_chunksize()) \
            + (1 if size % self.master.get_chunksize() > 0 else 0)
    def write_append(self, filename, data):
        if not self.exists(filename):
            raise Exception("append error, file does not exist: " + filename)
        num_append_chunks = self.num_chunks(len(data))
        append_chunkuuids = self.master.alloc_append(filename, \
            num_append_chunks)
        self.write_chunks(append_chunkuuids, data)
    def exists(self, filename):
        return self.master.exists(filename)
    def read(self, filename): # get metadata, then read chunks direct
        if not self.exists(filename):
            raise Exception("read error, file does not exist: " + filename)
        chunks = []
        chunkuuids = self.master.get_chunkuuids(filename)
        #chunkservers = self.master.get_chunkservers()
        self.update_chunkservers()
        for chunkuuid in chunkuuids:
            chunklocs = self.master.get_chunklocs(chunkuuid)
            done_chunkserver = []
            chunk = None
            chunk_read = False
            # NOTE(review): done_chunkserver is only appended to on SUCCESS,
            # so a persistently failing replica makes this loop spin forever;
            # and the inner re-roll condition looks inverted (it re-rolls
            # while the candidate has NOT been tried).  Confirm intent.
            while not (chunk_read or len(done_chunkserver)==len(chunklocs)):
                chunkidrnd = random.randint(0, len(chunklocs)-1)
                while chunkidrnd not in done_chunkserver and len(done_chunkserver)>0:
                    chunkidrnd = random.randint(0, len(chunklocs)-1)
                chunkloc = chunklocs[chunkidrnd]
                print "Select chunkloc %s from %d choices" % (chunkloc, len(chunklocs))
                try:
                    chunk = self.chunkservers[chunkloc].rpc.read(chunkuuid)
                    chunk_read = True
                    done_chunkserver.append(chunkidrnd)
                except:
                    print "Chunkserver %d failed" % chunkidrnd
            if not chunk_read:
                raise Exception("read error, chunkserver unavailable: " + filename)
            chunks.append(chunk)
        data = reduce(lambda x, y: x + y, chunks) # reassemble in order
        return data
    def delete(self, filename):
        self.master.delete(filename)
def main():
    # CLI entry point: connects to a running master and exercises
    # exists/read (the write demo is disabled via `if False`).
    parser = argparse.ArgumentParser(description='EAFS Simple Client')
    parser.add_argument('--master', dest='master', default='localhost:6799', help='Master server address')
    args = parser.parse_args()
    master = 'http://' + args.master
    client = EAFSClient(master)
    # test write, exist, read
    print "\nWriting..."
    #try:
    if False:
        # NOTE(review): intentionally disabled; flip to True (or restore the
        # commented try block) to exercise writes.
        client.write("/usr/python/readme.txt", """
This file tells you all about python that you ever wanted to know.
Not every README is as informative as this one, but we aim to please.
Never yet has there been so much information in so little space.
""")
    #except:
    # print client.master.dump_metadata()
    print "File exists? ", client.exists("/usr/python/readme.txt")
    print client.read("/usr/python/readme.txt")
    # show structure of the filesystem
    print "\nMetadata Dump..."
    print client.master.dump_metadata()
if __name__ == "__main__":
main()
"""
# test append, read after append
#print "\nAppending..."
#client.write_append("/usr/python/readme.txt", \
# "I'm a little sentence that just snuck in at the end.\n")
#print client.read("/usr/python/readme.txt")
# test delete
#print "\nDeleting..."
#client.delete("/usr/python/readme.txt")
#print "File exists? ", client.exists("/usr/python/readme.txt")
# test exceptions
#print "\nTesting Exceptions..."
#try:
# client.read("/usr/python/readme.txt")
#except Exception as e:
# print "This exception should be thrown:", e
#try:
# client.write_append("/usr/python/readme.txt", "foo")
#except Exception as e:
# print "This exception should be thrown:", e
"""
|
3,633 | 5580e5942370c925b759b09675306cdfbc7dd4f1 | '''
Created on 5 Mar 2010
@author: oppianmatt
'''
# hook to find setup tools if not installed
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
from setuptools import setup, find_packages
setup(
    name = "django-defaultsite",
    version = "1.1",
    packages = find_packages('src'),
    package_dir = {'': 'src'},
    package_data={'': ['LICENSE']},
    include_package_data=True,
    zip_safe=False,
    # metadata for upload to PyPI
    author = "Oppian System Ltd",
    author_email = "matt@oppian.com",
    # Fixed description: the package name had been pasted into the middle of
    # the sentence ("django-defaultsiteSets ...") and "then" -> "than".
    description = "Sets the Site object in django to something better than example.com.",
    # NOTE(review): `license` conventionally holds a short licence name
    # (the BSD classifier below suggests 'BSD'), not a file name — confirm.
    license = 'LICENSE.txt',
    keywords = "django site example.com",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    url = "http://oppian.com/labs/django-defaultsite/",
    long_description=open('README.txt').read(),
)
|
3,634 | 856beaf3b9dad333d5b48c1be3a8ad917f8d020c | from flask import Blueprint, request, make_response
from untils import restful, cacheuntil
from untils.captcha import Captcha
from exts import smsapi
from .forms import SMSCaptchaForm
from io import BytesIO
bp = Blueprint('common', __name__, url_prefix='/c')
# @bp.route('/sms_captcha/', methods=['post'])
# def sms_captcha():
# telephone = request.form.get('telephone')
# if not telephone:
# return restful.params_error(message='请传入手机号码!')
# code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串
# resp = smsapi.send_sms(telephone=telephone, param=code)
# if resp:
# return restful.success(message='短信验证码发送成功!')
# else:
# return restful.params_error(message='短信验证码发送失败!')
# TODO: 发送短信验证码
@bp.route('/sms_captcha/', methods=['post'])
def sms_captcha():
    """Validate the form, then send an SMS captcha and cache it in redis."""
    form = SMSCaptchaForm(request.form)
    if not form.validate():
        # Guard clause: report validation errors immediately.
        return restful.params_error(message=form.get_random_error(), data=form.get_all_errors())
    telephone = form.telephone.data
    code = Captcha.gene_text(number=4)  # random 4-digit code
    sent = smsapi.send_sms(telephone=telephone, param=code)
    if sent:
        cacheuntil.set(telephone, code)  # store code for later verification
        return restful.success(message='短信验证码发送成功!')
    return restful.params_error(message='短信验证码发送失败!')
# TODO: 图形验证码视图
@bp.route('/captcha/')
def CaptchaView():
    """Generate a graphic captcha, cache its text, and serve it as a PNG."""
    text, image = Captcha.gene_graph_captcha()
    cacheuntil.set(text.lower(), text.lower())  # store lowercased text in redis
    buffer = BytesIO()
    image.save(buffer, 'png')  # render the PNG into memory
    buffer.seek(0)             # rewind before reading it back
    resp = make_response(buffer.read())
    resp.content_type = 'image/png'
    return resp
|
3,635 | 05f77472625e902b66c4a97a4c640835826bd494 | from functiona import *
total = totalMarks(85,67,56,45,78)
avg = average(total)
grade = findGrade(avg)
print(grade)
print(total)
print(avg) |
3,636 | 0f4bb65b93df997ca1a9b7945ebcec53a2f43822 | """This module will serve the api request."""
import json
from bson.json_util import dumps
from flask import abort, request, Response, jsonify
from api import app, collection
@app.route("/api/v1/users", methods=['POST'])
def create_user():
    """
    Function to create new users.

    Inserts the JSON request body as-is. Returns 201 on success,
    400 for an unreadable body, 500 on storage errors.
    """
    try:
        body = request.get_json()
    except Exception:
        # Bad request as request body is not available
        return abort(400)
    # BUG FIX: abort(400) raises an HTTPException; the old outer bare
    # `except:` swallowed it and answered 500.  The abort now propagates,
    # and only the insert itself is guarded.
    try:
        # The inserted id was captured but never used; drop the dead variable.
        collection.insert(body)
        return jsonify({"message":"Successfully Created the resource."}), 201
    except Exception:
        # Error while trying to create the resource
        return "Error while trying to create the resource", 500
@app.route("/api/v1/users", methods=['GET'])
def fetch_users():
    """
    Function to fetch the users.
    """
    try:
        cursor = collection.find()
        if cursor.count() > 0:
            # Serialise with bson's dumps so ObjectIds survive.
            payload = dumps(cursor)
            return Response(payload, status=200, mimetype='application/json')
        # No matching documents.
        return jsonify({"message":"No records are found"}), 404
    except Exception as e:
        print(str(e))
        return jsonify({"message":"Error while trying to fetch the resource"}), 500
@app.route("/api/v1/users/<user_id>", methods=['POST'])
def update_user(user_id):
    """
    Function to update the user.

    Returns 200 when a document was modified, 400 for a missing/bad
    body, 404 when nothing matched, 500 on unexpected errors.
    """
    try:
        # BUG FIX: the old code called ast.literal_eval() but `ast` was
        # never imported, so every request raised NameError and came back
        # as 400.  request.get_json() already yields a plain dict.
        try:
            body = request.get_json()
        except Exception:
            # Bad request as the request body is not available
            return "", 400
        if body is None:
            return "", 400
        # Updating the user
        records_updated = collection.update_one({"id": int(user_id)}, body)
        # Check if resource is updated
        if records_updated.modified_count > 0:
            return "", 200
        # Bad request as the resource is not available to update
        return "", 404
    except Exception:
        # Error while trying to update the resource
        return "", 500
@app.route("/api/v1/users/<user_id>", methods=['DELETE'])
def remove_user(user_id):
    """
    Function to remove the user.

    Returns 204 when a document was deleted, 404 when none matched,
    500 on unexpected errors.
    """
    try:
        # Delete the user
        result = collection.delete_one({"id": int(user_id)})
        if result.deleted_count > 0:
            return "", 204
        # Resource Not found
        return "", 404
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return "", 500
@app.errorhandler(404)
def page_not_found(e):
    """Send message to the user with notFound 404 status."""
    # Error payload shown for unknown routes.
    message = {
        "err": {
            "msg": "This route is currently not supported. Please refer API documentation."
        }
    }
    resp = jsonify(message)
    resp.status_code = 404
    return resp
3,637 | 6b6397fd18848ffa2ae9c0ec1443d20f2cbeb8b0 | import math
import pandas as pd
from matplotlib import pyplot as plt
# Experiment results: accuracy of list-sorting (length 5) under two prompt
# styles ("examples" vs "code") across shot counts; 50 trials each.
tests = [
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 0, "accuracy": 0.28, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 1, "accuracy": 0.40, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 3, "accuracy": 0.30, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 5, "accuracy": 0.28, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 7, "accuracy": 0.32, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 10, "accuracy": 0.50, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 13, "accuracy": 0.36, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 16, "accuracy": 0.22, "trials": 50},
    { "task": "listsort", "prompt": "examples", "length": 5, "shots": 32, "accuracy": 0.20, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 0, "accuracy": 0.76, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 1, "accuracy": 0.66, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 3, "accuracy": 0.46, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 5, "accuracy": 0.44, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 7, "accuracy": 0.44, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 10, "accuracy": 0.42, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 13, "accuracy": 0.30, "trials": 50},
    { "task": "listsort", "prompt": "code", "length": 5, "shots": 16, "accuracy": 0.32, "trials": 50},
    # { "task": "listsort", "prompt": "examples", "length": 10, "shots": 0, "accuracy": 0.04, "trials": 50},
    # { "task": "listsort", "prompt": "examples", "length": 10, "shots": 1, "accuracy": 0.04, "trials": 50},
    # { "task": "listsort", "prompt": "examples", "length": 10, "shots": 10, "accuracy": 0.00, "trials": 50},
    # { "task": "listsort", "prompt": "examples", "length": 10, "shots": 32, "accuracy": 0.00, "trials": 50},
    # { "task": "listsort", "prompt": "code", "length": 10, "shots": 0, "accuracy": 0.04, "trials": 50},
    # { "task": "listsort", "prompt": "code", "length": 10, "shots": 1, "accuracy": 0.14, "trials": 50},
    # { "task": "listsort", "prompt": "code", "length": 10, "shots": 10, "accuracy": 0.00, "trials": 50},
]
# Derive plotting columns: boolean prompt flag, raw correct count, and a
# binomial standard-error-based error bar.
for d in tests:
    d["code"] = d["prompt"] == "code"
    d["correct"] = d["accuracy"] * d["trials"]
    p = d["accuracy"]
    # 80% confidence: 0.842
    # 95% confidence:
    # NOTE(review): 0.842 is the one-sided z for the 80th percentile; for a
    # two-sided 80% interval z would be ~1.28 — confirm the intended level.
    d["err"] = 0.842 * math.sqrt(p * (1-p) / d["trials"])
# Plot accuracy vs. shots with error bars, one series per prompt style.
df = pd.DataFrame(tests)
plt.style.use('dark_background')
examples_df = df[df["prompt"] == "examples"]
plt.errorbar('shots', 'accuracy', yerr=examples_df["err"], data=examples_df, marker='o', capsize=2,
             color='mediumorchid', markersize=4, linewidth=1, linestyle='-', label="Examples")
code_df = df[df["prompt"] == "code"]
plt.errorbar('shots', 'accuracy', yerr=code_df["err"], data=code_df, marker='o', capsize=4,
             color='darkcyan', markersize=4, linewidth=1, label="Coding")
plt.legend()
plt.xlabel("Shots")
plt.ylabel("Accuracy")
plt.title("List Sort Length 5")
# plt.savefig('Fig2.png', dpi=300, bbox_inches='tight')
plt.show()
# seaborn.lineplot(data=df, x="shots", y="correct", hue="prompt", ci="sd")
# length 99
# { "task": "listsort", "prompt": "examples", "length": 5, "shots" 10, "accuracy": 0.46, "trials": 50},
# { "task": "listsort", "prompt": "code", "length": 5, "shots": 0, "accuracy": 0.50, "trials": 50},
# { "task": "listsort", "prompt": "code", "length": 10, "shots": 0, "accuracy": 0.02, "trials": 50},
|
3,638 | 46b1e5adbd956c35820d7d2b17628364388cdcd7 | __author__ = 'tomer'
import sqlite3
from random import randint
import test_data
def init_database(conn):
    """Create every table the recommendation engine needs (idempotent)."""
    cur = conn.cursor()
    # One CREATE TABLE IF NOT EXISTS per entity, so re-running is safe.
    cur.execute('''CREATE TABLE IF NOT EXISTS catalogs
                 (id INTEGER PRIMARY KEY AUTOINCREMENT, catalog_name TEXT)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS products
                 (id INTEGER PRIMARY KEY AUTOINCREMENT, sku_id INTEGER, catalog_id INTEGER, product_name TEXT, price FLOAT, description TEXT)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS users
                 (id INTEGER PRIMARY KEY AUTOINCREMENT, user_name TEXT)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS products_bought
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER,product_id INTEGER)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS product_context
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,recommendation_id INTEGER, product_id INTEGER, device TEXT, os TEXT, time_of_day TEXT, day_of_week TEXT, latitude float, longitude float,num_items_in_cart INTEGER, purchases_in_last_month INTEGER)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS recommendations
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,user_id INTEGER, product_id INTEGER, interacted BOOLEAN)''')
def load_fake_data(conn):
    """Clear every table and repopulate it with random demo rows."""
    c = conn.cursor()
    c.execute('''DELETE FROM catalogs''')
    c.execute('''DELETE FROM products''')
    c.execute('''DELETE FROM users''')
    c.execute('''DELETE FROM products_bought''')
    c.execute('''DELETE FROM product_context''')
    c.execute('''DELETE FROM recommendations''')
    # Two demo catalogs.
    catalogs = []
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('BestBuy',))
    catalogs.append(c.lastrowid)
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''',('RiteAid',))
    catalogs.append(c.lastrowid)
    # Demo users.
    ppl = []
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Tomer',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Alex',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Matt',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Rachael',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Sam',))
    ppl.append(c.lastrowid)
    c.execute('''INSERT INTO users (user_name) VALUES (?)''',('Joey',))
    ppl.append(c.lastrowid)
    products = []
    # Load fake products
    for i in range(1,20):
        c.execute('''INSERT INTO products (id,sku_id,catalog_id, product_name, price,description) VALUES (NULL,?,?,?,?,?)''',(randint(1,2000),catalogs[randint(0,len(catalogs)-1)],'Movie' + str(i),randint(1,2000),'Title' + str(i)))
        products.append(c.lastrowid)
    # Load fake transactions
    # NOTE(review): these context rows store the products_bought row id in
    # recommendation_id and no product_id — presumably a seed-data shortcut;
    # confirm before relying on joins against them.
    for i in range(1,50):
        c.execute('''INSERT INTO products_bought (id,user_id, product_id) VALUES (NULL,?,?)''',(ppl[randint(0,len(ppl)-1)],products[randint(0,len(products)-1)]))
        values = (c.lastrowid,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,5),randint(0,30))
        c.execute('''INSERT INTO product_context (id,recommendation_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''',values)
    # Load fake recommendations
    for i in range(1,1000):
        product_id = products[randint(0, len(products) - 1)]
        c.execute('''INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'true')''',(ppl[randint(0,len(ppl)-1)],product_id))
        values = (c.lastrowid,product_id,device[randint(0,len(device)-1)],oses[randint(0,len(oses)-1)], times[randint(0,len(times)-1)], days[randint(0,len(days)-1)], lats[randint(0,len(lats)-1)], lons[randint(0,len(lons)-1)],randint(0,3),randint(0,3))
        c.execute('''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''',values)
    conn.commit()
# Candidate values for the categorical context features.  The triple-quoted
# blocks below are inert string literals kept as documentation of the fuller
# value sets that were simplified away.
oses = ['IOS', 'Android']#, 'Windows10', 'macOS']
device = ['mobile']#, 'computer']
'''
times = ['10:33 AM',
'2:38 PM',
'3:01 AM',
'12:31 AM',
'2:56 PM',
'8:01 AM',
'5:00 PM',
'9:38 PM',
'3:01 AM']
'''
times = ['morning', 'afternoon', 'night']
days = ['M']#['M', 'T', 'W', 'R', 'F', 'S', 'Su']
'''
lats = ['-149.8935557',
'-149.9054948',
'-149.7522',
'-149.8643361',
'-149.8379726',
'-149.9092788',
'-149.7364877',
'-149.8211',
'-149.8445832',
'-149.9728678']
'''
lats = ['north']#, 'south']
'''
lons = ['61.21759217',
'61.19533942',
'61.2297',
'61.19525062',
'61.13751355',
'61.13994658',
'61.19533265',
'61.2156',
'61.13806145',
'61.176693']
'''
lons = ['east']#, 'west']
def get_users(conn):
    """Return every row of the users table."""
    cur = conn.cursor()
    cur.execute('''select * from users''')
    return cur.fetchall()
def get_catalogs(conn):
    """Return every row of the catalogs table."""
    cur = conn.cursor()
    cur.execute('''select * from catalogs''')
    return cur.fetchall()
def get_products(conn, catalog_id):
    """Rows of `products` belonging to one catalog."""
    cur = conn.cursor()
    cur.execute('''select * from products where catalog_id = ?''', (catalog_id,))
    return cur.fetchall()
def get_product_by_id(conn, catalog_id, product_id):
    """Product row matching both the catalog and product ids (list of 0/1 rows)."""
    cur = conn.cursor()
    cur.execute('''SELECT * FROM products WHERE catalog_id = ? AND id = ?''', (catalog_id, product_id))
    return cur.fetchall()
def get_products_bought(conn, catalog_id):
    """All products_bought rows whose product belongs to `catalog_id`.

    BUG FIX: the old query also listed `catalogs cat` in FROM with no join
    condition, producing a cartesian product that duplicated every purchase
    row once per catalog in the table.
    """
    cur = conn.cursor()
    cur.execute('''select pb.* from products_bought pb, products p where pb.product_id = p.id and p.catalog_id = ?''', (catalog_id,))
    return cur.fetchall()
def get_all_data(conn):
    """Every purchase joined with its user, product, and catalog."""
    cur = conn.cursor()
    cur.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id''')
    return cur.fetchall()
def get_data_for_user(conn, userid):
    """Joined (user, product, catalog) purchase rows for one user."""
    cur = conn.cursor()
    cur.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ?''', (userid,))
    return cur.fetchall()
def get_data_for_user_and_catalog(conn, userid, catalogid):
    """Joined purchase rows restricted to one user AND one catalog."""
    cur = conn.cursor()
    cur.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and u.id = ? and c.id = ?''', (userid, catalogid))
    return cur.fetchall()
def get_transactions_for_catalog(conn, catalogid):
    """Joined purchase rows restricted to one catalog."""
    cur = conn.cursor()
    cur.execute('''select u.*, p.*, c.* from users u, products p, products_bought pb, catalogs c where p.id = pb.product_id and p.catalog_id == c.id and u.id = pb.user_id and c.id = ?''', (catalogid,))
    return cur.fetchall()
def get_recommendations_by_user(conn, userId):
    """Recommendations (with their contexts) made to one user."""
    cur = conn.cursor()
    cur.execute('''select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.user_id = ?''', (userId,))
    return cur.fetchall()
def get_recommendations_by_product(conn, productId):
    """Recommendations (with their contexts) for one product."""
    cur = conn.cursor()
    cur.execute('''select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.product_id = ?''', (productId,))
    return cur.fetchall()
def get_connection():
    """Open (creating if necessary) the engine's SQLite database file."""
    return sqlite3.connect('recommendation_engine.db')
def generate_context(product_id):
    """Build one random context row for `product_id`.

    Field order matches the column list add_recommendation uses (which
    prepends the recommendation id): product_id, device, os, time_of_day,
    day_of_week, latitude, longitude, num_items_in_cart,
    purchases_in_last_month.  Values are drawn from the module-level pools.
    """
    return [product_id, device[randint(0, len(device) - 1)], oses[randint(0, len(oses) - 1)],
            times[randint(0, len(times) - 1)], days[randint(0, len(days) - 1)], lats[randint(0, len(lats) - 1)],
            lons[randint(0, len(lons) - 1)], randint(0, 3), randint(0, 3)]
def add_recommendation(conn, product_ids, user_ids, contexts):
    """Insert parallel lists of recommendations plus their context rows.

    Args are parallel lists: product_ids[i]/user_ids[i]/contexts[i] describe
    one recommendation.  NOTE: each list in `contexts` is mutated — the new
    recommendation id is inserted at position 0 so it lines up with the
    product_context column list.  Returns the freshly inserted rows joined
    with their contexts.
    """
    ids = []
    c = conn.cursor()
    for i in range(0, len(product_ids)):
        product_id = product_ids[i]
        user_id = user_ids[i]
        context = contexts[i]
        c.execute('''INSERT INTO recommendations (id,user_id, product_id, interacted) VALUES (NULL,?,?,'false')''',
                  (user_id, product_id))
        context.insert(0, c.lastrowid)  # context now leads with recommendation_id
        ids.append(c.lastrowid)
        c.execute( '''INSERT INTO product_context (id,recommendation_id , product_id , device , os , time_of_day , day_of_week , latitude , longitude ,num_items_in_cart , purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''',
                  context)
    conn.commit()
    # Fetch back everything we just inserted (placeholder list sized to ids).
    c.execute('select r.*, c.* from recommendations r, product_context c where r.id = c.recommendation_id and r.id in (%s)' %
              ','.join('?'*len(ids)), ids)
    return c.fetchall()
def get_probability(conn, x, giveny):
    """Estimate P(giveny | x) from product_context rows.

    Counts rows matching all constraints in `x` (total), then rows matching
    `x` AND `giveny` (smaller), and returns smaller/total (0 when total is 0).

    NOTE(review): dict KEYS are concatenated straight into the SQL text
    (values are parameterized) — callers must never pass untrusted keys,
    or this is SQL injection.
    """
    c = conn.cursor()
    query = '''select count(*) from product_context where '''
    first = True
    params = []
    # Build "key=? and key=? ..." from the x constraints.
    for key, val in x.items():
        if not first:
            query += ' and '
        else:
            first = False
        query += str(key) + '=?'
        params.append(str(val))
    c.execute(query, params)
    total = c.fetchone()[0]
    # Extend the same query with the giveny constraints.
    for key, val in giveny.items():
        query += ' and ' + str(key) + '=?'
        params.append(str(val))
    c.execute(query, params)
    smaller = c.fetchone()[0]
    if total == 0:
        return 0
    else:
        return smaller/float(total)
def load_test_data(conn):
    """Wipe all tables and load the movie demo data set from test_data."""
    c = conn.cursor()
    # Clear database
    c.execute('''DELETE FROM catalogs''')
    c.execute('''DELETE FROM products''')
    c.execute('''DELETE FROM users''')
    c.execute('''DELETE FROM products_bought''')
    c.execute('''DELETE FROM product_context''')
    c.execute('''DELETE FROM recommendations''')
    # Initialize users
    user_names = test_data.USER_NAMES
    # Initialize movie names
    product_names = test_data.PRODUCT_NAMES
    # Initialize Prices
    prices = test_data.POSSIBLE_PRICES
    # Load test catalog
    catalog_ids = []
    c.execute('''INSERT INTO catalogs (catalog_name) VALUES (?)''', ('MovieDatabase',))
    catalog_ids.append(c.lastrowid)
    # Load test users
    user_ids = []
    for user in user_names:
        c.execute('''INSERT INTO users (user_name) VALUES (?)''', (user,))
        user_ids.append(c.lastrowid)
    # Load test products
    product_ids = []
    for product in product_names:
        values = (randint(1, 2000), catalog_ids[0], product, prices[randint(0, len(prices)-1)], 'desc')
        c.execute('''INSERT INTO products (id, sku_id, catalog_id, product_name, price, description) VALUES (NULL,?,?,?,?,?)''', values)
        product_ids.append(c.lastrowid)
    # Load fake transactions
    # NOTE(review): these context rows reuse the products_bought row id as
    # recommendation_id and store no product_id — confirm intent.
    for i in range(1, 50):
        values = (user_ids[randint(0, len(user_ids)-1)], product_ids[randint(0, len(product_ids)-1)])
        c.execute('''INSERT INTO products_bought (id,user_id,product_id) VALUES (NULL,?,?)''', values)
        values = (c.lastrowid,
                  device[randint(0, len(device) - 1)],
                  oses[randint(0, len(oses) - 1)],
                  times[randint(0, len(times) - 1)],
                  days[randint(0, len(days) - 1)],
                  lats[randint(0, len(lats) - 1)],
                  lons[randint(0, len(lons) - 1)],
                  randint(0, 3),
                  randint(0, 3))
        c.execute('''INSERT INTO product_context (id,recommendation_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?)''', values)
    # Load fake recommendations
    for i in range(1, 1000):
        product_id = product_ids[randint(0, len(product_ids)-1)]
        values = (user_ids[randint(0, len(user_ids)-1)], product_id,)
        c.execute('''INSERT INTO recommendations (id,user_id,product_id,interacted) VALUES (NULL,?,?,'True')''', values)
        values =(c.lastrowid,
                 product_id,
                 device[randint(0, len(device) - 1)],
                 oses[randint(0, len(oses) - 1)],
                 times[randint(0, len(times) - 1)],
                 days[randint(0, len(days) - 1)],
                 lats[randint(0, len(lats) - 1)],
                 lons[randint(0, len(lons) - 1)],
                 randint(0, 3),
                 randint(0, 3))
        c.execute('''INSERT INTO product_context (id,recommendation_id,product_id,device,os,time_of_day,day_of_week,latitude,longitude,num_items_in_cart,purchases_in_last_month) VALUES (NULL,?,?,?,?,?,?,?,?,?,?)''', values)
    conn.commit()
|
3,639 | ff3962d875da8e3f9e6c3178b1a8191ebb8a7b60 | TABLE_NAME = 'active_module'
|
3,640 | 16a77c45a58e31c575511146dfceeaef0a2bc3a7 | def get_partial_matched(n):
pi = [0] * len(n)
begin = 1
matched = 0
while begin + matched < len(n):
if n[begin + matched] == n[matched]:
matched += 1
pi[begin + matched - 1] = matched
else:
if matched == 0:
begin += 1
else:
begin += matched - pi[matched - 1]
matched = pi[matched - 1]
return pi
def get_common(h, n):
    """KMP-search pattern ``n`` in text ``h``.

    Returns len(n) - s, where s is the smallest shift at which n first
    occurs in h, or 0 when n does not occur at all.
    """
    # Failure table of n, built inline (prefix function).
    table = [0] * len(n)
    k = 0
    for i in range(1, len(n)):
        while k > 0 and n[i] != n[k]:
            k = table[k - 1]
        if n[i] == n[k]:
            k += 1
        table[i] = k
    # Standard KMP scan: shift is the candidate start, hit the matched length.
    shift = 0
    hit = 0
    while shift + hit < len(h):
        if hit < len(n) and h[shift + hit] == n[hit]:
            hit += 1
            if hit == len(n):
                return len(n) - shift
        elif hit == 0:
            shift += 1
        else:
            shift += hit - table[hit - 1]
            hit = table[hit - 1]
    return 0
def solution(status):
    """Sum the alignment scores of each adjacent pair of state strings.

    The first string of each pair is doubled (circular match), and the
    roles alternate with the pair's parity.
    """
    total = 0
    for i in range(len(status) - 1):
        if i % 2 == 0:  # even gap: current string rotates against the next
            total += get_common(status[i] * 2, status[i + 1])
        else:           # odd gap: roles are swapped
            total += get_common(status[i + 1] * 2, status[i])
    return total
C = int(input())  # number of test cases
for _ in range(C):
    N = int(input())  # NOTE(review): N + 1 state strings follow -- presumably
                      # N transitions between N + 1 rings; confirm with spec
    status = []
    for _ in range(N + 1):
        status.append(input())
    print(solution(status))
|
3,641 | 33c4e0504425c5d22cefb9b4c798c3fd56a63771 | #!/usr/bin/python
import math
def Main():
    """Prompt for a radius and print the area of the corresponding circle."""
    try:
        radius = float(input("Please enter the radius: "))
        area = math.pi * radius**2
        print("Area =", area)
    except ValueError:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; float() raises ValueError on
        # non-numeric input, so catch only that.
        print("You did not enter a number")

if __name__ == "__main__":
    Main()
|
3,642 | 13a2814e8744c6c09906d790185ed44fc2b3f23e | import random
import torch
import numpy as np
from torch.autograd import Variable
class SupportSetManager(object):
    """Collects per-task, per-label support ("prototype") examples and turns
    them into embedded support sets for matching-network style models.

    prototype_text_list[taskid][lab_id] holds tokenised example texts for
    that task/label, capped at sample_per_class entries when >= 1.
    """

    # Support-set selection policies for select_support_set().
    FIXED_FIRST = 0
    RANDOM = 1

    def __init__(self, datasets, config, sample_per_class):
        """
        datasets: sequence of (TEXT, LABEL, train, dev, test) tuples, one
            per task; TEXT of the first task is reused for numericalizing.
        config: must expose .device (forwarded to TEXT.numericalize).
        sample_per_class: cap on stored examples per label; < 1 keeps all.
        """
        self.config = config
        (TEXT, LABEL, train, dev, test) = datasets[0]
        self.TEXT = TEXT
        self.sample_per_class = sample_per_class
        print('Picking up prototypes')
        self.prototype_text_list = []
        for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):
            prototype_text = []
            # Tasks without a built label vocabulary contribute an empty list.
            if not hasattr(LABEL, 'vocab'):
                self.prototype_text_list.append(prototype_text)
                continue
            for lab_id in range(len(LABEL.vocab.itos)):
                prototype_text.append([])
            for example in train.examples:
                lab_id = LABEL.vocab.stoi[example.label]
                if prototype_text[lab_id] is not None:
                    prototype_text[lab_id].append(example.text)
                else:
                    prototype_text[lab_id] = [example.text]
            for lab_id in range(len(LABEL.vocab.itos)):
                # A label with no training data gets a <pad> placeholder so
                # downstream indexing never sees an empty list.
                if len(prototype_text[lab_id]) == 0:
                    prototype_text[lab_id].append(['<pad>'])
                if 1 <= self.sample_per_class < len(prototype_text[lab_id]):
                    prototype_text[lab_id] = prototype_text[lab_id][:self.sample_per_class]
            # BUG FIX: the original passed the tuple as a second print()
            # argument instead of %-formatting it into the message.
            print('Task %s: picked up %s prototypes' % (taskid, self.sample_per_class))
            self.prototype_text_list.append(prototype_text)

    def select_support_set(self, taskid, policy):
        """Dispatch on the selection policy (FIXED_FIRST or RANDOM)."""
        if policy == self.FIXED_FIRST:
            return self.select_support_set_first(taskid)
        elif policy == self.RANDOM:
            return self.select_support_set_random(taskid)
        # ROBUSTNESS: the original fell through to an unbound variable
        # (NameError) on an unknown policy; fail with a clear message.
        raise ValueError('unknown support-set policy: %r' % (policy,))

    def select_support_set_first(self, taskid):
        """Numericalize the first stored example of every label."""
        prototype_text = self.prototype_text_list[taskid]
        examples_text = []
        for lab_id in range(len(prototype_text)):
            examples_text.append(prototype_text[lab_id][0])
        prototype_matrix = self.TEXT.numericalize(
            self.TEXT.pad(x for x in examples_text),
            device=self.config.device)
        return prototype_matrix

    def select_support_set_random(self, taskid, ):
        """Numericalize one uniformly random stored example per label."""
        prototype_text = self.prototype_text_list[taskid]
        examples_text = []
        for lab_id in range(len(prototype_text)):
            rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)
            examples_text.append(prototype_text[lab_id][rand_idx])
        prototype_matrix = self.TEXT.numericalize(
            self.TEXT.pad(x for x in examples_text),
            device=self.config.device)
        return prototype_matrix

    def get_average_as_support(self, taskid, mnet_model):
        """Mean hidden embedding of all stored examples per label, concatenated
        along dim 0."""
        prototype_text = self.prototype_text_list[taskid]
        prototype_emb_list = []
        for lab_id in range(len(prototype_text)):
            prototype_sent = self.TEXT.numericalize(
                self.TEXT.pad(x for x in prototype_text[lab_id]),
                device=self.config.device)
            prototype_matrix = mnet_model.get_hidden(prototype_sent)
            prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
        # Positional dim argument kept for compatibility with older torch.
        return torch.cat(prototype_emb_list, 0)

    def get_average_and_std_as_support(self, taskid, mnet_model):
        """Per-label (mean embedding, scalar spread estimate) pair.

        NOTE(review): despite the name, std_val is a variance-like value --
        the sqrt was commented out in the original; preserved as-is.
        """
        prototype_text = self.prototype_text_list[taskid]
        prototype_emb_list = []
        prototype_std_list = []
        for lab_id in range(len(prototype_text)):
            N = len(prototype_text[lab_id])
            prototype_sent = self.TEXT.numericalize(
                self.TEXT.pad(x for x in prototype_text[lab_id]),
                device=self.config.device, train=True)
            prototype_matrix = mnet_model.get_hidden(prototype_sent)
            mean_vec = torch.mean(prototype_matrix, dim=0)
            if N > 1:
                std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1)
                std_val = Variable(std_val.data)  # detach from the graph
            else:
                # Single sample: fall back to a unit spread.
                std_val = Variable(torch.from_numpy(np.array([1.0]).astype(np.float32))).cuda()
            prototype_emb_list.append(mean_vec)
            prototype_std_list.append(std_val)
        return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list, 0)

    def get_average_as_support_sample(self, taskid, mnet_model, sample_per_class):
        """Like get_average_as_support but averaging over at most
        sample_per_class randomly chosen examples per label."""
        prototype_text = self.prototype_text_list[taskid]
        prototype_emb_list = []
        for lab_id in range(len(prototype_text)):
            if sample_per_class > len(prototype_text[lab_id]):
                prototype_sent = self.TEXT.numericalize(
                    self.TEXT.pad(x for x in prototype_text[lab_id]),
                    device=self.config.device)
            else:
                # BUG FIX: Python 3 range objects cannot be shuffled in
                # place; random.sample draws the distinct indices directly.
                top_ind = random.sample(range(len(prototype_text[lab_id])), sample_per_class)
                prototype_text_sample = [prototype_text[lab_id][i] for i in top_ind]
                prototype_sent = self.TEXT.numericalize(
                    self.TEXT.pad(x for x in prototype_text_sample),
                    device=self.config.device)
            prototype_matrix = mnet_model.get_hidden(prototype_sent)
            prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
        return torch.cat(prototype_emb_list, 0)

    def get_average_as_support_large(self, taskid, mnet_model, batchsize):
        """Batched variant of get_average_as_support for labels with many
        examples; embeddings are detached per batch to bound graph memory."""
        prototype_text = self.prototype_text_list[taskid]
        prototype_emb_list = []
        for lab_id in range(len(prototype_text)):
            # BUG FIX: Python 3 '/' is float division; range() needs an int.
            num_batch = len(prototype_text[lab_id]) // batchsize
            # NOTE(review): as in the original, a trailing partial batch is
            # only processed when it is the *only* batch; otherwise the tail
            # examples are dropped -- confirm this is intended.
            if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:
                num_batch += 1
            lab_emb_sum = []
            for i in range(num_batch):
                batch_text = prototype_text[lab_id][i * batchsize : min((i+1) * batchsize, len(prototype_text[lab_id]))]
                batch_prototype_sent = self.TEXT.numericalize(
                    self.TEXT.pad(x for x in batch_text),
                    device=self.config.device, train=True)
                prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)
                prototype_matrix = Variable(prototype_matrix.data)  # detach
                # TODO(original): mean-of-batch-means is not an exact overall
                # mean when batches differ in size; kept for parity.
                lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))
            lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)
            prototype_emb_list.append(lab_emb_sum)
        return torch.cat(prototype_emb_list, 0)
|
3,643 | 9f2a8e78aa2e3eab8f74847443dec9083603da39 | import socket
# Packet Sniffing
# It's All Binary
# Usage: python basic_sniffer.py
# create the sniffer raw socket object
# Create a raw socket receiving ICMP packets (requires admin/root privileges).
sniffer = socket.socket(socket.AF_INET,socket.SOCK_RAW, socket.IPPROTO_ICMP)
# Bind to all local interfaces, any port.
sniffer.bind(('0.0.0.0',0))
# Ask the kernel to include the IP header in captured packets.
sniffer.setsockopt(socket.IPPROTO_IP,socket.IP_HDRINCL,1)
# Python 2 print statement (script is Python 2 only).
print 'sniffer is listening for incomming connections'
# Block until a single packet (up to 64 KiB) arrives, then dump it.
print sniffer.recvfrom(65535)
|
3,644 | b10a50ce649650542d176a2f6fb8c35c500fbc38 | from rest_framework import serializers
from django.contrib.auth import password_validation
from rest_framework.validators import UniqueValidator
from .models import CustomUser, Role, Permission, ActionEntity
from .utils import create_permission
class ActionEntitySerializer(serializers.ModelSerializer):
    """Serialises ActionEntity rows; `id` is writable-but-optional so nested
    payloads can reference existing actions by primary key."""
    id = serializers.IntegerField(required=False)

    class Meta:
        model = ActionEntity
        fields = '__all__'
class PermissionSerializer(serializers.ModelSerializer):
    """Serialises a Permission together with its nested action entities."""
    actionEntitySet = ActionEntitySerializer(many=True)

    class Meta:
        model = Permission
        fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
    """Serialises Role with nested permissions.

    create() builds the role and generates its default permission tree;
    update() persists per-action `enable` flags before the regular
    ModelSerializer update.
    """
    id = serializers.ReadOnlyField()
    permissions = PermissionSerializer(many=True)
    name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())])
    status = serializers.IntegerField()
    describe = serializers.CharField(required=False, allow_null=True, max_length=128)

    class Meta:
        model = Role
        fields = '__all__'

    def create(self, validated_data):
        # Nested permissions come from create_permission(), not the payload.
        validated_data.pop('permissions', None)
        role = Role.objects.create(**validated_data)
        create_permission(role)
        return role

    def update(self, instance, validated_data):
        permissions = validated_data.pop('permissions', None)
        # BUG FIX: guard against a payload without 'permissions' -- the
        # original iterated None and raised TypeError.
        if permissions:
            for permission_data in permissions:
                for action_data in permission_data.get('actionEntitySet') or ():
                    action = ActionEntity.objects.get(pk=action_data.get('id'))
                    action.enable = action_data.get('enable')
                    action.save()
        super().update(instance, validated_data)
        return instance
class SelfChangePasswordSerializer(serializers.Serializer):
    """Payload for a user changing their own password: verifies the old
    password against the requesting user, strength-checks the new one."""
    old_password = serializers.CharField(required=True)
    new_password = serializers.CharField(required=True)

    def get_current_user(self):
        # The authenticated user comes from the DRF request in the context.
        return self.context['request'].user

    def validate(self, data):
        old_password = data.get('old_password', None)
        new_password = data.get('new_password', None)
        # Reject if the supplied old password does not match the account.
        if old_password is not None and not self.get_current_user().check_password(old_password):
            raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. Please enter it again.'})
        # Run Django's configured password validators on the new password.
        if new_password is not None:
            password_validation.validate_password(new_password)
        return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
    """Admin-style password reset payload: only strength-checks the new
    password; no old-password verification."""
    new_password = serializers.CharField(max_length=128)

    def get_current_user(self):
        # NOTE(review): unused within this serializer; kept for parity with
        # SelfChangePasswordSerializer.
        return self.context['request'].user

    def validate(self, data):
        new_password = data.get('new_password', None)
        if new_password is not None:
            password_validation.validate_password(new_password)
        return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
    """Creates users: validates password strength and stores it hashed."""
    username = serializers.CharField(min_length=5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())])
    password = serializers.CharField(max_length=128)
    price_level = serializers.IntegerField(min_value=1, max_value=5)
    balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0)
    role_id = serializers.IntegerField(required=False, allow_null=True)

    class Meta:
        model = CustomUser
        fields = '__all__'

    def validate(self, data):
        # Run Django's configured password validators; raises
        # ValidationError on weak passwords. (Removed an unused
        # `username` local from the original.)
        password = data.get('password', None)
        if password is not None:
            password_validation.validate_password(password)
        return super().validate(data)

    def create(self, validated_data):
        # ModelSerializer would persist the raw password; re-set it hashed.
        user = super().create(validated_data)
        user.set_password(validated_data['password'])
        user.save()
        return user
class UserSerializer(serializers.ModelSerializer):
    """Read/update serializer for users; never exposes or updates password."""
    role = RoleSerializer(read_only=True)
    role_id = serializers.IntegerField(required=False, allow_null=True)

    class Meta:
        model = CustomUser
        exclude = (
            'password',
        )

    def update(self, instance, validated_data):
        # Defensive: strip any password field so an update through this
        # serializer can never overwrite the stored (hashed) password.
        validated_data.pop('password', None)
        return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
    """Minimal user projection for embedding in other payloads."""

    class Meta:
        model = CustomUser
        fields = (
            'id', 'username', 'price_level'
        )
3,645 | 919e1f8a4b021d75496f3bcff369261a09362a65 | from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
# Shared defaults for step strategies and stop criteria.  NOTE: both are
# floats; DEFAULT_MAX_ITERATIONS (1e5) is compared against an int counter.
DEFAULT_EPSILON = 1e-9
DEFAULT_MAX_ITERATIONS = 1e5
def gradient_descent(f: Callable[[np.ndarray], float],
                     f_grad: Callable[[np.ndarray], np.ndarray],
                     start: np.ndarray,
                     step_strategy: st.StepStrategy,
                     stop_criteria: sc.StopCriteria,
                     eps_strategy: float = DEFAULT_EPSILON,
                     eps_stop_criteria: float = DEFAULT_EPSILON,
                     max_iterations_strategy=DEFAULT_MAX_ITERATIONS,
                     max_iterations_criteria=DEFAULT_MAX_ITERATIONS,
                     trajectory: Optional[List] = None):
    """Minimise ``f`` by gradient descent from ``start``.

    Returns (final_point, iterations).  If ``trajectory`` is a list, every
    visited point (including ``start``) is appended to it.
    """
    strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)
    criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)
    cur_x = start
    iters = 0
    if trajectory is not None:
        trajectory.append(cur_x)
    while True:
        iters += 1
        cur_grad = f_grad(cur_x)
        step = strategy.next_step(cur_x)
        next_x = cur_x - step * cur_grad
        if criteria.should_stop(cur_x, next_x):
            return cur_x, iters
        cur_x = next_x
        if trajectory is not None:
            trajectory.append(cur_x)
        # ROBUSTNESS FIX: the original used `==`; with a non-integral cap
        # (the default is the float 1e5, but any 2.5-style value never
        # compares equal to an int counter) the loop would never terminate.
        if iters >= max_iterations_criteria:
            return cur_x, iters
if __name__ == '__main__':
    # Smoke test: minimise f(x, y) = x^2 + y^2 from (3, 4); optimum is (0, 0).
    def foo(p):
        return p[0] ** 2 + p[1] ** 2

    def foo_grad(p):
        # Analytic gradient of foo.
        x, y = p[0], p[1]
        return np.array([2 * x, 2 * y])

    res, _ = gradient_descent(foo,
                              foo_grad,
                              start=np.array([3, 4]),
                              step_strategy=st.StepStrategy.DIVIDE_STEP,
                              stop_criteria=sc.StopCriteria.BY_GRAD)
    print(res)
|
3,646 | 8b583ee55df409020a605b467479236e610a2efe | from external.odds.betclic.api import get_odds
# FDJ parsing is broken - their UI has been refactored with JS framework &
# protected async JSON API usage (requires HEADERS) and more complex to isolate & group match odds
# hence move to another betting website - which is still full html rendered
|
3,647 | 0f266db39988cfce475380036f4f4f5b1a1fee1a | """
dansfunctions - various useful functions in python
usage:
>>import dansfunctions
>>dansfunctions.fg # module of general mathematical, vector and string format functions
>>dansfunctions.fp # module of matplotlib shortcuts
>>dansfunctions.widgets # module of tkinter shortcuts
Requirements: numpy
Optional requirements: matplotlib, tkinter
"""
from . import functions_general as fg
try:
import matplotlib
matplotlib.use('TkAgg')
from . import functions_plotting as fp
except ImportError:
fp = None
print('Matplotlib may not be available')
try:
from .tkgui import basic_widgets as widgets
except ImportError:
widgets = None
print('tkinter may not be available')
def version_info():
    """Return a one-line version string for the dansfunctions package."""
    return 'dansfunctions version {} ({})'.format(fg.__version__, fg.__date__)
def module_info():
    """Return a multi-line summary of Python, package and dependency versions."""
    import sys
    parts = ['Python version %s' % sys.version, version_info()]
    # Module versions; optional dependencies report 'None' when absent.
    parts.append(' numpy version: %s' % fg.np.__version__)
    try:
        import matplotlib
        parts.append('matplotlib version: %s' % matplotlib.__version__)
    except ImportError:
        parts.append('matplotlib version: None')
    try:
        import tkinter
        parts.append(' tkinter version: %s' % tkinter.TkVersion)
    except ImportError:
        parts.append(' tkinter version: None')
    return '\n'.join(parts)
def check_general_functions():
    """Print the version and method listing of functions_general."""
    for line in ('dansfunctions/functions_general.py',
                 'Version: %s (%s)' % (fg.__version__, fg.__date__),
                 'Methods:',
                 fg.list_methods(fg, False)):
        print(line)
def check_plotting_functions():
    """Print the version and method listing of functions_plotting."""
    print('dansfunctions/functions_plotting.py')
    if fp is None:
        # Plotting module failed to import at package load time.
        print('Matplotlib may not be available')
        return
    for line in ('Version: %s (%s)' % (fp.__version__, fp.__date__),
                 'Methods:',
                 fg.list_methods(fp, False)):
        print(line)
def check_tkinter_functions():
    """Print the version and method listing of the tkgui basic widgets."""
    print('dansfunctions/tkgui/basic_widgets.py')
    if widgets is None:
        # Widgets module failed to import at package load time.
        print('tkinter may not be available')
        return
    for line in ('Version: %s (%s)' % (widgets.__version__, widgets.__date__),
                 'Methods:',
                 fg.list_methods(widgets, False)):
        print(line)
|
3,648 | 7da274803de80f2864471d00c9d15aff1103372f | from nodes.Value import Value
class Number(Value):
    """A literal integer value node with a 32-bit-ish magnitude limit."""

    def __init__(self, number: int):
        # Reject magnitudes beyond 2**31.  NOTE(review): this permits
        # exactly +/-2**31, one past the usual int32 max -- confirm intended.
        if not (-2 ** 31 <= number <= 2 ** 31):
            raise SyntaxError(str(number) + ' number is out of range')
        self.number = number

    def __str__(self):
        return str(self.number)
|
3,649 | 99ecb927e22bc303dd9dffd2793887e7398dbb83 | #Importacion de Dependencias Flask
from flask import Blueprint,Flask, render_template, request,redirect,url_for,flash
#modelado de basedato.
from App import db
# Importacion de modulo de ModeloCliente
from App.Modulos.Proveedor.model import Proveedor
#Inportacion de modulo de formularioCliente
from App.Modulos.Proveedor import form
_Proveedor=Blueprint('Proveedor',__name__,url_prefix='/Proveedor')
@_Proveedor.route('/Proveedor', methods=['GET', 'POST'])  # supplier registration
def proveedor():
    """Render the supplier form; on POST, create the supplier if its CI is new."""
    frm = form.Fr_Proveedor(request.form)
    if request.method == 'POST':
        # CI is the business key; first() is None when it is unused so far.
        pr = Proveedor.query.filter_by(CI=frm.CI.data).first()
        if frm.validate() and pr is None:
            new_user = Proveedor(razonSolcial=frm.RasonSocial.data,
                                 CI=frm.CI.data,
                                 Direccion=frm.Direccion.data,
                                 Correo=frm.Correo.data,
                                 convencional=frm.Convencional.data,
                                 Celular=frm.Celular.data
                                 )
            db.session.add(new_user)
            db.session.commit()
            flash("Se registrado con exito sus datos")
            return redirect(url_for('Proveedor.proveedor'))
        else:
            flash("Error: No se registrado con exito sus Datos")
    return render_template('Proveedor/frproveedor.html', frm=frm)
@_Proveedor.route('/listaP')  # supplier listing
def listaP():
    """Render the full supplier list."""
    titulo = "Lista Proveedor"
    return render_template("Proveedor/listaP.html", titulo=titulo, listas=Proveedor.query.all())
@_Proveedor.route('/UpdateP', methods=[ 'POST'])
def UpdateP():
    """Update the supplier identified by the posted CI with the form fields."""
    print(request.form)  # debug: dump the incoming form
    # NOTE(review): first() returns None when the CI does not exist; the
    # attribute assignments below would then raise -- confirm the caller
    # always posts a valid CI.
    updateP = Proveedor.query.filter_by(CI=request.form['CI']).first()
    print("ci:",updateP.CI)  # debug
    updateP.razonSolcial = request.form['RasonSocial']
    updateP.Direccion = request.form['Direccion']
    updateP.Correo = request.form['Correo']
    updateP.convencional= request.form['Convencional']
    updateP.Celular = request.form['Celular']
    db.session.commit()
    return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route('/deleteP/<string:id>',methods=['GET','POST'])
def deleteP(id=None):
    """Delete the supplier with the given CI, then return to the list view."""
    dlTP = Proveedor.query.filter_by(CI=id).first()
    # ROBUSTNESS FIX: first() returns None when no row matches; the original
    # passed None to session.delete(), which raises.  Skip silently instead.
    if dlTP is not None:
        db.session.delete(dlTP)
        db.session.commit()
    return redirect(url_for('Proveedor.listaP'))
@_Proveedor.route("/modalP")
def modalP():
frm = form.Fr_Proveedor(request.form)
return render_template("modal/modaproveedor.html", frm=frm, title="Proveedor")
|
3,650 | f29fa3d796d9d403d6bf62cb28f5009501c55545 | """You are given a string .
Your task is to find out if the string contains:
alphanumeric characters, alphabetical characters, digits,
lowercase and uppercase characters."""
s = raw_input()  # Python 2: read one input line as str
print(any(i.isalnum()for i in s))  # any alphanumeric character?
print(any(i.isalpha()for i in s))  # any alphabetic character?
print(any(i.isdigit()for i in s))  # any digit?
print(any(i.islower()for i in s))  # any lowercase letter?
print(any(i.isupper()for i in s))  # any uppercase letter?
""" any() in python returns
True is any of element of the iterable(list,tuple,dict,set etc) are true
to the condition else returns False."""
|
3,651 | 3f655a12ac45c152215949d3d8bdb71147eeb849 | from collections import deque
def safeInsert(graph, left, right):
    """Record an undirected edge left <-> right in the adjacency-dict graph,
    creating either node's entry on first use."""
    for a, b in ((left, right), (right, left)):
        if a not in graph:
            graph[a] = {}
        graph[a][b] = True
def trace(graph, start, end):
    """BFS from start; return the space-joined shortest path to end,
    or "no route found" when end is unreachable."""
    pred = {start: None}
    frontier = deque([start])
    while frontier:
        node = frontier.popleft()
        for neigh in graph.get(node, ()):
            if neigh not in pred:
                pred[neigh] = node
                frontier.append(neigh)
    if end not in pred:
        return "no route found"
    # Walk predecessors back from end, then reverse into start->end order.
    path = [end]
    while pred[path[-1]] is not None:
        path.append(pred[path[-1]])
    return " ".join(reversed(path))
graph = {}
n = int(raw_input())  # Python 2 input: number of adjacency lines
for i in xrange(n):
    line = raw_input().split()
    # First token is the node; remaining tokens are its neighbours.
    for neigh in line[1:]:
        safeInsert(graph,line[0],neigh)
start,end = raw_input().split()
print trace(graph,start,end)
3,652 | 6ef8a174dcce633b526ce7d6fdb6ceb11089b177 | import sys
def main():
    # Python 2 script: reads blank-line-separated grids from stdin and
    # prints the distinct ones (count goes to stderr).
    lines = [line.strip() for line in sys.stdin.readlines()]
    h = lines.index("")  # grid height = lines before the first blank line
    w = len(lines[0].split()[0])  # grid width; NOTE(review): unused below
    start = 0
    grids = set()
    while start < len(lines):
        # Keep only the first whitespace-separated token of each row.
        grid = tuple(x.split()[0] for x in lines[start:start + h])
        if len(grid) == h:
            grids.add(grid)
        start += h + 1
    print >> sys.stderr, len(grids)  # Python 2 print-to-stderr
    for grid in grids:
        for line in grid:
            print line
        print
main()
|
3,653 | 47c1746c2edfe4018decd59efbacc8be89a1f49e |
from src.basepages.BreadCrumbTicketInfoBasePage import *
class BreadCrumbHomeBasePage:
    """Page object for the breadcrumb home page.

    NOTE(review): __init__ performs no setup; callers are expected to
    attach a ``driver`` attribute before navigating.
    """

    def __init__(self):
        """Nothing to initialise; the webdriver is attached externally."""

    def gotoTicketInfoBasePage(self, ticketInfoPage):
        """Navigate the shared driver to ticketInfoPage and return a
        BreadCrumbTicketInfoBasePage bound to the same driver."""
        self.driver.get(ticketInfoPage)
        next_page = BreadCrumbTicketInfoBasePage()
        next_page.driver = self.driver
        return next_page
|
3,654 | c447d1fe38a4af43de39e05d46dacbe88249d427 | from quantopian.algorithm import order_optimal_portfolio
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import SimpleMovingAverage
from quantopian.pipeline.filters import QTradableStocksUS
import quantopian.optimize as opt
from quantopian.pipeline.factors import Returns
def initialize(context):
    """Algorithm entry point: schedule the daily rebalance and attach the
    stock-selection pipeline."""
    # Rebalance every day, one minute before the market closes.
    #set_slippage(slippage.FixedSlippage(spread=0.0, volume_limit=1))
    #set_slippage(slippage.FixedBasisPointsSlippage(basis_points=0, volume_limit=100))
    #set_slippage(slippage.VolumeShareSlippage(0))
    schedule_function(
        my_rebalance,
        date_rules.every_day(),
        time_rules.market_close(minutes=1 )
    )
    # Create our pipeline and attach it to our algorithm.
    my_pipe = make_pipeline()
    attach_pipeline(my_pipe, 'my_pipeline')
def make_pipeline():
    """Build a pipeline flagging yesterday's single worst performer as the
    long and the single best as the short (a 1-day mean-reversion bet)."""
    #longs = Returns(window_length=2).percentile_between(0,20,mask=QTradableStocksUS())
    #shorts = Returns(window_length=2).percentile_between(80,100,mask=QTradableStocksUS())
    longs = Returns(window_length=2).bottom(1,mask=QTradableStocksUS())
    shorts = Returns(window_length=2).top(1,mask=QTradableStocksUS())
    return Pipeline(
        columns={
            'longs': longs,
            'shorts': shorts,
        },
        screen=QTradableStocksUS()& (shorts | longs)
    )
def compute_target_weights(context, data):
    """
    Map each security to its target portfolio weight: longs split +50%
    evenly, shorts split -50% evenly, and tradable held positions that are
    no longer wanted are zeroed out.
    """
    weights = {}
    if context.longs:
        long_weight = 0.5 / len(context.longs)
    if context.shorts:
        short_weight = -0.5 / len(context.shorts)
    # Exit anything we hold that is neither a long nor a short pick.
    for held in context.portfolio.positions:
        wanted = held in context.longs or held in context.shorts
        if not wanted and data.can_trade(held):
            weights[held] = 0
    for sec in context.longs:
        weights[sec] = long_weight
    for sec in context.shorts:
        weights[sec] = short_weight
    return weights
def before_trading_start(context, data):
    """
    Pull today's pipeline output and cache the tradable long/short picks
    on context for use by my_rebalance.
    """
    # Gets our pipeline output every day.
    pipe_results = pipeline_output('my_pipeline')
    # Longs: rows flagged True in 'longs', filtered to tradable assets.
    context.longs = []
    for sec in pipe_results[pipe_results['longs']].index.tolist():
        if data.can_trade(sec):
            context.longs.append(sec)
    # Shorts: same for the 'shorts' flag.
    context.shorts = []
    for sec in pipe_results[pipe_results['shorts']].index.tolist():
        if data.can_trade(sec):
            context.shorts.append(sec)
def my_rebalance(context, data):
    """
    Daily rebalance: flatten every open position, then re-enter at the
    freshly computed target weights.
    """
    # Close all current positions first.
    for stock in context.portfolio.positions:
        order_target_percent(stock, 0.0)
    # Calculate target weights to rebalance.
    target_weights = compute_target_weights(context, data)
    # If we have target weights, submit an optimal-portfolio order.
    if target_weights:
        order_optimal_portfolio(
            objective=opt.TargetWeights(target_weights),
            constraints=[],
        )
3,655 | dcfc6d76730ba3b33e64cc8f2c166f739bbde5ff | # This script created by Joseph Aaron Campbell - 10/2020
""" With Help from Agisoft Forum @:
https://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791
"""
""" Set up Working Environment """
# import Metashape library module
import Metashape
# create a reference to the current project via Document Class
# Reference to the currently open Metashape project (Document).
doc = Metashape.app.document
# The chunk currently selected in the GUI.
activeChunk = Metashape.app.document.chunk
# Chunk label names the per-chunk output folder below.
currentChunkLabel = activeChunk.label
# Resolve the saved project's parent folder via pathlib:
# 'parent' -> folder containing the project, 'name' -> file name with
# extension, 'stem' -> file name without extension.
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print("parent Folder is : " + parentFolderPath)
# Output layout (Windows separators): _Output/_<chunk label>/_Masks
outputFolder = Path(str(parentFolderPath) + "\\" + "_Output")
outputChunkFolder = Path(str(outputFolder) + "\\" + "_" + str(currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + "\\" + "_Masks")
print("output folder: " + str(outputFolder))
print("output chunk folder: " + str(outputChunkFolder))
print("mask output folder is: " + str(outputMaskfolder))
# mkdir(exist_ok=True) creates each folder if missing, no-op otherwise.
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
# Export masks via the batch Tasks API rather than looping over cameras.
mask_task = Metashape.Tasks.ExportMasks()
# All cameras in the active chunk.
mask_task.cameras = activeChunk.cameras
# Path template -- NOTE(review): "(unknown)" appears to be a per-camera
# filename placeholder substituted by Metashape; confirm against the API docs.
mask_task.path = str(str(outputMaskfolder) + "\\" + "(unknown).png")
# Run the export on the active chunk.
mask_task.apply(object=activeChunk)
|
3,656 | e651edcbe68264e3f25180b10dc8e9d5620ecd6b | import unittest
import requests
class TestAudiobookResponse(unittest.TestCase):
    """Integration tests against a locally running audio API.

    NOTE(review): these require a live server on http://localhost:9001 and
    pre-existing records with the hard-coded ids 5 and 3 -- they are not
    isolated unit tests.
    """

    def test_audiobook_can_insert(self):
        """ test that audiobook can be inserted into db """
        data = {
            "audiotype": "Audiobook",
            "metadata": {
                "duration": 37477,
                "title": "another",
                "author": "Solomon",
                "narrator": "Ndiferke"
            }
        }
        response = requests.post(
            "http://localhost:9001/api/create-audio", json=data)
        success = response.json()
        self.assertEqual(success["success"], True)

    def test_audiobook_can_read(self):
        """ test that audiobook can be read from DB """
        response = requests.get(
            "http://localhost:9001/api/get-audio/Audiobook")
        self.assertEqual(response.status_code, 200)

    def test_audiobook_can_delete(self):
        """ test that audiobook can be deleted from DB"""
        num = str(5)  # id of the record to delete (must pre-exist)
        response = requests.delete(
            "http://localhost:9001/api/delete-audio/Audiobook/"+num)
        self.assertEqual(response.status_code, 200)

    def test_audiobook_can_update(self):
        """ test that audiobook can be updated in DB"""
        data = {
            "audiotype": "Audiobook",
            "metadata": {
                "title": "audiobook1",
                "duration": 45678,
                "author": "Solomon",
                "narrator": "Aniefiok"
            }
        }
        num = str(3)  # id of the record to update (must pre-exist)
        response = requests.put(
            "http://localhost:9001/api/update-audio/Audiobook/"+num, json=data)
        self.assertEqual(response.status_code, 200)

if __name__ == "__main__":
    unittest.main()
|
3,657 | ed65d7e0de3fc792753e34b77254bccc8cee6d66 | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.community.register_path."""
from etils import epath
from tensorflow_datasets.core.community import register_path
def test_data_dir_register():
register = register_path.DataDirRegister(
namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})
assert {'ns1'} == register.namespaces
|
3,658 | 76c1929e901fce469661a299184765875d0eb53f | 쫲𪪌𤣬㐭𤙵⃢姅幛𑄹馻돷軔ሠ𡺶ײַ𢊅𠞡鞿𭞎𦠟𦔻뜸𣦏蛫履뜟𢒰疅𗕢𨞧漷𫴫礴𬽣𨚒𠻚゚罉ꉷ🕸𡑁𩂱𑫌抿锃𫕊𦿮橖𓋊𧭠酞Ё햾𞄶𪳧蕱ꗍ𐊯𬏷먽𬻩뱩𗼾𠑧銋𝂥蘒굳뜀𬜀𮧛𡐔𭋇𭘡𬙒蕶믅𬂚𡟐剿𨸒ᄉ𮩨烘𮩽𭚱𗨦ﳇ큿턽쾘𩁻ꫲ𗨿蚀𨊍胗𗔑鼴𨵾㽡𩠌ݜ𪢭𩘼넴𤩭𬩽𢺤𫅬𧁏響𬶉喡𘨘뉵𨲊Ζ𥀐𩨇𠮢⏂𭖳𫓢𧛃𦥮単𦇭𮏨𬋪婓츏𪱔𭄿𦭺𣍵蹖🨞ޝ仂䱔讔ﭑ𨹟𐊁ﱥ𧅔𓇪뽼𗩢픛𖮌䮁ီ邾ࢠ㭨𣯇𩱵𥋭ѽ샑𭹓𫳃𧝽𥲇ᛲ𘁕粣𣑑ᒔ𣥕쯞⒋𩍥纝攂𨄷ͭ𧲵퍃氩𢐅𤵲뎋𥵘黴𤼩𬝉ⶌ𥬥𧺥浞🇴𑨲𠎆筬⌝༤쾘𤌲ы𡚎𑲙㽰𐛱𫛛𢙆𭢙𣧙𗷰𢦯驯𑠓닋𬎾𗁳𠆑𦆻𮥸𘐘𠠂𐪐𤥈龞𨺔𮙔𡓝𥇛㨶唌揕𦷭銶𛅺𤃒🎾𢴬𫉀ՠ𬆟ꏔ蚦𡈌捐𣷱붝궄쐅𭲊䠜𥾚蹱𞤮𑍇𣘌𬋏𐓡銚捸㉃佼됩젽𮑖𩼊𐣠轴룱ꥁ𣫻𪍓𐡮퉴𩮥𖼙𠑗ꉔ𝔒𗔀𩾀煫𥨺𨰋𝋱𑨪聧猎쿇𣵽쵟𗈔詒碼絴𠼂𭖠𗯼䳳렻𡇠ޕ𗴨Ṱ𘤱𬧊𡰑𝑒𐒱𐕜𒋾𫧺뗾𝑡𠮔𒃵𠰫𢅹湽𠻂𩝓遗𗄫瞆𣞶𢈲샂𤸌繗𫼫䱆⨮듺㈉颫𠷤𬥲𫧹𣱅ᒚ磭𮦅𣞜𨭰ᇑ𘏧שׂ𡢒㎇镋𠬼𭫲ꑋ︾𢊪𡦶䝫𗔖𦖯步🀧𠢉邨ꙧ挃平𬓆㖧𡽴火𩔎リ𝠱𓊍䏰𪇙𮙅ᆒ蜥夺ᾎ𬓤𧋫ᔤ㐵𗖣𥎰𦋯滖𐔺𠡫鏸𑿏𦫮뷬𦭧𦹎瑲𐤰⿴㣌橍ᚠ𤊹贶𢺶𒋴믣⍣𠓌𭤅岦៴𩬴𧁎䫻埃𨐳𘆥䏼蟵𬨙𡄞꘠𤢔𦫏𧙁𧴞𞺄𐁖䚤𣩿𮋼𤿒𣧌盉𩺢𗂻⎭𩕒𡌹𝉁铌𢇺🚣𔕹𢹠哄젥𧸩ᐪ烑섈𗼱䍀𦺷羏[鯔𡐼𗺕𨏫𪽬蘙ꙅ𪹍璐氎𨍥𐒙𤭳🁃𩙡쓣𫐄솶ꡄ칑昡𩇓枭拹𠔪䢍윃툓𗖘𥿪𩧅𫿫𧜱🧯㕐α𧄮眳𨿭樸𗿤𦰓뛼槹𤾊𑛃䝑ꇁ𧿋⚳᰼𥁒䋯눪𡕩瞇𥽂𢥤龀И𩕄휐𩈹𗼴䓆😅𣯷𨔱㯂肯ᖭ𪻸둼𥰷𭇿𤪂郌𣽅쏈𭺮톿𪧨𡋛刚𤧈扂𡓽𢎨𢋪柏𨄓嚒넏薓𠽅뺓𗃠者늅𦆴𩕼ፔ鰄𨻃𣨖紙𡪁𧃒𦼲粌𠖒𑒅𛇝𠣩𓌺𥯀𪄸𝠫톰𠽣𠤍⋋樜𤓭𑋍讲닖𦶰𬓿𮈵𢃫䡷쌽𮈔󠅹ﳅ疟㓐轒䙖쮚𝓙𡃄𨐾𧕽𮨎𥚘𝝚䅐羥㋵镌𫗵䶈春㷚𘟰𢺝幖𫮳𠷵ྈ𬣆𞹺离ᅸ𦋿𥳿𨻳𨄩𞋠焅ꀚ𝜮𫲷𩪍𤤍㞄ྡ榑𪴮𫣭𥌃𬥐𨩴𩒩ࠟ𨆖ꍷ𫝷ឱ২𦏡𖬑𡿉쏭𨧎骝电𣉵𨣖𦥢荐𢔕𧡶體𝢭𨐤ぱ𗁼𤧎橰ꋷ𥧈僵ᔈ쪢𭍐𦘗⸣𪺿𡞦𑆜𫬖ꌸ𭗘𫰈挔𝕦𔑻麣酵𠣻𥰮𒁫襡掲𝠇z𥉗栱蛁ᏹ𞤕𪊞𩤾롶歯𡒾𓃴ஏᎄ䴆𫓚𡣦䧋盎𑠲𭠱퐋𤌕𡡧𣀬馕𤀋ꧼ𮆝䵜𠇉𭉫𩼸𨞚Ċ𡈎쿺䉂𗩢𪹙𥺨翡䄿筩㶝⋂𧀋Ճ𑍢𥑦钼쥔멏𗒣ᠮ𔒅𖫫𝣏𛆱𤭖픐𗅒𫳎𑱷挫品뜒𤾡𗰑驙⑉𩤤𮡱𪪔𧶀𤃚쫮雚촽苪𮔝𮐝𨅣𥯘{𭛧𦪢쑇𩵓𬌄䟎𗇃휒𘆷𣷢𦂕𢍀𭈤៓形𨂓僡틖嘱쩊𗐗킱𨱧뾓렽뀇𥍸𠫊𥏹𗼑𣷨𪎼🦰𭋥ﭑ蟠𔓄𬏑𤃸𤽰╪𤅞𬨸𬣸옔𠍩𐊖𦎄𐲓𬔴𤃈諫𦅻𨾘𬦨𪳌ꬌ𡐈𨑥ུॖ𩌼詣𭏣𬍧嬱瀞𡯏𒃊𫈻𪠁𣲘𧥱𫂽𬐅𪞚堉⮐ꭒ緂𩛏໕𢝑纫𮜶𬦂𦘘㖮ܐㄯΧ𥊜哚栌𑆸𩿞𨏿𭬊屐𧀿𪆦㴗ꮒ▍𪀜蟜𗱤𭚿𧂙䆚鴹🜽𐓃ﮔ𝍓𓍰䯌𬥰뺔𐣡ꎳ𑂭𝂘𦷽Ȑ氂⎤𡍙𪱮𥵕핣𘋛橛𝒽ꑖ𠁡𠕛𣿷할ꈖ砎恴፩𧳇쀫🄜輪穩𨯡錒𡸛𣺶𥝢姐𥂙𩚙𝟾𦉘ủ짩词莫𠜍【먬󠇅ᧇ纩𧥊酬𩀸𐳝𠊆𦆹ⅾ𬒽잯𪒿𗓒𒃰𦥑𭾰𬀧몀𐜰㵨喐𦸙𨲄𔑕䤹𦋺ⅱ𭧊𤡥몋盠샳⤠𢭖觨𠦜벦𮓂𪂆🤃𗞟徦𫩓섈𪼧䦋路ԁ𞢭籧𥉧𧻂𢘜𦛌𥈞𫂪愦𡠦䍙𥪐𪤾ﭿ𑠢쩭𢎀𤘭𠋫𫰪𤸆𫌮𗨄ꆗ𫫒뤗𪻼𫰋𠦟𪜒𩩻𞴗ﳼ🖭𨼌𖤺𭮪꙱晡䑬𢂅𐰬🜔𢌕鉋富క𝅇宅呫𩥅𠶌﹝糨🠽𩥀瓹⛯渌𮒫𨧨𝜡잰𐪇𭀳𦾠Հ𥦲拌㾣䀹𮁼昿𓃓𩊜𫽡𩾣虜첄𪈒𔘧𢍺𫙥ജݙ𢶅𤸥𪝵涼ぞ맗懖Ҭ졜卡蜏施㌋𘏻𥴅𭰙緯𩘃𝙾珇𭭞縑퐱𧂾膷𢊢흏尬𣫩𩂫劏ᘤ𦯖𠌥𗮉翥𤿾𦪗𖢄皻츋𮄿ꜟ𥌆𦧫𠕕桾𥬪𨽞ᛧἏ𪆋𓄤𧓘𦃌𬪌ݼ启풷⮇𣵶𧭭𤹴鬼𦯁₄̂🡬𝦙𦳑𬔕𬩔謍歃땠쫂捅𛊽𤢃🖩𐔡𭖥𩧔䂜𩬝𗽛𬳌𭡵錅𩻣𥔬🗖𫻌𡊝죰ᄅẀ𣑄𪷴𨁘𩥂𢫒𪞑𦡍𭺶𥁦𮈸𡢉𭀸ߍ「𡰑𦚷𗼎𨝯𫆶𪫴𢪵陠𪃦𧝔𣁎䌈𥄿𠼝𣸷𨄨퐉𨖹齯瓛𣲿𪭨𦗯𪣋𭮎샑𬮼𪯷𖦥렾ㄑ𦠺𮗸𭗽𘛥Ꮬ𪸚𭀏ꤢ𬞘𠃬𡗪ᬦꇈ귴𑈮㯧枑𒁄𒔘額𠆒🦕𒉞𨉖蚅劜𨌍韮㓪𗙂믻𧫝䄖𐿩𮚺㛃𫬉贶𨾖𩢖ꧼ윢𭑣ꠁ𑠏𦚴L飓𝛤𝍃䥐毿𨷝த㲔𑠫𠥵𪸝𭘚𐍣鯑𮬤𡶍鹉𭶵𫳬엽藁䂻𩘐鋁🁙骲襃茉ဤ𭽡住繤傝𤲨毫𬇕𢦍𡽗鼲ࠀᎿ榖𢈱᳧𐳥𦲹𣒕𦗮缍괙𧝦𥔃𧢰𠰼𦦓𧥴𝆟쩑𨻐䙱𥙸𑵖𠮟𢣸띪䚞ꬔ𠍬㉌𐢒捐镪𪔜𮆬𥧩🁣ࠄ𣊻𘞴쪦𡒜𮣢또ꤖ⑈𩁓𦺻ꢗ𗄁ᓸۏ𐅸𧆪ࢳヮ𒁐𮝧𗥎𧉓㬂쭡𦽹𒈜ᵺ䄣𥨠𗼱翂𩒙쵥嬄𢡂𮨋▔噳𫕤𢹊撴𥢀⅔𨓓퉆朼𮛩𭌐𪕚ꞏ塨ऋ㚫𝛣🩸𤷤𬤱𝖩囇𩽮黨滢𣶛𣲐𪶯𪺵𥩘𣟋𧿴孀䄉𧞟𦏖𮁍㼽𥐝ᶷ䵇ꑴ𗔬𤑑𭓺豫𗰂𤏚퍍𝀲ꠝ𭁼脀趸㚺ય紡𩀎𬋟𬊫𢰮䍊㔳🞄㡝𣻻𠿗𑄒𬤔傚𬻰𥿣𦺮𪯡潶쉧𧫊𢸬橧◪𧦠𑍱ವ𢷙𠅴艒筐ᅫ𪈴𤇤𩁢𪊬𗎈𡌑鋗𪵓𢞷𤔝𣸅𧧂櫁롃𫲆𩈘𛁂ᑾ庹𧥦𥈖𧫰䪤𖧖礽팛冮ꛮ𠇠팷𫱂ⷵ𫀢𘩃𮎋傺⬇𭍿𦯲諑𑋕𭮥𬚻𥯆𩾐𦅅䲿𪳤𭩈𡀯𩂺똣𤕿𮥬𭻐锞W酣狉🤴𪚣镹鞪덯玈𪢽𫐶𗽵𐦊醄𘣨𢎅𭯮𤸳𬰃👌镐𬃎𩎝뮹𫈚𩹱𬛖䅥獛𝜖𧾰仓𮋳䱤븍懭既𘈾𡠛惗🚝𗚁䩪𬉮孚ࢨ𮢔叓发𡞇𫻰𡫩𩴲㘐𦫜랻硺𐼳ᅨ魋𥂽󠇓𩓜𘈯삪ꖔ𢄛🩫∑𭱃ꂇ𨘾𨹜𧟼朳㨟𣧣𐋺𢓯🌓䴺௷𮁷韶𨤦𤸳𡽐𡧞ꬥ댊🜭𭒐𤥤䀖㕍𘌍🂠𩨕𭩿𑋝𤰅𨎝𭰌𪁞𨇯﹋𫻊𫠁𫎧纭鹚𨳏놛幭耼ꨌ♎𩤾𭠔찼𠤎𢟁𡛊ᘏ겾ಿ𫎯䞽徤蹗𢙣𝄉⧄䚦𬂦𬖬𤘱𬑾𭚱𝣩়𤗣ઞ𣣐Ḕḥ𗀴𤎺𗕀Љ퉕㹾𤚷힌𬓗苸䊚㠶𭇉𬦅𫿏𧆁𭪠흑𧨴ң袐𞸡𩟖Ͻ𧥀𭗏ꣵ裎𝍇𭉵𨓊࡙垸𑫖⯌繓𡻨㹬𤏇氻Ů𬂨𬧵댂𧼅䫚𣅴𝂨벦𥐐𧢮𪑞롰魢죂𠦼뗉𗄷𬐇㪵苞鲬શ𬵇칪𥏇𠔶𢃬𧠧𩠹恖𡆼ꃭ裢𐐅𐩡𨓶𢡠瘩𗏮讟𣕼𪤹舑ở𭏭𫢮䙡땖𫬧汇𩣉ཛ𮥸㴃빘𭣚𨆊㴻𛂐䨕𢤿𬝼괍먾𫎖💾售蟄𫗑𮕎𪘆𪅩ソ𢃎𘥵홽𣝅𤱟㐌𑘯𥢿𤞈㮬𢕲ꚨ𬐏𧍞탷𠍘ꡚ𭓌𐬡𩉸陷봳𠺶𢿰𫊊𡐂𪭄Ⴋ㆗𭷶𤙧玭🅃㾂𠙊𗣯ⴚ𣗘𧈀𦿒竖⢬𓈝𤫲𪗐⟵舛𪜞𪝵𪰂𒂋꧟䂽ස榡𣵯㸃뫳𗹭䝉禨𘀷𥵰𮅴𢏂㊺ꬸ𮆗𭁻𡴕𡀷𭘎馆☇𩞅쥗𫍰삹떃⋾𓆓턶𡮓𖽸무弸컏𨼚𧤞🛲힌𣽣𢒣픰⦈𬞝찛𪷊柣敫硍𘐿𫴷䀢莭𗜯〞𦿦𝆁𗮝反𬊞^惶🦿🢦䊎𦥣햫ꚫ謉𦷚𛊽𘂸湢𬷾嵝𡗬𥪇𢋅𗪠𭊢𖹇뻢컁헏ᬅ𨐟𦧍愆𭂪𣃍𧣯띧𦞅죉𫪠𗽭𐛮𠲎𗟒姩𫵫߆폝붍𘞀꺦饃🆧
𠎳螱㣇嵕𨬩🝃𪾴𤫳𥄚뫧胾𠽺᭗𢿼楋𓌑ꚗ𨜌ﵟ𑿐𥢈𣯛㋆𥢼🎟ผ蚯𥋾𦨦𗑰𐀥𠤨𓋲끭𡻽𐌋𡔼ᒇࡩ𓋮𢿺ᖺ㰴ꗘ侙𬙨𮋏㔎𦌐偙阂瘁𩓴𘜎購䓨𑍞𧎌Წ𥭝䖔롧𧗈뜴𘅖⫶𤋂耲靬𮃑꓾싖ꦏ𑵄𭽲𩹉𞴕𭆄𑰭䜫廈𩨖ﺍ奃嘔𔖉𣦊𣎥𖽞𢄅죜𧐫𦼪𠴲㆘鵞Ⴋ𝆕簏뇇𤯹툤퐲篣𪪚𓃛甋𥼡龋絜𠷆𭍽𫥍𫢰𬠿ꄻ𫄖𧆵𗵞𮫔𘌵ড়𪼍쨜𩂽𡉑𬓒␑𭯤ꛝ𘍺徽야👁㋄𘓛𐄰➞𢹞𦤣鶩𤤛⁞𬓲銼𨚻𫫞ᷖ侹𦧅𒊧䋜甸躈𠊉𭮹麓䰣𬨖𤔊𥓲𓈌𭖏𨥭᧭𬳏邒𤭳㞁𠞣𢻿𪏩𨊜퍦ᵾ𡦆Დ𩱱壞𘎨𬘧𤊤˫𥗩𞤤묱𝧁𘡃𢔪礚𐍅𤔡𞺈𬷻𪒈𩠃𭭸⬤걢蹶⑻枹ꛡ𭁧㇓䙽𦀭椣⬊谚颚䫺𐳨𭾭𬵊卉𨶸𞢗𬣼𪁓ꤡ𢑜ﴈᙞ𖤠𠘪𬾐𬇞➿𫧗埢𗖫𛈢篺㻟𓎭퐳𐳪🞥𬗚𡣮橥𫏪쇭𣐡𩡂𩍘໕ɒꂰ𬯷𤚑𧹠𬎸䡗픤瀷삈𤛅㷁尘뗡𬙮𩎠𗢓𐣰𮦱𧊥𩪎ꅵ𪓂𑗗隘𫲶𬔡𧕺𐧋𘨸𤟦𐔢𐹲𠆈𝋠⇣𡠏𬽁螕𦯝𤔶𩢸鏹𫲫𤓪ꑎ𔕁𪸙𥓋𡖌𤰁쨴ጏ隶꯭𤪋堄녗懜𐼓𣂽𨂲詶𨛑🞻𞄄澶𩂈𦩳𗴸𪨳𦀛ဦ𝢚筲𤙼𪃎䮇ᜪ傆鿢𫪲𪅇𗈂𓌍릀𢾁𓎿钾乢诧𢙂𥔵𬅈𒁔𣓽𦊏𢒤𫦈𗭸𫠕셖𓆈𥋤漱𗨲𡩞넠𩊜퀓𥲁𮄦𣿵𥥱𤰘𡷻𘂰暱𑆀𫎱ᴍ𨺼🤱롗뉍鎗뮟扎🀣𪂓⇬𢦪샏𧃳𩜘𥋙𧽍𮭦𪼄䥪葧𤓊𦉃桵𪔮𩗨𗻻𩾣𤛄𩽼같𬺍୭㷭𣽲𐔼𦚲𐊻𘩰𥻏圥𑱰𢂯𑦡𓏹𨢓𣅍ಎ石ⅉ鳐𨜐𑆽𠀰棗ᶦ膄𗃂𮪁𡾣窀﹫𠝰㾀𦱨噷𪟄𢯻腒𠴩𘝴𢭼💲𦙫𠸤𭿏𞴅𠨍𦫘鶅𬞿ᖍ𘩸ࡇ𠮫覞퐱𝦓䓾祛𡔷𮗕좺👁ᮧ⚉𩔎⊹ವ𭬹𐔻🥐𐎡𐘯𤿇𤘊𭾰ᘫ𑈦柶𧼐籂𧾻𘦁⾡𦱃梥𥛹𠤬𭪐圮㶘𑒛恓𩠹𪓄𩴻솔漍𗨏𡣆ၤ𩭦𨂖豘𐫒𭪋嘒핮⛴쁘𤜋𗸡𣈐゛𘜷둄𒎐𗉋㌔㙽싙𧄙𪎯𓌷𮧚覞斡𧄬Ⴧ𣘔⍎뛇깜𡔳亘𥐁𭪻𫘛𡩗𛉗𨊬𡦠𮆓冶礟🟊샂㎸𨗜𝗥𡅎𛁻𦳨𬕍훱뤽𝡉ჭ厓𥨑施󠄆𡙊枟𥍅첬禤𫏾𥓙Ⰽ𐐅р𤲽ᐗ䍸얣𗔽𡞿𮋙𣈴Ꮠ橰𣤵鬴𭷞𢠖𘛙낮ꉌ𗎛掙𠞪𥸋噦ꡰ𬣇𣞸턛𣈩㒹𤄔娦𧮹𫱗➪Ꝃ횝Ԃ𢷪搮𤐮蚈𗫚𡿓郌𗓬𖡹𦮞𠯷箆𦉻𦝄툁𐂠𢮺𠊑紵𪙙𬛜𬛶屳ࣗ𫁲𮢞𭥳၂𐡜눐봶㳩𠘲潬󠄶𦘌𥊓𦐠𤙈쫞𮙝䒏矊𒃩젌𧃚𤏎𢻙燄𨏇걈𦦏꿕🠈𖤠㉐𠔽𣳍𤤾𐤶𘐐鷂𝓚蹔⏺𠺋𬟒ㆪ𑲁𣤠ꖶ𛃀踿𨎈╠𭁫𦆱𘡵𤠊ȷ𮛚𠋑𧒑ㅡ礞𬍧𝄴펦㰥聝𪩙𥺣𤾝颂𤵙𢎤ಡ𩁽𠭼𧙰𘇘𪮐𬒨냱𓍤𪮆㶃𩼍𢟮𗇊𧯢𥑱⢀𓇡횾𡗃𤟻➿።𮉏麘🦸𑖹𫟔𪱭𥯩𥔕𗘍𩆛燎㓺𔗀𛃝𤙂𛱖𐅡懠𫋾𮠨돮霷𧤭𘜋𒃇℅溱氀𨒆𑶨饍孛쑡𢸦៙ᕜᒊ𣲤짽閅𤍼𬰲瞪𧱐𫒪앰𪯻𢵒𠎛𨀜𖽭䄳拐譮💹𐬘൰奅𥺕𫗼䪹𔘠𠕎𨩵좿𬞎𨊪𮓋𨱃𑄾燿왭忎ᖺ첄𬱇𣹚氙𓉩🚓𫉄𐬜죈땢⣌ሃ𘥒殤𐭉쁦𖠾툇𦌛퉅澐뿦𮪏勞鹘掆𫷐𭦫𢅹ﳶ⡬ﵚ𡚨𣧨𪋃೯肗왨𭁯𝔂뼆㏸巖𫁘𭇔𩁸𡳡𭼃ᚡ혲𡜷𪥲錝𢑚ㄧ鷦佉𡑎𬰕쩜𥟛𠝴𫪨𝂒潮𨖹𧣽𘕥ퟲ𧔝𨏾𩧠𢩑𣗐𨄃ᲫⅽΌ𭛺🔾좉𩐢𬙴ᣓ𮖸辁𩋱皟𗆻𢠄抜𠹌釨㻜㿨𫼞𮇕𡱔𤒥𭦥𮅪墳𝥣깩ጡ嗩櫼𩵹𤕦ኸ樹𐼓𣨳𦩒🐴直蘚𡴘𥯊𮞾𥁍褑𥫌꿡𤢁𠝈𪟴𭸶𣏒▧𝍸𨋢𓃍𥥁⋦𮂼⼒唭̪𑐆졋𡠪놦𤜣麚䫾㹱𨸪ሮ먡븛𬠫𭶨𥋥𭡱ᕤ𘝈𣝦𩙖俥혫𤜂𥹂𬋆𮒉⥻𩎞㑵읻ᣌ𐋳𩢸𗍍𧺈瞯𗙫𨶂🕈𢠧𫾛𥏑𠐄纰𡛧㧁𥟱铯𫒈𤡃𓊥𨃭躞𬎋𪵰𢙁췖𭜭핺䈗𫿇䜿𡁭ս鮀🀤𦢁춫淧ᙝ𫙼ꞡ𨂘𒊝뾐𫥠𝖱𣼤❭𓄦𣩤𓅠ง盙㘵𗂺핞𬋄틀𢻗ᷢ𗯛𠷞茱𨋢𪹉㒹𗌲凉𬟠𮀑ᾶ𤲯𨉃ᐘ𖽊𨮄𒉜襖𦰔誛𩢮𫉿𤥛◇ⷳ𠸃𠅐𥡒𤪾𒔗ፒ𣶐𤗅𧾂𥲟𠷤쐥姣괐䌔𦑟挄𮢮㘔𧰤𐿩禇鿚𢯎웅ꔱ𬃀仜𦤊𑆹𤂢袈𗸷뻑𦲖🏨ퟆ겓𪰀㊥𭏁𦛃믃𢒝入ꚋ갦𑄢쯞𨪽𤥄𨾁࣫𛊁㢡逈𗚸𭏅᳦粰朅𣝿𢨤瓃𢛥𫔉𒑑⾁𩗤𡭱𗾐𥠬𠅿⾒𬜣꜂됑機𬖰𥤣口窱哚𐲊栆𣳲𣬫㺼𥹸𐙫𢗤𬝴맯𤤃绠옧𪳵𫹡𭶌𗫎𥃄𦨨𭆩↮𡜛冪㐶𢷝ᙨ𦌔𧣌🖛ㄞ쏅㜾捫𣂯𬪄쀯𡠶𩅩𥄩𬂬蘭傌𥬠𘗅𣳣𧰋𧙐镪ᦗ𘞪ᝣꏘ𩝘뛟𗴕彼𮔳퉃𧄦ྌ𘫛𭺹𭶴𢇘䠠𬬭𤅵矅ꈷ𘋣嘧졫𝆒Ф𘛝먒㳎𤇗𨼇㥃𢸆䑻뻇ᇦ𡻍툋𮍠餩鳎𩑣𗄮𦶠󠆏𩇊𮥏🩐𣔨𬃥𧀣𗯦𦪽즧ꏠ늟硯䒛숁а𠴨蹻𧚝𡌄쮀𩮋战𠻽𘃱𢏌炅嘳꒑𘨸롌吅ᚍ𦩍𝟷𛇪𧌰𮏆萌﹨𩐬𨏨𪓁𦪞𦘇ᐑ𮁬𭿀𧍹ꂊモ𞸈𮜵𦫸⇊ﻫ量𧃛𪺗𗋫ຢ𘦺𫴍𗸊姄𬺘𞴤𪟗𘧑ࢨꑱ𪝘暚🛆𐅹𗐜頓𘝆𥉣𪋲莾⡨𦝉쩜㐥𢻯䗈𩝻🡇𘦅莣𩍁奖뚟𪅲乌𤨹𧮛ﵼ푃𦕶𡮽𪬫𑩶ᦗ𞡾𘝠𪆆푭迎閁ʼn쒇Ⅰ𥔟𧮐ㆊ𪊹𠳿壁𩦓罹붜莩鑴𩬸𢨩𘠺슸𠒹𧘾𮯋𦮿𫖃肀𣿙𪢕𧋡𮇲𦺉ͥ𡡧𔘳깵𖽭䶁𩊃犡掄𛄉𑣅𥃝觰𫭙㘿𢳶𮕡ꛡ郃밌賮𡤔𖫴몾𥩗𡰖쐞𨊮譌𠉮𨬋薭ꬺ屈쪏촭𠷁촯ẙ𪞗𧶕䆬ꚸ𨒡䉮䌩𑃓뼭𦛠㎜𩍼类㚭짖╈䲝𩓃𐡏쌂𞺘ܻ𑘉ച𮊻𬢋𓅥𢇻𖭄𘝔ꥷ𒓛ﭟ𭍌흈𣊥읻𧛹ඥ𠧃㥵쾛𡒄𥈓욱𨵭祶𤸆𦸍𨆡🚳𞋥𘞊𩭩ᓭ粣𠓓㨀ฝ𑚓虉𣸑긁捙𥢧ⶸ沂Φ㗡䒫俥𧣅𬮺𗅟𪝽𩮎𪮏𤪣𭱵𨇧𦓈പ𖧓𑢡𘊾懂𢩆䢽𭞱•뼝怜𪴉夹𭭦𣦬𫠭𬗠𤨯尃ᔪ綶𪻣忈𪈏㪆𢀽𤢔𢣲茤衚ᇱ𭨽ꐱ𬯷𢒃와𗐩ꀞ绸甓𭰆𣻟ꎦ𡢇𭾊阍𠾖ဖ𧶠𬲋𨀎漕䑐𗤶𫇂閁𦪇𝌴𡙋𣗸𧉚𨦥𪍆拉𝧍啊䙹𤭁𑧗𡪰𭐳婂𤗯⡃𓃖𫱆𑴴㟿𨜋𗥑ዧ𢔏𪶰𒋶𡛺箕䝳ꚽỡ𮙌𗬚䕐𦷆𩘛𧎥𧐞𭄬𥞀ฏ𩙏鋋⩕䇨열ꍘ𭲍𨟠廡ᄁ䍟𩧆𧵆ꙵ𗝶𡲎ㅭ𧕳屈☇𦭤𛁝曡櫁䦹뻭𤣰䕖𠔟댁𨕈𬹥𘂙ꇼ𫇄𡵺𛁔𐜅𣟀𮞣✑꾠磱景𗮔𥉖鷿ꊨ𤃑Ɲ꾚䦆𢂂𥍘ে邿⒟ꃍ腾𢛬𝜁𥊵𦻋𩐄𤛉䕄輾ꭜ쉗㣭𪫳🥥𥹚𬬽묕훴漼𭴰仺𫓟杂𧘜蟾𗤎𩫄瞊䤉॥𑱧솃廝𠛼𤴹𑢢㧡ᅋﷀ𢆐ꘞ鰏籂㑎𥨮𡠆𨈌띯𨨘𒉲𡖅𪵧🟊詟뮃𡣳仑᮫췋𧤼𤳶㽅合𩥇휅𡴿쟸𘅗𭡙䦮쉪ᯏ𦼛췛䓛촩倄𫊸𫯰삌≩𘗵𐃲贓𒊾ᓽ𥌔𑿤䡈𭯋濾转𭮔䀜円느𤮜𮜠𦚻𬎾𗏎𬮴𡢯𣊿వ𭌧邔㤔ݑ𒎌𮠈𣅅𭬏꽟厢𥿴𠾻𤫱𣦑죔𩚟𝑌𗅐𥔡𥏟𢤴𒔍𢦗ꑰ🥈왝柸𠢚⬳ꨩ𡸒쇃𮒨詝쑞𬹕𫯪貘𩀐썅𑱒म𦑰⍻㽰ᤳ𭢺𦢛𩘴𣞚眉𡀦푪𑄍𡈽𡏃𭇅𥾧𗷐𨯕䣉稪𠢩𠖒𪱫𪋇𢃨𩖯ᐵ煀瞡𝆇𣷍𬋣曰𨺎徴🞕𪈿𗳅댿ṛ芔𠨺瘟𣤒𗑿ⱷ𒇱𡝓𣵕𨭮ﵬ𥪃𘞱𪀅𧬰𦬞䉮ꃐ𮛜껴𦮎𢃷𬘇ꋆ𣽈𝍷砃𦀿Ⲋ𧢠𒅭𨄇𮚠靜𐨆唋ⅅ𡄬𧭩𭪕𢔻𝐜𢂢𥽐ؤ𭘷𖣵𬘞🆌譼ン覻🁌𠠴𡱳漒謹𤣆𗎌𫽞𮠸꺪𤴸骫虡𭶗넡븉𝌛甉딘W𩏀💕𠼫𥚲좽𡢩𬣑𐋋ᨥ𡍪𛰓ꌵ𘓷𑒛𐚒经𓎮𣥇𗝟溍𤄿𓉷ɮ𝐿𬌆𦿳𢰔榆쭹Ꮉ誷㩉떙즩ℚ𡩎𢵊𣈈𣺴ᨸ꧍嶇B𥢂𗓓𫌓𑀱𭭜𫌌𭳠餤䘶銾𭐚爵𩷷찖分掉 |
3,659 | 127bf47de554dd397d18c6a70616a2a4d93cae80 | """Sophie Tan's special AI."""
from typing import Sequence, Tuple
from battleships import Player, ShotResult
from random import randint
class SophiesAI(Player):
    """Sophie Tan's Random Shot Magic."""

    def ship_locations(self) -> Sequence[Tuple[int, int, int, bool]]:
        """Place a single length-2 ship horizontally at the top-left corner."""
        return [(2, 0, 0, True)]

    def drop_bomb(self) -> Tuple[int, int]:
        """Fire at a uniformly random cell of the board."""
        upper = self.size - 1
        return randint(0, upper), randint(0, upper)

    def bomb_feedback(self, x: int, y: int, result: ShotResult):
        """Ignore the outcome of our own shots."""
        pass

    def bombed_feedback(self, x: int, y: int, result: ShotResult):
        """Ignore information about incoming shots."""
        pass
|
3,660 | 22ffda3b2d84218af22bad7835689ec3d4959ab2 | import numpy
import yfinance as yf
import pandas as pd
import path
import math
pd.options.mode.chained_assignment = None # default='warn'
all_tickers = ['2020.OL',
'ABG.OL',
'ADE.OL',
'AFG.OL',
'AKAST.OL',
'AKER.OL',
'AKBM.OL',
'AKRBP.OL',
'AKH.OL',
'AKSO.OL',
'AKVA.OL',
'AMSC.OL',
'AQUA.OL',
'ARCH.OL',
'AZT.OL',
'ARCUS.OL',
'AFK.OL',
'ARR.OL',
'ASTK.OL',
'ATEA.OL',
'ASA.OL',
'AURG.OL',
'AUSS.OL',
'AGAS.OL',
'AWDR.OL',
'ACR.OL',
'B2H.OL',
'BAKKA.OL',
'BELCO.OL',
'BGBIO.OL',
'BEWI.OL',
'BONHR.OL',
'BOR.OL',
'BORR.OL',
'BRG.OL',
'BOUV.OL',
'BWE.OL',
'BWLPG.OL',
'BWO.OL',
'BMA.OL',
'CADLR.OL',
'CARA.OL',
'CONTX.OL',
'CRAYN.OL',
'DLTX.OL',
'DNB.OL',
'DNO.OL',
'DOF.OL',
'EIOF.OL',
'EMGS.OL',
'ELK.OL',
'ENDUR.OL',
'ENSU.OL',
'ENTRA.OL',
'EQNR.OL',
'EPR.OL',
'FJORD.OL',
'FKRFT.OL',
'FLNG.OL',
'FRO.OL',
'FROY.OL',
'GIG.OL',
'RISH.OL',
'GJF.OL',
'GOGL.OL',
'GOD.OL',
'GSF.OL',
'GYL.OL',
'HAFNI.OL',
'HAVI.OL',
'HYARD.OL',
'HEX.OL',
'HBC.OL',
'HSPG.OL',
'IDEX.OL',
'INFRO.OL',
'INSR.OL',
'IOX.OL',
'ITERA.OL',
'JIN.OL',
'JAREN.OL',
'KAHOT.OL',
'KID.OL',
'KIT.OL',
'KMCP.OL',
'KOMP.OL',
'KOA.OL',
'KOG.OL',
'LSG.OL',
'LINK.OL',
'MGN.OL',
'MSEIS.OL',
'MEDI.OL',
'MELG.OL',
'MOWI.OL',
'MPCC.OL',
'MULTI.OL',
'NAPA.OL',
'NAVA.OL',
'NKR.OL',
'NEL.OL',
'NEXT.OL',
'NORBT.OL',
'NANOV.OL',
'NOD.OL',
'NHY.OL',
'NSKOG.OL',
'NODL.OL',
'NOL.OL',
'NRS.OL',
'NAS.OL',
'NOR.OL',
'NOFI.OL',
'NPRO.OL',
'NRC.OL',
'NTS.OL',
'OCY.OL',
'OTS.OL',
'ODL.OL',
'ODF.OL',
'ODFB.OL',
'OKEA.OL',
'OET.OL',
'OLT.OL',
'ORK.OL',
'OTEC.OL',
'PEN.OL',
'PARB.OL',
'PCIB.OL',
'PSE.OL',
'PEXIP.OL',
'PGS.OL',
'PHO.OL',
'PLCS.OL',
'POL.OL',
'PLT.OL',
'PRS.OL',
'PROT.OL',
'QFR.OL',
'QEC.OL',
'RAKP.OL',
'REACH.OL',
'RECSI.OL',
'SAGA.OL',
'SALM.OL',
'SACAM.OL',
'SADG.OL',
'SASNO.OL',
'SATS.OL',
'SBANK.OL',
'SCANA.OL',
'SCATC.OL',
'SCHA.OL',
'SCHB.OL',
'SDSD.OL',
'SBX.OL',
'SDRL.OL',
'SSG.OL',
'SBO.OL',
'SHLF.OL',
'SIOFF.OL',
'SKUE.OL',
'SOGN.OL',
'SOLON.OL',
'SOFF.OL',
'MING.OL',
'SRBNK.OL',
'SOON.OL',
'MORG.OL',
'SOR.OL',
'SVEG.OL',
'SPOG.OL',
'SNOR.OL',
'SPOL.OL',
'HELG.OL',
'NONG.OL',
'RING.OL',
'SOAG.OL',
'SNI.OL',
'STB.OL',
'STRO.OL',
'SUBC.OL',
'TRVX.OL',
'TECH.OL',
'TEL.OL',
'TGS.OL',
'TIETO.OL',
'TOM.OL',
'TOTG.OL',
'TRE.OL',
'ULTI.OL',
'VEI.OL',
'VISTN.OL',
'VOLUE.OL',
'VVL.OL',
'VOW.OL',
'WAWI.OL',
'WSTEP.OL',
'WWI.OL',
'WWIB.OL',
'WILS.OL',
'XXL.OL',
'YAR.OL',
'ZAL.OL']
#all_tickers = ['EQNR.OL', 'NHY.OL']
# Extra market-wide series downloaded alongside the stocks (Yahoo Finance
# symbols — presumably VIX, Brent futures, 10y US yield, USD/NOK; confirm).
input_tickers = ['^VIX', 'BZ=F', '^TNX', 'NOK=X'] # + Volume, + 50/200 moving avg
#small_cap_tickers = ['FKRFT.OL', 'PROT.OL']
def calculate_returns(ticker_data):
    """Compute a daily-return series for one ticker's price history.

    The first row has no previous close, so its return is the intraday move
    (Close - Open) / Open; every later row is the day-over-day change in
    adjusted close.

    ticker_data: per-ticker DataFrame with columns Open, High, Low, Close,
        'Adj Close', ... in yfinance's standard order.
    Returns a list of floats, one per row.
    """
    returns = []
    previous_row = None
    for row in ticker_data.itertuples():
        if previous_row is None:  # identity test, not ==, for the sentinel
            # first trading day: fall back to the intraday return
            returns.append((row.Close - row.Open) / row.Open)
        else:
            # itertuples renames 'Adj Close' to positional field _5
            # (Index=0, Open=1, High=2, Low=3, Close=4, Adj Close=5)
            returns.append((row._5 - previous_row._5) / previous_row._5)
        previous_row = row
    return returns
def add_moving_price_avg(ticker_data, days):
    """Trailing moving average of 'Adj Close' over the previous `days` rows.

    Row i receives the mean of rows [i - days, i) — the current day is
    excluded — and the first `days` rows get NaN while the window fills.

    Replaces the original O(n^2) per-row `index.isin` filtering with a
    positional slice; equivalent as long as the per-ticker frame has one
    row per date (true for yf.download output — confirm if reused).
    """
    prices = ticker_data['Adj Close']
    averages = []
    for i in range(len(prices)):
        if i >= days:
            averages.append(prices.iloc[i - days:i].mean())
        else:
            # window not yet full
            averages.append(numpy.nan)
    return averages
def add_year_column(df_ticker_data):
    """Return the frame sorted by its date index with a 'Year' column added.

    Equivalent to the original per-date select-and-concat loop (a stable
    sort keeps the within-date row order) but O(n log n) instead of O(n^2),
    and it no longer mutates slices of the input.
    """
    # mergesort is stable, so rows sharing a date keep their original order
    output = df_ticker_data.sort_index(kind='mergesort').copy()
    # .year works for both datetime.date and pandas Timestamp index values
    output['Year'] = [date.year for date in output.index]
    return output
def scrape_ticker_data(cap_tickers):
    """Download full price history for every ticker and enrich each frame
    with its symbol, daily returns and 50/200-day moving averages, then
    return everything concatenated with a 'Year' column appended."""
    frames = []
    for symbol in cap_tickers:
        history = yf.download(symbol, group_by="Ticker", period='max')
        # yfinance frames carry no symbol column, so tag every row ourselves
        history['Ticker'] = symbol
        history['Returns'] = calculate_returns(history)
        history['avg_50'] = add_moving_price_avg(history, 50)
        history['avg_200'] = add_moving_price_avg(history, 200)
        frames.append(history)
    combined = pd.concat(frames)
    print("Adding Year column")
    return add_year_column(combined)
def scrape_extra_input_data(input_tickers):
    """Download the full history of each extra input series and return the
    concatenated frames, each row tagged with its symbol."""
    downloaded = []
    for symbol in input_tickers:
        series = yf.download(symbol, group_by="Ticker", period='max')
        # yfinance omits the symbol column, so add it here
        series['Ticker'] = symbol
        downloaded.append(series)
    return pd.concat(downloaded)
def generate_finalized_input_data(input_data):
    """Pivot the input series into one adjusted-close column per ticker.

    input_data: frame with 'Adj Close' and 'Ticker' columns, date-indexed.
    Returns a date-indexed frame with one column per ticker (aligned on the
    index), or an empty frame when the input holds no tickers.
    """
    frames = []
    for ticker in input_data.Ticker.unique():
        print(str(ticker))
        # select + rename instead of mutating a slice (chained assignment),
        # and name the column after the loop ticker itself rather than
        # trusting iloc[0, -1] to be the 'Ticker' column
        subset = input_data.loc[input_data.Ticker == ticker, ['Adj Close']]
        frames.append(subset.rename(columns={'Adj Close': str(ticker)}))
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, axis=1)
def merge_market_cap(df_output):
    """Left-join the static per-year market-cap table onto the scraped frame."""
    # The CSV ships with the repo; resolve it relative to this script's folder.
    csv_path = path.Path(__file__).parent / "../static/marketCapAllShares.csv"
    with csv_path.open() as csv_file:
        market_caps = pd.read_csv(csv_file, delimiter=";")
    merged = pd.merge(df_output, market_caps, how='left', on=['Ticker', 'Year'])
    # pd.merge resets the index, so restore the original date index.
    return merged.set_index(df_output.index)
# --- Script body: scrape, enrich, merge and persist the dataset ---
print("Scraping historic data for all tickers with size " + str(len(all_tickers)))
df_scraped_data = scrape_ticker_data(all_tickers)
print("Finished scraping data for all cap tickers")
print("Scraping input data with size " + str(len(input_tickers)))
df_scraped_input = scrape_extra_input_data(input_tickers)
print("Concatinating adjusted close for all input data")
df_finalized_input = generate_finalized_input_data(df_scraped_input)
print("Merging market cap into existing dataframe")
df_merged_with_market_cap = merge_market_cap(df_scraped_data)
print("Merging existing dataframe with input data")
# left-join the extra input columns on the shared date index
df_finalized = pd.merge(df_merged_with_market_cap, df_finalized_input, how='left', left_index=True, right_index=True).set_index(df_merged_with_market_cap.index)
# save to csv
df_finalized.to_csv('scrapedData.csv')
def format_amount(a):
    """Normalize a table cell: drop thousands separators, then surrounding
    whitespace, then any '%' and '$' characters."""
    cleaned = a.replace(",", "").strip()
    for symbol in ("%", "$"):
        cleaned = cleaned.replace(symbol, "")
    return cleaned
def create_json(gdp, coords):
    """Build the Countries.ron body from the two raw tables.

    gdp: tab-separated lines "<rank>\t<name>\t<gdp>\t...\t<population>\t..."
         (name at column 1, GDP at 2, population at 5).
    coords: comma-separated lines "<name>,<capital>,<lat>,<long>".
    Returns the complete RON file contents as one string.

    Raises KeyError if a GDP country name has no coordinate entry.
    """
    # ------------ Split gdp data ------------ #
    gdp_rows = [line.split('\t') for line in gdp.split('\n') if line != ""]
    # ------------ Split coord data ------------ #
    coord_rows = [line.split(',') for line in coords.split('\n') if line != ""]
    # key the coordinate table by normalized country name -> [capital, lat, long]
    coord_by_name = {format_amount(row[0]): row[1:] for row in coord_rows}
    # -------- Build country entries (join instead of quadratic +=) -------- #
    entries = []
    for row in gdp_rows:
        name = format_amount(row[1])
        place = coord_by_name[name]
        entries.append(
            '(name:"' + name + '",'
            + 'gdp:' + format_amount(row[2]) + ','
            + 'population:' + format_amount(row[5]) + ','
            + 'lat:' + format_amount(place[1]) + ','
            + 'long:' + format_amount(place[2]) + ')'
        )
    # ----------- Assemble file (debug print of coords removed) ----------- #
    header = "// This file is automatically generated by game-statics/utils/countryRON.py.\n// Please do not edit."
    return header + "\n[" + ",".join(entries) + "]"
def create_file():
    """Render the country data and write it to ../assets/Countries.ron."""
    data = create_json(d, coords)
    # context manager guarantees the handle is closed even if write() fails
    with open("../assets/Countries.ron", "w", encoding='utf8') as ron_file:
        ron_file.write(data)
# Copied from https://www.worldometers.info/gdp/gdp-by-country/
# Country GDP GDP formated GDP change Population GDP per capita share of word GDP
d='''
1 United States $19,485,394,000,000 $19.485 trillion 2.27% 325,084,756 $59,939 24.08%
2 China $12,237,700,479,375 $12.238 trillion 6.90% 1,421,021,791 $8,612 15.12%
3 Japan $4,872,415,104,315 $4.872 trillion 1.71% 127,502,725 $38,214 6.02%
4 Germany $3,693,204,332,230 $3.693 trillion 2.22% 82,658,409 $44,680 4.56%
5 India $2,650,725,335,364 $2.651 trillion 6.68% 1,338,676,785 $1,980 3.28%
6 United Kingdom $2,637,866,340,434 $2.638 trillion 1.79% 66,727,461 $39,532 3.26%
7 France $2,582,501,307,216 $2.583 trillion 1.82% 64,842,509 $39,827 3.19%
8 Brazil $2,053,594,877,013 $2.054 trillion 0.98% 207,833,823 $9,881 2.54%
9 Italy $1,943,835,376,342 $1.944 trillion 1.50% 60,673,701 $32,038 2.40%
10 Canada $1,647,120,175,449 $1.647 trillion 3.05% 36,732,095 $44,841 2.04%
11 Russia $1,578,417,211,937 $1.578 trillion 1.55% 145,530,082 $10,846 1.95%
12 South Korea $1,530,750,923,149 $1.531 trillion 3.06% 51,096,415 $29,958 1.89%
13 Australia $1,323,421,072,479 $1.323 trillion 1.96% 24,584,620 $53,831 1.64%
14 Spain $1,314,314,164,402 $1.314 trillion 3.05% 46,647,428 $28,175 1.62%
15 Mexico $1,150,887,823,404 $1.151 trillion 2.04% 124,777,324 $9,224 1.42%
16 Indonesia $1,015,420,587,285 $1.015 trillion 5.07% 264,650,963 $3,837 1.25%
17 Turkey $851,549,299,635 $852 billion 7.44% 81,116,450 $10,498 1.05%
18 Netherlands $830,572,618,850 $831 billion 3.16% 17,021,347 $48,796 1.03%
19 Saudi Arabia $686,738,400,000 $687 billion -0.86% 33,101,179 $20,747 0.85%
20 Switzerland $678,965,423,322 $679 billion 1.09% 8,455,804 $80,296 0.84%
21 Argentina $637,430,331,479 $637 billion 2.85% 43,937,140 $14,508 0.79%
22 Sweden $535,607,385,506 $536 billion 2.29% 9,904,896 $54,075 0.66%
23 Poland $526,465,839,003 $526 billion 4.81% 37,953,180 $13,871 0.65%
24 Belgium $494,763,551,891 $495 billion 1.73% 11,419,748 $43,325 0.61%
25 Thailand $455,302,682,986 $455 billion 3.91% 69,209,810 $6,579 0.56%
26 Iran $454,012,768,724 $454 billion 3.76% 80,673,883 $5,628 0.56%
27 Austria $416,835,975,862 $417 billion 3.04% 8,819,901 $47,261 0.52%
28 Norway $399,488,897,844 $399 billion 1.92% 5,296,326 $75,428 0.49%
29 United Arab Emirates $382,575,085,092 $383 billion 0.79% 9,487,203 $40,325 0.47%
30 Nigeria $375,745,486,521 $376 billion 0.81% 190,873,244 $1,969 0.46%
31 Israel $353,268,411,919 $353 billion 3.33% 8,243,848 $42,852 0.44%
32 South Africa $348,871,647,960 $349 billion 1.32% 57,009,756 $6,120 0.43%
33 Hong Kong $341,449,340,451 $341 billion 3.79% 7,306,322 $46,733 0.42%
34 Ireland $331,430,014,003 $331 billion 7.80% 4,753,279 $69,727 0.41%
35 Denmark $329,865,537,183 $330 billion 2.24% 5,732,274 $57,545 0.41%
36 Singapore $323,907,234,412 $324 billion 3.62% 5,708,041 $56,746 0.40%
37 Malaysia $314,710,259,511 $315 billion 5.90% 31,104,646 $10,118 0.39%
38 Colombia $314,457,601,860 $314 billion 1.79% 48,909,839 $6,429 0.39%
39 Philippines $313,595,208,737 $314 billion 6.68% 105,172,925 $2,982 0.39%
40 Pakistan $304,951,818,494 $305 billion 5.70% 207,906,209 $1,467 0.38%
41 Chile $277,075,944,402 $277 billion 1.49% 18,470,439 $15,001 0.34%
42 Finland $252,301,837,573 $252 billion 2.63% 5,511,371 $45,778 0.31%
43 Bangladesh $249,723,862,487 $250 billion 7.28% 159,685,424 $1,564 0.31%
44 Egypt $235,369,129,338 $235 billion 4.18% 96,442,591 $2,441 0.29%
45 Vietnam $223,779,865,815 $224 billion 6.81% 94,600,648 $2,366 0.28%
46 Portugal $219,308,128,887 $219 billion 2.68% 10,288,527 $21,316 0.27%
47 Czech Republic $215,913,545,038 $216 billion 4.29% 10,641,034 $20,291 0.27%
48 Romania $211,883,923,504 $212 billion 7.26% 19,653,969 $10,781 0.26%
49 Peru $211,389,272,242 $211 billion 2.53% 31,444,298 $6,723 0.26%
50 New Zealand $204,139,049,909 $204 billion 3.03% 4,702,034 $43,415 0.25%
51 Greece $203,085,551,429 $203 billion 1.35% 10,569,450 $19,214 0.25%
52 Iraq $192,060,810,811 $192 billion -2.07% 37,552,781 $5,114 0.24%
53 Algeria $167,555,280,113 $168 billion 1.60% 41,389,189 $4,048 0.21%
54 Qatar $166,928,571,429 $167 billion 1.58% 2,724,728 $61,264 0.21%
55 Kazakhstan $162,886,867,832 $163 billion 4.10% 18,080,019 $9,009 0.20%
56 Hungary $139,761,138,103 $140 billion 3.99% 9,729,823 $14,364 0.17%
57 Angola $122,123,822,334 $122 billion -0.15% 29,816,766 $4,096 0.15%
58 Kuwait $120,126,277,613 $120 billion -2.87% 4,056,099 $29,616 0.15%
59 Sudan $117,487,857,143 $117 billion 4.28% 40,813,397 $2,879 0.15%
60 Ukraine $112,154,185,121 $112 billion 2.52% 44,487,709 $2,521 0.14%
61 Morocco $109,708,728,849 $110 billion 4.09% 35,581,255 $3,083 0.14%
62 Ecuador $104,295,862,000 $104 billion 2.37% 16,785,361 $6,214 0.13%
63 Cuba $96,851,000,000 $96.85 billion 1.78% 11,339,254 $8,541 0.12%
64 Slovakia $95,617,670,260 $95.62 billion 3.40% 5,447,900 $17,551 0.12%
65 Sri Lanka $87,357,205,923 $87.36 billion 3.31% 21,128,032 $4,135 0.11%
66 Ethiopia $80,561,496,134 $80.56 billion 10.25% 106,399,924 $757 0.10%
67 Kenya $79,263,075,749 $79.26 billion 4.87% 50,221,142 $1,578 0.10%
68 Dominican Republic $75,931,656,815 $75.93 billion 4.55% 10,513,104 $7,223 0.09%
69 Guatemala $75,620,095,538 $75.62 billion 2.76% 16,914,970 $4,471 0.09%
70 Oman $70,783,875,163 $70.78 billion -0.27% 4,665,928 $15,170 0.09%
71 Myanmar $67,068,745,521 $67.07 billion 6.76% 53,382,523 $1,256 0.08%
72 Luxembourg $62,316,359,824 $62.32 billion 2.30% 591,910 $105,280 0.08%
73 Panama $62,283,756,584 $62.28 billion 5.32% 4,106,769 $15,166 0.08%
74 Ghana $58,996,776,238 $59.00 billion 8.14% 29,121,465 $2,026 0.07%
75 Bulgaria $58,220,973,783 $58.22 billion 3.81% 7,102,444 $8,197 0.07%
76 Costa Rica $57,285,984,448 $57.29 billion 3.28% 4,949,954 $11,573 0.07%
77 Uruguay $56,156,972,158 $56.16 billion 2.66% 3,436,641 $16,341 0.07%
78 Croatia $55,213,087,271 $55.21 billion 2.92% 4,182,857 $13,200 0.07%
79 Belarus $54,456,465,473 $54.46 billion 2.42% 9,450,231 $5,762 0.07%
80 Lebanon $53,576,985,687 $53.58 billion 1.53% 6,819,373 $7,857 0.07%
81 Tanzania $53,320,625,959 $53.32 billion 7.10% 54,660,339 $975 0.07%
82 Macau $50,361,201,096 $50.36 billion 9.10% 622,585 $80,890 0.06%
83 Uzbekistan $49,677,172,714 $49.68 billion 5.30% 31,959,785 $1,554 0.06%
84 Slovenia $48,769,655,479 $48.77 billion 5.00% 2,076,394 $23,488 0.06%
85 Lithuania $47,544,459,559 $47.54 billion 3.83% 2,845,414 $16,709 0.06%
86 Serbia $41,431,648,801 $41.43 billion 1.87% 8,829,628 $4,692 0.05%
87 Azerbaijan $40,747,792,238 $40.75 billion 0.10% 9,845,320 $4,139 0.05%
88 Jordan $40,068,308,451 $40.07 billion 1.97% 9,785,843 $4,095 0.05%
89 Tunisia $39,952,095,561 $39.95 billion 1.96% 11,433,443 $3,494 0.05%
90 Paraguay $39,667,400,816 $39.67 billion 5.21% 6,867,061 $5,776 0.05%
91 Libya $38,107,728,083 $38.11 billion 26.68% 6,580,724 $5,791 0.05%
92 Turkmenistan $37,926,285,714 $37.93 billion 6.50% 5,757,667 $6,587 0.05%
93 DR Congo $37,642,482,562 $37.64 billion 3.70% 81,398,764 $462 0.05%
94 Bolivia $37,508,642,113 $37.51 billion 4.20% 11,192,855 $3,351 0.05%
95 Côte d'Ivoire $37,353,276,059 $37.35 billion 7.70% 24,437,470 $1,529 0.05%
96 Bahrain $35,432,686,170 $35.43 billion 3.88% 1,494,076 $23,715 0.04%
97 Cameroon $34,922,782,311 $34.92 billion 3.55% 24,566,073 $1,422 0.04%
98 Yemen $31,267,675,216 $31.27 billion -5.94% 27,834,819 $1,123 0.04%
99 Latvia $30,463,302,414 $30.46 billion 4.55% 1,951,097 $15,613 0.04%
100 Estonia $26,611,651,599 $26.61 billion 4.85% 1,319,390 $20,170 0.03%
101 Uganda $25,995,031,850 $26.00 billion 3.86% 41,166,588 $631 0.03%
102 Zambia $25,868,142,073 $25.87 billion 3.40% 16,853,599 $1,535 0.03%
103 Nepal $24,880,266,905 $24.88 billion 7.91% 27,632,681 $900 0.03%
104 El Salvador $24,805,439,600 $24.81 billion 2.32% 6,388,126 $3,883 0.03%
105 Iceland $24,488,467,010 $24.49 billion 3.64% 334,393 $73,233 0.03%
106 Honduras $22,978,532,897 $22.98 billion 4.79% 9,429,013 $2,437 0.03%
107 Cambodia $22,158,209,503 $22.16 billion 7.10% 16,009,409 $1,384 0.03%
108 Trinidad and Tobago $22,079,017,627 $22.08 billion -2.34% 1,384,059 $15,952 0.03%
109 Cyprus $22,054,225,828 $22.05 billion 4.23% 1,179,678 $18,695 0.03%
110 Zimbabwe $22,040,902,300 $22.04 billion 4.70% 14,236,595 $1,548 0.03%
111 Senegal $21,070,225,735 $21.07 billion 7.15% 15,419,355 $1,366 0.03%
112 Papua New Guinea $20,536,314,601 $20.54 billion 2.55% 8,438,036 $2,434 0.03%
113 Afghanistan $19,543,976,895 $19.54 billion 2.67% 36,296,113 $538 0.02%
114 Bosnia and Herzegovina $18,054,854,789 $18.05 billion 3.19% 3,351,525 $5,387 0.02%
115 Botswana $17,406,565,823 $17.41 billion 2.36% 2,205,080 $7,894 0.02%
116 Laos $16,853,087,485 $16.85 billion 6.89% 6,953,035 $2,424 0.02%
117 Mali $15,334,336,144 $15.33 billion 5.40% 18,512,430 $828 0.02%
118 Georgia $15,081,338,092 $15.08 billion 4.83% 4,008,716 $3,762 0.02%
119 Gabon $15,013,950,984 $15.01 billion 0.50% 2,064,823 $7,271 0.02%
120 Jamaica $14,781,107,822 $14.78 billion 0.98% 2,920,848 $5,061 0.02%
121 Palestine $14,498,100,000 $14.50 billion 3.14% 4,747,227 $3,054 0.02%
122 Nicaragua $13,814,261,536 $13.81 billion 4.86% 6,384,846 $2,164 0.02%
123 Mauritius $13,266,427,697 $13.27 billion 3.82% 1,264,499 $10,491 0.02%
124 Namibia $13,253,698,015 $13.25 billion -0.95% 2,402,633 $5,516 0.02%
125 Albania $13,038,538,300 $13.04 billion 3.84% 2,884,169 $4,521 0.02%
126 Mozambique $12,645,508,634 $12.65 billion 3.74% 28,649,018 $441 0.02%
127 Malta $12,518,134,319 $12.52 billion 6.42% 437,933 $28,585 0.02%
128 Burkina Faso $12,322,864,245 $12.32 billion 6.30% 19,193,234 $642 0.02%
129 Equatorial Guinea $12,293,579,173 $12.29 billion -4.92% 1,262,002 $9,741 0.02%
130 Bahamas $12,162,100,000 $12.16 billion 1.44% 381,755 $31,858 0.02%
131 Brunei $12,128,089,002 $12.13 billion 1.33% 424,473 $28,572 0.01%
132 Armenia $11,536,590,636 $11.54 billion 7.50% 2,944,791 $3,918 0.01%
133 Madagascar $11,499,803,807 $11.50 billion 4.17% 25,570,512 $450 0.01%
134 Mongolia $11,433,635,876 $11.43 billion 5.30% 3,113,786 $3,672 0.01%
135 North Macedonia $11,279,509,014 $11.28 billion 0.24% 2,081,996 $5,418 0.01%
136 Guinea $10,472,514,515 $10.47 billion 10.60% 12,067,519 $868 0.01%
137 Chad $9,871,247,732 $9.87 billion -2.95% 15,016,753 $657 0.01%
138 Benin $9,246,696,924 $9.25 billion 5.84% 11,175,198 $827 0.01%
139 Rwanda $9,135,454,442 $9.14 billion 6.06% 11,980,961 $762 0.01%
140 Congo $8,701,334,800 $8.70 billion -3.10% 5,110,695 $1,703 0.01%
141 Haiti $8,408,150,518 $8.41 billion 1.17% 10,982,366 $766 0.01%
142 Moldova $8,128,493,432 $8.13 billion 4.50% 4,059,684 $2,002 0.01%
143 Niger $8,119,710,126 $8.12 billion 4.89% 21,602,382 $376 0.01%
144 Kyrgyzstan $7,564,738,836 $7.56 billion 4.58% 6,189,733 $1,222 0.01%
145 Tajikistan $7,146,449,583 $7.15 billion 7.62% 8,880,268 $805 0.01%
146 Malawi $6,303,292,264 $6.30 billion 4.00% 17,670,196 $357 0.01%
147 Guam $5,859,000,000 $5.86 billion 0.19% 164,281 $35,665 0.01%
148 Fiji $5,061,202,767 $5.06 billion 3.80% 877,459 $5,768 0.01%
149 Mauritania $5,024,708,656 $5.02 billion 3.50% 4,282,570 $1,173 0.01%
150 Maldives $4,865,546,027 $4.87 billion 6.91% 496,402 $9,802 0.01%
151 Montenegro $4,844,592,067 $4.84 billion 4.70% 627,563 $7,720 0.01%
152 Togo $4,757,776,485 $4.76 billion 4.40% 7,698,474 $618 0.01%
153 Barbados $4,673,500,000 $4.67 billion 1.00% 286,232 $16,328 0.01%
154 Eswatini $4,433,664,364 $4.43 billion 1.87% 1,124,805 $3,942 0.01%
155 Sierra Leone $3,775,047,334 $3.78 billion 4.21% 7,488,423 $504 0.00%
156 Guyana $3,621,046,005 $3.62 billion 2.92% 775,222 $4,671 0.00%
157 Liberia $3,285,455,000 $3.29 billion 2.47% 4,702,226 $699 0.00%
158 Burundi $3,172,416,146 $3.17 billion 0.50% 10,827,019 $293 0.00%
159 Andorra $3,012,914,131 $3.01 billion 1.87% 77,001 $39,128 0.00%
160 Suriname $2,995,827,901 $3.00 billion 1.69% 570,496 $5,251 0.00%
161 Timor-Leste $2,954,621,000 $2.95 billion -8.00% 1,243,258 $2,377 0.00%
162 Aruba $2,700,558,659 $2.70 billion 1.33% 105,366 $25,630 0.00%
163 Lesotho $2,578,265,358 $2.58 billion -2.29% 2,091,534 $1,233 0.00%
164 Bhutan $2,528,007,911 $2.53 billion 4.63% 745,563 $3,391 0.00%
165 Central African Republic $1,949,411,659 $1.95 billion 4.30% 4,596,023 $424 0.00%
166 Belize $1,862,614,800 $1.86 billion 1.44% 375,769 $4,957 0.00%
167 Cape Verde $1,772,706,451 $1.77 billion 4.01% 537,498 $3,298 0.00%
168 Saint Lucia $1,737,504,296 $1.74 billion 3.82% 180,954 $9,602 0.00%
169 San Marino $1,632,860,041 $1.63 billion 1.50% 33,671 $48,495 0.00%
170 Northern Mariana Islands $1,593,000,000 $1.59 billion 25.14% 56,562 $28,164 0.00%
171 Antigua and Barbuda $1,510,084,751 $1.51 billion 3.03% 95,426 $15,825 0.00%
172 Seychelles $1,497,959,569 $1.50 billion 5.28% 96,418 $15,536 0.00%
173 Gambia $1,489,464,788 $1.49 billion 4.56% 2,213,889 $673 0.00%
174 Guinea-Bissau $1,346,841,897 $1.35 billion 5.92% 1,828,145 $737 0.00%
175 Solomon Islands $1,303,453,622 $1.30 billion 3.24% 636,039 $2,049 0.00%
176 Grenada $1,126,882,296 $1.13 billion 5.06% 110,874 $10,164 0.00%
177 Comoros $1,068,124,330 $1.07 billion 2.71% 813,892 $1,312 0.00%
178 Saint Kitts and Nevis $992,007,403 $992 million 1.17% 52,045 $19,061 0.00%
179 Vanuatu $862,879,789 $863 million 4.50% 285,510 $3,022 0.00%
180 Samoa $840,927,997 $841 million 2.70% 195,352 $4,305 0.00%
181 Saint Vincent and the Grenadines $785,222,509 $785 million 0.86% 109,827 $7,150 0.00%
182 American Samoa $634,000,000 $634 million -5.38% 55,620 $11,399 0.00%
183 Dominica $496,727,000 $497 million -9.53% 71,458 $6,951 0.00%
184 Tonga $427,659,795 $428 million 2.70% 101,998 $4,193 0.00%
185 São Tomé and Príncipe $392,570,293 $393 million 3.87% 207,089 $1,896 0.00%
186 Micronesia $336,427,500 $336 million 3.20% 532,899 $631 0.00%
187 Palau $289,823,500 $290 million -3.57% 17,808 $16,275 0.00%
188 Marshall Islands $204,173,430 $204 million 3.60% 58,058 $3,517 0.00%
189 Kiribati $185,572,502 $186 million 0.33% 114,158 $1,626 0.00%
190 Tuvalu $39,731,317 $40 million 3.24% 11,370 $3,494 0.00%'''
coords = '''Abkhazia,Sukhumi,43.001525,41.023415
Afghanistan,Kabul,34.575503,69.240073
Aland Islands,Mariehamn,60.1,19.933333
Albania,Tirana,41.327546,19.818698
Algeria,Algiers,36.752887,3.042048
American Samoa,Pago Pago,-14.275632,-170.702036
Andorra,Andorra la Vella,42.506317,1.521835
Angola,Luanda,-8.839988,13.289437
Anguilla,The Valley,18.214813,-63.057441
Antarctica,South Pole,-90,0
Antigua and Barbuda,Saint John's,17.12741,-61.846772
Argentina,Buenos Aires,-34.603684,-58.381559
Armenia,Yerevan,40.179186,44.499103
Aruba,Oranjestad,12.509204,-70.008631
Australia,Canberra,-35.282,149.128684
Austria,Vienna,48.208174,16.373819
Azerbaijan,Baku,40.409262,49.867092
Bahamas,Nassau,25.047984,-77.355413
Bahrain,Manama,26.228516,50.58605
Bangladesh,Dhaka,23.810332,90.412518
Barbados,Bridgetown,13.113222,-59.598809
Belarus,Minsk,53.90454,27.561524
Belgium,Brussels,50.85034,4.35171
Belize,Belmopan,17.251011,-88.75902
Benin,Porto-Novo,6.496857,2.628852
Bermuda,Hamilton,32.294816,-64.781375
Bhutan,Thimphu,27.472792,89.639286
Bolivia,La Paz,-16.489689,-68.119294
Bosnia and Herzegovina,Sarajevo,43.856259,18.413076
Botswana,Gaborone,-24.628208,25.923147
Bouvet Island,Bouvet Island,-54.43,3.38
Brazil,Brasília,-15.794229,-47.882166
British Indian Ocean Territory,Camp Justice,21.3419,55.4778
British Virgin Islands,Road Town,18.428612,-64.618466
Brunei,Bandar Seri Begawan,4.903052,114.939821
Bulgaria,Sofia,42.697708,23.321868
Burkina Faso,Ouagadougou,12.371428,-1.51966
Burundi,Bujumbura,-3.361378,29.359878
Cambodia,Phnom Penh,11.544873,104.892167
Cameroon,Yaoundé,3.848033,11.502075
Canada,Ottawa,45.42153,-75.697193
Cape Verde,Praia,14.93305,-23.513327
Cayman Islands,George Town,19.286932,-81.367439
Central African Republic,Bangui,4.394674,18.55819
Chad,N'Djamena,12.134846,15.055742
Chile,Santiago,-33.44889,-70.669265
China,Beijing,39.904211,116.407395
Christmas Island,Flying Fish Cove,-10.420686,105.679379
Cocos (Keeling) Islands,West Island,-12.188834,96.829316
Colombia,Bogotá,4.710989,-74.072092
Comoros,Moroni,-11.717216,43.247315
DR Congo,Kinshasa,-4.441931,15.266293
Congo,Brazzaville,-4.26336,15.242885
Cook Islands,Avarua,-21.212901,-159.782306
Costa Rica,San José,9.928069,-84.090725
Côte d'Ivoire,Yamoussoukro,6.827623,-5.289343
Croatia,Zagreb ,45.815011,15.981919
Cuba,Havana,23.05407,-82.345189
Curaçao,Willemstad,12.122422,-68.882423
Cyprus,Nicosia,35.185566,33.382276
Czech Republic,Prague,50.075538,14.4378
Denmark,Copenhagen,55.676097,12.568337
Djibouti,Djibouti,11.572077,43.145647
Dominica,Roseau,15.309168,-61.379355
Dominican Republic,Santo Domingo,18.486058,-69.931212
Ecuador,Quito,-0.180653,-78.467838
Egypt,Cairo,30.04442,31.235712
El Salvador,San Salvador,13.69294,-89.218191
Equatorial Guinea,Malabo,3.750412,8.737104
Eritrea,Asmara,15.322877,38.925052
Estonia,Tallinn,59.436961,24.753575
Ethiopia,Addis Ababa,8.980603,38.757761
Falkland Islands (Islas Malvinas),Stanley,-51.697713,-57.851663
Faroe Islands,Tórshavn,62.007864,-6.790982
Fiji,Suva,-18.124809,178.450079
Finland,Helsinki,60.173324,24.941025
France,Paris,48.856614,2.352222
French Guiana,Cayenne,4.92242,-52.313453
French Polynesia,Papeete,-17.551625,-149.558476
French Southern Territories,Saint-Pierre ,-21.3419,55.4778
Gabon,Libreville,0.416198,9.467268
Gambia,Banjul,13.454876,-16.579032
Georgia,Tbilisi,41.715138,44.827096
Germany,Berlin,52.520007,13.404954
Ghana,Accra,5.603717,-0.186964
Gibraltar,Gibraltar,36.140773,-5.353599
Greece,Athens,37.983917,23.72936
Greenland,Nuuk,64.18141,-51.694138
Grenada,Saint George's,12.056098,-61.7488
Guadeloupe,Basse-Terre,16.014453,-61.706411
Guam,Hagåtña,13.470891,144.751278
Guatemala,Guatemala City,14.634915,-90.506882
Guernsey,Saint Peter Port,49.455443,-2.536871
Guinea,Conakry,9.641185,-13.578401
Guinea-Bissau,Bissau,11.881655,-15.617794
Guyana,Georgetown,6.801279,-58.155125
Haiti,Port-au-Prince,18.594395,-72.307433
Honduras,Tegucigalpa,14.072275,-87.192136
Hong Kong,Hong Kong,22.396428,114.109497
Hungary,Budapest,47.497912,19.040235
Iceland,Reykjavík,64.126521,-21.817439
India,New Delhi,28.613939,77.209021
Indonesia,Jakarta,-6.208763,106.845599
Iran,Tehran,35.689198,51.388974
Iraq,Baghdad,33.312806,44.361488
Ireland,Dublin,53.349805,-6.26031
Isle of Man,Douglas,54.152337,-4.486123
Israel,Tel Aviv,32.0853,34.781768
Italy,Rome,41.902784,12.496366
Jamaica,Kingston,18.042327,-76.802893
Japan,Tokyo,35.709026,139.731992
Jersey,Saint Helier,49.186823,-2.106568
Jordan,Amman,31.956578,35.945695
Kazakhstan,Astana,51.160523,71.470356
Kenya,Nairobi,-1.292066,36.821946
Kiribati,Tarawa Atoll,1.451817,172.971662
Kosovo,Pristina,42.662914,21.165503
Kuwait,Kuwait City,29.375859,47.977405
Kyrgyzstan,Bishkek,42.874621,74.569762
Laos,Vientiane,17.975706,102.633104
Latvia,Riga,56.949649,24.105186
Lebanon,Beirut,33.888629,35.495479
Lesotho,Maseru,-29.363219,27.51436
Liberia,Monrovia,6.290743,-10.760524
Libya,Tripoli,32.887209,13.191338
Liechtenstein,Vaduz,47.14103,9.520928
Lithuania,Vilnius,54.687156,25.279651
Luxembourg,Luxembourg,49.611621,6.131935
Macau,Macau,22.166667,113.55
North Macedonia,Skopje,41.997346,21.427996
Madagascar,Antananarivo,-18.87919,47.507905
Malawi,Lilongwe,-13.962612,33.774119
Malaysia,Kuala Lumpur,3.139003,101.686855
Maldives,Malé,4.175496,73.509347
Mali,Bamako,12.639232,-8.002889
Malta,Valletta,35.898909,14.514553
Marshall Islands,Majuro,7.116421,171.185774
Martinique,Fort-de-France,14.616065,-61.05878
Mauritania,Nouakchott,18.07353,-15.958237
Mauritius,Port Louis,-20.166896,57.502332
Mayotte,Mamoudzou,-12.780949,45.227872
Mexico,Mexico City,19.432608,-99.133208
Micronesia,Palikir,6.914712,158.161027
Moldova,Chisinau,47.010453,28.86381
Monaco,Monaco,43.737411,7.420816
Mongolia,Ulaanbaatar,47.886399,106.905744
Montenegro,Podgorica,42.43042,19.259364
Montserrat,Plymouth,16.706523,-62.215738
Morocco,Rabat,33.97159,-6.849813
Mozambique,Maputo,-25.891968,32.605135
Myanmar,Naypyidaw,19.763306,96.07851
Nagorno-Karabakh Republic,Stepanakert,39.826385,46.763595
Namibia,Windhoek,-22.560881,17.065755
Nauru,Yaren,-0.546686,166.921091
Nepal,Kathmandu,27.717245,85.323961
Netherlands,Amsterdam,52.370216,4.895168
Netherlands Antilles,Willemstad ,12.1091242,-68.9316546
New Caledonia,Nouméa,-22.255823,166.450524
New Zealand,Wellington,-41.28646,174.776236
Nicaragua,Managua,12.114993,-86.236174
Niger,Niamey,13.511596,2.125385
Nigeria,Abuja,9.076479,7.398574
Niue,Alofi,-19.055371,-169.917871
Norfolk Island,Kingston,-29.056394,167.959588
North Korea,Pyongyang,39.039219,125.762524
Northern Cyprus,Nicosia,35.185566,33.382276
Northern Mariana Islands,Saipan,15.177801,145.750967
Norway,Oslo,59.913869,10.752245
Oman,Muscat,23.58589,58.405923
Pakistan,Islamabad,33.729388,73.093146
Palau,Ngerulmud,7.500384,134.624289
Palestine,Ramallah,31.9073509,35.5354719
Panama,Panama City,9.101179,-79.402864
Papua New Guinea,Port Moresby,-9.4438,147.180267
Paraguay,Asuncion,-25.26374,-57.575926
Peru,Lima,-12.046374,-77.042793
Philippines,Manila,14.599512,120.98422
Pitcairn Islands,Adamstown,-25.06629,-130.100464
Poland,Warsaw,52.229676,21.012229
Portugal,Lisbon,38.722252,-9.139337
Puerto Rico,San Juan,18.466334,-66.105722
Qatar,Doha,25.285447,51.53104
Réunion,Saint-Denis,-20.882057,55.450675
Romania,Bucharest,44.426767,26.102538
Russia,Moscow,55.755826,37.6173
Rwanda,Kigali,-1.957875,30.112735
Saint Pierre and Miquelon,Saint Pierre,46.775846,-56.180636
Saint Vincent and the Grenadines,Kingstown,13.160025,-61.224816
Samoa,Apia,-13.850696,-171.751355
San Marino,San Marino,43.935591,12.447281
São Tomé and Príncipe,São Tomé,0.330192,6.733343
Saudi Arabia,Riyadh,24.749403,46.902838
Senegal,Dakar,14.764504,-17.366029
Serbia,Belgrade,44.786568,20.448922
Seychelles,Victoria,-4.619143,55.451315
Sierra Leone,Freetown,8.465677,-13.231722
Singapore,Singapore,1.280095,103.850949
Slovakia,Bratislava,48.145892,17.107137
Slovenia,Ljubljana,46.056947,14.505751
Solomon Islands,Honiara,-9.445638,159.9729
Somalia,Mogadishu,2.046934,45.318162
South Africa,Pretoria,-25.747868,28.229271
South Georgia and the South Sandwich Islands,King Edward Point,-54.28325,-36.493735
South Korea,Seoul,37.566535,126.977969
South Ossetia,Tskhinvali,42.22146,43.964405
South Sudan,Juba,4.859363,31.57125
Spain,Madrid,40.416775,-3.70379
Sri Lanka,Sri Jayawardenepura Kotte,6.89407,79.902478
Saint Barthélemy,Gustavia,17.896435,-62.852201
Saint Kitts and Nevis,Basseterre,17.302606,-62.717692
Saint Lucia,Castries,14.010109,-60.987469
Saint Martin,Marigot,18.067519,-63.082466
Sudan,Khartoum,15.500654,32.559899
Suriname,Paramaribo,5.852036,-55.203828
Svalbard and Jan Mayen,Longyearbyen ,78.062,22.055
Eswatini,Mbabane,-26.305448,31.136672
Sweden,Stockholm,59.329323,18.068581
Switzerland,Bern,46.947974,7.447447
Syria,Damascus,33.513807,36.276528
Taiwan,Taipei,25.032969,121.565418
Tajikistan,Dushanbe,38.559772,68.787038
Tanzania,Dodoma,-6.162959,35.751607
Thailand,Bangkok,13.756331,100.501765
Timor-Leste,Dili,-8.556856,125.560314
Togo,Lomé,6.172497,1.231362
Tokelau,Nukunonu,-9.2005,-171.848
Tonga,Nukuʻalofa,-21.139342,-175.204947
Transnistria,Tiraspol,46.848185,29.596805
Trinidad and Tobago,Port of Spain,10.654901,-61.501926
Tristan da Cunha,Edinburgh of the Seven Seas,-37.068042,-12.311315
Tunisia,Tunis,36.806495,10.181532
Turkey,Ankara,39.933364,32.859742
Turkmenistan,Ashgabat,37.960077,58.326063
Turks and Caicos Islands,Cockburn Town,21.467458,-71.13891
Tuvalu,Funafuti,-8.520066,179.198128
U.S. Virgin Islands,Charlotte Amalie,18.3419,-64.930701
Uganda,Kampala,0.347596,32.58252
Ukraine,Kiev,50.4501,30.5234
United Arab Emirates,Abu Dhabi,24.299174,54.697277
United Kingdom,London,51.507351,-0.127758
United States,Washington,38.907192,-77.036871
Uruguay,Montevideo,-34.901113,-56.164531
Uzbekistan,Tashkent,41.299496,69.240073
Vanuatu,Port Vila,-17.733251,168.327325
Vatican City,Vatican City,41.902179,12.453601
Venezuela,Caracas,10.480594,-66.903606
Vietnam,Hanoi,21.027764,105.83416
Wallis and Futuna,Mata-Utu,-13.282509,-176.176447
Western Sahara,El Aaiún,27.125287,-13.1625
Yemen,Sana'a,15.369445,44.191007
Zambia,Lusaka,-15.387526,28.322817
Zimbabwe,Harare,-17.825166,31.03351'''
# script entry point: regenerate ../assets/Countries.ron from the tables above
create_file()
|
3,662 | c8406db010a506b782030c5d3f84c319851e89d6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the TwitterKeys model: per-user Twitter OAuth credentials."""

    dependencies = [
        ('twitter', '0002_tweet'),
    ]

    operations = [
        migrations.CreateModel(
            name='TwitterKeys',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                # NOTE(review): OAuth secrets live in plain CharFields —
                # confirm this is acceptable for the deployment's threat model.
                ('consumer_key', models.CharField(max_length=200)),
                ('consumer_secret', models.CharField(max_length=200)),
                ('access_token', models.CharField(max_length=200)),
                ('access_token_secret', models.CharField(max_length=200)),
                ('user', models.ForeignKey(to='twitter.TwitterUser')),
            ],
        ),
    ]
|
3,663 | 0681ab83843187701ac72018b6078f5141bf22e0 | import sys
import os
import tensorflow as tf
import keras
from cv2 import *
from keras.models import Sequential
from PIL import Image
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping

np.random.seed(7)

# Cap GPU memory use so other jobs can share the device (TF1-style session config).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.keras.backend.set_session(tf.Session(config=config))

# FIX: threshold=np.nan raises TypeError on NumPy >= 1.22; sys.maxsize is the
# documented value for "print the whole array".
np.set_printoptions(threshold=sys.maxsize)

# Load the pre-computed GEI images and labels from the pickle file.
# (Removed dead code: the empty-list initialisations, the unused os.listdir()
# result that shadowed the builtin `list`, and the unused counter `i`.)
with open('gei.txt', 'rb') as fr:
    x_train = pickle.load(fr)
    y_train = pickle.load(fr)
    print('pickle successfully read')

# 80/20 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2)

input_shape = (128, 96, 1)   # GEI frames: 128x96, single grayscale channel
batch_size = 128
num_classes = 128            # number of subject identities
epochs = 100

# To arrays, add the trailing channel axis, scale pixels to [0, 1].
x_train = np.expand_dims(np.array(x_train), axis=3).astype('float32') / 255
x_test = np.expand_dims(np.array(x_test), axis=3).astype('float32') / 255
# One-hot encode the integer labels.
y_train = keras.utils.to_categorical(np.array(y_train), num_classes)
y_test = keras.utils.to_categorical(np.array(y_test), num_classes)

# Small 2-conv CNN: conv/pool x2 -> dropout -> dense classifier.
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss : ", score[0])
print("Test Accuracy : ", score[1])
|
3,664 | e3386b01bb0bdc7064a2e3e9f3edce8a3231721b | import json
from faker import Faker
import random
fake = Faker()
from faker.providers import date_time
fake.add_provider(date_time)
class Hour(object):
    """Randomly generated opening-hours record for a single weekday."""

    def __init__(self):
        # Day name comes from faker; open/close are random 12-hour clock values.
        self.dayOfTheWeek = fake.day_of_week()
        self.openingTime = '{}AM'.format(random.randint(1, 12))
        self.closingTime = '{}PM'.format(random.randint(1, 12))
|
3,665 | 2d248ac1df1845bc5a2ee62a7171c1c47ca6d0ca | #!/usr/bin/env python3
import sys
sys.path.insert(0, '../../common/python/')
from primality import prime_factors
"""
phi(n) = n*sum_{p|n} (1 - 1/p)
1/phi(n) = (1/n)*sum_{p|n} p/(p - 1)
n/phi(n) = sum_{p|n} p/(p - 1)
"""
def n_over_phi(n):
    """Return n/phi(n) as a float: the product over distinct primes p|n of p/(p-1).

    Only the distinct prime factors matter; the multiplicities returned by
    prime_factors are irrelevant (the original iterated .items() and ignored
    the counts).
    """
    top = 1
    bot = 1
    for p in prime_factors(n):  # dict iteration yields the distinct primes
        top *= p
        bot *= p - 1
    return top / bot
def maximise_n_over_phi(upto):
    """Return the n in [2, upto] with the largest n/phi(n).

    Ties keep the first (smallest) n; returns 0 when upto < 2, matching the
    original's behaviour.
    """
    best_n, best_val = 0, 0
    for candidate in range(2, upto + 1):
        ratio = n_over_phi(candidate)
        if ratio > best_val:
            best_val, best_n = ratio, candidate
    return best_n
def main():
    # Project Euler 69: the n <= 1,000,000 that maximises n/phi(n).
    print(maximise_n_over_phi(1000000))

if __name__ == '__main__':
    main()
|
3,666 | c6d61a0159073304309cd4b1534ed5aed666bab5 | import os, glob, argparse, json, re
from collections import defaultdict
import numpy as np
import pandas as pd
from utils.CONSTANTS import output_dir, all_chromosomes, BINNED_CHRSZ, dataset_expts
def extract_chrom_num(s):
    """Return the first integer embedded in *s*, or 24 when none is present.

    Used as a sort key for model names like 'chromschr21'; 24 sorts
    unnumbered (e.g. X) chromosomes after the numbered ones.
    """
    # FIX: raw string for the regex — '\d' in a plain string is an invalid
    # escape (DeprecationWarning, a SyntaxError in future Python versions).
    m = re.search(r'\d+', s)
    return 24 if m is None else int(m.group(0))
def main(expt_set, chrom, checkpoint_code, dataset='val', model_list=None, directory=None):
    """Average per-track imputation predictions from several models for one chromosome.

    For every track of *dataset*, sums the .npz predictions of each model in
    *model_list* that produced one, divides by the number found, writes the
    averaged track, and records which models contributed.

    Returns the ensemble's relative output path.
    """
    # FIX: mutable default argument (was model_list=[]); [] is now built per call.
    if model_list is None:
        model_list = []
    if directory is None:
        directory = output_dir
    model_list = sorted(model_list, key=extract_chrom_num)
    print('Using the following {} models'.format(len(model_list)), model_list)
    # e.g. ['chromschr1', 'chromschr2'] -> 'c1c2'
    ensemble_code = 'c' + 'c'.join([str(extract_chrom_num(e)) for e in model_list])
    ensemble_imp_path = 'averaged_preds-{}/{}'.format(checkpoint_code, ensemble_code)
    imp_dir = os.path.join(directory, '{}_imputations/{}/'.format(dataset, expt_set))
    os.makedirs(imp_dir + ensemble_imp_path, exist_ok=True)
    pred_track_names = dataset_expts[dataset]
    print('averaging chromosome: {}'.format(chrom))
    for t in pred_track_names:
        print(t, flush=True)
        model_count = 0
        avg = np.zeros(BINNED_CHRSZ[chrom])
        expts_included = []
        for m in model_list:
            imp_path = imp_dir + m + '/{}.{}.{}.npz'.format(t, chrom, checkpoint_code)
            if os.path.exists(imp_path):
                vals = np.load(imp_path)['arr_0']
                assert vals.shape[0] == BINNED_CHRSZ[chrom], 'wrong shape: pred shape {} != chrom shape {}'.format(vals.shape[0],
                                                                                                                   BINNED_CHRSZ[chrom])
                avg += vals
                model_count += 1
                expts_included.append(m)
            else:
                print('No imputations {} {}'.format(m, t))
        # NOTE(review): fails with ZeroDivisionError when no model produced this
        # track — presumably intended to fail loudly; confirm.
        avg /= model_count
        all_zeros = not np.any(avg)
        assert not all_zeros, 'EMPTY ARRAY, NOT SAVING'
        nans = np.isnan(avg).any()
        assert not nans, 'NANS in ARRAY, NOT SAVING'
        np.savez_compressed(imp_dir + ensemble_imp_path + '/{}.{}.npz'.format(t, chrom), avg)
        # save list of models which had predictions and were therefore included
        with open(imp_dir + ensemble_imp_path + '/{}.{}_info.txt'.format(t, chrom), 'w') as f:
            for expt in expts_included:
                f.write(expt + '\n')
    print('Done', flush=True)
    return ensemble_imp_path
if __name__ == '__main__':
    # CLI: positional expt_set/chrom/checkpoint_code/dataset, plus the models
    # to average and an optional output directory override.
    parser = argparse.ArgumentParser()
    parser.add_argument('expt_set')
    parser.add_argument('chrom')
    parser.add_argument('checkpoint_code')  # e.g. 07.1
    parser.add_argument('dataset')
    parser.add_argument('-model_list', nargs='+', required=True, help='Model names e.g. chromschr21')
    parser.add_argument('--directory', default=None)
    args = parser.parse_args()
    main(args.expt_set, args.chrom, args.checkpoint_code, dataset=args.dataset,
         model_list=args.model_list, directory=args.directory)
|
3,667 | fcfec60a2302ee0c1385add053d4371040a2aff4 | # Generated by Django 2.1.2 on 2018-10-19 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widens (or re-declares) mascota.descripcion as a 200-char CharField."""

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mascota',
            name='descripcion',
            field=models.CharField(max_length=200),
        ),
    ]
|
3,668 | 355e3932c8bd9105e0c1ce9259e3b7416997523c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from operation import *
import math
class InsertWord(Operation):
    """Operation that inserts a vocabulary word as a child of a node in the hypothesis tree."""

    @classmethod
    def getValue(cls, t, h, w, b, arg=None):
        """Score an insertion: log p(word) scaled by this operation's learned weight."""
        return math.log(arg["prob"]) * w[cls]

    @classmethod
    def getKBest(cls, t, h, args, w, b, k):
        """Return the k best-scoring [value, arg] insertion candidates, best first.

        args carries "parenNodeIndexList" (candidate parent node indices; key
        spelling kept as-is — it is the callers' contract) and "vocabDic"
        (word -> probability).
        """
        valueList = []
        for index in args["parenNodeIndexList"]:
            for word, prob in args["vocabDic"].items():
                # FIX: 'arg' was never initialised (NameError at runtime) and
                # would have been aliased across candidates; build a fresh dict.
                arg = {"parentNodeIndex": index, "word": word, "prob": prob}
                # FIX: was self.getValue inside a @classmethod; use cls.
                valueList.append([cls.getValue(t, h, w, b, arg), arg])
        # FIX: 'sorted(ValueList)' case typo, and [:k-1] dropped one candidate.
        return sorted(valueList, reverse=True)[:k]

    @classmethod
    def transFormTree(cls, h, arg=None):
        """Apply the insertion in-place to the tree held in h[0]."""
        parentNodeIndex = arg["parentNodeIndex"]
        word = arg["word"]
        h[0].insert_node(word, parentNodeIndex)

    @classmethod
    def transFormT_H(cls, t_h, arg=None):
        """Extend t_h.hs with a new hypothesis: the latest tree plus the inserted word."""
        # FIX: key typo 'parentNodeInex' and method typo 'appens' -> append.
        parentNodeIndex = arg["parentNodeIndex"]
        word = arg["word"]
        t_h.hs.append([t_h.hs[-1][0].insert_node(word, parentNodeIndex), ])
|
3,669 | 4daf029c4bc9f0726080bd67f37b1e77c9697d1c | '''
Copyright (C) 2014 mdm
marco[dot]masciola[at]gmail
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
from ctypes import *
import os
class Map(object):
    """ctypes wrapper around the MAP (Mooring Analysis Program) shared library.

    Mirrors the FAST-registry Fortran derived types as ctypes Structures and
    exposes the C API: init/update/end lifecycle, residuals and Jacobians for
    the solver, line plotting arrays, linearization, and input-file parsing.
    NOTE(review): Python 2 code (print statements, xrange); the library path
    below is hard-coded relative to the source tree — confirm for deployment.
    """
    # lib = cdll.LoadLibrary("map_x64.dll")
    lib = cdll.LoadLibrary('../src/libmap-1.20.10.so')
    '''
    these are the fortran derived types created by the FAST registry.
    '''
    # Handles to the C-side derived-type instances; populated in __init__.
    f_type_init = None
    f_type_initout = None
    f_type_d = None
    f_type_u = None
    f_type_x = None
    f_type_y = None
    f_type_z = None
    f_type_p = None
    ierr = c_int(0)                      # last error code returned by the C API
    status = create_string_buffer(1024)  # last status/error message buffer
    summary_file = c_char_p
    val = c_double

    class ModelData_Type(Structure):
        _fields_ = []
    '''
    void * object ;
    double gravity ;
    double seaDensity ;
    double depth ;
    char fileName[255] ;
    char summaryFileName[255] ;
    char libraryInputLine[255] ;
    char nodeInputLine[255] ;
    char elementInputLine[255] ;
    char optionInputLine[255] ;
    '''
    class InitializationData_Type(Structure):
        _fields_= [("object",c_void_p),
                   ("gravity",c_double),
                   ("seaDensity",c_double),
                   ("depth",c_double),
                   ("fileName",c_char*255),
                   ("summaryFileName",c_char*255),
                   ("libraryInputLine",c_char*255),
                   ("nodeInputLine",c_char*255),
                   ("elementInputLine",c_char*255),
                   ("optionInputLine",c_char*255)]
    '''
    void * object ;
    char progName[99] ;
    char version[99] ;
    char compilingData[24] ;
    char * writeOutputHdr ; int writeOutputHdr_Len ;
    char * writeOutputUnt ; int writeOutputUnt_Len ;
    '''
    class InitializationOutputData_Type(Structure):
        _fields_ = [("object",c_void_p),
                    ("progName",c_char*99),
                    ("version",c_char*99),
                    ("CompilingData",c_char*99),
                    ("writeOutputHdr",c_char_p),
                    ("writeOutputHdr_Len",c_int),
                    ("writeOutputUnt",c_char_p),
                    ("writeOutputUnt_Len",c_int)]

    class InputData_Type(Structure):
        _fields_ = []

    class OutputData_Type(Structure):
        _fields_ = []
    '''
    void * object ;
    double g ;
    double depth ;
    double rhoSea ;
    '''
    class ParameterData_Type(Structure):
        _fields_ = [("object",c_void_p),
                    ("g",c_double),
                    ("depth",c_double),
                    ("rhoSea", c_double)]

    class ConstraintData_Type(Structure):
        _fields_ = []

    class ContinuousData_Type(Structure):
        _fields_ = []
    '''
    fields for the fortran types
    MAP_EXTERNCALL MAP_InitInputType_t* map_create_init_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_InitOutputType_t* map_create_initout_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_InputType_t* map_create_input_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_ParameterType_t* map_create_parameter_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_ConstraintStateType_t* map_create_constraint_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_OtherStateType_t* map_create_other_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_OutputType_t* map_create_output_type( char* msg, MAP_ERROR_CODE* status );
    MAP_EXTERNCALL MAP_ContinuousStateType_t* map_create_continuous_type( char* msg, MAP_ERROR_CODE* status );
    '''
    # Pointer aliases for the derived types above.
    MapData_Type = POINTER(ModelData_Type)
    MapInit_Type = POINTER(InitializationData_Type)
    MapInitOut_Type = POINTER(InitializationOutputData_Type)
    MapInput_Type = POINTER(InputData_Type)
    MapOutput_Type = POINTER(OutputData_Type)
    MapParameter_Type = POINTER(ParameterData_Type)
    MapConstraint_Type = POINTER(ConstraintData_Type)
    MapContinuous_Type = POINTER(ContinuousData_Type)

    # read file stuff
    # NOTE(review): ctypes uses the plural attribute 'argtypes'; the singular
    # '.argtype=' assignments below are silently ignored by ctypes, so these
    # calls run without argument conversion/checking — confirm and fix upstream.
    lib.set_init_to_null.argtype=[MapInit_Type, c_char_p, POINTER(c_int) ]
    lib.map_set_summary_file_name.argtype=[MapInit_Type, c_char_p, POINTER(c_int) ]
    lib.map_add_cable_library_input_text.argtype=[MapInit_Type]
    lib.map_add_node_input_text.argtype=[MapInit_Type]
    lib.map_add_line_input_text.argtype=[MapInit_Type]
    lib.map_add_options_input_text.argtype=[MapInit_Type]
    lib.map_create_init_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_initout_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_input_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_parameter_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_constraint_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_other_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_output_type.argtype = [ c_char_p, POINTER(c_int) ]
    lib.map_create_continuous_type.argtype = [ c_char_p, POINTER(c_int) ]
    # NOTE(review): the next line overwrites the previous 'argtype' for the
    # same function — looks like a copy/paste slip; confirm intent.
    lib.map_create_continuous_type.argtype = [ MapData_Type ]
    lib.map_create_init_type.restype = MapInit_Type
    lib.map_create_initout_type.restype = MapInitOut_Type
    lib.map_create_input_type.restype = MapInput_Type
    lib.map_create_parameter_type.restype = MapParameter_Type
    lib.map_create_constraint_type.restype = MapConstraint_Type
    lib.map_create_other_type.restype = MapData_Type
    lib.map_create_output_type.restype = MapOutput_Type
    lib.map_create_continuous_type.restype = MapContinuous_Type
    lib.map_set_sea_depth.argtypes = [ MapParameter_Type, c_double ]
    lib.map_set_gravity.argtypes = [ MapParameter_Type, c_double ]
    lib.map_set_sea_density.argtypes = [ MapParameter_Type, c_double ]
    lib.map_size_lines.restype = c_int

    # numeric routines
    lib.map_residual_function_length.restype = c_double
    lib.map_residual_function_height.restype = c_double
    lib.map_jacobian_dxdh.restype = c_double
    lib.map_jacobian_dxdv.restype = c_double
    lib.map_jacobian_dzdh.restype = c_double
    lib.map_jacobian_dzdv.restype = c_double
    lib.map_residual_function_length.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_residual_function_height.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_jacobian_dxdh.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_jacobian_dxdv.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_jacobian_dzdh.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_jacobian_dzdv.argtypes = [ MapData_Type, c_int, c_char_p, POINTER(c_int) ]
    lib.map_get_fairlead_force_2d.argtypes = [POINTER(c_double), POINTER(c_double), MapData_Type, c_int, c_char_p, POINTER(c_int)]

    # plot routines
    lib.map_plot_x_array.argtypes = [ MapData_Type, c_int, c_int, c_char_p, POINTER(c_int) ]
    lib.map_plot_x_array.restype = POINTER(c_double)
    lib.map_plot_y_array.argtypes = [ MapData_Type, c_int, c_int, c_char_p, POINTER(c_int) ]
    lib.map_plot_y_array.restype = POINTER(c_double)
    lib.map_plot_z_array.argtypes = [ MapData_Type, c_int, c_int, c_char_p, POINTER(c_int) ]
    lib.map_plot_z_array.restype = POINTER(c_double)
    lib.map_plot_array_free.argtypes = [ POINTER(c_double) ]

    # modifyers
    lib.map_offset_vessel.argtypes = [MapData_Type, MapInput_Type, c_double, c_double, c_double, c_double, c_double, c_double, c_char_p, POINTER(c_int)]
    lib.map_linearize_matrix.argtypes = [MapInput_Type, MapParameter_Type, MapData_Type, MapOutput_Type, MapConstraint_Type, c_double, POINTER(c_int), c_char_p]
    lib.map_linearize_matrix.restype = POINTER(POINTER(c_double))
    lib.map_free_linearize_matrix.argtypes = [POINTER(POINTER(c_double))]
    lib.map_init.argtypes = [ MapInit_Type,
                              MapInput_Type,
                              MapParameter_Type,
                              MapContinuous_Type,
                              c_void_p,
                              MapConstraint_Type,
                              MapData_Type,
                              MapOutput_Type,
                              MapInitOut_Type,
                              POINTER(c_int),
                              c_char_p]
    lib.map_update_states.argtypes = [ c_double,
                                       c_int,
                                       MapInput_Type,
                                       MapParameter_Type,
                                       MapContinuous_Type,
                                       c_void_p,
                                       MapConstraint_Type,
                                       MapData_Type,
                                       POINTER(c_int),
                                       c_char_p]
    lib.map_end.argtypes = [ MapInput_Type,
                             MapParameter_Type,
                             MapContinuous_Type,
                             c_void_p,
                             MapConstraint_Type,
                             MapData_Type,
                             MapOutput_Type,
                             POINTER(c_int),
                             c_char_p]
    lib.map_initialize_msqs_base.argtypes = [MapInput_Type,
                                             MapParameter_Type,
                                             MapContinuous_Type,
                                             MapConstraint_Type,
                                             MapData_Type,
                                             MapOutput_Type,
                                             MapInitOut_Type]
    lib.map_size_lines.argtypes = [ MapData_Type,
                                    POINTER(c_int),
                                    c_char_p]

    def __init__( self ) :
        # Allocate every C-side derived-type instance, zero the init struct,
        # and bind them together on the C side.
        self.f_type_d = self.CreateDataState()
        self.f_type_u = self.CreateInputState( )
        self.f_type_x = self.CreateContinuousState( )
        self.f_type_p = self.CreateParameterState( )
        self.f_type_y = self.CreateOutputState( )
        self.f_type_z = self.CreateConstraintState( )
        self.f_type_init = self.CreateInitState( )
        self.f_type_initout = self.CreateInitoutState( )
        Map.lib.set_init_to_null(self.f_type_init, self.status, pointer(self.ierr) )
        Map.lib.map_initialize_msqs_base(self.f_type_u, self.f_type_p, self.f_type_x, self.f_type_z, self.f_type_d, self.f_type_y, self.f_type_initout)
        self.summary_file("outlist.map.sum")

    def init( self ):
        # Run MAP's initialization after the input file has been read.
        Map.lib.map_init( self.f_type_init, self.f_type_u, self.f_type_p, self.f_type_x, None, self.f_type_z, self.f_type_d, self.f_type_y, self.f_type_initout, pointer(self.ierr), self.status )
        if self.ierr.value != 0 :
            print self.status.value

    def size_lines(self):
        # Number of mooring lines in the model.
        size = Map.lib.map_size_lines(self.f_type_d, pointer(self.ierr), self.status )
        if self.ierr.value != 0 :
            print self.status.value
        return size

    def update_states(self, t, interval):
        # Advance/solve the quasi-static states at time t.
        Map.lib.map_update_states(t, interval, self.f_type_u, self.f_type_p, self.f_type_x, None, self.f_type_z, self.f_type_d, pointer(self.ierr), self.status )
        if self.ierr.value != 0 :
            print self.status.value
    """
    Calls function in main.c and fordatamanager.c to delete insteads of c structs. First, the malloc'ed arrays need to vanish
    gracefully; we accomplish this by calling MAP_End(...) routine. Then, the structs themself are deleted. Order is important.
    MAP_EXTERNCALL int MAP_End ( InputData *u, ParameterData *p, ContinuousData *x, ConstraintData *z, ModelData *data, OutputData *y, char *map_msg, MAP_ERROR_CODE *ierr )
    MAP_EXTERNCALL void MAP_Input_Delete( InputData* u )
    MAP_EXTERNCALL void MAP_Param_Delete( ParameterData* p )
    MAP_EXTERNCALL void MAP_ContState_Delete( InputData* x )
    MAP_EXTERNCALL void MAP_ConstrState_Delete( InputData* z )
    MAP_EXTERNCALL void MAP_Output_Delete( InputData* y )
    MAP_EXTERNCALL void MAP_OtherState_Delete( ModelData* data )
    """
    def end(self):
        Map.lib.map_end(self.f_type_u, self.f_type_p, self.f_type_x, None, self.f_type_z, self.f_type_d, self.f_type_y, pointer(self.ierr), self.status)
    """
    Set a name for the MAP summary file. Does not need to be called. If not called, the default name is 'outlist.sum.map'
    """
    def summary_file(self, echo_file):
        self.f_type_init.contents.summaryFileName = echo_file
        Map.lib.map_set_summary_file_name(self.f_type_init, self.status, pointer(self.ierr) )
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL InitializationData* MAP_InitInput_Create( char* map_msg, MAP_ERROR_CODE* ierr )
    """
    def CreateInitState( self ) :
        obj = Map.lib.map_create_init_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL void MAP_InitOutput_Delete( InputData* io )
    """
    def CreateInitoutState( self ) :
        obj = Map.lib.map_create_initout_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL ModelData *MAP_OtherState_Create( char *map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateDataState( self ) :
        obj = Map.lib.map_create_other_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL InputData* MAP_Input_Create( char* map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateInputState( self ) :
        obj = Map.lib.map_create_input_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL ContinuousData* MAP_ContState_Create( char* map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateContinuousState( self ) :
        obj = Map.lib.map_create_continuous_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL OutputData *MAP_Output_Create( char *map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateOutputState( self ) :
        obj = Map.lib.map_create_output_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL ConstraintData* MAP_ConstrState_Create( char* map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateConstraintState( self ) :
        obj = Map.lib.map_create_constraint_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj
    """
    Calls function in fortdatamanager.c to create instance of c structs
    MAP_EXTERNCALL ParameterData* MAP_Param_Create( char* map_msg, MAP_ERROR_CODE *ierr )
    """
    def CreateParameterState( self ) :
        obj = Map.lib.map_create_parameter_type( self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
        return obj

    # Environment setters; must be called before init().
    def map_set_sea_depth( self, depth ):
        Map.lib.map_set_sea_depth( self.f_type_p, depth )

    def map_set_gravity( self, g ):
        Map.lib.map_set_gravity( self.f_type_p, g )

    def map_set_sea_density( self, rho ):
        Map.lib.map_set_sea_density( self.f_type_p, rho )

    def plot_x( self, lineNum, length ) :
        # Copy 'length' x-coordinates of line 'lineNum' into a Python list,
        # freeing the C-side array in every exit path.
        arr = [None]*length
        array = POINTER(c_double)
        array = Map.lib.map_plot_x_array( self.f_type_d, lineNum, length, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            Map.lib.map_plot_array_free( array )
            sys.exit('MAP terminated premature.')
        arr = [array[j] for j in range(length)]
        Map.lib.map_plot_array_free( array )
        return arr

    def plot_y( self, lineNum, length ) :
        # Same as plot_x for the y-coordinates.
        arr = [None]*length
        array = POINTER(c_double)
        array = Map.lib.map_plot_y_array( self.f_type_d, lineNum, length, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            Map.lib.map_plot_array_free( array )
            sys.exit('MAP terminated premature.')
        arr = [array[j] for j in range(length)]
        Map.lib.map_plot_array_free( array )
        return arr

    def plot_z( self, lineNum, length ) :
        # Same as plot_x for the z-coordinates.
        arr = [None]*length
        array = POINTER(c_double)
        array = Map.lib.map_plot_z_array( self.f_type_d, lineNum, length, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            Map.lib.map_plot_array_free( array )
            sys.exit('MAP terminated premature.')
        arr = [array[j] for j in range(length)]
        Map.lib.map_plot_array_free( array )
        return arr

    def get_fairlead_force_2d(self, index):
        """Gets the horizontal and vertical fairlead force in a 2D plane along the
        straight-line line. Must ensure update_states() is called before accessing
        this function. The function will not solve the forces for a new vessel position
        if it updated. , otherwise the fairlead forces are not updated with the new
        vessel position. Called C function:
        MAP_EXTERNCALL void map_get_fairlead_force_2d(double* H, double* V, MAP_OtherStateType_t* other_type, int index, char* map_msg, MAP_ERROR_CODE* ierr);
        :param index: The line number the fairlead forces are being requested for. Zero indexed
        :returns: horizontal and vertical fairlead force [N]
        >>> H,V = print get_fairlead_force_2d(1)
        """
        H_ref = c_double(-999.9)
        V_ref = c_double(-999.9)
        Map.lib.map_get_fairlead_force_2d( pointer(H_ref), pointer(V_ref),self.f_type_d, index, self.status, pointer(self.ierr))
        return H_ref.value, V_ref.value

    def get_fairlead_force_3d(self, index):
        """Gets the horizontal and vertical fairlead force in a 3D frame along relative
        referene global axis. Must ensure update_states() is called before accessing
        this function. The function will not solve the forces for a new vessel position
        if it updated. , otherwise the fairlead forces are not updated with the new
        vessel position. Called C function:
        MAP_EXTERNCALL void map_get_fairlead_force_3d(double* fx, double* fy, double* fz, MAP_OtherStateType_t* other_type, int index, char* map_msg, MAP_ERROR_CODE* ierr);
        :param index: The line number the fairlead forces are being requested for. Zero indexed
        :returns: horizontal and vertical fairlead force [N]
        >>> fx,fy,fz = get_fairlead_force_3d(1)
        """
        fx = c_double(-999.9)
        fy = c_double(-999.9)
        fz = c_double(-999.9)
        Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))
        return fx.value, fy.value, fz.value

    # Solver residuals and Jacobian entries for line i; each exits the
    # process on a C-side error after cleaning up.
    def funcl( self, i ) :
        self.val = Map.lib.map_residual_function_length(self.f_type_d, i, self.status, pointer(self.ierr))
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def funch( self, i ) :
        self.val = Map.lib.map_residual_function_height(self.f_type_d, i, self.status, pointer(self.ierr))
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def dxdh( self, i ) :
        self.val = Map.lib.map_jacobian_dxdh( self.f_type_d, i, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def dxdv( self, i ) :
        self.val = Map.lib.map_jacobian_dxdv( self.f_type_d, i, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def dzdh( self, i ) :
        self.val = Map.lib.map_jacobian_dzdh( self.f_type_d, i, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def dzdv( self, i ) :
        self.val = Map.lib.map_jacobian_dzdv( self.f_type_d, i, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        return self.val

    def linear( self, epsilon ) :
        # 6x6 linearized stiffness matrix by finite difference with step epsilon.
        array = POINTER(POINTER(c_double))
        array = Map.lib.map_linearize_matrix( self.f_type_u, self.f_type_p, self.f_type_d, self.f_type_y, self.f_type_z, epsilon, pointer(self.ierr), self.status)
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')
        arr = [[array[j][i] for i in range(6)] for j in range(6)]
        Map.lib.map_free_linearize_matrix(array)
        return arr

    def displace_vessel(self,x,y,z,phi,the,psi) :
        # Offset the vessel by surge/sway/heave and roll/pitch/yaw.
        Map.lib.map_offset_vessel(self.f_type_d, self.f_type_u, x,y,z,phi,the,psi, self.status, pointer(self.ierr) )
        if self.ierr.value != 0 :
            print self.status.value
            self.end( )
            sys.exit('MAP terminated premature.')

    def read_file(self, file_name):
        # Stream the MAP input file section by section, forwarding each body
        # line to the matching C-side accumulator. Sections are introduced by
        # a header line and separated by three lines of decoration.
        f = open(file_name, 'r')
        charptr = POINTER(c_char)
        option_breaks = ("LINE DICTIONARY", "NODE PROPERTIES", "LINE PROPERTIES", "SOLVER OPTIONS")
        for line in f:
            line = line
            if "LINE DICTIONARY" in line.upper():
                for _ in xrange(3): line = next(f)
                while not any(opt in line for opt in option_breaks):
                    self.f_type_init.contents.libraryInputLine = line+'\0'
                    Map.lib.map_add_cable_library_input_text(self.f_type_init)
                    line = next(f)
            if "NODE PROPERTIES" in line.upper():
                for _ in xrange(3): line = next(f)#.rstrip('\n')
                while not any(opt in line for opt in option_breaks):
                    self.f_type_init.contents.nodeInputLine = line+'\0'
                    Map.lib.map_add_node_input_text(self.f_type_init)
                    line = next(f)
            if "LINE PROPERTIES" in line.upper():
                for _ in xrange(3): line = next(f)
                while not any(opt in line for opt in option_breaks):
                    self.f_type_init.contents.elementInputLine = line+'\0'
                    Map.lib.map_add_line_input_text(self.f_type_init)
                    line = next(f)
            if "SOLVER OPTIONS" in line.upper():
                for _ in xrange(3): line = next(f)
                while not any(opt in line for opt in option_breaks):
                    self.f_type_init.contents.optionInputLine = line+'\0'
                    Map.lib.map_add_options_input_text(self.f_type_init)
                    # default sentinel so EOF ends the loop instead of raising
                    line = next(f,"SOLVER OPTIONS")

    # def read_file( self, fileName ):
    #     f = open(fileName, 'r')
    #     charptr = POINTER(c_char)
    #     line_offset = []
    #     temp_str = []
    #     offset = 0
    #
    #     for line in f:
    #         line_offset.append(offset)
    #         offset += len(line)
    #     f.seek(0)
    #
    #     i = 0
    #     for line in f:
    #         words = line.split()
    #         if words[0] == "LineType":
    #             next(f)
    #             LineType_ref = i
    #         elif words[0] == "Node":
    #             next(f)
    #             Node_ref = i
    #         elif words[0] == "Line":
    #             next(f)
    #             Line_ref = i
    #         elif words[0] == "Option":
    #             next(f)
    #             Option_ref = i
    #         i+=1
    #
    #     f.seek(line_offset[LineType_ref+2])
    #     for line in f:
    #         line = line.rstrip('\n')
    #         if line[0] == "-":
    #             break
    #         else:
    #             self.f_type_init.contents.libraryInputLine = line+'\0'
    #             Map.lib.map_add_cable_library_input_text(self.f_type_init)
    #
    #     f.seek(line_offset[Node_ref+3])
    #     for line in f:
    #         if line[0] == "-":
    #             break
    #         else:
    #             self.f_type_init.contents.nodeInputLine = line+'\0'
    #             Map.lib.map_add_node_input_text(self.f_type_init)
    #
    #     f.seek(line_offset[Line_ref+4])
    #     for line in f:
    #         if line[0] == "-":
    #             break
    #         else:
    #             self.f_type_init.contents.elementInputLine = line+'\0'
    #             Map.lib.map_add_line_input_text(self.f_type_init)
    #
    #     f.seek(line_offset[Option_ref+5])
    #     for line in f:
    #         if line[0]=="-":
    #             break
    #         elif line[0]=="!":
    #             None
    #         else:
    #             self.f_type_init.contents.optionInputLine = line+'\0'
    #             Map.lib.map_add_options_input_text(self.f_type_init)
|
3,670 | 19d5e9db142237d1cb2276ccaf083ca4a96109fc | from django.conf.urls import url
from basket import views
# Basket routes: view the basket, add a product by pk, remove a basket item by pk.
urlpatterns = [
    url(r'^$', views.view_basket, name='basket'),
    url(r'^add/(?P<product_pk>\d+)$', views.add_to_basket, name='add_to_basket'),
    url(r'^remove/(?P<basketitem_pk>\d+)$', views.remove_from_basket, name='remove_from_basket'),
]
|
3,671 | 71ff8e8a62a3b2731071ed7a039b51c150ebaca4 | import os ,sys
from scrapy.cmdline import execute
# Make the project directory importable, then run the 'laptop' spider in-process.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute('scrapy crawl laptop'.split())
|
3,672 | 4ecf976a7d655efb5af427083ec1943cae6fe56d | class Restaurant():
"""A restaurant model."""
def __init__(self, restaurant_name, cuisine_type):
"""Initialize name and type."""
self.name = restaurant_name
self.type = cuisine_type
def describe_restaurant(self):
"""Prints restaurant information."""
print("The restaurant's name is " + self.name.title())
print("The cuisine type is " + self.type.title())
def open_restaurant(self):
"""Message indicating the restaurant is open."""
print("The restaurant is now open!")
# my_restaurant = Restaurant('Juan Pho Yu', 'pho')
# print(my_restaurant.name)
# print(my_restaurant.type)
# my_restaurant.describe_restaurant()
# my_restaurant.open_restaurant() |
3,673 | d4cdc4f1995eab7f01c970b43cb0a3c5ed4a2711 | from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages import api
from projects.golem_gui.pages import test_builder_code
description = 'Verify the user can edit test code and save it'
tags = ['smoke']
def setup(data):
    """Log into Golem as admin, select the fixture project, and create a test
    opened directly in its code view; the test name is kept on data.test."""
    common.access_golem(data.env.url, data.env.admin)
    api.project.using_project('test_builder_code')
    data.test = api.test.create_access_test_code(data.project)
def test(data):
    """Edit the test's code, save it, and verify the edit survives a page refresh."""
    test_line = "description = 'desc'"
    test_builder_code.set_value(test_line)
    actions.click(test_builder_code.save_button)
    common.assert_toast_message_is_displayed('Test '+data.test+' saved')
    # Refreshing proves the change was persisted server-side, not just held
    # in the in-browser editor.
    actions.refresh_page()
    test_builder_code.assert_value(test_line)
|
3,674 | 777c08876a2de803fc95de937d9e921044545ef8 | from bs4 import BeautifulSoup
import requests
res = requests.get('http://quotes.toscrape.com/')
# res.content is the raw byte payload; res.text is the decoded document.
soup = BeautifulSoup(res.text , 'lxml')  # parse with the lxml backend
quote = soup.find_all('div',{'class' : 'quote'})
# Write "quote\nauthor\n\n" records to Quotes.txt while echoing each to stdout.
with open('Quotes.txt','w') as ff:
    for q in quote:
        msg = q.find('span',{'class' : 'text'})
        print(msg.text)
        ff.write(msg.text)
        author = q.find('small',{'class' : 'author'})
        print(author.text)
        ff.write("\n")
        ff.write(author.text)
        print()
        ff.write("\n\n")
3,675 | 851162e6c40a9f4f82a983a84fd0b4d6a6a57412 |
class Type(object):
    """Type of values, backed by an underlying Java type object."""

    def __init__(self, jtype):
        # All rendering delegates to the wrapped Java type.
        self.jtype = jtype

    def __repr__(self):
        """Debug form: the Java type's own toString()."""
        return self.jtype.toString()

    def __str__(self):
        """Human-readable form: pretty-printed with both flags off."""
        return self.jtype.toPrettyString(False, False)
3,676 | f1475d651c3b52611657a9767ad62796b55d8711 | # obtain the dataset
# obtain the dataset
import pandas as pd

titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
print(titanic.head())

# preprocessing: drop the row id, the passenger name, and the label itself
x = titanic.drop(['row.names', 'name', 'survived'], axis=1)
y = titanic['survived']
x['age'].fillna(x['age'].mean(), inplace=True)  # impute missing ages with the mean
x.fillna('UNKNOWN', inplace=True)               # placeholder for missing categoricals

# split -- fix: sklearn.cross_validation was deprecated in 0.18 and removed
# in 0.20; model_selection is the supported module.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=33)

# feature extraction: one-hot encode per-row dicts
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
# fix: the valid to_dict orient is 'records' (plural); 'record' raises a
# ValueError in current pandas versions.
x_train = vec.fit_transform(x_train.to_dict(orient='records'))
x_test = vec.transform(x_test.to_dict(orient='records'))

# decision tree baseline on all features
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier(criterion='entropy')
dtc.fit(x_train, y_train)
print(dtc.score(x_test, y_test))

# keep only the top 20% chi2-ranked features and retrain for comparison
from sklearn import feature_selection
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
x_train_fs = fs.fit_transform(x_train, y_train)
dtc.fit(x_train_fs, y_train)
x_test_fs = fs.transform(x_test)
print(dtc.score(x_test_fs, y_test))
3,677 | 19bb3cd0c7862f39a78479d9a9703ebef198fc73 | import math
from Config import defaults as df
from Utils.controls import sigmoid_decay
def f1(phi, phi_o, d):
    """sinusoidally growing function between (phi_o-d) to phi_o

    Complement of the project's sigmoid_decay; presumably rises from 0
    to 1 over that interval -- verify against Utils.controls.sigmoid_decay.
    """
    return 1 - sigmoid_decay(phi, phi_o, d)
def f2(phi, sigma):
    """Unnormalized Gaussian: exp(-phi^2 / sigma^2)."""
    exponent = -(phi ** 2) / (sigma ** 2)
    return math.exp(exponent)
def f3(phi, a):
    """Sharply peaked transfer function: a^2 / (phi + a)^2."""
    denominator = (phi + a) ** 2
    return a ** 2 / denominator
def optofitness(op_array, n_obj=1):
    """apply respective transfer functions to an array of order parameters
    **order of elements matters

    op_array layout (by index), inferred from usage below -- confirm
    against the producer: 0 wall distance, 1 speed, 2 correlation,
    3 collisions, 4 disconnections, 5 cluster count, 6 sim time.
    Returns negated fitness value(s) rounded to 5 decimals (negated so
    that minimizing optimizers maximize the underlying fitness).
    """
    d = 5  # rounding precision (decimal places)
    f_speed = f1(op_array[1], df.v_flock, df.v_tol)
    f_coll = f3(op_array[3], df.a_tol)
    f_disc = f3(op_array[4], df.num_agents / 5)
    f_wall = f2(op_array[0], df.r_tol)
    f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)
    # Negative correlation contributes zero rather than a penalty.
    if op_array[2] > 0:
        f_corr = op_array[2]
    else:
        f_corr = 0
    # Time fitness currently disabled (constant 1); old formula kept for reference.
    time_fit = 1  # (1-sigmoid_decay(op_array[6], df.max_sim_time-df.wait_time, 200))
    if n_obj == 2:
        # F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster
        # F2 = -time_fit * f_wall * f_coll
        # NOTE(review): the live code groups the factors differently from
        # the commented lines above (F1/F2 contents swapped around) --
        # confirm which grouping is intended.
        F2 = -time_fit *f_coll * f_corr * f_disc * f_cluster
        F1 = -time_fit * f_wall * f_speed
        return round(F1, d), round(F2, d)
    elif n_obj == 3:
        F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster
        F2 = -time_fit * f_wall
        F3 = -time_fit * f_coll
        return round(F1, d), round(F2, d), round(F3, d)
    elif n_obj == 'all':
        # Raw (un-negated) individual transfer values for inspection.
        return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(f_coll, d), round(f_disc, d), round(f_cluster, d)
    # Default single objective: product of every transfer function.
    F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster
    return round(F1, d)
|
3,678 | e3a984294cad5830358df50fa00111017cbe226d | from django.urls import path
from .views import MainView
app_name = "bio"
# app_name enables namespaced reverse URL lookups (e.g. 'bio:...') later on.
urlpatterns = [
    # Single endpoint handled by MainView; presumably serves matrix data,
    # per the route name -- confirm against the view implementation.
    path('get_mtx_data', MainView.as_view()),
]
|
3,679 | ebb4cf1ec2baa7bd0d29e3ae88b16e65cf76a88a | from flask import Flask, render_template, url_for, request, redirect, session, flash
import os, json
from usuarios import crearUsuario, comprobarUsuario
from busqueda import filtrado
from compra import procesarCompra, Dinero
app = Flask(__name__)
catalogo_data = json.loads(open(os.path.join(app.root_path,'json/catalogo.json')).read())
peliculas = catalogo_data['peliculas']
# Ruta de la pagina index de la aplicacion
@app.route("/", methods = ['GET','POST'])
def index():
    """Catalogue page; a POST applies the search/filter form."""
    if(request.method == 'POST'):
        # Narrow the catalogue to films matching the submitted filters.
        peliculasFiltradas = filtrado(peliculas, request)
        return render_template('index.html', peliculas = peliculasFiltradas)
    else:
        return render_template('index.html', peliculas = peliculas)
# Ruta de la informacion de una pelicula
@app.route("/info/<pelicula>", methods = ['GET', 'POST'])
def informacionPelicula(pelicula):
    """Detail page for one film, looked up by (URL-encoded) title."""
    peliculas = catalogo_data['peliculas']
    if(request.method == 'POST'):
        # 307 keeps the POST verb so index() re-runs the filter form.
        return redirect(url_for('index'), code = 307)
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            return render_template('informacion-pelicula.html', pelicula = aux)
    # Unknown title: fall back to the catalogue.
    return redirect(url_for('index'))
# Ruta de la pagina html de login
@app.route("/login", methods=['GET','POST'])
def login():
    """Log a user in; restores the cart saved at that user's last logout."""
    if ('user' in session):
        # Already authenticated: nothing to do here.
        return redirect(url_for('index'))
    else:
        if(request.method == 'POST'):
            try:
                # Raises when the credentials are wrong (handled below).
                comprobarUsuario(request.form['nombre-usuario'], request.form['contrasenia'])
                session['user'] = request.form['nombre-usuario']
                if 'last_user' in session:
                    print(session['last_user'])
                    # Restore the stashed cart only for the same user who left it.
                    if session['last_user'] == session['user']:
                        if (('last_carrito' in session) and ('last_precio' in session)):
                            session['carrito'] = session['last_carrito']
                            session['precio'] = session['last_precio']
                return redirect(url_for('index'))
            except Exception as error:
                # Show the validation message on the login form.
                return render_template('login.html', error = error)
        else:
            return render_template('login.html')
# Ruta de la pagina html de registro
@app.route("/register", methods = ['GET', 'POST'])
def register():
    """Create a new account and log the user straight in."""
    if ('user' in session):
        return redirect(url_for('index'))
    else:
        if(request.method == 'POST'):
            try:
                # crearUsuario raises on invalid or duplicate data (handled below).
                crearUsuario(request.form['cuestionario_nombre'], request.form['cuestionario_nombreCompleto'], request.form['cuestionario_contrasenia'], request.form['cuestionario_correo'], request.form['cuestionario_cuenta'])
                session['user'] = request.form['cuestionario_nombre']
                return redirect(url_for('index'))
            except Exception as error:
                # Show the validation message on the registration form.
                return render_template('register.html', error = error)
        else:
            return render_template('register.html')
#Ruta para cerrar sesion
# Route to log out
@app.route('/logout')
def logout():
    """Log out, stashing the cart so the same user can recover it on login."""
    if('user' in session):
        if(('carrito' in session) and ('precio' in session)):
            # Remember the cart under last_* keys; login() restores them.
            session['last_user'] = session['user']
            session['last_carrito'] = session['carrito']
            session['last_precio'] = session['precio']
        session.pop('user', None)
        session.pop('carrito', None)
        session.pop('precio', None)
        return redirect(url_for('index'))
    else:
        return redirect(url_for('index'))
#Ruta para ver el carrito
# Route to view the shopping cart
@app.route('/carrito', methods = ['GET', 'POST'])
def carrito():
    """Shopping-cart page; a POST re-runs the catalogue filter instead."""
    if(request.method == 'POST'):
        # 307 preserves the POST body for index()'s filter handling.
        return redirect(url_for('index'), code = 307)
    else:
        return render_template('carrito.html', peliculas = peliculas)
#Metodo get para aniadir una pelicula al carrito de compra
# GET endpoint that adds one copy of a film to the shopping cart
@app.route("/info/<pelicula>/aniadir-carrito", methods = ['GET'])
def aniadir(pelicula):
    """Add one copy of the film to the session cart and update the total."""
    if 'precio' not in session:
        session['precio'] = 0
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            if 'carrito' in session:
                # NOTE(review): mutating the list stored in the session in
                # place may not mark the session as modified in Flask --
                # verify persistence or set session.modified = True.
                for deter in session['carrito']:
                    if(deter['nombre'] == pelicula.replace("%20", " ")):
                        # Film already in the cart: bump its quantity.
                        deter['cantidad'] = deter['cantidad'] + 1
                        session['precio'] = session['precio'] + aux['precio']
                        return redirect(url_for('index'))
                # First copy of this film in an existing cart.
                add = {"nombre" : pelicula.replace("%20", " "), "cantidad": 1}
                session['carrito'].append(add)
                session['precio'] = session['precio'] + aux['precio']
                return redirect(url_for('index'))
            else:
                # No cart yet: create it with this film as the first entry.
                session['carrito'] = []
                add = {"nombre" : pelicula.replace("%20", " "), "cantidad": 1}
                session['carrito'].append(add)
                session['precio'] = session['precio'] + aux['precio']
                return redirect(url_for('index'))
    # Unknown title: nothing added.
    return redirect(url_for('index'))
#Metodo get para eliminar una pelicula del carrito de compra
# GET endpoint that removes one copy of a film from the shopping cart
@app.route("/info/<pelicula>/del-carrito", methods = ['GET'])
def delete(pelicula):
    """Remove one copy of the film from the cart and adjust the total."""
    if 'precio' not in session:
        # Nothing was ever added; just show the cart.
        return redirect(url_for('carrito'))
    for aux in peliculas:
        if aux['titulo'] == pelicula.replace("%20", " "):
            if 'carrito' in session:
                # Remove the film from the cart.
                # NOTE(review): deleting by index while iterating the same
                # list is fragile, and the original placement of the
                # counter update cannot be confirmed from this copy --
                # verify that a match beyond position 0 deletes the
                # correct entry.
                i = 0
                for deter in session['carrito']:
                    if(deter['nombre'] == pelicula.replace("%20", " ")):
                        if(deter['cantidad'] == 1):
                            del(session['carrito'][i])
                        else:
                            deter['cantidad'] = deter['cantidad'] - 1
                    i = i + 1
                # Update the cart total, clamping at zero.
                if (len(session['carrito']) == 0):
                    session['precio'] = 0
                else:
                    session['precio'] = session['precio'] - aux['precio']
                    if(session['precio'] < 0):
                        session['precio'] = 0
            else:
                return redirect(url_for('carrito'))
    return redirect(url_for('carrito'))
#Pagina de cofirmacion del carrito de compra
# Checkout confirmation page for the shopping cart
@app.route("/confirmacion", methods = ['GET', 'POST'])
def confirmacion():
    """Checkout confirmation; requires a login and a non-empty cart."""
    if('user' in session):
        if(('carrito' in session) and (len(session['carrito']) > 0)):
            return render_template('confirmacion.html', peliculas = peliculas)
        else:
            return redirect(url_for('index'))
    else:
        return redirect(url_for('index'))
@app.route("/confirmacion/push")
def push():
    """Finalize the purchase, then empty the cart."""
    try:
        procesarCompra(session, peliculas)
        session.pop('carrito', None)
        session.pop('precio', None)
        return redirect(url_for('index'))
    except Dinero as error:
        # Dinero is raised when the user's balance is insufficient --
        # TODO confirm against compra.procesarCompra.
        flash("Parece que no tienes dinero suficiente")
        return redirect(url_for('index'))
    except Exception as error:
        # NOTE(review): flash() receives the exception object itself;
        # str(error) was probably intended -- verify template rendering.
        flash(error)
        return redirect(url_for('index', error = error))
# Ruta del historial de compra de un usuario
# Route showing a user's purchase history
@app.route("/historial", methods = ['GET', 'POST'])
def historial():
    """Show the logged-in user's purchase history."""
    if('user' not in session):
        return redirect(url_for('index'))
    else:
        # Each account has its own history file on disk.
        PATH = 'usuarios/' + session['user'] + '/historial.json'
        historial = json.loads(open(PATH).read())
        if(request.method == 'POST'):
            # 307 preserves the POST body for index()'s filter handling.
            return redirect(url_for('index'), code = 307)
        else:
            return render_template('historial.html', historial = historial)
#Mantener cookies despues de cerrar el navegador
# Keep session cookies after the browser closes
@app.before_request
def session_management():
    """Mark every session permanent so cookies outlive the browser session."""
    session.permanent = True
if __name__ == "__main__":
    # NOTE(review): a random secret key invalidates all sessions on every
    # restart -- fine for development, not for production.
    app.secret_key = os.urandom(24)
    app.run(host='0.0.0.0', port=5001, debug=True)
|
3,680 | 85974e48c7eafdf39379559820ed7f0bdc07fb7a | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 16:38:15 2013
@author: a92549
Fixes lack of / between tzvp and tzvpfit
"""
import sys
def main(argv):
    """Insert the missing '/' between 'tzvp' and 'tzvpfit' in each file.

    Arguments
    ---------
    argv : list of str
        Paths of .com files to patch in place.

    Only the first occurrence is replaced, matching the original
    split-once behaviour.  Bug fix: the files are opened in binary mode,
    so the patterns must be bytes -- the original compared str patterns
    against bytes and would raise TypeError under Python 3.
    """
    for com in argv:
        with open(com, 'rb') as f:
            txt = f.read()
        # Space-separated and newline-separated variants of the typo.
        for broken in (b'tzvp tzvpfit', b'tzvp\ntzvpfit'):
            if broken in txt:
                with open(com, 'wb') as f:
                    f.write(txt.replace(broken, b'tzvp/tzvpfit', 1))
                break
if __name__ == '__main__':
main(sys.argv[1:]) |
3,681 | 991fa5f9c83a1821e62f7baacbc56a4d31982312 | a, b = map(int, input().split())
def mult(a, b):
    """Print a*b when both factors are single nonzero digits, else -1."""
    in_range = 1 <= a <= 9 and 1 <= b <= 9
    print(a * b if in_range else -1)
mult(a,b) |
3,682 | 193d48237b4b1e406eb565943cf01f0423449fca | # hw.shin@konantech.com
#leekiljae@ogqcorp.com |
3,683 | 1154fd3883dc8856e24127d56ce6a983308dc1aa | # -*- coding: utf-8 -*-
__author__ = 'wxy'
class ListProcess(object):
    """Look up a numeric id (uin/gid) by nickname in a decoded response.

    Parameters
    ----------
    rsp : dict
        Decoded JSON response; expected to hold ``rsp['result']['info']``
        (friends) or ``rsp['result']['gnamelist']`` (groups) -- TODO
        confirm against the caller.
    nickname : str
        The display name to search for.

    Fixes over the original: Python-2-only ``print`` statements replaced
    with ``print()`` calls (valid on both 2 and 3), the builtin name
    ``list`` is no longer shadowed, and the bare ``except:`` clauses are
    narrowed to ``Exception`` so KeyboardInterrupt/SystemExit propagate.
    """

    def __init__(self, rsp, nickname):
        self.rsp = rsp
        self.nickname = nickname

    def get_friend_uin(self):
        """Return the uin of the friend whose nick matches.

        Returns False when the response is malformed; returns None when
        no friend matches (preserving the original fall-through).
        """
        try:
            for entry in self.rsp['result']['info']:
                if entry['nick'] == self.nickname:
                    return entry['uin']
        except Exception:
            return False

    def get_group_uin(self):
        """Return the gid of the group whose name matches.

        Returns False when the response is malformed; returns None when
        no group matches (preserving the original fall-through).
        """
        try:
            for entry in self.rsp['result']['gnamelist']:
                if entry['name'] == self.nickname:
                    # Debug output kept from the original implementation.
                    print('++++++++++++++++++++++++++++++++++')
                    print(entry)
                    return entry['gid']
        except Exception:
            return False
|
3,684 | 3983f8dfb9c7b7e664af05857a0f6fe380154424 | from django import forms
from . import models
class PhotoForm(forms.Form):
    """Upload form with a single image field."""
    # The uploaded image; validated by Django's ImageField.
    image = forms.ImageField()
|
3,685 | d5d61b23dc14ffdfe7fe6f983164916863928eaf | from django.apps import AppConfig
class AttendaceConfig(AppConfig):
    """Django app configuration for the 'attendace' app.

    NOTE(review): 'attendace' looks like a misspelling of 'attendance',
    but the name must match the installed app package, so it is kept.
    """
    name = 'attendace'
|
3,686 | 5b7c04f23fb674191639e95dff8c530933379d67 | """
Вам дана последовательность строк.
В каждой строке замените все вхождения нескольких одинаковых букв на одну букву.
Буквой считается символ из группы \w.
Sample Input:
attraction
buzzzz
Sample Output:
atraction
buz
"""
from sys import stdin
import re
for word in stdin:
lst_in = word
match = re.finditer(r'(\w)\1+', lst_in)
for item in match:
lst_in = lst_in.replace(item[0], item[0][0])
print(lst_in, end='')
|
3,687 | a2292bc9cee57c5d4a7d36c66510ce4b4f3e20da | """
Simulator contains the tools needed to set up a multilayer antireflection
coating simulation.
Based on transfer matrix method outlined in Hou, H.S. 1974.
"""
# Author: Andrew Nadolski (with lots of help from previous work by Colin Merkel,
# Steve Byrnes, and Aritoki Suzuki)
# Filename: simulator.py
import glob
import os
import pprint
import time
import materials as mats
import numpy as np
import scipy as sp
class Layer:
    """A single layer in the AR coating.

    Attributes
    ----------
    name : string
        Material name. Default 'Generic layer'.
    thickness : float
        Layer thickness. Default 5 mil.
    type : string
        Layer role. Default 'Layer' (an element of the AR coating);
        other roles are 'Source' and 'Terminator'.
    units : string
        Length units for ``thickness``. Default 'mil'.
    dielectric : float
        Dielectric constant of the material. Default 1.
    losstangent : float
        Loss tangent of the material. Default 0.
    """
    def __init__(self):
        self.name = 'Generic layer'
        self.thickness = 5.
        self.type = 'Layer'
        self.units = 'mil'
        self.dielectric = 1.
        self.losstangent = 0.

    def __repr__(self):
        """Human-readable tag for the layer."""
        return '{} (AR layer)'.format(self.name)

    def display_layer_parameters(self):
        """Pretty-print every attribute of this layer."""
        pprint.pprint(vars(self))
        return

    def get_index(self):
        """Refractive index: the square root of the dielectric constant."""
        return np.sqrt(self.dielectric)

    def ideal_thickness(self, opt_freq=160e9):
        """Quarter-wavelength thickness at ``opt_freq``.

        Arguments
        ---------
        opt_freq : float, optional
            Optimization frequency in Hz. Defaults to 160 GHz.
        """
        quarter_wave = 3e8 / (4 * opt_freq)
        return quarter_wave / np.sqrt(self.dielectric)
class SourceLayer(Layer):
    """A special case of ``Layer``; represents the layer from which the simulated wave
    emanates.

    Attributes
    ----------
    thickness : float
        The thickness of the source layer. Defaults to ``numpy.inf`` since the model
        doesn't care about the thickness of source layer. The thickness of the
        source layer should not be changed under normal operations.
    type : string
        The type of layer. Default is `Source`, which is an element of the model,
        but not the coating. Other acceptable types are `Layer` and `Terminator`.
    """
    def __init__(self):
        Layer.__init__(self)
        # Semi-infinite medium: the model never uses a finite source thickness.
        self.thickness = np.inf
        self.type = 'Source'

    def __repr__(self):
        """Return a nice string formatted representation of the layer."""
        return '{} (source layer)'.format(self.name)
class SubstrateLayer(Layer):
    """A special case of ``Layer``; represents the layer to which the AR coating is
    attached.

    Attributes
    ----------
    thickness : float
        The thickness of the substrate layer. Defaults to 250 mils, which is
        the typical thickness of a sample puck used in the Berkeley FTS setup.
        This may be changed as is necessary, but the units must (eventually) be
        converted to meters before being fed to the simulator.
    type : string
        The type of layer
    """
    def __init__(self):
        Layer.__init__(self)
        # 250 mil default; _d_converter() handles the conversion to meters.
        self.thickness = 250.
        self.type = 'Substrate'

    def __repr__(self):
        """Return a nice string formatted representation of the layer."""
        return '{} (substrate)'.format(self.name)
class TerminatorLayer(Layer):
    """A special case of ``Layer``; represents the layer upon which the simulated wave
    terminates.

    Attributes
    ----------
    thickness : float
        The thickness of the terminating layer. Defaults to ``numpy.inf`` since
        the model doesn't care about the thickness of the terminating layer.
        The thickness of the terminating layer should not be changed under
        normal operations.
    type : string
        The type of layer. Default is `Terminator`, which is an element of the model,
        but not the coating. Other acceptable types are `Source` and `Layer`.
    """
    def __init__(self):
        Layer.__init__(self)
        # Semi-infinite medium: the model never uses a finite terminator thickness.
        self.thickness = np.inf
        self.type = 'Terminator'

    def __repr__(self):
        """Return a nice string formatted representation of the layer."""
        return '{} (terminator layer)'.format(self.name)
class Builder:
"""The main body of the simulator code.
Attributes
----------
bands : list
A list of n tuples, with each tuple composed of a lower and upper limit
for a frequency band in units of hertz. Default is the SPT-3G bands.
freq_sweep : array
The range of frequencies to be simulated. Defaults to 0. Set a frequency
sweep by calling ``set_freq_sweep()``.
optimization_frequency : float
The frequency (in Hz) at which to calculate the ideal thickness for a given
material. Defaults to 160e9 Hz (160 GHz).
save_name : string
The name under which the results of the simulation are saved. Defaults to
'transmission_data_XXXXX.txt' where `XXXXX` is a time-stamp to avoid
overwriting previous simulation results.
save_path : string
The path to which the simulation results will be saved. Defaults to the
current working directory.
source : object
``Layer`` object ``SourceLayer`` that defines where the wave emanates from.
Default is `None`.
stack : list
The user-defined layers incorporated in the simulation EXCEPT the source
and terminator layers. Default is empty list.
structure : list
The layers incorporated in the simulation INCLUDING the source and
terminator layers. Default is empty list. The list is populated
by creating layers and calling ``_interconnect()``.
terminator : object
``Layer`` object ``TerminatorLayer`` that defines where the wave terminates.
Defaults is `None`.
"""
def __init__(self):
    # SPT-3G observing band edges in Hz (roughly the 95/150/220 GHz bands).
    self.bands = [(81.7e9, 107.5e9),(128.6e9, 167.2e9),(196.9e9, 249.2e9)]
    # Frequencies to simulate; replaced via set_freq_sweep() before running.
    self.freq_sweep = 0.
    # Time-stamped file names avoid clobbering earlier runs.
    self.log_name = 'log_simulation_{t}.txt'.format(t=time.ctime(time.time()))
    self.optimization_frequency = 160e9 # given in Hz, i.e. 160 GHz
    self.save_name = 'transmission_data_{t}.txt'.format(t=time.ctime(time.time()))
    self.save_path = '.'
    # Source/terminator layers are attached via add_layer(type='source'/'terminator').
    self.source = None
    # User-added coating layers only; source/terminator excluded.
    self.stack = []
    # Full model: source + stack + terminator, built by _interconnect().
    self.structure = []
    self.terminator = None
def _calc_R_T_amp(self, polarization, n, delta):
    """Calculate the reflected and transmitted amplitudes

    Transfer-matrix accumulation following Hou 1974: per-interface
    Fresnel coefficients are combined with per-layer propagation
    phases into a single 2x2 matrix, from which the net amplitudes
    are read off.

    Arguments
    ---------
    polarization : string
        The polarization of the source wave. Must be one of: 's', 'p', or 'u'.
    n : array
        An array of refractive indices, ordered from source to terminator
    delta : array
        An array of wavevector offsets

    Returns
    -------
    (r, t) : tuple
        A tuple where 'r' is the reflected amplitude, and 't' is the
        transmitted amplitude
    """
    # Per-interface amplitude coefficients; only the [i, i+1]
    # super-diagonal entries are populated below.
    t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)
    r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)
    for i in range(len(self.structure)-1):
        t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])
        r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])
    # Characteristic matrix of each internal layer.
    M = np.zeros((len(self.structure),2,2),dtype=complex)
    # NOTE(review): m_r_amp/m_t_amp/m_temp recompute the per-layer factors
    # of M but are never used afterwards -- apparently debugging leftovers.
    m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex)
    m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex)
    for i in range(1,len(self.structure)-1):
        m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex)
        m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex)
    m_temp = np.dot(m_t_amp, m_r_amp)
    for i in range(1,len(self.structure)-1):
        # M_i = (1/t_i) * P(delta_i) . I(r_i): propagation through layer i
        # followed by the interface into layer i+1.
        M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),
                                                      0., 0., np.exp(1j*delta[i]),
                                                      dtype=complex),
                                       self._make_2x2(1., r_amp[i,i+1], \
                                                      r_amp[i,i+1], 1., \
                                                      dtype=complex))
    # Accumulate the ordered product of all internal layer matrices.
    M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)
    for i in range(1, len(self.structure)-1):
        M_prime = np.dot(M_prime, M[i])
    # NOTE(review): mod_M_prime is computed but unused; the same value is
    # rebuilt inline in the next statement.
    mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1]
    # Fold in the first (source-side) interface.
    M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \
                                    dtype=complex)/t_amp[0,1], M_prime)
    # Net amplitudes from the total transfer matrix.
    t = 1/M_prime[0,0]
    r = M_prime[0,1]/M_prime[0,0]
    return (r, t)
def _d_converter(self):
"""Check the units of all elements in the connected ar coating
stack. Convert the lengths of the layers to meters if they are
not already in meters.
"""
units = {'um':1e-6, 'mm':1e-3, 'inch':2.54e-2, 'in':2.54e-2,\
'micron':1e-6, 'mil':2.54e-5, 'm':1.0}
for i in self.stack:
i.thickness = i.thickness*units[i.units]
return
def _find_ks(self, n, frequency, tan, lossy=True):
"""Calculate the wavenumbers.
Arguments
---------
n : array
An array of refractive indices, ordered from source to
terminator
frequency : float
The frequency at which to calculate the wavevector, k
tan : array
An array of loss tangents, ordered from vacuum to substrate
lossy : boolean, optional
If `True` the wavevector will be found for a lossy material.
If `False` the wavevector will be found for lossless material.
Default is `True`.
Returns
-------
k : complex
The complex wavenumber, k
"""
if lossy:
k = 2*np.pi*n*frequency*(1+0.5j*tan)/3e8 # New expression for loss (as of 9/13/16), this one is more physical (i.e. subtractive)
# k = 2*np.pi*n*frequency*(1-0.5j*tan)/3e8 # Original expression for loss (pre 9/13/16), but it is incorrectly ADDITIVE
else:
k = 2*np.pi*n*frequency/3e8
return k
def _find_k_offsets(self, k, d):
"""Calculate the wavenumber offset, delta.
Arguments
---------
k : array
The wavevector
d : array
An array of thicknesses, ordered from source to terminator
Returns
-------
delta : array
The wavenumber offset
"""
olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error;
# it's just the 'inf' boundaries
delta = k * d
sp.seterr(**olderr) # turn the error back on
return delta
def _get_R(self, net_r_amp):
"""Return fraction of reflected power.
Arguments
---------
net_r_amp : float
The net reflection amplitude after calculating the transfer matrix.
"""
return np.abs(net_r_amp)**2
def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):
"""Return the fraction of transmitted power.
Arguments
---------
polarization : string
The polarization of the source wave. One of: 's' or 'p'.
net_t_amp : float
The net transmission amplitude after calculating the transfer matrix.
n_i : float
The index of refraction of material 'i'.
n_f : float
The index of refraction of material 'f'.
theta_i : float, optional
The angle of incidence at interface 'i'. Default is 0.
theta_f : float, optional
The angle of incidence at interface 'f'. Default is 0.
"""
if (polarization=='s'):
return np.abs(net_t_amp**2) * (n_f/n_i)
elif (polarization=='p'):
return np.abs(net_t_amp**2) * (n_f/n_i)
else:
raise ValueError("Polarization must be 's' or 'p'")
def _get_bandpass_stats(self):
    """Unfinished stub; currently a no-op.

    Appears intended to accumulate per-band statistics (e.g. mean
    transmission over each tuple in ``self.bands``) -- confirm before use.
    """
    mean = []
    for band in self.bands:
        pass
    pass
def _interconnect(self):
"""Connect all the AR coating layer objects, ensuring that the source
and terminator layers come first and last, respectively.
"""
self.clear_structure()
self.structure.append(self.source)
for i in range(len(self.stack)):
self.structure.append(self.stack[i])
self.structure.append(self.terminator)
return
def _make_2x2(self, A11, A12, A21, A22, dtype=float):
"""Return a 2x2 array quickly.
Arguments
---------
A11 : float
Array element [0,0].
A12 : float
Array element [0,1].
A21 : float
Array element [1,0].
A22 : float
Array element [1,1].
dtype : dtype, optional
The datatype of the array. Defaults to float.
"""
array = np.empty((2,2), dtype=dtype)
array[0,0] = A11
array[0,1] = A12
array[1,0] = A21
array[1,1] = A22
return array
def _make_log(self):
    """Unfinished stub for simulation logging; currently does nothing."""
    pass
def _make_save_path(self, save_path, save_name):
"""Assemble the file name and path to the results file.
Returns
-------
path : string
The full path to the save destination for the simulation results
"""
if save_name.endswith('.txt'):
path = os.path.join(save_path, save_name)
else:
self.save_name = save_name+'.txt'
path = os.path.join(save_path, save_name)
return path
def _r_at_interface(self, polarization, n_1, n_2):
"""Calculate the reflected amplitude at an interface.
Arguments
---------
polarization : string
The polarization of the source wave. Must be one of: 's' or 'p'.
n_1 : float
The index of refraction of the first material.
n_2 : float
The index of refraction of the second material.
Returns
-------
reflected amplitude : float
The amplitude of the reflected power
"""
if polarization == 's':
return ((n_1-n_2)/(n_1+n_2))
elif polarization == 'p':
return ((n_1-n_2)/(n_1+n_2))
else:
raise ValueError("Polarization must be 's' or 'p'")
def _sort_ns(self):
"""Organize the refractive indices of the layers in the simulation.
Returns
-------
n : array
The ordered list of indices of refraction, from source to terminator
"""
n = []
for layer in self.structure:
n.append(layer.get_index())
n = np.asarray(n)
return n
def _sort_ds(self):
"""Organize the layers' thicknesses in the simulation.
Returns
-------
d : array
The ordered list of thicknesses, from source to terminator
"""
d = []
for layer in self.structure:
if (layer.type == 'Layer' or layer.type == 'Substrate'):
d.append(layer.thickness)
d.insert(0, self.structure[0].thickness)
d.append(self.structure[-1].thickness)
d = np.asarray(d)
return d
def _sort_tans(self):
"""Organize the loss tangents of the layers in the simulation.
Returns
-------
tan : array
The ordered list of loss tangents, from source to terminator
"""
tan = []
for layer in self.structure:
tan.append(layer.losstangent)
tan = np.asarray(tan)
return tan
def _t_at_interface(self, polarization, n_1, n_2):
"""Calculate the transmission amplitude at an interface.
Arguments
---------
polarization : string
The polarization of the source wave. Must be one of: 's' or 'p'.
n_1 : float
The index of refraction of the first material.
n_2 : float
The index of refraction of the second material.
Returns
-------
transmitted_amplitude : float
The amplitude of the transmitted power
"""
if polarization == 's':
return 2*n_1/(n_1 + n_2)
elif polarization == 'p':
return 2*n_1/(n_1 + n_2)
else:
raise ValueError("Polarization must be 's' or 'p'")
def _unpolarized_simulation(self, frequency, theta_0=0):
"""Handle the special case of unpolarized light by running the model
for both 's' and 'p' polarizations and computing the mean of the two
results.
Arguments
---------
frequency : float
The frequency (in Hz) at which to evaluate the model.
theta_0 : float, optional
The angle of incidence at the initial interface. Default is 0.
"""
s_data = self.simulate(frequency, 's', theta_0)
p_data = self.simulate(frequency, 'p', theta_0)
T = (s_data + p_data)/2
return T
def add_layer(self, material, thickness=5.0, units='mil', type='layer', \
              stack_position=-1):
    """Create a layer from the set of pre-programmed materials and add it
    to the AR coating stack

    Arguments
    ---------
    material : string
        A key in the dictionary of materials found in materials.py.
        You can view these materials by calling
        'show_materials()'.
    thickness : float, optional
        The thickness of the AR coating layer material. Assumed to
        be given in 'mil' (i.e. thousandths of an inch) unless
        otherwise stated. Default is 5.
    units : string, optional
        The units of length for the AR coating layer. Default is 'mil'.
        Must be one of:
        { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }
    type : string, optional
        The layer type. Default is 'layer', which corresponds to
        an AR layer. Other options are 'source' or 'terminator', which
        correspond to source and terminator layers, respectively.
    stack_position : int, optional
        The position of the layer in the AR coating stack, indexed
        from 0. Default is -1 (i.e., layer is automatically added
        to the end (bottom?) of the stack.
    """
    # NOTE: `type` shadows the builtin name; kept for interface compatibility.
    type = type.lower()
    if type == 'layer':
        layer = Layer()
        layer.name = material.lower()
        layer.thickness = thickness
        layer.units = units
        try:
            # mats.Electrical.props[name] holds (dielectric, loss_tangent).
            layer.dielectric = mats.Electrical.props[layer.name][0]
        except:
            # NOTE(review): bare except re-raises any failure as KeyError.
            raise KeyError('I don\'t know that material!')
        try:
            layer.losstangent = mats.Electrical.props[layer.name][1]
        except:
            # Missing loss data is non-fatal: assume lossless.
            layer.losstangent = 0
            print('\nI don\'t know this loss tangent. Setting loss to 0!')
        if (stack_position == -1):
            self.stack.append(layer)
        else:
            self.stack.insert(stack_position, layer)
    elif type == 'source':
        # Source layers replace self.source rather than joining the stack.
        self.source = SourceLayer()
        self.source.name = material.lower()
        try:
            self.source.dielectric = mats.Electrical.props[self.source.name][0]
        except:
            raise KeyError('I don\'t know that material!')
        try:
            self.source.losstangent = mats.Electrical.props[self.source.name][1]
        except:
            self.source.losstangent = 0
            print('\nI don\'t know this loss tangent. Setting loss to 0!')
    elif type == 'terminator':
        # Terminator layers replace self.terminator rather than joining the stack.
        self.terminator = TerminatorLayer()
        self.terminator.name = material.lower()
        try:
            self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0]
        except:
            raise KeyError('I don\'t know that material!')
        try:
            self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1]
        except:
            self.terminator.losstangent = 0
            print('\nI don\'t know this loss tangent. Setting loss to 0!')
    else:
        raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR')
    return
def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):
    """Add a layer with custom properties to the AR stack.

    Arguments
    ---------
    material : string
        The name of the layer
    thickness : float
        The thickness of the layer
    units : string
        The units of length for the AR coating layer. Must be one of:
        { 'mil', 'inch', 'mm', 'm', 'um', 'in', 'micron' }
    dielectric : float
        The dielectric constant of the AR coating layer
    loss_tangent : float
        The loss tangent of the AR coating layer
    stack_position : int, optional
        The position of the layer in the AR coating stack, indexed
        from 0. Default is -1 (i.e., layer is automatically added
        to the end (bottom?) of the stack.
    """
    layer = Layer()
    # Bug fix: the material name was never stored, so every custom layer
    # displayed as 'Generic layer'. Lower-cased for consistency with
    # add_layer().
    layer.name = material.lower()
    layer.units = units
    layer.thickness = thickness
    layer.dielectric = dielectric
    layer.losstangent = loss_tangent
    if (stack_position == -1):
        self.stack.append(layer)
    else:
        self.stack.insert(stack_position, layer)
    return
def display_sim_parameters(self):
    """Pretty-print every attribute of this object in one place."""
    pprint.pprint(vars(self))
def clear_structure(self):
    """Reset the AR ``structure`` to an empty list."""
    self.structure = []
def remove_layer(self, layer_pos):
    """Drop one layer from the AR coating stack.

    Arguments
    ---------
    layer_pos : int
        The list index of the layer to remove from the AR coating stack
    """
    del self.stack[layer_pos]
def run_sim(self):
    """Take the attributes of the ``Builder()`` object and execute the
    simulation at each frequency in ``Builder().freq_sweep``. Save the
    output to a columnized, tab-separated text file.

    Returns
    -------
    transmission : array
        A three-element array. The first element is a list of
        frequencies, the second elements is a list of the
        transmissions at each frequency, and the third is a list of
        the reflections at each frequency.
    """
    t0 = time.time()
    print('Beginning AR coating simulation')
    # Normalize layer thicknesses to meters and link the layer list into
    # the simulated structure before sweeping.
    self._d_converter()
    self._interconnect()
    f_list = []
    t_list = []
    r_list = []
    # One full single-frequency evaluation per point in the sweep.
    for f in self.freq_sweep:
        results = self.sim_single_freq(f)
        f_list.append(f)
        t_list.append(results['T'])
        r_list.append(results['R'])
    fs = np.asarray(f_list)
    ts = np.asarray(t_list)
    rs = np.asarray(r_list)
    results = np.array([fs, ts, rs])
    # NOTE(review): ``t`` is unused; it appears to belong to the
    # commented-out logging code below.
    t = time.ctime(time.time())
    data_name = self._make_save_path(self.save_path, self.save_name)
    header = 'Frequency (Hz)\t\tTransmission amplitude\t\tReflection amplitude'
    # log_name = self._make_save_path(self.save_path, self.log_name)
    # log = self._make_log()
    # Three tab-separated columns: frequency, transmission, reflection.
    with open(data_name, 'wb') as f:
        np.savetxt(f, np.c_[fs, ts, rs], delimiter='\t', header=header)
    # with open(log_name, 'wb') as f:
    #     for line in log:
    #         f.writelines(line)
    #         f.write('\n')
    print('Finished running AR coating simulation')
    t1 = time.time()
    t_elapsed = t1-t0
    print('Elapsed time: {t}s\n'.format(t=t_elapsed))
    return results
def set_freq_sweep(self, lower_bound, upper_bound, resolution=1, units='ghz'):
    """Set the frequency range over which the simulation will run.

    Arguments
    ---------
    lower_bound : float
        The low end of the frequency range, in the given ``units``.
    upper_bound : float
        The high end of the frequency range, in the given ``units``.
    resolution : float, optional
        The interval at which to sample the frequency range, in the same
        ``units`` as the bounds. Defaults to 1.
    units : str
        The units of frequency. Must be one of:
        Hz, hz, KHz, khz, MHz, mhz, GHz, ghz
    """
    convert = {'Hz':1.0, 'hz':1.0, 'KHz':1e3, 'khz':1e3, 'MHz':1e6,
               'mhz':1e6, 'GHz':1e9, 'ghz':1e9}
    scale = convert[units]
    low = lower_bound*scale
    high = upper_bound*scale
    # Fix: the original never scaled ``resolution`` to Hz (so the sample
    # count was wrong by the unit factor) and passed a float count to
    # np.linspace, which modern numpy rejects.  Include both endpoints so
    # adjacent samples are exactly one ``resolution`` step apart.
    step = resolution*scale
    samples = int(round((high - low)/step)) + 1
    self.freq_sweep = np.linspace(low, high, samples)
    return
# def set_source_layer(self, material):
# """Change the source layer.
# Arguments
# ---------
# material : string
# A key in the dielectrics dictionary.
# """
# self.source = SourceLayer(material)
# return
# def set_terminator_layer(self, material):
# """Change the terminator layer.
# Arguments
# ---------
# material : string
# A key in the dielectrics dictionary.
# """
# self.terminator = TerminatorLayer(material)
# return
def show_materials(self):
    """Print the names of materials with known properties.

    The printed names are the keys of the materials-properties dictionary;
    both listings come from the same ``props`` table (each entry holds the
    dielectric constant and the loss tangent).
    """
    print('\nThe materials with known dielectric properties are:\n')
    pprint.pprint(mats.Electrical.props)
    print('\nThe materials with known loss tangents are:\n')
    pprint.pprint(mats.Electrical.props)
def sim_single_freq(self, frequency, polarization='s', theta_0=0):
    """Run the model simulation for a single frequency.

    Arguments
    ---------
    frequency : float
        The frequency at which to evaluate the model (in Hz).
    polarization : string, optional
        The polarization of the source wave. Must be one of: 's',
        'p', or 'u'. Default is 's'.

        ### NOTE ###
        I've chosen 's' polarization as the default because this
        simulator only handles normal incidence waves, and and at
        normal incidence 's' and 'p' are equivalent.
    theta_0 : float, optional
        The angle of incidence at the first interface.
        NOTE(review): currently unused by the body below — confirm
        whether oblique incidence was ever wired through.

    Returns
    -------
    result : dict
        dict = {
            'T' : array; the total transmission through the model.
            'R' : array; the total reflection through the model.
        }
    """
    # check the desired polarization ('u' handling is disabled)
    # if polarization == 'u':
    #     return self._unpolarized_simulation(frequency)
    n = self._sort_ns()     # get all refractive indices
    d = self._sort_ds()     # get all thicknesses
    tan = self._sort_tans() # get all loss tans
    k = self._find_ks(n, frequency, tan)  # find all wavevectors, k
    delta = self._find_k_offsets(k, d)    # calculate all offsets
    r, t = self._calc_R_T_amp(polarization, n, delta)  # get trans, ref amps
    # Net transmitted power needs the bounding indices (source/terminator).
    T = self._get_T(polarization, t, n[0], n[-1])  # find net trans, ref power
    R = self._get_R(r)
    result = {'T': T, 'R': R}
    return result
def snell(self, indices, theta_0):
    """Calculate the Snell angles for the entire model.

    Arguments
    ---------
    indices : array-like
        The indices of refraction for all elements in the model,
        ordered from source to terminator.
    theta_0 : float
        The angle of incidence (radians) at the first interface.

    Returns
    -------
    ndarray
        The propagation angle inside each element, from Snell's law:
        n_0 sin(theta_0) = n_i sin(theta_i).
    """
    # Fix: the original referenced undefined names (``n_list``/``th_0``)
    # and the long-removed ``scipy.arcsin`` alias; use numpy and the
    # actual parameters.
    indices = np.asarray(indices)
    return np.arcsin(np.real_if_close(indices[0] * np.sin(theta_0) / indices))
class MCMC:
    """Helpers around ``emcee``, the MCMC Hammer: prior bookkeeping plus
    stubbed likelihood/probability hooks for setting up MCMC simulations
    and visualizing the results.
    """

    def __init__(self):
        # Placeholder display name and the accumulated prior constraints.
        self.name = 'blah'
        self.priors = []

    def __repr__(self):
        return '{} (MCMC object)'.format(self.name)

    def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):
        """Record a constraint on one part of the model to shrink the
        simulation space. Only thickness and dielectric cuts are supported.

        Arguments
        ---------
        layer_number : int
            Position of the layer in the AR coating stack, indexed from 1
            (incident vacuum is 0, first AR layer is 1).
        prior_type : string
            'thickness'/'t' or 'dielectric'/'d' — which quantity to bound.
        low_bound : float
            The lower boundary of the range.
        hi_bound : float
            The higher boundary of the range.
        units : string, optional
            Units of the bounds; only meaningful for thickness cuts since
            dielectric constants are unitless. Defaults to 'mil'.
        """
        self.priors.append({
            'layer_number': layer_number,
            'prior_type': prior_type,
            'low_bound': low_bound,
            'hi_bound': hi_bound,
            'units': units,
        })
        return

    def lnlikelihood(self):
        # Stub: log-likelihood of the data given the model.
        return

    def lnprior(self):
        """Stub: log-prior encoding the known model constraints."""
        return

    def lnprobability(self):
        """Stub: logspace sum of ``lnprior`` and ``lnlikelihood``."""
        return

    def sort_priors(self):
        """Stub: return ``self.priors`` sorted by layer number (for a layer
        with both cuts, thickness first, dielectric second)."""
        return
|
3,688 | a0dbb374f803cb05a35f823f54ef5f14eaf328b2 | # coding: utf-8
"""
login.py
~~~~~~~~
木犀官网登陆API
"""
from flask import jsonify, request
from . import api
from muxiwebsite.models import User
from muxiwebsite import db
@api.route('/login/', methods=['POST'])
def login():
    """POST /login/ — authenticate a user for the Muxi site.

    Expects a JSON body with ``email`` and ``password`` keys.
    Returns ``{'token': <auth token>}`` with 200 on success,
    403 when the email is unknown, 400 when the password is wrong.

    NOTE(review): the distinct 403/400 codes let a caller probe which
    emails are registered — consider one shared failure status.
    """
    email = request.get_json().get("email")
    pwd = request.get_json().get("password")
    user = User.query.filter_by(email=email).first()
    if not user:
        return jsonify({}), 403
    if not user.verify_password(pwd):
        return jsonify({}), 400
    token = user.generate_auth_token()
    return jsonify ({
        'token': token,
    }), 200
|
3,689 | 3fa1736fd87448ec0da4649153521d0aba048ccf | from rest_framework import serializers
from dailytasks.models import Tasks
class TasksSerializer(serializers.ModelSerializer):
    """DRF serializer for ``Tasks`` rows.

    ``user`` is read-only and serialized as the owning user's username,
    so clients cannot reassign a task's owner through this endpoint.
    """
    user = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Tasks
        fields = ['id','created','title','description','status','user']
|
3,690 | c036e6a0a9f06b08ee3eb43655dd833b46fd1e76 |
from .factories import * |
3,691 | 4892d4f364b03b53b1ad6f4c2177bbe2898edbda | #!/usr/bin/env python
import os
import sys
import numpy as np
from sklearn.metrics import roc_curve, auc
def confusion_matrix(Or, Tr, thres):
    """Return (TP, TN, FP, FN) counts for scores ``Or`` thresholded at
    ``thres`` against labels ``Tr`` (1 = positive, 0 = negative)."""
    pred_pos = Or >= thres
    pred_neg = Or < thres
    label_pos = Tr == 1
    label_neg = Tr == 0
    tpos = np.sum(pred_pos & label_pos)
    tneg = np.sum(pred_neg & label_neg)
    fpos = np.sum(pred_pos & label_neg)
    fneg = np.sum(pred_neg & label_pos)
    return tpos, tneg, fpos, fneg
def auc_roc(Pr, Tr):
    """Area under the ROC curve for scores ``Pr`` against binary labels
    ``Tr`` (positive label is 1.0)."""
    false_pos_rate, true_pos_rate, _thresholds = roc_curve(Tr, Pr, pos_label=1.0)
    return auc(false_pos_rate, true_pos_rate)
def dice_score(Pr, Tr, thres):
    """Dice coefficient between a thresholded prediction and a binary mask.

    Arguments
    ---------
    Pr : ndarray
        Prediction scores; values strictly greater than ``thres`` count as
        foreground (matches the original ``> thres`` / ``<= thres`` split).
    Tr : ndarray
        Ground-truth mask (nonzero = foreground); must match ``Pr``'s shape.
    thres : float
        Decision threshold applied to ``Pr``.

    Returns
    -------
    float
        2*|A∩B| / (|A| + |B|); NaN when both masks are empty, as before.

    Raises
    ------
    ValueError
        If the two arrays have different shapes.
    """
    # Fix: ``np.bool`` was removed in NumPy 1.24 — boolean-ize directly.
    # Thresholding to {1, 0} then casting to bool is just ``Pr > thres``.
    im1 = np.asarray(Pr) > thres
    im2 = np.asarray(Tr).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)
    return 2.0 * intersection.sum() / (im1.sum() + im2.sum())
def cut_off(x):
    """Guard a denominator: return ``x`` if positive, otherwise +inf
    (so a subsequent division yields 0 instead of dividing by zero)."""
    if x > 0:
        return x
    print('Cut_off is applied')
    return float('inf')
def compute_dice(out_fol, steps=100):
    """Sweep decision thresholds over the saved prediction/label arrays in
    ``out_fol``, pick the threshold with the best mean Dice, then write the
    per-metric summary (mean/std of Dice, PPV, NPV, TPR, TNR, FPR, FNR).

    Each ``*.npy`` file in ``out_fol`` is expected to hold an (N, 2) array:
    predictions in column 0, ground truth in column 1 — TODO confirm.
    """
    files = [x for x in os.listdir(out_fol) if 'npy' in x]
    # steps = 100
    # Candidate thresholds 0.30 .. 0.59 in increments of 1/steps.
    thresholds = np.array(range(30, 60)) / steps
    # thresholds = np.array([0.4, 0.5, 0.6])
    dice_thresholds = np.zeros(len(thresholds))
    print(thresholds)
    for i, threshold in enumerate(thresholds):  # compute dice by varying thresholds
        dice_running = 0
        for file in files:
            data = np.load(os.path.join(out_fol, file))
            im1 = data[:, 0:1]
            im2 = data[:, 1:]
            dice_running += dice_score(im1, im2, threshold)
        # Mean Dice over all files at this threshold.
        dice_thresholds[i] = dice_running / len(files)
        print('applying threshold: ', threshold, dice_running / len(files))
        sys.stdout.flush()
    print(dice_thresholds)
    # Persist the (threshold, mean-dice) table alongside the inputs.
    save_dice = np.concatenate((thresholds.reshape(-1, 1), dice_thresholds.reshape(-1, 1)), axis=1)
    np.save(os.path.join(out_fol, out_fol.split('/')[-1] + '_thresholds_dice_saved'), save_dice)
    threshold_best = thresholds[np.argmax(dice_thresholds)]
    stats = np.zeros((len(files), 7))
    for i, file in enumerate(files):
        data = np.load(os.path.join(out_fol, file))
        im1 = data[:, 0]
        im2 = data[:, 1]
        dice = dice_score(im1.copy(), im2.copy(), threshold_best)
        tpos, tneg, fpos, fneg = confusion_matrix(im1.copy(), im2.copy(), threshold_best)
        # cut_off() maps an empty denominator to +inf so the ratio is 0.
        PPV = tpos / cut_off(tpos + fpos)
        NPV = tneg / cut_off(fneg + tneg)
        TPR = tpos / cut_off(tpos + fneg)
        FNR = fneg / cut_off(tpos + fneg)
        FPR = fpos / cut_off(fpos + tneg)
        TNR = tneg / cut_off(fpos + tneg)
        stats[i] = np.array([dice, PPV, NPV, TPR, TNR, FPR, FNR])
        # auc_val = auc_roc(im1.copy(), im2.copy())
        # print(file, auc_val, dice, '\n')
    # NOTE(review): relies on the loop variable ``i`` — NameError if no files.
    stats = stats[:i + 1]
    mu = np.mean(stats, axis=0)
    stds = np.std(stats, axis=0)
    with open(os.path.join(out_fol, out_fol.split('/')[-1] + '_results_summary.txt'), 'w') as f:
        f.writelines('best threshold: {}\n'.format(threshold_best))
        f.writelines('mean: {}\n'.format(mu))
        f.writelines('stds: {}\n'.format(stds))
    print('best threshold: ', threshold_best)
    print('mean: ', mu)
    print('stds: ', stds)
def compute_dice_final(out_fol, threshold_best):
    """Evaluate Dice/PPV/NPV/TPR/TNR/FPR/FNR per slide at a fixed threshold
    and write both a per-slide listing and a mean/std summary file.

    Reads every ``*_preds_gts.npy`` in ``out_fol``; each is expected to be
    an (N, 2) array with predictions in column 0 and ground truth in
    column 1 — TODO confirm.
    """
    files = [x for x in os.listdir(out_fol) if 'preds_gts.npy' in x]
    stats = np.zeros((len(files), 7))
    for i, file in enumerate(files):
        data = np.load(os.path.join(out_fol, file))
        im1 = data[:, 0]
        im2 = data[:, 1]
        dice = dice_score(im1.copy(), im2.copy(), threshold_best)
        tpos, tneg, fpos, fneg = confusion_matrix(im1.copy(), im2.copy(), threshold_best)
        print(tpos, tneg, fpos, fneg)
        # cut_off() maps an empty denominator to +inf so the ratio is 0.
        PPV = tpos / cut_off(tpos + fpos)
        NPV = tneg / cut_off(fneg + tneg)
        TPR = tpos / cut_off(tpos + fneg)
        FNR = fneg / cut_off(tpos + fneg)
        FPR = fpos / cut_off(fpos + tneg)
        TNR = tneg / cut_off(fpos + tneg)
        stats[i] = np.array([dice, PPV, NPV, TPR, TNR, FPR, FNR])
        # auc_val = auc_roc(im1.copy(), im2.copy())
        print(file, stats[i])
    # NOTE(review): relies on the loop variable ``i`` — NameError if no files.
    stats = stats[:i + 1]
    mu = np.mean(stats, axis=0)
    stds = np.std(stats, axis=0)
    # Per-slide CSV-style rows: slide name + the 7 metrics.
    with open(os.path.join(out_fol, out_fol.split('/')[-1] + '_slides_level.txt'), 'w') as f:
        for i, s in enumerate(stats):
            f.writelines(
                '{},{},{},{},{},{},{},{}\n'.format(files[i].split('_preds')[0], s[0], s[1], s[2], s[3], s[4], s[5],
                                                   s[6]))
    with open(os.path.join(out_fol, out_fol.split('/')[-1] + '_results_summary.txt'), 'w') as f:
        f.writelines('best threshold: {}\n'.format(threshold_best))
        f.writelines('mean: {}\n'.format(mu))
        f.writelines('stds: {}\n'.format(stds))
    print('best threshold: ', threshold_best)
    print('mean: ', mu)
    print('stds: ', stds)
3,692 | 51cb750082ce93b6d14fe3aa40711836d493129c | """
Proyecto SA^3
Autor: Mario Lopez
Luis Aviles
Joaquin V
Fecha: Octubre del 2012
versión: 1
"""
#Manejo de temlates en el HTML
import jinja2
from jinja2 import Environment, PackageLoader
import os
import cgi
import datetime
import urllib
# for hashing
import hashlib
#Layer de comunicacion con Modelo
from modelo.Layer import *
from modelo.Layer2 import *
#Framework de Web para Python
import webapp2
# API DataStore
from google.appengine.ext import db
# Initialization of the Jinja2 template system: HTML templates are loaded
# from the directory that contains this file.  ``env.globals`` also doubles
# as a process-wide session store (see before_filter / IniciaSesion).
env = Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
# ``format_time`` comes from the model layer (star-imported above).
env.filters['format_time'] = format_time
# Método para verificar si hay una sesión activa
def before_filter(fn):
    """Decorator: require an active session before running a handler.

    When no session is stored in ``env.globals`` the user is redirected to
    the login page.  Fix: the original issued the redirect but then fell
    through and executed the wrapped handler anyway; the handler is now
    skipped when there is no session.
    """
    def inner_function(self):
        if 'session' not in env.globals:
            self.redirect('/')
            return
        return fn(self)
    return inner_function
"""
REQUEST HANDLERS
"""
class MainPage(webapp2.RequestHandler):
    """Initial screen: renders the login form.

    The locals below were used to seed a default 'admin' account via the
    commented-out ``Usuario(...).put()`` call; they are currently unused.
    """
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        # Seed data for the default admin account (creation is disabled).
        matricula = 'admin'
        password = 'admin'
        nombre = 'admin'
        apellidop = 'admin'
        apellidom = 'admin'
        tipo = 'admin'
        # Hash the password with RIPEMD-160 (the name ``md5`` is a misnomer).
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        #Usuario(matricula = matricula, password = password, nombre = nombre, apellidop = apellidop, apellidom = apellidom, tipo = tipo).put()
        #productos = db.GqlQuery("SELECT * FROM Inventario")
        # Render the login view.
        _despliegaLogin(self, '/vistas/login.html')
class VerUsuarios(webapp2.RequestHandler):
    """List all registered users."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        usuarios = db.GqlQuery("SELECT * FROM Usuario")
        _despliegaVerUsuarios(self, usuarios, '/vistas/verUsuarios.html')

class RegistroAlumno(webapp2.RequestHandler):
    """Student registration form; offers the list of clinics to pick from."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinicas = db.GqlQuery("SELECT * FROM Clinica")
        _despliegaRegistroAlumno(self, clinicas, '/vistas/registroAlumno.html')

class GrabaAlumno(webapp2.RequestHandler):
    # NOTE(review): this handler reads the form and hashes the password but
    # never persists anything and sends no response — it looks unfinished.
    def post(self):
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        # Hash the password with RIPEMD-160 (``md5`` is a misnomer).
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5

class RegistraUsuario(webapp2.RequestHandler):
    """User registration form."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaRegistraUsuario(self, '/vistas/registraUsuario.html')

class GrabaUsuario(webapp2.RequestHandler):
    """Persist a new user from the registration form, hashing the password."""
    def post(self):
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        apellidop = self.request.get('apellidop')
        apellidom = self.request.get('apellidom')
        tipo = self.request.get('tipo')
        # Hash the password with RIPEMD-160 before storing.
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        grabaUsuario(matricula,password,nombre,apellidop,apellidom,tipo)
        self.redirect('/verUsuarios')
class IniciaSesion(webapp2.RequestHandler):
    """Entry: the log-in button on the main page.
    Verifies the credentials, stores the user in ``env.globals['session']``
    and redirects to the welcome page (or back to '/' on failure).
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        matricula = self.request.get('matricula')
        password = self.request.get('password')
        # Hash the submitted password (RIPEMD-160) before comparing.
        h = hashlib.new('ripemd160')
        h.update(password)
        md5 = h.hexdigest()
        password = md5
        # SECURITY(review): this GQL query is built by concatenating user
        # input — use bound parameters ("... WHERE matricula = :1") instead.
        user = db.GqlQuery("SELECT * FROM Usuario WHERE matricula = '" + matricula + "' AND password = '" + password + "'")
        if user.count() == 1:
            for u in user:
                user = []
                user.append(u.nombre)
                user.append(u.matricula)
                user.append(u.tipo)
                user.append(u.key())
            # Session is a process-wide list: [nombre, matricula, tipo, key].
            env.globals['session'] = user
            self.redirect('/bienvenida')
        else:
            self.redirect('/')

class CerrarSesion(webapp2.RequestHandler):
    """Entry: the log-out link.
    Drops the current session and redirects to the login screen.
    """
    def get(self):
        del env.globals['session']
        self.redirect('/')

class Bienvenida(webapp2.RequestHandler):
    """Welcome screen shown right after a successful login."""
    @before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaBienvenida(self, '/vistas/bienvenida.html')

class AgregaHorarioClinica(webapp2.RequestHandler):
    # Form to attach a schedule to a clinic; offers every clinic to choose.
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinicas = getAllClinicas()
        _despliegaAgregaHorarioClinica(self,clinicas, '/vistas/agregarHorarioClinica.html')
#======================================= Clinic (Clinica) handlers
class AgregarClinica(webapp2.RequestHandler):
    """Form to create a clinic."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarClinica(self, '/vistas/Clinica/agregarClinica.html')

class GrabaClinica(webapp2.RequestHandler):
    """Create a clinic when no ``key`` is posted, otherwise update it."""
    def post(self):
        key = self.request.get('key')
        nombre = self.request.get('nombre')
        descripcion = self.request.get('descripcion')
        localizacion = self.request.get('localizacion')
        unidades = int(self.request.get('unidades'))
        defectuosas = int(self.request.get('defectuosas'))
        if(key == None or key ==""):
            grabaClinica(nombre,descripcion,localizacion,unidades,defectuosas)
        else:
            actualizaClinica(key,nombre,descripcion,localizacion,unidades,defectuosas)
        self.redirect('/verClinicas')  # back to the clinics listing

class EliminaClinica(webapp2.RequestHandler):
    """Delete the clinic identified by the ``key`` query parameter."""
    def get(self):
        key = self.request.get('key')
        eliminaClinica(key)
        self.redirect('/verClinicas')  # back to the clinics listing

class VerClinicas(webapp2.RequestHandler):
    """List all clinics."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        # Short delay, presumably so the eventually-consistent datastore
        # query reflects a just-completed write — TODO confirm intent.
        time.sleep(.1)
        clinicas = getAllClinicas()
        _despliegaVerClinicas(self, clinicas, '/vistas/Clinica/verClinicas.html')

class EditaClinica(webapp2.RequestHandler):
    """Edit form for the clinic identified by ``key``."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinica = db.get(self.request.get('key'))
        _despliegaEditaClinica(self, clinica, '/vistas/Clinica/editaClinica.html')
#======================================= End of clinic handlers
#======================================= Group (Grupo) handlers
class AgregarGrupo(webapp2.RequestHandler):
    """Form to create a group inside the clinic given by ``key``."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarGrupo(self,self.request.get('key'), '/vistas/Grupo/agregarGrupo.html')

class GrabarGrupo(webapp2.RequestHandler):
    """Create a group when no ``key`` is posted, otherwise update it.

    NOTE(review): the update branch drops ``clinica`` — confirm that
    ``actualizaGrupo``'s signature matches these arguments.
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        key = self.request.get('key')
        clinica = self.request.get('clinica')
        nombre = self.request.get('nombre')
        descripcion = self.request.get('descripcion')
        inicioAgenda = self.request.get('inicioAgenda')
        finAgenda = self.request.get('finAgenda')
        fa = self.request.get('fa')
        if(key == None or key == ""):
            grabaGrupo(clinica,nombre,descripcion,inicioAgenda,finAgenda,fa)
        else:
            actualizaGrupo(key,nombre,descripcion,inicioAgenda,finAgenda,fa)
        self.redirect('/verGrupos?key='+clinica)  # back to this clinic's groups

class EliminarGrupo(webapp2.RequestHandler):
    """Delete group ``key`` and return to its clinic's group list."""
    def get(self):
        key = self.request.get('key')
        eliminaGrupo(key)
        self.redirect('/verGrupos?key='+self.request.get('clinica'))  # back to the groups view

class VerGrupos(webapp2.RequestHandler):
    """List the groups of the clinic given by ``key``."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        clinica = getObject(self.request.get('key'))
        _despliegaVerGrupos(self,clinica, getGrupos(clinica.key()), '/vistas/Grupo/verGrupos.html')

class EditarGrupo(webapp2.RequestHandler):
    """Edit form for group ``key`` (within clinic ``clinica``)."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        grupo = db.get(self.request.get('key'))
        clinica = self.request.get('clinica')
        _despliegaEditaGrupo(self, clinica, grupo, '/vistas/Grupo/editaGrupo.html')
#======================================= End of group handlers
#======================================= Group-assignment handlers
class UsuariosAsignacion(webapp2.RequestHandler):
    """Step 1 of assigning a user to a group: pick the user."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        usuarios = getAllUsuarios()
        _despliegaUsuariosAsignacion(self,usuarios,'/vistas/Asignacion/verUsuarios.html')

class ClinicasAsignacion(webapp2.RequestHandler):
    """Step 2: pick the clinic for the chosen user."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        clinicas = getAllClinicas()
        usuario = getObject(self.request.get('usuario'))
        _despliegaClinicasAsignacion(self,usuario,clinicas,'/vistas/Asignacion/verClinicas.html')

class GruposAsignacion(webapp2.RequestHandler):
    """Step 3: pick a group inside the chosen clinic."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        clinica = getObject(self.request.get('clinica'))
        usuario = getObject(self.request.get('usuario'))
        _despliegaGruposAsignacion(self,usuario,clinica,'/vistas/Asignacion/verGrupos.html')

class GuardaAsignacion(webapp2.RequestHandler):
    """Final step: persist the user-to-group assignment, then show success."""
    def get(self):
        self.response.headers['Content-Type']= 'text/html'
        grupo = self.request.get('grupo')
        usuario = self.request.get('usuario')
        # Creates the association between both entities.
        creaAsignacion(usuario,grupo)
        _despliegaExito(self,"Usuario Asignado Correctamente",'/asignaUsuarios1','/vistas/Exito.html')
#======================================= End of group-assignment handlers
class AgregarHorario(webapp2.RequestHandler):
    """Form to create a schedule slot for the group given by ``key``."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarHorario(self,self.request.get('key'), '/vistas/Horario/agregarHorario.html')

class GrabarHorario(webapp2.RequestHandler):
    """Create a schedule slot when no ``key`` is posted, otherwise update it.

    BUG(review): the update branch calls ``actualizaGrupo`` with schedule
    arguments — it almost certainly should call an ``actualizaHorario``
    counterpart; confirm against the model layer before fixing.
    """
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        key = self.request.get('key')
        grupo = self.request.get('grupo')
        descripcion = self.request.get('descripcion')
        dia = self.request.get('dia')
        horaInicio = self.request.get('horaInicio')
        horaFin = self.request.get('horaFin')
        if(key == None or key == ""):
            grabaHorario(grupo,descripcion,dia,horaInicio,horaFin)
        else:
            actualizaGrupo(key,descripcion,dia,horaInicio,horaFin)
        self.redirect('/verHorarios?key='+grupo)  # back to this group's schedule

class EliminarHorario(webapp2.RequestHandler):
    """Delete schedule slot ``key`` and return to its group's schedule."""
    def get(self):
        key = self.request.get('key')
        eliminaHorario(key)
        self.redirect('/verHorarios?key='+self.request.get('grupo'))  # back to the schedule view

class VerHorarios(webapp2.RequestHandler):
    """List the schedule slots of the group given by ``key``."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        #horarios = getAllHorarios(self.request.get('key'))
        grupo = getObject(self.request.get('key'))
        _despliegaVerHorarios(self,grupo, getHorarios(grupo), '/vistas/Horario/verHorarios.html')

class EditarHorario(webapp2.RequestHandler):
    """Edit form for schedule slot ``key`` (within group ``grupo``)."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        horario = db.get(self.request.get('key'))
        grupo = self.request.get('grupo')
        _despliegaEditaHorario(self, grupo, horario, '/vistas/Horario/editaHorario.html')
#======================================= End of schedule (Horario) handlers
class EliminaUsuario(webapp2.RequestHandler):
    """Delete the user identified by ``key`` and return to the user list."""
    def get(self):
        usuarioKey = self.request.get('key')
        deleteUsuario(usuarioKey)
        self.redirect('/verUsuarios')

class EditaUsuario(webapp2.RequestHandler):
    """Edit form for the user identified by ``key``."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        usuarioKey = self.request.get('key')
        usuario = getUsuario(usuarioKey);
        _despliegaEditaUsuario(self, usuario, '/vistas/editaUsuario.html')

class GuardaCambiosUsuario(webapp2.RequestHandler):
    """Persist edits to an existing user (the password is not editable here)."""
    def post(self):
        usuarioKey = self.request.get('usuarioKey')
        nombre = self.request.get('nombre')
        matricula = self.request.get('matricula')
        apellidop = self.request.get('apellidop')
        apellidom = self.request.get('apellidom')
        tipo = self.request.get('tipo')
        usuario = getUsuario(usuarioKey);
        updateUsuario(usuario,nombre,matricula,apellidop,apellidom,tipo)
        self.redirect('/verUsuarios')
#==================================== Appointment (agenda) workflow
class AgendaPacienteExample(webapp2.RequestHandler):
    """Debug endpoint: print the availability value for a schedule slot."""
    def get(self):
        horario = self.request.get('horario')
        disponible = verificaDisponibilidadExample(horario)
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write('Total:<br/>')
        self.response.out.write(disponible)

class AgendaPaciente(webapp2.RequestHandler):
    """Book a patient appointment in a slot for the logged-in user.

    ``verificaDisponibilidad`` returns a pair used as (count, booked?) —
    presumably the patient count and a success flag; TODO confirm.
    """
    def post(self):
        horario = self.request.get('horario')
        descripcion = self.request.get('descripcion')
        folio = self.request.get('folio')
        # session[3] is the logged-in user's datastore key (see IniciaSesion).
        usuario = env.globals.get('session')[3]
        disponible = verificaDisponibilidad(horario,usuario,descripcion,folio)
        self.response.headers['Content-Type'] = 'text/html'
        if (disponible[1] == True):
            _despliegaExito(self,"El usuario ha agendado correctamente (No."+str(disponible[0])+")",'/verHorariosUsuario','/vistas/Exito.html')
        else:
            _despliegaError(self,"Agenda Llena ("+str(disponible[0])+" Pacientes), no es posible agendar",'/verHorariosUsuario','/vistas/Error.html')

class VerFormaCita(webapp2.RequestHandler):
    """Show the appointment-booking form for a schedule slot."""
    def get(self):
        horario = self.request.get('horario')
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaFormaCita(self,horario,'/vistas/Alumno/agendaForma.html')

class VerGruposUsuario(webapp2.RequestHandler):
    """List the groups the logged-in user belongs to."""
    def get(self):
        k=env.globals.get('session')
        key = k[3]
        usuario = db.get(key)
        grupos = usuario.grupos
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaGruposUsuario(self,usuario,grupos, '/vistas/Alumno/verGrupos.html')

class VerHorariosUsuario(webapp2.RequestHandler):
    """List the schedule slots the logged-in user may book into."""
    def get(self):
        usuario = env.globals.get('session')[3]
        horarios = getAgendaValida(usuario)
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaHorariosUsuario(self,horarios, '/vistas/Alumno/verHorarios.html')
#=================================== End of appointment workflow
#======================================= Period (Periodo) handlers
class AgregarPeriodo(webapp2.RequestHandler):
    """Form to create an academic period."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        _despliegaAgregarPeriodo(self, '/vistas/Periodo/agregarPeriodo.html')

class GrabarPeriodo(webapp2.RequestHandler):
    """Persist a new period; when flagged as current, first clear the
    current flag from every other period (only one may be current)."""
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        descripcion = self.request.get('descripcion')
        fechaInicio = self.request.get('fechaInicio')
        fechaFin = self.request.get('fechaFin')
        actual = self.request.get('actual')
        if actual == '1':
            esActual = True
            quitaActual()
        else:
            esActual = False
        fi = to_datetime(fechaInicio)
        ff = to_datetime(fechaFin)
        grabaPeriodo(descripcion,fi,ff,esActual)
        self.redirect('/verPeriodo')  # back to the period listing

class EliminarPeriodo(webapp2.RequestHandler):
    """Delete the period ``key`` and return to the listing."""
    def get(self):
        key = self.request.get('key')
        deletePeriodo(key)
        self.redirect('/verPeriodo')  # back to the period listing

class VerPeriodo(webapp2.RequestHandler):
    """List all academic periods."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        #horarios = getAllHorarios(self.request.get('key'))
        periodos = getAllPeriodos()
        _despliegaVerPeriodo(self,periodos, '/vistas/Periodo/verPeriodo.html')

class EditarPeriodo(webapp2.RequestHandler):
    """Edit form for the period identified by ``key``."""
    #@before_filter
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        periodoKey = self.request.get('key')
        periodo = getPeriodo(periodoKey)
        _despliegaEditaPeriodo(self, periodo, '/vistas/Periodo/editaPeriodo.html')

class GrabarCambiosPeriodo(webapp2.RequestHandler):
    """Persist edits to an existing period (same current-flag handling as
    ``GrabarPeriodo``)."""
    def post(self):
        self.response.headers['Content-Type'] = 'text/html'
        descripcion = self.request.get('descripcion')
        fechaInicio = self.request.get('fechaInicio')
        fechaFin = self.request.get('fechaFin')
        actual = self.request.get('actual')
        if actual == '1':
            esActual = True
            quitaActual()
        else:
            esActual = False
        fi = to_datetime(fechaInicio)
        ff = to_datetime(fechaFin)
        periodoKey = self.request.get('key')
        periodo = getPeriodo(periodoKey)
        updatePeriodo(periodo,descripcion,fi,ff,esActual)
        self.redirect('/verPeriodo')  # back to the period listing
"""
Views
"""
def _despliegaLogin(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaRegistraCita(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaFormaCita(self,horario, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'horario':horario}))
def _despliegaVerUsuarios(self, usuarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuarios': usuarios }))
def _despliegaBienvenida(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaRegistroAlumno(self, clinicas, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaRegistraUsuario(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaAgregaHorarioClinica(self, clinicas, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaMostrarHorariosClinica(self, horarios,clinica, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'horarios': horarios,'clinica':clinica }))
def _despliegaAgregarClinica(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
"""
Despliega la vista para agregar un grupo nuevo
"""
def _despliegaAgregarGrupo(self,clinica, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinica':clinica}))
def _despliegaAgregarHorario(self,grupo, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo}))
def _despliegaVerClinicas(self, clinicas, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'clinicas': clinicas }))
def _despliegaEditaUsuario(self, usuario, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario': usuario }))
"""
Vista de Grupos de una Clinica en Especial
"""
def _despliegaVerGrupos(self, clinica, grupos, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupos': grupos,'clinica':clinica}))
"""
Vista de Grupos de una Clinica en Especial
"""
def _despliegaVerHorarios(self, grupo, horarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo': grupo,'horarios':horarios}))
"""
Vista para editar Un grupo en especial
"""
def _despliegaEditaGrupo(self,clinica,grupo, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo,'clinica':clinica}))
"""
Vista para ver usuarios del sistema
"""
def _despliegaUsuariosAsignacion(self,usuarios, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuarios':usuarios}))
"""
Vista para ver clinicas para asignar
"""
def _despliegaClinicasAsignacion(self,usuario,clinicas,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario':usuario,'clinicas':clinicas}))
"""
Vista para ver grupos a asignar
"""
def _despliegaGruposAsignacion(self,usuario,clinica,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'usuario':usuario,'clinica':clinica}))
"""
Despliega un mensaje de Exito y la liga de retorno
"""
def _despliegaExito(self,mensaje,liga,templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'mensaje':mensaje,'liga':liga}))
def _despliegaError(self, mensaje, liga, templateFile):
    """Render an error message together with a return link."""
    context = {'mensaje': mensaje, 'liga': liga}
    self.response.out.write(env.get_template(templateFile).render(context))
"""
Vista para editar Un horario
"""
def _despliegaEditaHorario(self,grupo, horario, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupo':grupo,'horario':horario}))
"""
Vista de los Grupos a los que pertenece un usuario
"""
def _despliegaGruposUsuario(self,usuario,grupos, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({'grupos':grupos,'usuario':usuario}))
def _despliegaEditaClinica(self, clinica, templateFile):
    """Render the edit form for one clinic."""
    page = env.get_template(templateFile).render({'clinica': clinica})
    self.response.out.write(page)
def _despliegaHorariosUsuario(self, horarios, templateFile):
    """Render the schedules visible to the current user."""
    page = env.get_template(templateFile).render({'horarios': horarios})
    self.response.out.write(page)
"""
Vistas para manejo de periodos
"""
def _despliegaAgregarPeriodo(self, templateFile):
template = env.get_template(templateFile)
self.response.out.write(template.render({}))
def _despliegaVerPeriodo(self, periodos, templateFile):
    """Render the period list view."""
    page = env.get_template(templateFile).render({'periodos': periodos})
    self.response.out.write(page)
def _despliegaEditaPeriodo(self, periodo, templateFile):
    """Render the edit form for one period."""
    page = env.get_template(templateFile).render({'periodo': periodo})
    self.response.out.write(page)
# URL routing table. webapp2 matches routes top-down and dispatches to the
# FIRST match, so each path may appear only once. The previous revision
# listed '/grabaClinica', '/verClinicas', '/agregarClinica', '/agregarHorario'
# and '/cerrarSesion' twice; the duplicate entries were unreachable dead code
# and have been removed (each duplicate mapped to the same handler, so
# behavior is unchanged).
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/iniciaSesion', IniciaSesion),
    ('/bienvenida', Bienvenida),
    ('/verUsuarios', VerUsuarios),
    ('/registroAlumno', RegistroAlumno),
    ('/grabaAlumno', GrabaAlumno),
    ('/registraUsuario', RegistraUsuario),
    ('/grabaUsuario', GrabaUsuario),
    # User management
    ('/eliminaUsuario', EliminaUsuario),
    ('/editaUsuario', EditaUsuario),
    ('/guardaCambiosUsuario', GuardaCambiosUsuario),
    # Clinic management
    ('/verClinicas', VerClinicas),
    ('/agregarClinica', AgregarClinica),
    ('/agregaHorarioClinica', AgregaHorarioClinica),
    ('/grabaClinica', GrabaClinica),
    ('/editaClinica', EditaClinica),
    ('/eliminaClinica', EliminaClinica),
    # Group management
    ('/verGrupos', VerGrupos),
    ('/grabarGrupo', GrabarGrupo),
    ('/eliminarGrupo', EliminarGrupo),
    ('/agregarGrupo', AgregarGrupo),
    ('/editarGrupo', EditarGrupo),
    # Schedule management
    ('/verHorarios', VerHorarios),
    ('/grabarHorario', GrabarHorario),
    ('/eliminarHorario', EliminarHorario),
    ('/agregarHorario', AgregarHorario),
    ('/editarHorario', EditarHorario),
    # Period management
    ('/agregarPeriodo', AgregarPeriodo),
    ('/grabarPeriodo', GrabarPeriodo),
    ('/verPeriodo', VerPeriodo),
    ('/editarPeriodo', EditarPeriodo),
    ('/eliminarPeriodo', EliminarPeriodo),
    ('/grabarCambiosPeriodo', GrabarCambiosPeriodo),
    # Assignment workflow (three steps + save)
    ('/asignaUsuarios1', UsuariosAsignacion),
    ('/asignaUsuarios2', ClinicasAsignacion),
    ('/asignaUsuarios3', GruposAsignacion),
    ('/guardaAsignacion', GuardaAsignacion),
    # Patient agenda
    ('/agendaPaciente', AgendaPaciente),
    ('/agendaPacienteExample', AgendaPacienteExample),
    ('/verGruposUsuario', VerGruposUsuario),
    ('/verHorariosUsuario', VerHorariosUsuario),
    ('/verFormaCita', VerFormaCita),
    # Session
    ('/cerrarSesion', CerrarSesion),
], debug=True)
|
3,693 | 5cd9d4fe9889c4d53b50d86fa78ae84d0c242536 | import tensorflow as tf
import random
from tqdm import tqdm
import spacy
import ujson as json
from collections import Counter
import numpy as np
import os.path
nlp = spacy.blank("en")
def word_tokenize(sent):
    """Tokenize *sent* with the blank spaCy English pipeline; return the token texts."""
    return [tok.text for tok in nlp(sent)]
def convert_idx(text, tokens):
    """Map each token to its (start, end) character span inside *text*.

    Tokens are located left to right with str.find, so repeated tokens
    resolve to successive occurrences. Raises if any token is absent.
    """
    spans = []
    pos = 0
    for token in tokens:
        pos = text.find(token, pos)
        if pos < 0:
            print("Token {} cannot be found".format(token))
            raise Exception()
        end = pos + len(token)
        spans.append((pos, end))
        pos = end
    return spans
def process_file(filename, data_type, word_counter, char_counter, shuffle=False):
    """Read a TSV file of `question \\t answer \\t label` lines into examples.

    Tokenizes both fields, updates the shared word/char frequency counters
    (words are lower-cased; characters are not), and returns a list of
    example dicts with 1-based ids. Shuffles the examples when *shuffle*
    is True.
    """
    print("Generating {} examples...".format(data_type))
    examples = []
    total = 0
    with open(filename, "r") as fh:
        for l in fh:
            ques, ans, label = l.strip().split("\t")
            ques_tokens = word_tokenize(ques)
            ques_chars = [list(token) for token in ques_tokens]
            ans_tokens = word_tokenize(ans)
            ans_chars = [list(token) for token in ans_tokens]
            label = int(label)
            total += 1
            for token in ques_tokens:
                word_counter[token.lower()] += 1
                for char in token:
                    char_counter[char] += 1
            for token in ans_tokens:
                word_counter[token.lower()] += 1
                for char in token:
                    char_counter[char] += 1
            example = {"ans_tokens": ans_tokens,
                       "ans_chars": ans_chars, "ques_tokens": ques_tokens,
                       "ques_chars": ques_chars, "y": label, "id": total}
            examples.append(example)
    # BUG FIX: the original tested `if random:` -- `random` is the imported
    # module and is always truthy, so the data was shuffled unconditionally
    # and the `shuffle` parameter was silently ignored.
    if shuffle:
        random.shuffle(examples)
    print("{} questions in total".format(len(examples)))
    return examples
def get_embedding(counter, data_type, limit=-1, emb_file=None, size=None, vec_size=None, token2idx_dict=None):
    """Build an embedding matrix and token->index mapping from *counter*.

    Tokens with frequency > *limit* are kept. When *emb_file* is given,
    vectors are read from that (GloVe/fastText-style) text file; otherwise
    random N(0, 0.01) vectors of length *vec_size* are generated. Index 0 is
    reserved for the NULL (padding) token and index 1 for OOV; real tokens
    start at index 2. Returns (emb_mat, token2idx_dict).

    NOTE: when *token2idx_dict* is supplied it is mutated in place (NULL/OOV
    entries are overwritten), and row order in emb_mat follows its indices.
    """
    print("Generating {} embedding...".format(data_type))
    embedding_dict = {}
    # Vocabulary that survives the frequency cutoff.
    filtered_elements = [k for k, v in counter.items() if v > limit]
    if emb_file is not None:
        assert size is not None
        assert vec_size is not None
        with open(emb_file, "r", encoding="utf-8") as fh:
            for line in tqdm(fh, total=size):
                array = line.split()
                # The word may itself contain spaces: everything before the
                # last vec_size fields is the token, the rest is the vector.
                word = "".join(array[0:-vec_size])
                vector = list(map(float, array[-vec_size:]))
                if word in counter and counter[word] > limit:
                    embedding_dict[word] = vector
        print("{} / {} tokens have corresponding {} embedding vector".format(
            len(embedding_dict), len(filtered_elements), data_type))
    else:
        assert vec_size is not None
        # No pretrained file: draw small random vectors for every kept token.
        for token in filtered_elements:
            embedding_dict[token] = [np.random.normal(
                scale=0.01) for _ in range(vec_size)]
        print("{} tokens have corresponding embedding vector".format(
            len(filtered_elements)))
    NULL = "--NULL--"
    OOV = "--OOV--"
    # Indices 0 and 1 are reserved; real tokens are enumerated from 2 in
    # embedding_dict insertion order (unless a mapping was passed in).
    token2idx_dict = {token: idx for idx, token in enumerate(
        embedding_dict.keys(), 2)} if token2idx_dict is None else token2idx_dict
    token2idx_dict[NULL] = 0
    token2idx_dict[OOV] = 1
    embedding_dict[NULL] = [0. for _ in range(vec_size)]
    embedding_dict[OOV] = [0. for _ in range(vec_size)]
    idx2emb_dict = {idx: embedding_dict[token]
                    for token, idx in token2idx_dict.items()}
    # Dense matrix: row i is the vector for the token mapped to index i.
    emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]
    return emb_mat, token2idx_dict
def build_features_SemEval(config, examples, data_type, out_file, word2idx_dict, char2idx_dict, is_test=False):
    """Serialize *examples* as TFRecords to *out_file*.

    Each record holds fixed-size int32 index arrays for answer/question words
    and characters (padded with 0, truncated at the configured limits), the
    float label `y`, and the example id. Returns a meta dict with the number
    of records written.
    """
    # Sequence limits differ between train and test configurations.
    ans_limit = config.test_para_limit if is_test else config.para_limit
    ques_limit = config.test_ques_limit if is_test else config.ques_limit
    char_limit = config.char_limit
    def filter_func(example, is_test=False):
        # True when the example exceeds either length limit.
        return len(example["ans_tokens"]) > ans_limit or len(example["ques_tokens"]) > ques_limit
    print("Processing {} examples...".format(data_type))
    writer = tf.python_io.TFRecordWriter(out_file)
    total = 0
    total_ = 0
    meta = {}
    for example in tqdm(examples):
        total_ += 1
        # Length filtering is intentionally disabled: every example is kept.
        #if filter_func(example, is_test):
        #    continue
        total += 1
        # Zero-initialized buffers; unused tail positions stay 0 (= NULL pad).
        context_idxs = np.zeros([ans_limit], dtype=np.int32)
        context_char_idxs = np.zeros([ans_limit, char_limit], dtype=np.int32)
        ques_idxs = np.zeros([ques_limit], dtype=np.int32)
        ques_char_idxs = np.zeros([ques_limit, char_limit], dtype=np.int32)
        y = 0
        def _get_word(word):
            # Try several casings before falling back to the OOV index (1).
            for each in (word, word.lower(), word.capitalize(), word.upper()):
                if each in word2idx_dict:
                    return word2idx_dict[each]
            return 1
        def _get_char(char):
            # OOV characters also map to index 1.
            if char in char2idx_dict:
                return char2idx_dict[char]
            return 1
        for i, token in enumerate(example["ans_tokens"][:ans_limit]):
            context_idxs[i] = _get_word(token)
        for i, token in enumerate(example["ques_tokens"][:ques_limit]):
            ques_idxs[i] = _get_word(token)
        for i, token in enumerate(example["ans_chars"][:ans_limit]):
            for j, char in enumerate(token):
                if j == char_limit:
                    break
                context_char_idxs[i, j] = _get_char(char)
        for i, token in enumerate(example["ques_chars"][:ques_limit]):
            for j, char in enumerate(token):
                if j == char_limit:
                    break
                ques_char_idxs[i, j] = _get_char(char)
        label = example["y"]
        y = float(label)
        # Arrays are stored as raw bytes; the reader must know the shapes.
        record = tf.train.Example(features=tf.train.Features(feature={
            "ans_idxs": tf.train.Feature(bytes_list=tf.train.BytesList(value=[context_idxs.tostring()])),
            "ques_idxs": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ques_idxs.tostring()])),
            "ans_char_idxs": tf.train.Feature(bytes_list=tf.train.BytesList(value=[context_char_idxs.tostring()])),
            "ques_char_idxs": tf.train.Feature(bytes_list=tf.train.BytesList(value=[ques_char_idxs.tostring()])),
            "y": tf.train.Feature(bytes_list=tf.train.BytesList(value=[np.array([y]).tostring()])),
            "id": tf.train.Feature(int64_list=tf.train.Int64List(value=[example["id"]]))
        }))
        writer.write(record.SerializeToString())
    print("Build {} / {} instances of features in total".format(total, total_))
    meta["total"] = total
    writer.close()
    return meta
def save(filename, obj, message=None):
    """Serialize *obj* to *filename* as JSON, printing a progress line when *message* is given."""
    if message is not None:
        print("Saving {}...".format(message))
    with open(filename, "w") as out:
        json.dump(obj, out)
def preproSemEval(config):
    """End-to-end preprocessing for the SemEval data set.

    Reads train/dev/test TSV files, builds word and character vocabularies
    and embedding matrices (reusing cached *2idx mappings when present on
    disk), writes TFRecord feature files, and saves all artifacts to the
    paths named in *config*.
    """
    word_counter, char_counter = Counter(), Counter()
    # Counters are shared: the vocabulary covers all three splits.
    train_examples = process_file(
        config.SemEval_train_file, "train", word_counter, char_counter, shuffle=True)
    dev_examples = process_file(
        config.SemEval_dev_file, "dev", word_counter, char_counter)
    test_examples = process_file(
        config.SemEval_test_file, "test", word_counter, char_counter)
    word_emb_file = config.fasttext_file if config.fasttext else config.glove_word_file
    # Character embeddings are either pretrained GloVe chars or random.
    char_emb_file = config.glove_char_file if config.pretrained_char else None
    char_emb_size = config.glove_char_size if config.pretrained_char else None
    char_emb_dim = config.glove_dim if config.pretrained_char else config.char_dim
    word2idx_dict = None
    # Reuse a previously saved vocabulary mapping so indices stay stable
    # across runs.
    if os.path.isfile(config.word2idx_file):
        with open(config.word2idx_file, "r") as fh:
            word2idx_dict = json.load(fh)
    word_emb_mat, word2idx_dict = get_embedding(word_counter, "word", emb_file=word_emb_file,
                                                size=config.glove_word_size, vec_size=config.glove_dim, token2idx_dict=word2idx_dict)
    char2idx_dict = None
    if os.path.isfile(config.char2idx_file):
        with open(config.char2idx_file, "r") as fh:
            char2idx_dict = json.load(fh)
    char_emb_mat, char2idx_dict = get_embedding(
        char_counter, "char", emb_file=char_emb_file, size=char_emb_size, vec_size=char_emb_dim, token2idx_dict=char2idx_dict)
    build_features_SemEval(config, train_examples, "train",
                           config.train_record_file, word2idx_dict, char2idx_dict)
    dev_meta = build_features_SemEval(config, dev_examples, "dev",
                                      config.dev_record_file, word2idx_dict, char2idx_dict)
    test_meta = build_features_SemEval(config, test_examples, "test",
                                       config.test_record_file, word2idx_dict, char2idx_dict, is_test=True)
    save(config.word_emb_file, word_emb_mat, message="word embedding")
    save(config.char_emb_file, char_emb_mat, message="char embedding")
    save(config.dev_meta, dev_meta, message="dev meta")
    save(config.word2idx_file, word2idx_dict, message="word2idx")
    save(config.char2idx_file, char2idx_dict, message="char2idx")
    save(config.test_meta, test_meta, message="test meta")
    # NOTE(review): dev examples are saved under data/test.json -- looks like
    # a deliberate choice for inspection, but confirm the path is intended.
    save("data/test.json", dev_examples, message="test example")
|
3,694 | f10e20d5c409930d697c36d1897ebcb648511e27 | from typing import List
from fastapi import Depends, APIRouter
from sqlalchemy.orm import Session
from attendance.database import get_db
from attendance import schemas
from attendance.models import User
from attendance import crud
from attendance.dependency import get_current_user
# Router for the base-salary endpoints; mounted by the application with its
# own prefix. Authorization (HR-only access) is enforced inside the crud layer.
router = APIRouter()
@router.get("/salary/{user_id}", status_code=200)
def read_base_salary(user_id: int, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)):
return crud.get_base_salarys(db, user_id=user_id, current=current_user)
@router.post("/salary", status_code=201)
def create_base_salary(salary: schemas.BaseSalaryCreate, db: Session = Depends(get_db), current_user: User=Depends(get_current_user)):
return crud.create_base_salary(db, base_salary=salary, current=current_user)
|
3,695 | e93d5461a2604d3b8015489397c68e16d1cb222e | from manim import *
class SlidingDoorIllustration(Scene):
    """Animate a sliding door between a waiting room and a workspace."""

    def construct(self):
        # Two adjoining rooms drawn as blue rectangles.
        waiting = Rectangle(color=BLUE, stroke_width=8)
        waiting.shift(LEFT + DOWN)
        office = Rectangle(color=BLUE, stroke_width=8)
        office.next_to(waiting, RIGHT + UP, buff=0)
        office.shift(LEFT)
        waiting_label = Text("Waiting Room").move_to(waiting.get_center()).scale(0.5)
        office_label = Text("Workspace").move_to(office.get_center()).scale(0.5)
        # Red segment is the door track; the green segment is the movable panel.
        track = Line(office.get_corner(DL) + LEFT, waiting.get_corner(UR), color=RED, stroke_width=8)
        panel = Line(office.get_corner(DL), waiting.get_corner(UR), color=GREEN, stroke_width=8)
        self.add(waiting, office, waiting_label, office_label, track, panel)
        # Slide the door open, pause, then slide it closed again.
        self.play(panel.animate.shift(LEFT))
        self.wait()
        self.play(panel.animate.shift(RIGHT))
        self.wait()
|
3,696 | cbad5d6f381e788a2f064aac0a5d468f40b39c93 | import os, subprocess
# Point Flask at the app module and enable the debug reloader for this process.
os.environ['FLASK_APP'] = "app/app.py"
os.environ['FLASK_DEBUG'] = "1"
# for LSTM instead: https://storage.googleapis.com/jacobdanovitch/twtc/lstm.tar.gz
# Will have to change app.py to accept only attention_weights
# NOTE(review): subprocess.call blocks until the command exits, so `flask run`
# only starts after serve_model.sh returns -- confirm the script backgrounds
# its server itself.
subprocess.call('./serve_model.sh')
subprocess.call(['flask', 'run'])
|
3,697 | 51cdb41836415c08609ee6a6bcc3adbaf2533da4 | """
USERS MODEL
"""
from www import app
import mongoengine
import datetime
class User(mongoengine.Document):
    """Account document stored in the `users` collection.

    Holds credentials, activation/recovery tokens, profile fields and
    audit timestamps. `updated_at` is refreshed by a pre_save signal.
    """

    username = mongoengine.StringField(required=True)
    password = mongoengine.StringField(required=True)
    email = mongoengine.StringField(required=True)
    # One-shot token (and expiry) mailed out to activate a new account.
    active_hash = mongoengine.StringField(required=False, default=None)
    active_hash_expires = mongoengine.DateTimeField(required=False,
                                                    default=None)
    # Password-recovery token and expiry.
    recover_hash = mongoengine.StringField(required=False)
    recover_hash_expires = mongoengine.DateTimeField(required=False)
    active = mongoengine.BooleanField(required=True, default=False)
    locked = mongoengine.BooleanField(required=True, default=True)  # locked changes depending on user active or not
    first_name = mongoengine.StringField(required=False)
    last_name = mongoengine.StringField(required=False)
    show_as = mongoengine.StringField(required=False)
    date_of_birth = mongoengine.DateTimeField(required=False)
    # BUG FIX: pass the callable, not its result. `utcnow()` was evaluated
    # once at import time, so every new document was stamped with the
    # process start-up time instead of its own creation time.
    created_at = mongoengine.DateTimeField(required=True, default=datetime.datetime.utcnow)
    updated_at = mongoengine.DateTimeField(required=False, default=datetime.datetime.utcnow)
    meta = {
        'db_alias': app.config["DEFAULT_DATABASE_ALIAS"],
        'collection': 'users',
    }

    @classmethod
    def pre_save(cls, sender, document, **kwargs):
        """Signal hook: refresh `updated_at` just before every save."""
        document.updated_at = datetime.datetime.utcnow()


mongoengine.signals.pre_save.connect(User.pre_save, sender=User)
|
3,698 | 3d1e6be71f92910cdc9eb2bf60ea7f8f1187f706 | '''
This script will do auto-check in/out for ZMM100 fingerprint access control
device by ZKSoftware.
At my office, the manager uses an application to load data from the
fingerprint device. After he loads data, log in device's database is cleared.
So in my case, I write this script to automate checking in/out everyday.
Device is running linux with busybox, so I have access to ftpput, ftpget and
wget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db.
This is a sqlite3 database file. User info is in USER_INFO, user transactions
are in ATT_LOG table.
Procedure:
- telnet into the device
- ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server
- edit ZKDB.db file on server
- ftpget ZKDB.db from FTP server
'''
import argparse
import datetime
import os
import random
import sqlite3
import subprocess as spr
import sys
import telnetlib
def get_server_ip(device_ip):
    """Return the local IP address this machine would use to reach *device_ip*.

    Connecting a UDP socket sends no packets; it only makes the kernel pick
    the outgoing interface, whose address is then read back.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((device_ip, 80))
        return s.getsockname()[0]
    finally:
        # BUG FIX: the original never closed the socket, leaking a file
        # descriptor on every call.
        s.close()
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
    '''
    Transfer file from from_ip to to_ip via telnet.
    Use ftpput and ftpget.

    NOTE(review): the from_ip/to_ip parameters are never used -- the telnet
    session always targets the global DEVICE_IP and the FTP commands always
    target the global server_ip. This happens to be correct for both call
    sites (the device is always the telnet target), but the signature is
    misleading; confirm before relying on the parameters.
    '''
    # ====FTP Server====
    # pyftpdlib is installed on the fly if missing; NOTE(review): pip.main
    # is deprecated and installing at runtime may fail without network.
    try:
        import pyftpdlib
    except ImportError:
        import pip
        pip.main('install pyftpdlib'.split())
    # start pyftpdlib FTP server: anonymous with write permission, port 2121
    ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
    print('Server started')
    filename = os.path.basename(remote_file_path)
    # Log in to the device's busybox telnet (fixed root credentials).
    s = telnetlib.Telnet(DEVICE_IP)
    print(s.read_until(b'login: ').decode())
    s.write(b'root \n')
    print(s.read_until(b'Password: ').decode())
    s.write(b'solokey\n')
    if s.read_until(b'#'):
        # Confirm the database file actually exists on the device.
        s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
        files = s.read_until(b'#').decode()
        if filename in files:
            # Retry until the FTP transfer is not refused (server may still
            # be starting up).
            while True:
                if cmd == 'ftpput':
                    command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip,
                                                               filename,
                                                               remote_file_path),
                                    'utf-8')
                elif cmd == 'ftpget':
                    command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8')
                else:
                    raise ValueError('cmd must be `ftpput` or `ftpget`')
                s.write(command)
                ret = s.read_until(b'#').decode()
                if 'refused' not in ret:
                    print(ret)
                    break
    # stop pyftpdlib FTP server
    ftp_server.kill()
    print('Server killed')
def generate_verify_time(status='in', late=False):
    '''
    Build a plausible datetime.time for a check event.

    'in'  -> a random moment in the 10 minutes before 08:00
             (or around 08:15-08:20 when late=True)
    'out' -> a random moment in the 10 minutes after 17:00
    Raises ValueError for any other status.
    '''
    if status == 'in':
        if late:
            hour, minute = 8, random.randint(15, 20)
        else:
            hour, minute = 7, random.randint(50, 59)
    elif status == 'out':
        hour, minute = 17, random.randint(0, 10)
    else:
        raise ValueError('status must be `in` or `out`')
    return datetime.time(hour, minute, random.randint(0, 59))
def add_log(uid, date, status, late=False):
    '''
    Insert one check-in/out row into the ATT_LOG table of ZKDB.db.

    uid: User PIN
    date: follow format: dd/mm/yyyy - 14/01/2017
    status: 'in' is checking in, 'out' is checking out
    late: generate a late check-in time instead of a normal one
    '''
    # verify_type: 0 is password, 1 is fingerprint
    verify_type = 1
    if status == 'in':
        status = 0
        time = generate_verify_time('in', late=late)
    elif status == 'out':
        status = 1
        time = generate_verify_time('out')
    else:
        raise ValueError('status must be `in` or `out`')
    date = datetime.datetime.strptime(date, '%d/%m/%Y')
    combined = datetime.datetime.combine(date, time)
    verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
    with sqlite3.connect(DB) as conn:
        # BUG FIX: the original built the INSERT with str.format (SQL
        # injection prone, and it passed six arguments for four
        # placeholders); use a parameterized query instead.
        cur = conn.execute(
            'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
            'Status, Work_Code_ID, SEND_FLAG) VALUES (?, ?, ?, ?, 0, 0)',
            (uid, verify_type, verify_time, status))
        cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
        r = cur.fetchone()
        # NOTE(review): `r` is a 1-tuple, so the printed id shows as "(n,)";
        # kept as-is to match the original output.
        print_log(r, uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
    """Add one log per day for *uid* over the inclusive 'dd/mm/yyyy' range."""
    first = datetime.datetime.strptime(start, '%d/%m/%Y')
    last = datetime.datetime.strptime(end, '%d/%m/%Y')
    total_days = (last - first).days + 1
    for offset in range(total_days):
        day = first + datetime.timedelta(offset)
        add_log(uid, '{:%d/%m/%Y}'.format(day), status, late)
def delete_log(log_id):
    '''
    Delete the ATT_LOG row whose ID equals *log_id*.
    '''
    with sqlite3.connect(DB) as conn:
        # BUG FIX: parameterized query instead of str.format string-building
        # (SQL-injection prone if log_id ever comes from user input).
        conn.execute('DELETE FROM ATT_LOG WHERE ID=?', (log_id,))
        print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
    '''
    Returns logs of 'uid' from 'start_date' to 'end_date' (both inclusive).

    uid: User PIN
    start_date: follow format 14/01/2017
    end_date: follow format 15/01/2017
    Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
    '''
    start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    with sqlite3.connect(DB) as conn:
        # BUG FIX: parameterized query instead of interpolating uid with
        # str.format (SQL-injection prone).
        cur = conn.execute(
            'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
            'FROM ATT_LOG WHERE User_PIN = ?', (uid,))
        rows = cur.fetchall()
        ret = []
        for row in rows:
            log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
            # end_date is inclusive: accept anything up to midnight of the
            # following day (matches the original filter exactly).
            if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1):
                ret.append(row)
        return ret
def get_logs_by_date(uid, date):
    """Return all of *uid*'s logs on a single 'dd/mm/yyyy' *date* (thin wrapper over get_logs)."""
    return get_logs(uid, date, date)
def print_log(*log_row):
    '''
    Pretty print a log row.
    log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
    '''
    row_id, uid, _verify_type, verify_time, status = log_row
    # Translate the numeric status into a label; anything else prints raw.
    if status == 1:
        label = 'Check out'
    elif status == 0:
        label = 'Check in'
    else:
        label = status
    print('{}. {} {} at {}'.format(row_id, uid, label, verify_time))
def check_log_row(log_row):
    '''
    Each day must have exactly 2 logs.
    One for checking in, before 8:00:00
    One for checking out, after 17:00:00
    log_row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
    Return True if the row satisfies its rule, else print why and return False.
    '''
    in_time = datetime.time(8, 0, 0)
    out_time = datetime.time(17, 0, 0)
    # BUG FIX: the timestamp is at index -2 (Verify_Time) as produced by
    # get_logs; the original parsed log_row[2], which is the integer
    # Verify_Type and always raised TypeError.
    log_date = datetime.datetime.strptime(log_row[-2], '%Y-%m-%dT%H:%M:%S')
    status = log_row[-1]
    if status == 1 and log_date.time() < out_time:
        print('Early log on {}: {}'.format(log_date.date(), log_date))
        return False
    elif status == 0 and log_date.time() > in_time:
        print('Late log on {}: {}'.format(log_date.date(), log_date))
        return False
    else:
        return True
def check_log_by_date(uid, date):
    # TODO: unimplemented stub -- intended to validate all of *uid*'s logs on
    # *date* (presumably via get_logs_by_date + check_log_row); currently
    # always returns None.
    pass
def fix_logs(uid, start_date, end_date):
    '''
    Fix logs of uid from start_date to end_date ('dd/mm/yyyy', inclusive).
    A normalized day contains exactly 2 logs:
    One check in log before 8:00
    One check out log after 17:00
    Non-conforming or extra rows are deleted and regenerated.

    BUG FIX: the original applied a datetime format spec to the date
    *strings* and subtracted strings, so it crashed on first use. The dates
    are now parsed first, mirroring add_logs().
    '''
    first = datetime.datetime.strptime(start_date, '%d/%m/%Y')
    last = datetime.datetime.strptime(end_date, '%d/%m/%Y')
    day_count = (last - first).days + 1
    for current in (first + datetime.timedelta(i) for i in range(day_count)):
        date = '{:%d/%m/%Y}'.format(current)
        logs = get_logs_by_date(uid, date)
        if len(logs) == 2:
            # A compliant pair is kept; otherwise both rows are regenerated.
            if not check_log_row(logs[0]) or not check_log_row(logs[1]):
                delete_log(logs[0][0])
                delete_log(logs[1][0])
                add_log(uid, date, 'in')
                add_log(uid, date, 'out')
        elif len(logs) == 0:
            add_log(uid, date, 'in')
            add_log(uid, date, 'out')
        else:
            # Wrong number of rows: wipe the day and rebuild it.
            for log in logs:
                delete_log(log[0])
            add_log(uid, date, 'in')
            add_log(uid, date, 'out')
def main():
    """Parse CLI arguments, pull the DB from the device, apply the requested
    action per user, and push the DB back."""
    today = '{:%d/%m/%Y}'.format(datetime.date.today())
    parser = argparse.ArgumentParser()
    parser.add_argument('action', help='`get`, `checkin`, `checkout`, '
                        '`add` or `fix` logs', default='get')
    parser.add_argument('uids', help='User PINs', type=int, nargs='*')
    parser.add_argument('-d', '--date', help='Date', default=today)
    parser.add_argument('-r', '--range',
                        help='Range of date, ex. 01/01/2017-02/01/2017')
    parser.add_argument('--log', help='log id to delete')
    parser.add_argument('--late', help='Checkin late or not',
                        action='store_true')
    args = parser.parse_args()
    uids = args.uids
    date = args.date or today
    # A single --date collapses to a one-day range.
    if not args.range:
        start, end = date, date
    else:
        start, end = args.range.split('-')
    # Fetch the device database before editing it locally.
    transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
    for uid in uids:
        if args.action == 'get':
            logs = get_logs(uid, start, end)
            for log in logs:
                print_log(*log)
        elif args.action == 'checkin':
            add_logs(uid, start, end, 'in', late=args.late)
        elif args.action == 'checkout':
            add_logs(uid, start, end, 'out')
        elif args.action == 'add':
            # NOTE(review): add_log expects (uid, date, status) -- here `end`
            # is passed as the status, so `add` raises ValueError unless the
            # range happens to be 'in'/'out'. Looks broken; confirm intent.
            add_log(uid, start, end)
        elif args.action == 'fix':
            fix_logs(uid, start, end)
        elif args.action == 'delete':
            # NOTE(review): delete only uses --log, but still requires at
            # least one uid to enter this loop.
            delete_log(args.log)
        else:
            raise ValueError('Action must be `get`, `checkin`, `checkout`, '
                             '`fix` or `delete`')
    # Push the edited database back onto the device.
    transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
if __name__ == '__main__':
    # ====config====
    DEVICE_IP = '10.0.0.204'  # fingerprint device address; todo: find IP, input IP
    DB_PATH = '/mnt/mtdblock/data/ZKDB.db'  # database location on the device
    DB = os.path.basename(DB_PATH)  # local copy filename (ZKDB.db)
    # Local address the device can reach us on, used as the FTP server host.
    server_ip = get_server_ip(DEVICE_IP)
    main()
|
3,699 | 10fda09f47c292cb3dc901f42d38ead7757460f5 | import json
import requests
import boto3
import uuid
import time
# AWS access configuration: a named local credentials profile and the region.
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
# Clients used below: API Gateway for API keys, CloudFormation for exports.
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
    """Return the value of the first API Gateway key whose name contains *name_of_key* (None if absent)."""
    print('Discovering API Key')
    for entry in api.get_api_keys(includeValues=True)['items']:
        if name_of_key in entry['name']:
            return entry['value']
def get_url(name_of_stack):
    """Return the CloudFormation export named 'url-<stack>' (None if absent)."""
    print('Discovering Cloudformation Exports')
    wanted = 'url-{}'.format(name_of_stack)
    for export in cf.list_exports()['Exports']:
        if export['Name'] == wanted:
            return export['Value']
def post(url, key, data):
    """POST *data* as JSON to *url*, authenticating via the x-api-key header."""
    payload = json.dumps(data)
    request_headers = {'Content-type': 'application/json', 'x-api-key': key}
    return requests.post(url, data=payload, headers=request_headers)
if __name__ == "__main__":
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {
"input": [
str(uuid.uuid4()),
str(uuid.uuid4())
]
}
print(post(full_url, api_key, body))
time.sleep(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.