index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
2,600 | 033719313f92aaf3c62eb1b07a9aa08f13c7bb6e | #!/usr/bin/python
#
# Author: Johnson Kachikaran (johnsoncharles26@gmail.com)
# Date: 7th August 2016
# Google Drive API:
# https://developers.google.com/drive/v3/reference/
# https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/
"""
Includes functions to integrate with a user's Google drive. The results and implementation is based on the API
provided by the Google Drive API:
https://developers.google.com/drive/v3/reference/
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/
"""
import csv
import io
import os
import threading

from googleapiclient.http import MediaIoBaseDownload

from colorker.security import CredentialManager
from colorker.settings import STORAGE
def list_files(query=None, order_by=None, files=False, user_settings=None):
    """
    Lists resources in the user's Google Drive, expanding each resource name
    into a full path built from its ancestor folder names.

    :param str query: optional, Drive API "q" search expression
    :param str order_by: optional, Drive API "orderBy" expression
    :param bool files: optional, when True folders are excluded from the result
    :param dict user_settings: optional, credential settings; if omitted the
                               calling EngineThread must define them
    :return: list of dicts with keys id, name (full path), parents, mimeType
    :rtype: list
    """
    drive_service = CredentialManager.get_client_drive_service(user_settings)
    fields = 'nextPageToken, files(id, name, mimeType, fileExtension, parents)'
    resources, names, parents = [], {}, {}
    # Page through the listing; the first request and all follow-ups share one loop
    # instead of the duplicated request/collect code of the original.
    page_token = None
    while True:
        response = drive_service.files().list(
            orderBy=order_by, q=query, pageSize=1000, pageToken=page_token,
            fields=fields).execute(num_retries=3)
        for drive_file in response.get('files', []):
            names[str(drive_file['id'])] = str(drive_file['name'])
            parents[str(drive_file['id'])] = drive_file.get('parents', [])
            resources.append({'id': drive_file['id'], 'name': drive_file['name'],
                              'parents': [str(parent) for parent in drive_file.get('parents', [])],
                              'mimeType': drive_file['mimeType']})
        page_token = response.get('nextPageToken', None)
        if not page_token:
            break
    result = []
    for resource in resources:
        # Build the full path by walking up each ancestor chain.
        # NOTE(review): the original appended a multi-parent resource once per
        # parent, producing duplicate result entries; each resource is now
        # appended exactly once, after the path expansion.
        for parent in resource['parents']:
            path = str(names.get(parent, '')) + str('/') + str(resource['name'])
            while parents.get(parent, []):
                parent = str(parents[parent][0])
                path = str(names.get(parent, '')) + str('/') + path
            resource['name'] = path
        # The folder filter used to be copy-pasted into both branches; a single
        # condition expresses the same rule.
        if not files or resource['mimeType'] != 'application/vnd.google-apps.folder':
            result.append(resource)
    return result
def get_metadata(file_id, user_settings=None):
    """
    Fetches the metadata record for a single Drive file.

    :param str file_id: identifier of the file whose metadata is needed
    :param dict user_settings: optional, credential settings; if omitted this
                               must be invoked by an EngineThread that defines them
    :return: metadata including id, mimeType, size, parents, kind, fileExtension
             and webContentLink
    """
    service = CredentialManager.get_client_drive_service(user_settings)
    request = service.files().get(
        fileId=file_id, fields='id, mimeType, size, parents, kind, name, fileExtension, webContentLink')
    return request.execute(num_retries=3)
def get_file_contents(file_id, meta_err=False, user_settings=None):
    """
    Obtains the contents of a file as a list of dictionaries. File type of the
    requested file must be a csv or a Google fusion table.

    :param str file_id: the identifier of the file whose content is needed
    :param bool meta_err: optional, internal use only; when True an unsupported
                          file type raises instead of returning []
    :param dict user_settings: optional, credential settings; if omitted this
                               must be invoked by an EngineThread that defines them
    :return: list of dictionaries where each dictionary is a row in the file
    :rtype: list
    """
    metadata = get_metadata(file_id, user_settings)
    is_csv = metadata.get('fileExtension', None) == 'csv' or metadata.get('mimeType', None) == 'text/csv'
    if is_csv and metadata.get('webContentLink', None):
        drive_service = CredentialManager.get_client_drive_service(user_settings)
        if user_settings is None:
            user_settings = threading.current_thread().settings
        temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)
        if not os.path.exists(temp_dir_path):
            os.makedirs(temp_dir_path)
        # os.path.join avoids gluing the file name onto the directory name when
        # the configured path lacks a trailing separator (the old "+" concat did).
        file_path = os.path.join(temp_dir_path, str(file_id) + ".csv")
        if not os.path.exists(file_path):
            request = drive_service.files().get_media(fileId=file_id)
            fh = io.FileIO(file_path, mode='wb')
            try:
                downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024)
                done = False
                while done is False:
                    status, done = downloader.next_chunk()
            finally:
                # close even if a chunk download raises, so a partial file
                # handle is never leaked
                fh.close()
        # csv.reader handles quoting and embedded commas, unlike the previous
        # manual split(',') over raw bytes (which also produced "b'...'"
        # strings under Python 3 because the file was opened in binary mode).
        header, rows = [], []
        with open(file_path, 'r') as csv_file:
            for parsed in csv.reader(csv_file):
                if not header:
                    header = [heading.strip() for heading in parsed]
                else:
                    rows.append({header[index]: column.strip()
                                 for index, column in enumerate(parsed)})
        return rows
    elif metadata.get('mimeType', None) == 'application/vnd.google-apps.fusiontable':
        ft_service = CredentialManager.get_client_fusion_table_service(user_settings)
        query = ft_service.query()
        table = query.sql(sql='SELECT * FROM ' + str(file_id), hdrs=False).execute(num_retries=3)
        result_rows = []
        columns = [str(column) for column in table['columns']]
        for row in table['rows']:
            # `unicode` no longer exists on Python 3 (NameError); isinstance(str)
            # keeps the original intent of normalising text cells while leaving
            # numeric cells untouched.
            result_row = {columns[index]: (str(cell) if isinstance(cell, str) else cell)
                          for index, cell in enumerate(row)}
            result_rows.append(result_row)
        return result_rows
    elif meta_err:
        raise Exception('Unsupported file type for the file - ' + str(metadata['name'] + '.'))
    return []
|
2,601 | 94f50e371ef65e86d0d2d40a3ed16946f8811be3 | from django.apps import AppConfig
class TimestechConfig(AppConfig):
    """Django application configuration for the TimesTech app."""
    # Label Django uses to register this application in INSTALLED_APPS.
    name = 'TimesTech'
|
2,602 | b8957acb71d435a93b4397a24d3b5cf4b2a817f8 | # -*- coding: utf-8 -*-
'''
Created on 2014/07/24
@author: seigo
'''
from google.appengine.api import users
from google.appengine.ext import webapp
from MyModel import HistoricalTable, PollRating, Government
from datetime import datetime
hts = [["2014/7/1","集団的自衛権行使容認の閣議決定","http://www.47news.jp/47topics/e/254919.php"],["2014/3/18","ロシア、クリミアを編入","http://www.47news.jp/CN/201403/CN2014031801002413.html"],["2014/2/9","舛添氏が圧勝、東京都知事選","http://www.47news.jp/CN/201402/CN2014020901001630.html"],["2014/1/7","国家安全保障局を設置","http://www.47news.jp/CN/201401/CN2014010701001086.html"],["2013/12/26","安倍首相が靖国神社参拝","http://www.47news.jp/CN/201312/CN2013122601000987.html"],["2013/12/6","特定秘密保護法が成立","http://www.47news.jp/CN/201312/CN2013120601002724.html"],["2013/11/3","東北楽天がプロ野球日本一","http://www.47news.jp/CN/201311/CN2013110301002118.html"],["2013/10/1","消費税率引き上げ決定、4月8%","http://www.47news.jp/CN/201310/CN2013100101002292.html"],["2013/9/8","2020年東京五輪開催決定","http://www.47news.jp/CN/201309/CN2013090401001495.html"],["2013/7/21","参院選で自民圧勝、ねじれ解消","http://www.47news.jp/CN/201307/CN2013072101001638.html"],["2013/3/15","TPP交渉に参加表明","http://www.47news.jp/CN/201303/CN2013031501001566.html"],["2013/2/12","北朝鮮が3度目の核実験","http://www.47news.jp/CN/201302/CN2013021201001987.html"],["2013/1/16","アルジェリア人質事件発生","http://www.47news.jp/CN/201301/CN2013011601001649.html"],["2012/12/26","第2次安倍内閣発足","http://www.47news.jp/CN/201212/CN2012122601001577.html"],["2012/12/6","自公が政権奪還、衆院選","http://www.47news.jp/CN/201212/CN2012121601001041.html"],["2012/11/15","習近平新指導部発足、中国","http://www.47news.jp/CN/201211/CN2012111501001203.html"],["2012/11/6","オバマ米大統領が再選","http://www.47news.jp/CN/201211/CN2012110701000867.html"],["2012/10/1","新型輸送機オスプレイを沖縄配備","http://www.47news.jp/CN/201210/CN2012100101001335.html"],["2012/9/11","尖閣諸島の魚釣島など3島国有化","http://www.47news.jp/CN/201209/CN2012091101001254.html"],["2012/8/10","消費税増税法が成立、10%へ","http://www.47news.jp/CN/201208/CN2012081001002702.html"],["2012/6/27","東京電力を国有化、公的資金注入","http://www.47news.jp/CN/201206/CN2012062701001601.html"],["2011/12/19","北朝鮮の金正日総書記が死去発表","http://www.47news.jp/CN/201112/CN2011121901001386.html"],["2011/11/27","大阪ダブル選で「維新の会」勝利","http://www.47news.jp/CN/201111/CN2011112701001230.html"],["2011/10/20",
"リビアのカダフィ大佐が死亡","http://www.47news.jp/CN/201110/CN2011102001000912.html"],["2011/10/5","米アップル創業者ジョブズ氏死去","http://www.47news.jp/CN/201110/CN2011100601000102.html"],["2011/9/2","野田内閣が発足","http://www.47news.jp/CN/201109/CN2011090201000656.html"],["2011/8/19","円が戦後最高値更新、75円95銭","http://www.47news.jp/CN/201108/CN2011081901001116.html"],["2011/7/17","なでしこジャパン女子W杯初優勝","http://www.47news.jp/CN/201107/CN2011071801000025.html"],["2011/5/6","首相、浜岡原発停止要請","http://www.47news.jp/CN/201105/CN2011050601000847.html"],["2011/3/11","東日本大震災","http://www.47news.jp/CN/201103/CN2011031101000455.html"],["2011/2/22","NZ地震、日本人28人も死亡","http://www.47news.jp/CN/201104/CN2011040401001017.html"],["2011/1/31","民主党小沢一郎元代表を強制起訴","http://www.47news.jp/CN/201101/CN2011013101000352.html"],["2010/11/23","北朝鮮が韓国・延坪島砲撃","http://www.47news.jp/CN/201011/CN2010112301000213.html"],["2010/10/6","ノーベル化学賞に根岸、鈴木両氏","http://www.47news.jp/CN/201010/CN2010100601000811.html"],["2010/9/15","政府が為替介入、6年半ぶり","http://www.47news.jp/CN/201009/CN2010091501000138.html"],["2010/9/7","尖閣で中国漁船が巡視船に衝突","http://www.47news.jp/CN/201009/CN2010090701000382.html"],["2010/7/11","参院選で民主党大敗、ねじれ国会","http://www.47news.jp/CN/201007/CN2010071101000032.html"],["2010/6/8","鳩山首相退陣、菅内閣発足","http://www.47news.jp/CN/201006/CN2010060801000756.html"],["2010/5/28","普天間移設で日米合意","http://www.47news.jp/CN/201005/CN2010052801000165.html"],["2010/4/20","宮崎県で口蹄疫、被害拡大","http://www.47news.jp/CN/201004/CN2010042001000207.html"],["2009/11/20","デフレ宣言、3年5カ月ぶり","http://www.47news.jp/CN/200911/CN2009112001000267.html"],["2009/10/2","2016年五輪はリオ、東京落選","http://www.47news.jp/CN/200910/CN2009100201000542.html"],["2009/9/16","鳩山内閣発足","http://www.47news.jp/CN/200909/CN2009091601000915.html"],["2009/8/30","民主党圧勝で政権交代、衆院選","http://www.47news.jp/CN/200908/CN2009083001000015.html"],["2009/8/3","全国初の裁判員裁判、東京地裁","http://www.47news.jp/CN/200908/CN2009080301000461.html"],["2009/6/25","歌手M・ジャクソンさん急死","http://www.47news.jp/CN/200906/CN2009062601000067.html"],["2009/5/25","北朝鮮が2回目の核実
験","http://www.47news.jp/CN/200905/CN2009052501000261.html"],["2009/3/23","WBCで「侍ジャパン」が連覇","http://www.47news.jp/CN/200903/CN2009032401000025.html"],["2009/1/20","米、オバマ新政権が発足","http://www.47news.jp/CN/200901/CN2009012001000945.html"],["2008/10/31","田母神俊雄航空幕僚長を更迭","http://www.47news.jp/CN/200810/CN2008103101000632.html"],["2008/9/24","麻生内閣発足","http://www.47news.jp/CN/200809/CN2008092401000025.html"],["2008/9/15","リーマン・ショック","http://www.47news.jp/CN/200809/CN2008091501000215.html"],["2008/9/1","福田首相、退陣表明","http://www.47news.jp/CN/200809/CN2008090101000736.html"],["2008/7/7","北海道・洞爺湖サミット~9日","http://www.47news.jp/CN/200807/CN2008070901000704.html"],["2008/6/11","福田首相の問責決議が可決","http://www.47news.jp/CN/200806/CN2008061101000609.html"],["2008/5/12","中国・四川大地震","http://www.47news.jp/CN/200805/CN2008051201000871.html"],["2008/4/9","日銀総裁に白川副総裁が昇格","http://www.47news.jp/CN/200804/CN2008040901000924.html"],["2008/2/19","海自イージス艦が漁船と衝突","http://www.47news.jp/CN/200802/CN2008021901000329.html"],["2008/1/27","大阪府知事選で橋下徹氏初当選","http://www.47news.jp/CN/200801/CN2008012801000076.html"],["2007/11/28","防衛装備疑惑で前防衛次官を逮捕","http://www.47news.jp/CN/200711/CN2007112801000463.html"],["2007/11/2","テロ特措法期限切れ海自撤収命令","http://www.47news.jp/CN/200710/CN2007102901000620.html"],["2007/9/12","安倍首相が退陣。後任に福田氏","http://www.47news.jp/CN/200709/CN2007091201000426.html"],["2007/7/29","参院選で自民党が歴史的惨敗","http://www.47news.jp/CN/200707/CN2007072901000697.html"],["2007/5/28","松岡農相が自殺","http://www.47news.jp/CN/200705/CN2007052801000693.html"],["2007/5/14","改憲手続き定めた国民投票法成立","http://www.47news.jp/CN/200705/CN2007051401000231.html"]]
prs = [["2007/4/16","38.3 ","17.5 ","44.2 "],["2007/5/12","38.2 ","14.2 ","47.6 "],["2007/6/1","48.7 ","15.5 ","35.8 "],["2007/7/30","59.0 ","12.0 ","29.0 "],["2007/8/27","45.5 ","14.0 ","40.5 "],["2007/9/13","46.6 ","7.9 ","45.5 "],["2007/9/25","25.6 ","16.6 ","57.8 "],["2007/10/27","29.6 ","20.2 ","50.2 "],["2007/11/5","36.6 ","16.4 ","47.0 "],["2007/12/15","47.6 ","17.1 ","35.3 "],["2008/1/11","42.8 ","15.8 ","41.4 "],["2008/2/9","44.6 ","19.9 ","35.5 "],["2008/3/15","50.6 ","16.0 ","33.4 "],["2008/4/4","59.6 ","13.8 ","26.6 "],["2008/5/1","66.6 ","13.6 ","19.8 "],["2008/6/12","60.2 ","14.8 ","25.0 "],["2008/7/11","53.5 ","19.7 ","26.8 "],["2008/8/1","48.2 ","20.4 ","31.5 "],["2008/9/2","28.0 ","4.1 ","67.9 "],["2008/9/24","32.9 ","18.5 ","48.6 "],["2008/10/18","39.0 ","18.5 ","42.5 "],["2008/11/8","42.1 ","16.9 ","40.9 "],["2008/12/6","61.4 ","13.2 ","25.4 "],["2009/1/10","70.2 ","10.6 ","19.2 "],["2009/2/7","70.9 ","11.0 ","18.1 "],["2009/2/17","76.6 ","10.0 ","13.4 "],["2009/3/7","70.8 ","13.2 ","16.0 "],["2009/3/25","63.4 ","12.8 ","23.7 "],["2009/4/28","56.2 ","14.2 ","29.6 "],["2009/5/11","55.1 ","16.9 ","28.0 "],["2009/5/16","60.2 ","13.5 ","26.2 "],["2009/6/13","70.5 ","12.0 ","17.5 "],["2009/7/3","60.9 ","15.7 ","23.4 "],["2009/9/16","13.1 ","14.9 ","72.0 "],["2009/10/31","22.9 ","15.3 ","61.8 "],["2009/11/28","25.1 ","11.2 ","63.6 "],["2009/12/25","38.1 ","14.7 ","47.1 "],["2010/1/10","33.2 ","16.0 ","50.8 "],["2010/1/17","44.1 ","14.4 ","41.5 "],["2010/2/5","45.1 ","13.5 ","41.4 "],["2010/3/6","48.9 ","14.8 ","36.4 "],["2010/4/3","53.3 ","13.7 ","33.0 "],["2010/4/28","64.4 ","14.9 ","20.7 "],["2010/5/29","73.1 ","7.7 ","19.1 "],["2010/6/4","37.2 ","5.2 ","57.7 "],["2010/7/12","52.2 ","11.5 ","36.2 "],["2010/8/7","44.8 ","16.5 ","38.7 "],["2010/8/27","36.2 ","15.7 ","48.1 "],["2010/9/9","31.5 ","13.8 ","54.7 "],["2010/9/17","21.2 ","14.3 ","64.5 "],["2010/10/5","36.6 ","15.8 ","47.6 "],["2010/11/6","48.6 ","18.7 ","32.7 "],["2010/11/23","61.9 ","14.5 
","23.6 "],["2010/12/25","67.0 ","9.4 ","23.7 "],["2011/1/14","53.9 ","13.9 ","32.1 "],["2011/2/11","63.3 ","16.7 ","19.9 "],["2011/3/26","55.6 ","16.1 ","28.3 "],["2011/4/29","58.6 ","14.5 ","26.8 "],["2011/5/14","57.3 ","14.6 ","28.1 "],["2011/6/28","61.1 ","15.6 ","23.2 "],["2011/7/23","70.6 ","12.3 ","17.1 "],["2011/8/20","70.0 ","14.2 ","15.8 "],["2011/9/2","18.1 ","19.1 ","62.7 "],["2011/10/1","27.8 ","17.6 ","54.6 "],["2011/11/5","34.3 ","18.6 ","47.1 "],["2011/12/3","40.3 ","15.1 ","44.6 "],["2012/1/7","50.6 ","13.7 ","35.7 "],["2012/1/13","47.8 ","16.4 ","35.8 "],["2012/2/18","55.2 ","15.8 ","29.0 "],["2012/3/19","50.2 ","18.2 ","31.6 "],["2012/4/28","60.0 ","13.6 ","26.4 "],["2012/5/26","58.1 ","13.9 ","28.0 "],["2012/6/4","50.0 ","18.0 ","32.0 "],["2012/6/26","54.4 ","15.8 ","29.9 "],["2012/7/14","59.9 ","11.9 ","28.2 "],["2012/8/11","59.0 ","13.1 ","27.9 "],["2012/9/1","59.4 ","14.3 ","26.3 "],["2012/10/1","55.3 ","15.5 ","29.2 "],["2012/11/3","66.0 ","16.2 ","17.7 "],["2012/12/26","21.8 ","16.2 ","62.0 "],["2013/1/26","22.1 ","11.2 ","66.7 "],["2013/2/23","16.2 ","11.1 ","72.7 "],["2013/3/23","16.7 ","12.2 ","71.1 "],["2013/3/30","20.8 ","7.2 ","72.0 "],["2013/4/20","16.0 ","11.9 ","72.1 "],["2013/5/18","16.2 ","12.9 ","70.9 "],["2013/6/1","16.3 ","15.7 ","68.0 "],["2013/6/8","20.4 ","8.4 ","71.2 "],["2013/7/22","31.7 ","12.1 ","56.2 "],["2013/8/24","25.6 ","16.7 ","57.7 "],["2013/9/14","20.4 ","17.8 ","61.8 "],["2013/9/28","21.8 ","7.5 ","70.7 "],["2013/10/1","24.1 ","12.6 ","63.3 "],["2013/10/26","27.0 ","12.3 ","60.7 "],["2013/11/23","26.2 ","15.9 ","57.9 "],["2013/12/8","38.4 ","14.0 ","47.6 "],["2013/12/14","35.9 ","7.2 ","56.9 "],["2013/12/22","33.0 ","12.8 ","54.2 "],["2013/12/28","32.6 ","12.2 ","55.2 "],["2014/1/25","31.0 ","13.1 ","55.9 "],["2014/2/22","29.7 ","16.4 ","53.9 "],["2014/4/11","26.7 ","13.5 ","59.8 "],["2014/5/17","32.5 ","12.8 ","54.7 "],["2014/6/21","33.0 ","14.9 ","52.1 "],["2014/7/1","40.6 ","11.6 ","47.8 "]]
gos = [["野田","2011/09/02","2012/12/26"],["菅","2010/06/08","2011/09/02"],["鳩山","2009/09/16","2010/06/08"],["麻生","2008/09/24","2009/09/16"],["福田","2007/09/26","2008/09/24"],["安倍","2007/04/01","2007/09/26"]]
class initDATA(webapp.RequestHandler):
    """One-shot handler that seeds the datastore from the static tables above
    (hts -> HistoricalTable, prs -> PollRating, gos -> Government)."""

    def get(self):
        user = users.get_current_user()
        # identity comparison with None is the idiomatic form (was `== None`)
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in hts:
            # row layout: [date, title, url]
            htdate = datetime.strptime(ht[0], '%Y/%m/%d').date()
            obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
            obj1.save()
        for pr in prs:
            # row layout: [date, approval, unknown, disapproval] — note the
            # model fields map pr[3] -> approval_rate and pr[1] -> disapproval_rate,
            # preserved exactly from the original code.
            prdate = datetime.strptime(pr[0], '%Y/%m/%d').date()
            obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),
                              unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))
            obj2.save()
        for pgo in gos:
            # row layout: [name, begin, end]; a missing/invalid end date means
            # the government is still in office
            gosdate = datetime.strptime(pgo[1], '%Y/%m/%d').date()
            try:
                goedate = datetime.strptime(pgo[2], '%Y/%m/%d').date()
            except (ValueError, IndexError):
                # was a bare `except:` — narrow it to the failures a bad/missing
                # date string can actually produce
                goedate = None
            obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
            obj3.save()
class clearDATA(webapp.RequestHandler):
    """Handler that wipes all seeded entities from the datastore."""

    def get(self):
        user = users.get_current_user()
        # identity comparison with None is the idiomatic form (was `== None`)
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in HistoricalTable.all():
            ht.delete()
        for pr in PollRating.all():
            pr.delete()
        for pgo in Government.all():
            pgo.delete()
|
2,603 | 34d011727c93bb4c8ccf64017e7185717ef98667 | x=25
y=43
print(x&y)
print(x>>y)
print(x^y)
print(x|y) |
2,604 | e9c439eafac8fd689980ffcb562f3b5ee903dd56 | from pylab import *
def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
axes([0.025,0.025,0.95,0.95])
contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=cm.hot)
C = contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
clabel(C, inline=1, fontsize=10)
xticks([]), yticks([])
savefig('../figures/contour_ex.png',dpi=48)
show()
|
2,605 | 3d43bf0d0ca1df06b3647a33f88cee067eeff9f4 | from django.test import TestCase, Client
from accounts.models import Account
from .data import account
from rest_framework import status
class TestAccountRequests(TestCase):
    """Integration tests for the account registration and login endpoints."""

    def setUp(self):
        self.client = Client()
        self.superuser = Account.objects.create_superuser(**account)

    def test_register_admin(self):
        """Registering with valid data returns HTTP 200."""
        response = self.client.post('/account/register/', data=account,
                                    content_type='application/json')
        # BUG FIX: assertTrue(expected, actual) always passes because the
        # second argument is only the failure message — use assertEqual so the
        # status code is actually compared.
        self.assertEqual(status.HTTP_200_OK, response.status_code)

    def test_login(self):
        """Logging in with an existing account returns HTTP 200."""
        data = {
            'email': 'office@theoscoding.com',
            'password': 'Pwd1q2w3e',
        }
        Account.objects.create(**data)
        response = self.client.post('/account/login/', data=data,
                                    content_type='application/json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
|
2,606 | 498d07421d848332ad528ef3d3910d70312b5f55 | from Domain.Librarie import vanzare_obiect, get_id, get_titlu, get_gen, get_pret, get_tip_reducere
def inverse_create(lst_vanzari, id_carte):
    """Return a copy of the sale list without any sale whose id equals id_carte."""
    return [carte for carte in lst_vanzari if get_id(carte) != id_carte]
def get_by_id(id, lista):
    '''
    Looks up the sale with the given id in a list.

    :param id: id of the sale - string
    :param lista: list of sales
    :return: the sale with that id, or None when no such sale exists
    '''
    return next((vanzare for vanzare in lista if get_id(vanzare) == id), None)
def create(lst_vanzari, id_vanzare: int, titlu, gen, pret, tip_reducere, undo_list: list, redo_list: list):
    '''
    Creates a sale and returns the extended sale list (the input list is not mutated).

    :param lst_vanzari: current list of sales
    :param id_vanzare: id of the new sale; must not already exist
    :param titlu: title of the book in the sale
    :param gen: genre of the book in the sale
    :param pret: price of the sale
    :param tip_reducere: discount type; one of 'None', 'Gold', 'Silver'
    :param undo_list: undo stack; the pre-change list is pushed onto it
    :param redo_list: redo stack; cleared because a new change invalidates redo history
    :return: a new list containing the old sales plus the new one
    :raises TypeError: if tip_reducere is not a known discount type
    :raises ValueError: if a sale with id_vanzare already exists
    '''
    valid_discounts = ['None', 'Gold', 'Silver']
    if tip_reducere not in valid_discounts:
        raise TypeError('Tip reducere necunoscut.')
    if get_by_id(id_vanzare, lst_vanzari) is not None:
        raise ValueError(f'Exista deja o vanzare cu acest id {id_vanzare}.')
    vanzare_obiecte = vanzare_obiect(id_vanzare, titlu, gen, pret, tip_reducere)
    # push the untouched list for undo, then invalidate any pending redo
    undo_list.append(lst_vanzari)
    redo_list.clear()
    return lst_vanzari + [vanzare_obiecte]
def read(lst_vanzari, id_carte: int=None):
    """
    Reads a sale from the "database".

    :param lst_vanzari: list of sales
    :param id_carte: id of the sale to look up (the old docstring documented a
                     non-existent ``id_vanzare`` parameter and claimed the full
                     list was returned for None, which the code never did)
    :return: the sale whose id equals id_carte, or None if there is none
    """
    # ids are unique (enforced by create), so the first match is the only one
    for cartea in lst_vanzari:
        if get_id(cartea) == id_carte:
            return cartea
    return None
def update(lst_vanzari, new_vanzare, undo_list: list, redo_list: list):
    """
    Updates an existing sale (the input list is not mutated).

    :param lst_vanzari: list of sales
    :param new_vanzare: replacement sale; its id must already exist
    :param undo_list: undo stack; the pre-change list is pushed onto it
    :param redo_list: redo stack; cleared because a new change invalidates redo history
    :return: a new list with the matching sale replaced
    :raises ValueError: if no sale has the id of new_vanzare
    """
    if read(lst_vanzari, get_id(new_vanzare)) is None:
        # typo fix in the user-facing message: "Nu xista" -> "Nu exista"
        raise ValueError(f'Nu exista o vanzare cu id-ul {get_id(new_vanzare)} pe care sa o actualizam.')
    new_vanzari = [new_vanzare if get_id(vanzare) == get_id(new_vanzare) else vanzare
                   for vanzare in lst_vanzari]
    undo_list.append(lst_vanzari)
    redo_list.clear()
    return new_vanzari
def delete(lst_vanzare, id_carte: int, undo_list: list, redo_list: list):
    """
    Removes the sale whose book id equals id_carte (the input list is not mutated).

    :param lst_vanzare: list of sales
    :param id_carte: id of the book in the sale to remove
    :param undo_list: undo stack; the pre-change list is pushed onto it
    :param redo_list: redo stack; cleared because a new change invalidates redo history
    :return: a new list of sales without the matching entry
    :raises ValueError: if no sale has that id
    """
    if read(lst_vanzare, id_carte) is None:
        # typo fix in the user-facing message: "Nu xista" -> "Nu exista"
        raise ValueError(f'Nu exista o carte cu id-ul {id_carte} pe care sa o stergem.')
    new_vanzari = [carte for carte in lst_vanzare if get_id(carte) != id_carte]
    undo_list.append(lst_vanzare)
    redo_list.clear()
    return new_vanzari
|
2,607 | e7e9a53d4c41448521b324d51641a46827faa692 | from http import HTTPStatus
#from pytest_chalice.handlers import RequestHandler
import app
from chalice.test import Client
def test_index_with_url():
    """A request carrying a url parameter is answered with a 301 redirect."""
    with Client(app.app) as client:
        resp = client.http.get('/?url=https://google.com')
        assert resp.status_code == HTTPStatus.MOVED_PERMANENTLY
        assert resp.headers['Location'] is not None
def test_index_without_url():
    """A request without a url parameter gets the error body."""
    with Client(app.app) as client:
        assert client.http.get('/').body == b'Invalid or missing url'
def test_link_received_by_sns():
    """An SNS-delivered link event invokes the handler and reports the visit."""
    with Client(app.app) as client:
        with open('sns_message.txt') as f:
            event = client.events.generate_sns_event(message=f.read())
        # removed leftover debug code that dumped the event to /tmp/event.json
        # (with an inline `import json`) — a side effect a unit test should not have
        response = client.lambda_.invoke('handle_link_visit', event)
        assert response.payload['message'] == 'link visited'
2,608 | 4f3e297b6925f8d65aacaa59bb837e746747c33f | import torch
from torch import nn
import torch.nn.functional as F
class JointModel(nn.Module):
    def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers, encoder_shortcut, generator_shortcut, generator_transform,
                 num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional, word_attention_size,
                 context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional, context_attention_size, mlp_size,
                 num_label, pretrained_embedding):
        """Joint neural topic model (NGTM) + hierarchical attention network (HAN).

        The NGTM half encodes a bag-of-words vector into a d_t-dimensional
        topic representation and reconstructs the vocabulary distribution;
        the HAN half classifies a document via word- and context-level GRUs
        with attention. `pretrained_embedding`, when given, initialises the
        word embedding table.
        """
        super(JointModel, self).__init__()
        ## NGTM (topic model) components:
        self.d_v = d_v  # vocabulary size
        self.d_e = d_e  # dimensionality of encoder
        self.d_t = d_t  # number of topics
        self.encoder_layers = encoder_layers
        self.generator_layers = generator_layers
        self.generator_transform = generator_transform  # transform to apply after the generator
        self.encoder_shortcut = encoder_shortcut  # if True, dropout is applied after the encoder MLP
        self.generator_shortcut = generator_shortcut  # if True, generator adds a residual tanh path
        self.en1_fc = nn.Linear(self.d_v, self.d_e)
        self.en2_fc = nn.Linear(self.d_e, self.d_e)
        self.en_drop = nn.Dropout(0.2)
        self.mean_fc = nn.Linear(self.d_e, self.d_t)
        # self.mean_bn = nn.BatchNorm1d(self.d_t)
        self.logvar_fc = nn.Linear(self.d_e, self.d_t)
        # self.logvar_bn = nn.BatchNorm1d(self.d_t)
        self.generator1 = nn.Linear(self.d_t, self.d_t)
        self.generator2 = nn.Linear(self.d_t, self.d_t)
        self.generator3 = nn.Linear(self.d_t, self.d_t)
        self.generator4 = nn.Linear(self.d_t, self.d_t)
        self.r_drop = nn.Dropout(0.2)
        self.de = nn.Linear(self.d_t, self.d_v)  # decoder back to vocabulary space
        # self.de_bn = nn.BatchNorm1d(self.d_v)
        ## HAN (classifier) components:
        self.emb_size = emb_size
        self.word_rnn_size = word_rnn_size
        self.word_rnn_num_layer = word_rnn_num_layer
        self.word_rnn_bidirectional = word_rnn_bidirectional
        self.context_rnn_size = context_rnn_size
        self.context_rnn_num_layer = context_rnn_num_layer
        self.context_rnn_bidirectional = context_rnn_bidirectional
        self.num_label = num_label
        self.embedding = nn.Embedding(num_word, emb_size)
        self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,
                               num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
        # bidirectional GRUs double the output feature size
        word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size
        # word attention is projected through the topic space (d_t) before scoring
        self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)
        self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
        self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size, dropout=context_rnn_dropout,
                                  num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)
        context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size
        self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)
        self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),
                                        nn.LeakyReLU(),
                                        nn.Linear(mlp_size, num_label),
                                        nn.Tanh())
        if pretrained_embedding is not None:
            self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)
def encoder(self, x):
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
# mean = self.mean_bn(self.mean_fc(pi))
# logvar = self.logvar_bn(self.logvar_fc(pi))
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
    def sampler(self, mean, logvar, cuda):
        """Draw a latent sample via the reparameterization trick.

        NOTE(review): `.cuda(cuda)` hard-codes GPU execution, so this fails on a
        CPU-only setup — confirm whether a device-agnostic draw is wanted.
        NOTE(review): the scale is exp(logvar), i.e. `logvar` is effectively
        treated as log(sigma) rather than log-variance (which would need
        exp(0.5 * logvar)) — confirm the intended parameterization.
        """
        eps = torch.randn(mean.size()).cuda(cuda)
        sigma = torch.exp(logvar)
        # sample = mean + sigma * eps (in-place add for the mean shift)
        h = sigma.mul(eps).add_(mean)
        return h
def generator(self, h):
# temp = self.generator1(h)
# if self.generator_shortcut:
# r = F.tanh(temp) + h
# else:
# r = temp
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
    def decoder(self, r):
        """Map the topic representation r back to a distribution over the vocabulary."""
        # p_x_given_h = F.softmax(self.de_bn(self.de(r)))
        # NOTE(review): softmax is called without an explicit dim (deprecated);
        # PyTorch infers the axis from the input rank — confirm the intended axis.
        p_x_given_h = F.softmax(self.de(r))
        return p_x_given_h
def init_rnn_hidden(self, batch_size, level):
param_data = next(self.parameters()).data
if level == "word":
bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
layer_size = self.word_rnn_num_layer * bidirectional_multipier
word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()
return word_rnn_init_hidden
elif level == "context":
bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1
layer_size = self.context_rnn_num_layer * bidirectional_multipier
context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()
return context_rnn_init_hidden
else:
raise Exception("level must be 'word' or 'context'")
def continuous_parameters(self):
for name, param in self.named_parameters():
if not name.startswith("selector"):
yield param
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith("selector"):
yield param
    def forward(self, x, x_indices, input_list, length_list, cuda):
        """Run the topic model and the hierarchical attention classifier.

        Returns (mean, logvar, p_x_given_h, logit, word_attention_dict).
        NOTE(review): `x_indices` and `length_list` are accepted but never read
        in this body — confirm whether they are vestigial.
        """
        ### topic model path: encode -> sample -> generate -> reconstruct
        mean, logvar = self.encoder(x)  # batchsize*50
        h = self.sampler(mean, logvar, cuda)  # batchsize*50
        r = self.generator(h)  # batchsize*50
        p_x_given_h = self.decoder(r)  # batchsize*dv
        ### HAN classifier path
        num_utterance = len(input_list)  # one batch doucument_list
        _, batch_size = input_list[0].size()
        # word-level rnn over each utterance in turn
        word_rnn_hidden = self.init_rnn_hidden(batch_size, level="word")
        word_rnn_output_list = []
        # running per-word attention vectors, averaged across occurrences
        word_attention_dict = {}
        # de_weight = torch.zeros(self.d_v, self.d_t).cuda()
        # de_weight.copy_(self.de.weight.data)
        for utterance_index in range(num_utterance):
            word_rnn_input = self.embedding(input_list[utterance_index])
            word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)
            # project word states into topic space before scoring attention
            word_attention_weight = self.word_conv_attention_linear(word_rnn_output)
            # word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())
            batch_data = input_list[utterance_index]
            for word_i in range(len(batch_data)):  # word_i word
                for clause_i in range(len(batch_data[word_i])):  # clause_i data(batch)
                    word_index = int(batch_data[word_i, clause_i])  # word index
                    if word_index < self.d_v:
                        # running mean of the attention vector seen for this word id
                        if word_index in word_attention_dict:
                            word_attention_dict[word_index] = (word_attention_dict[word_index] + word_attention_weight[word_i, clause_i, :]) / 2
                        else:
                            word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]
            ## HAN: score, normalise over time (dim=0) and pool the word states
            word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)
            word_attention_weight = nn.functional.relu(word_attention_weight)
            word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)
            word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)
            word_rnn_output_list.append(word_rnn_last_output)
            # detach so gradients do not flow across utterance boundaries
            word_rnn_hidden = word_rnn_hidden.detach()
        # context-level rnn over the pooled utterance vectors
        context_rnn_hidden = self.init_rnn_hidden(batch_size, level="context")
        context_rnn_input = torch.stack(word_rnn_output_list, dim=0)
        context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)
        context_attention_weight = self.context_conv_attention_linear(context_rnn_output)
        context_attention_weight = nn.functional.relu(context_attention_weight)
        context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)
        context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)
        classifier_input = context_rnn_last_output
        logit = self.classifier(classifier_input)
        return mean, logvar, p_x_given_h, logit, word_attention_dict
2,609 | 6aeaa2ed01e0c0dac54cd8220c5da005fccc53e9 | '''
Created on 18/10/2012
@author: matthias
'''
import os
import errno
import uuid
import glob
import shutil
import sys
import subprocess
import time
import pickle
import common.pbs
def prepare_directories(options, extension, subversiondir=None):
    """Create the run directory datadir+extension, change into it, and return its path.

    If the directory already exists, its contents are moved into a fresh
    uuid-named backup directory under datadir. When `subversiondir` is given,
    `svn info` output for it is written to a `svninfo` file inside the run
    directory.

    :param options: dict with at least a 'datadir' entry
    :param extension: path suffix appended to datadir
    :param subversiondir: optional working copy to record svn info for
    :return: full path of the prepared directory (datadir + extension)
    """
    # extract datadir from options
    datadir = options['datadir']
    target = datadir + extension
    print("Creating directory {0:s}.".format(target))
    # recursively create directory
    try:
        os.makedirs(target)
    except OSError as err:
        # check if the error is because the dir exists
        if err.errno == errno.EEXIST:
            print("Directory \"{0:s}\" exists. Moving contents to backup dir.".format(target))
            # in that case we just create a unique directory and copy all the old stuff there
            olddir = datadir + '/' + str(uuid.uuid4()) + '.backup/'
            os.makedirs(olddir)
            # need to expand wildcards first
            for file in glob.glob(target + '*'):
                shutil.move(file, olddir)
        else:
            print("Error \"{0:s}\" while creating directory.".format(err.strerror))
            sys.exit(1)
    # change into dir
    os.chdir(target)
    # put in subversion information (if requested)
    if subversiondir:
        # `with` guarantees the handle is closed even if subprocess.call raises
        # (the original open/close pair leaked it on error)
        with open("svninfo", 'w') as infofile:
            subprocess.call(["svn", "info", subversiondir], stdout=infofile)
    # and return a path to the full dir
    return target
def runNode(options, executable, datadir, parameters):
    """Runs a job on the current machine.

    options: dict with 'fakeRun' (bool), 'timingFile', and -- for real runs --
        'outlog' and 'errlog' log file paths.
    executable: full path of the program to run.
    datadir: directory to change into before running.
    parameters: command-line argument string appended to the executable.

    The wall-clock runtime (1.0 in fake mode) is pickled to options['timingFile'].
    """
    command = executable + ' ' + parameters
    print("Command is {0:s}".format(command))
    # change to full directory
    os.chdir(datadir)
    # run only if it's not set to fake mode
    if not options['fakeRun']:
        # create files to capture output; `with` ensures they are closed even
        # if the subprocess call raises
        with open(options['outlog'], 'w') as outfile, open(options['errlog'], 'w') as errfile:
            # and time it
            ts = time.time()
            subprocess.call(command, stdout=outfile, stderr=errfile, shell=True)
            t = time.time() - ts
    else:
        print("Running (fakemode): {0:s} in directory {1:s}.".format(command, datadir))
        t = 1.
    # write timing information
    # Bug fix: pickle requires a binary-mode file object in Python 3,
    # so open with 'wb' instead of 'w'.
    with open(options['timingFile'], 'wb') as timingfile:
        pickle.dump(t, timingfile)
def run(options, executable, datadir, parameters):
    """Runs a job specified by executable. The name of the executable must contain the complete path.
    If options['pbs'] is set to True, the jobs will be submitted to the PBS scheduler."""
    # Dispatch either to the PBS scheduler or to a local run.
    if not options['pbs']:
        runNode(options, executable, datadir, parameters)
    else:
        common.pbs.run(options, executable, datadir, parameters)
|
2,610 | fbde00d727d7ea99d1a7704f46cb9850c8b210d7 | import pygame
import wave
import threading
import numpy as np
import pylab
import struct
import io
from PIL import Image
import sounddevice as sd
# 处理音频频谱
# voice.wav 格式:8000 rate 16bit 单声道
class SpectrumMap:
    """Render spectrogram / waveform images from a prerecorded WAV file.

    The file is expected to be 8000 Hz, 16-bit, mono (per the note above
    this class) -- TODO confirm against the actual asset.
    """
    def __init__(self):
        FILENAME = 'Sound/SoundResource/voice.wav'
        self.wavefile = wave.open(FILENAME, 'r')
        # Cache the WAV header parameters used when rendering.
        self.nchannels = self.wavefile.getnchannels()
        self.sample_width = self.wavefile.getsampwidth()
        self.framerate = self.wavefile.getframerate()
        self.numframes = self.wavefile.getnframes()
    def seek(self, frame: int):
        """Reposition the read pointer to an absolute frame index."""
        self.wavefile.setpos(frame)
    def map(self, count: int, clear: bool=True):
        """Read `count` frames and return a cropped spectrogram image."""
        if clear:
            pylab.plt.clf()
        y = np.zeros(count)
        for i in range(count):
            val = self.wavefile.readframes(1)
            # First two bytes = one signed 16-bit sample (single channel).
            left = val[0:2]
            try:
                v = struct.unpack('h', left)[0]
                y[i] = v
            except struct.error:
                # Past end of file: the sample stays 0.
                pass
        data = io.BytesIO()
        pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        # Crop away matplotlib's axes/margins, keeping only the plot area.
        crop = image.crop((81, 59, 575, 426))
        # crop = image
        return crop
    def raw(self, count, clear: bool=True):
        """Read `count` frames and return a cropped waveform plot image."""
        if clear:
            pylab.plt.clf()
        y = np.zeros(count)
        for i in range(count):
            val = self.wavefile.readframes(1)
            left = val[0:2]
            try:
                v = struct.unpack('h', left)[0]
                y[i] = v
            except struct.error:
                pass
        data = io.BytesIO()
        # y = abs(np.fft.fft(y) * self.nchannels)
        # Only the first half of the samples is plotted.
        y = y[:len(y)//2]
        # pylab.specgram(y, NFFT=1024, Fs=self.framerate, noverlap=900)
        pylab.plt.ylim(-32768, 32768)
        pylab.plot(range(count//2), y)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        crop = image.crop((81, 59, 575, 426))
        # crop = image
        return crop
    @staticmethod
    def blend(sp1, sp2, count: int):
        """Overlay sp1's spectrogram with sp2's waveform at 50% opacity."""
        im1 = sp1.map(count, clear=True)
        im2 = sp2.raw(count, clear=False)
        res = Image.blend(im1, im2, 0.5)
        return res
# 处理音频频谱 - 尝试实时录音
# 0 Microsoft 声音映射器 - Output, MME (0 in, 2 out)
# < 1 扬声器 (Realtek High Definition, MME (0 in, 2 out)
# 2 主声音驱动程序, Windows DirectSound (0 in, 2 out)
# 3 扬声器 (Realtek High Definition Audio), Windows DirectSound (0 in, 2 out)
# 4 扬声器 (Realtek High Definition Audio), Windows WASAPI (0 in, 2 out)
# 5 Speakers (Realtek HD Audio output), Windows WDM-KS (0 in, 6 out)
# 6 立体声混音 (Realtek HD Audio Stereo input), Windows WDM-KS (2 in, 0 out)
# 7 线路输入 (Realtek HD Audio Line input), Windows WDM-KS (2 in, 0 out)
# 8 FrontMic (Realtek HD Audio Front Mic input), Windows WDM-KS (2 in, 0 out)
# 9 麦克风 (Realtek HD Audio Mic input), Windows WDM-KS (2 in, 0 out)
# fs = 44100 # Hz
# length = 5 # s
# recording = sd.rec(frames=fs * length, samplerate=fs, blocking=True, channels=1)
class SpectrumMap2:
    """Render waveform images from audio captured live via sounddevice."""
    def __init__(self):
        devices = sd.query_devices()
        # Fallback input device index; replaced by the Realtek "stereo mix"
        # loopback device ('立体声混音') when one is present.
        device = 11
        for i in range(len(devices)):
            d = devices[i]
            if '立体声混音' in d['name']:
                device = i
        sd.default.device[0] = device
        print('采用', devices[device]['name'], '录音')
        self.nchannels = 1
        self.framerate = 44100
    def record(self, period: float):
        """Record `period` seconds of mono int16 audio; returns a 1-D array."""
        recording = sd.rec(frames=int(self.framerate * period),
                           samplerate=self.framerate, blocking=True, channels=self.nchannels, dtype='int16')
        return recording.reshape((recording.size, ))
    def map(self, ndata, clear: bool=True):
        """Return a spectrogram image rendered from the sample array."""
        if clear:
            pylab.plt.clf()
        y = ndata
        data = io.BytesIO()
        pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        # crop = image.crop((81, 59, 575, 426))
        crop = image
        return crop
    @staticmethod
    def raw(ndata, clear: bool=True):
        """Return a waveform plot image of the first half of the samples."""
        if clear:
            pylab.plt.clf()
        y = ndata
        count = len(ndata)
        data = io.BytesIO()
        # y = abs(np.fft.fft(y) * self.nchannels)
        y = y[:len(y)//2]
        pylab.plt.ylim(-32768, 32768)
        pylab.plot(range(count//2), y)
        pylab.savefig(data)
        data.seek(0)
        image = Image.open(data)
        # crop = image.crop((81, 59, 575, 426))
        crop = image
        return crop
    # @staticmethod
    # def blend(sp1, sp2, ndata):
    #     im1 = sp1.map(ndata, clear=True)
    #     im2 = sp2.raw(ndata, clear=False)
    #     res = Image.blend(im1, im2, 0.5)
    #     return res
    def fetch(self, period: float):
        """Record `period` seconds and return the rendered waveform image."""
        ndata = self.record(period)
        # im1 = self.map(ndata, clear=True)
        im2 = self.raw(ndata, clear=True)
        # res = Image.blend(im1, im2, 0.5)
        res = im2
        return res
class Sound:
    """Thin wrapper around pygame's music mixer for the theme song."""

    @staticmethod
    def load():
        """Initialise the mixer and load the background track."""
        pygame.mixer.init()
        pygame.mixer.music.load('Sound/SoundResource/world.execute(me);.mp3')

    @staticmethod
    def play():
        """Start (or restart) playback of the loaded track."""
        pygame.mixer.music.play()

    @staticmethod
    def pause():
        """Pause playback."""
        pygame.mixer.music.pause()

    @staticmethod
    def stop():
        """Stop playback entirely."""
        pygame.mixer.music.stop()
|
2,611 | 03156992355a756b2ae38735a98251eb611d4245 | import helper
__author__ = 'AdrianLeo'
# Smoke-test the helper module's greeting function.
helper.greeting("Hey, dummy")
|
2,612 | f7886f8d98ad0519f4635064f768f25dad101a3d | import numpy as np
import cv2 as cv
import random
import time
random.seed(0)
def displayImage(winName, img):
    """Show `img` in a window titled `winName`; blocks until a key is pressed.

    arguments:
    winName -- Name of display window
    img -- Source Image
    """
    cv.imshow(winName, img)
    cv.waitKey(0)
##############################################
# Task 1 ##########################
##############################################
def task_1_a():
    """Detect straight lines in shapes.png with OpenCV's HoughLines and draw them."""
    print("Task 1 (a) ...")
    img = cv.imread('../images/shapes.png')
    gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray_image, 50, 150)
    detected_lines = cv.HoughLines(edges, 1, np.pi/180, 10)
    # Bug fix: cv.HoughLines returns an array of shape (N, 1, 2); iterating
    # only detected_lines[0] drew just the first line. Walk all of them, and
    # guard against None (returned when nothing is detected).
    if detected_lines is not None:
        for detected in detected_lines:
            rho, theta = detected[0]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            # Extend each (rho, theta) line well past the image borders.
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*(a))
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*(a))
            cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)
    displayImage('1_a Hough transform - detected lines ', img)
def myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):
    """
    Your implementation of HoughLines
    :param img_edges: single-channel binary source image (e.g: edges)
    :param d_resolution: the resolution for the distance parameter
    :param theta_step_sz: the resolution for the angle parameter (degrees)
    :param threshold: minimum number of votes to consider a detection
    :return: list of detected lines as (theta, d) pairs and the accumulator

    Note: a line is appended every time its accumulator cell reaches the
    threshold, so strong lines can appear multiple times (as in the
    original implementation).
    """
    num_rho = int(np.linalg.norm(img_edges.shape) / d_resolution)
    theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))
    accumulator = np.zeros((int(180 / theta_step_sz), num_rho))
    # Precompute the trig tables once instead of per edge pixel.
    cos_t = np.cos(theta_array)
    sin_t = np.sin(theta_array)
    detected_lines = []
    width, height = img_edges.shape
    for x in range(width):
        for y in range(height):
            if img_edges[x, y]:
                for index_theta in range(len(theta_array)):
                    rho_value = x * cos_t[index_theta] + y * sin_t[index_theta]
                    # shift by num_rho/2 to avoid negative indices
                    index_rho = int(rho_value + num_rho / 2)
                    # guard against index overflow past the last rho bin
                    if index_rho >= num_rho:
                        continue
                    accumulator[index_theta, index_rho] += 1
                    if accumulator[index_theta, index_rho] >= threshold:
                        detected_lines.append((theta_array[index_theta], rho_value))
    return detected_lines, accumulator
def task_1_b():
    """Detect lines in shapes.png with the custom myHoughLines and draw them."""
    print("Task 1 (b) ...")
    img = cv.imread('../images/shapes.png')
    img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
    edges = cv.Canny( img_gray,50,150) # detect the edges
    # d resolution 1 px, theta step 2 degrees, vote threshold 50
    detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)
    # NOTE(review): the accumulator holds raw vote counts; imshow clips float
    # values above 1 to white -- consider normalising for display.
    cv.imshow("1_b Accumulator myHoughLines", accumulator)
    #print (len(detected_lines))
    for theta,rho in detected_lines:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        # extend each (rho, theta) line well past the image borders
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
    displayImage('1_b Hough transform - own implementation', img)
##############################################
# Task 2 ##########################
##############################################
def task_2():
    """Build the Hough accumulator for line.png (mean-shift peak finding is stubbed out)."""
    print("Task 2 ...")
    img = cv.imread('../images/line.png')
    img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
    edges = cv.Canny( img_gray,50,150,apertureSize = 3) # detect the edges
    theta_res = 1 # set the resolution of theta
    d_res = 1 # set the distance resolution
    _, accumulator = myHoughLines(edges, d_res, theta_res, 50)
    displayImage("task_2_ accumulator - mean shift", accumulator)
    #mean_shift(accumulator)
##############################################
# Task 3 ##########################
##############################################
def myKmeans(data, k, useDist = False):
    """
    Cluster `data` (N x 1 intensity or N x 3 colour rows, values in [0, 1])
    into k clusters.

    :param data: 2-D array, one sample per row
    :param k: number of clusters
    :param useDist: if True, add a normalised positional-distance penalty to
        the pixel-value cost when assigning points to centers
    :return: centers and list of indices that store the cluster index for each data point

    Note: `centers` holds *row indices into data*, not coordinate values.
    """
    centers = np.zeros((k, 1), dtype = int)
    index = np.zeros(data.shape[0], dtype=int)
    # NOTE(review): these membership lists are never reset between while-loop
    # iterations, so they accumulate assignments from every iteration --
    # confirm this is intended before relying on the cluster means.
    clusters = [[] for i in range(k)]
    # Convergence tolerance on the 0-255-scaled center movement; colour data
    # gets a looser threshold than grayscale.
    threshold = 0
    if data.shape[1] > 1:
        threshold = 20
    print('Threshold value = ' + str(threshold))
    print('-------------------------------------------------')
    # initialize centers using some random points from data
    # ....
    # Randomly initialize centers with pixel difference of greater than 0
    for idx in range(centers.shape[0]):
        randIdx = random.choice(range(data.shape[0]))
        centers[idx] = randIdx
    # Randomly initialize centers of different pixl values. Still buggy
    # start_time = time.time()
    # indices = np.arange(0,data.shape[0]).tolist()
    # for idx in range(centers.shape[0]):
    #     if len(indices) > 0:
    #         randIdx = random.choice(indices)
    #         delIndices = np.unique(np.where((data*255).astype('uint8') == (data[randIdx]*255).astype('uint8'))).tolist()
    #         if len(delIndices) > 0:
    #             for i in range(len(delIndices)):
    #                 try:
    #                     indices.remove(delIndices[i])
    #                 except ValueError:
    #                     print('Value not found')
    #             # print('Indices removed')
    #     else:
    #         randIdx = random.choice(range(data.shape[0]))
    #     centers[idx] = randIdx
    # end_time = time.time()
    # print('Center no' + str(idx+1) + ' added in ' + str(round(end_time - start_time,5)) + ' seconds')
    # To debug uncomment the following lines
    # Sometimes the pixel values of two cluster centroids are too close
    # Therefore, one of the clusters might end up not having any points at all
    # print('Initial centers:\n' + str(centers))
    # print('-------------------------------------------------')
    # centerVals = data[centers]
    # print('Pixel Values of initial centers:\n' + str(centerVals))
    # print('-------------------------------------------------')
    convergence = False
    iterationNo = 0
    start_time = time.time()
    while not convergence:
        # assign each point to the cluster of closest center
        # ...
        euclDist = 0
        centerVals = data[centers]
        for idx in range(data.shape[0]):
            if useDist:
                # Since data is a vector, distance is only the difference
                # Normalize the distance to keep it between 0 and 1
                # NOTE(review): this measures distance along the *flattened*
                # pixel index, not 2-D image coordinates -- confirm intended.
                euclDist = (centers - idx) / data.shape[0]
            # Cost per center: squared value difference (+ optional squared
            # positional distance); ties are broken at random.
            cost = np.square(data[idx] - centerVals) + np.square(euclDist)
            index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])
            clusters[index[idx]].append(idx)
        # update clusters' centers and check for convergence
        # ...
        convCounter = 0
        for idx in range(centers.shape[0]):
            if (len(clusters[idx]) > 0):
                if data.shape[1] == 1:
                    meanVal = np.mean(data[clusters[idx]])
                elif data.shape[1] == 3:
                    meanVal = np.mean(data[clusters[idx]], axis = 0)
                # Movement of the center, scaled back to 0-255 pixel units.
                diff = (np.abs(centerVals[idx] - meanVal)*255).astype('uint8')
                if (np.sum(diff) > threshold):
                    # indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])
                    # Re-center on a data point whose value matches the mean.
                    indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])
                    if indices.size > 0:
                        centers[idx] = np.random.choice(indices)
                    else:
                        # if no pixel with the mean value is found, choose another pixel in the cluster
                        # and continue
                        centers[idx] = np.random.choice(clusters[idx])
                else:
                    convCounter += 1
            else:
                convCounter += 1
        # converged only when every cluster's center stopped moving
        if convCounter == k:
            convergence = True
        iterationNo += 1
        print('iterationNo = ', iterationNo)
        print('-------------------------------------------------')
    end_time = time.time()
    print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time - start_time, 5)) + ' seconds')
    print('-------------------------------------------------')
    return index, centers
def task_3_a():
    """Segment the intensity image of flower.png with k-means for k in {2, 4, 6}."""
    print("Task 3 (a) ...")
    print('-------------------------------------------------')
    img = cv.imread('../images/flower.png')
    # Normalised float grayscale copy so k-means sees values in [0, 1].
    grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
    grayImg /= 255
    cv.imshow('Intensity Image', grayImg)
    for k in [2, 4, 6]:
        print('K = ' + str(k))
        print('-------------------------------------------------')
        grayVec = np.reshape(grayImg.copy(), (-1, 1))
        index, centers = myKmeans(grayVec, k)
        # Paint every pixel with the value of its cluster's center pixel.
        for kVal in range(k):
            members = np.where(index == kVal)[0]
            grayVec[members] = grayVec[centers[kVal]]
        cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.reshape(grayImg.shape))
        cv.waitKey(0)
    print('=================================================')
def task_3_b():
    """Segment the colour image of flower.png with k-means for k in {2, 4, 6}."""
    print("Task 3 (b) ...")
    print('-------------------------------------------------')
    img = cv.imread('../images/flower.png')
    # Normalised float copy so k-means sees values in [0, 1].
    imgFloat = img.copy().astype('float64')
    imgFloat /= 255
    cv.imshow('Color Image', imgFloat)
    for k in [2, 4, 6]:
        print('K = ' + str(k))
        print('-------------------------------------------------')
        imgVec = np.reshape(imgFloat.copy(), (-1, 3))
        index, centers = myKmeans(imgVec, k)
        # Paint every pixel with the colour of its cluster's center pixel.
        for kVal in range(k):
            members = np.where(index == kVal)[0]
            imgVec[members] = imgVec[centers[kVal]]
        cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape(imgFloat.shape))
        cv.waitKey(0)
    print('=================================================')
def task_3_c():
    """Segment flower.png intensities with k-means including the positional-distance cost."""
    print("Task 3 (c) ...")
    print('-------------------------------------------------')
    img = cv.imread('../images/flower.png')
    # Normalised float grayscale copy so k-means sees values in [0, 1].
    grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
    grayImg /= 255
    cv.imshow('Intensity Image', grayImg)
    for k in [2, 4, 6]:
        print('K = ' + str(k))
        print('-------------------------------------------------')
        grayVec = np.reshape(grayImg.copy(), (-1, 1))
        # useDist=True adds the scaled positional distance to the cost.
        index, centers = myKmeans(grayVec, k, useDist = True)
        for kVal in range(k):
            members = np.where(index == kVal)[0]
            grayVec[members] = grayVec[centers[kVal]]
        cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' + str(k), grayVec.reshape(grayImg.shape))
        cv.waitKey(0)
    print('=================================================')
##############################################
# Task 4 ##########################
##############################################
def task_4_a():
    """Normalised graph cut on a fixed 8-node example graph (nodes A..H)."""
    print("Task 4 (a) ...")
    print('-------------------------------------------------')
    D = np.zeros((8,8))
    W = np.array((
        [0, 1, 0.2, 1, 0, 0, 0, 0], # A
        [1, 0, 0.1, 0, 1, 0, 0, 0], # B
        [0.2, 0.1, 0, 1, 0, 1, 0.3, 0], # C
        [1, 0, 1, 0, 0, 1, 0, 0], # D
        [0, 1, 0, 0, 0, 0, 1, 1], # E
        [0, 0, 1, 1, 0, 0, 1, 0], # F
        [0, 0, 0.3, 0, 1, 1, 0, 1], # G
        [0, 0, 0, 0, 1, 0, 1, 0] # H
    )) # construct the W matrix
    for i in range(W.shape[0]):
        D[i,i] = np.sum(W[i,:]) # construct the D matrix
    '''
    ...
    your code ...
    ...
    '''
    # Normalised Laplacian: D^{-1/2} (D - W) D^{-1/2}
    invSqrtD = np.linalg.inv(np.sqrt(D))
    L = D - W
    op = np.matmul(np.matmul(invSqrtD,L),invSqrtD)
    _, _, eigenVecs = cv.eigen(op)
    # Assumes cv.eigen returns eigenvectors sorted by descending eigenvalue,
    # so the second-smallest is the second-to-last row -- TODO confirm.
    secMinEigenVec = eigenVecs[eigenVecs.shape[1]-2, :]
    C1 = 0
    C2 = 0
    # Split nodes by the sign of the Fiedler vector; C1/C2 accumulate the
    # volume (sum of degrees) of each side.
    for i in range(secMinEigenVec.shape[0]):
        if secMinEigenVec[i] < 0:
            C1 += D[i,i]
        else:
            C2 += D[i,i]
    print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))
    # Figure in pdf
    # 2.4 = total weight of edges crossing the partition (from the figure).
    minNormCut = (1/C1 + 1/C2) * 2.4
    print('Min Norm Cut = ' + str(minNormCut))
    print('=================================================')
##############################################
##############################################
##############################################
# Entry point: only task 4 (a) is currently enabled; uncomment to run others.
# task_1_a()
# task_1_b()
# task_2()
# task_3_a()
# cv.destroyAllWindows()
# task_3_b()
# cv.destroyAllWindows()
# task_3_c()
# cv.destroyAllWindows()
task_4_a()
2,613 | 2941ecde72325d46b5c3899d4b1a213daff67147 | from azureml.core.compute import AksCompute
# Bug fix: Workspace and ComputeTarget are used below but were never
# imported, so this script failed with a NameError.
from azureml.core import Workspace, ComputeTarget
from azureml.core.model import Model, InferenceConfig
from azureml.core.webservice import AksWebservice

# Placeholder deployment settings -- fill in real values before running.
workspace_name = ""
subscription_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
resource_group = "XXXXXXXXXXXXXXXXX"
workspace_region = "eastus2"
https_cert = "XXXXX"
aks_name = "XXXXXXX"
aks_service_name = 'XXXXXXXXX'

# Create (or reuse, thanks to exist_ok=True) the AzureML workspace.
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      exist_ok=True)
# Provision AKS cluster
prov_config = AksCompute.provisioning_configuration(vm_size="Standard_D14")
prov_config.enable_ssl(leaf_domain_label=https_cert)
# Create the cluster
aks_target = ComputeTarget.create(
    workspace=ws, name=aks_name, provisioning_configuration=prov_config
)
# How the scoring container is built: entry script, conda env, docker steps.
inference_config = InferenceConfig(runtime="python",
                                   entry_script="aml_app.py",
                                   conda_file="myenv.yml",
                                   extra_docker_file_steps='dockerfile'
                                   )
# Fixed-size (no autoscale) deployment: 3 replicas, 2 cores / 4 GB each.
aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
                                                    num_replicas=3,
                                                    cpu_cores=2,
                                                    memory_gb=4,
                                                    auth_enabled=False)
aks_service = Model.deploy(ws,
                           models=['aml_app.py'],
                           inference_config=inference_config,
                           deployment_config=aks_python_bot,
                           deployment_target=aks_target,
                           name=aks_service_name)
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
2,614 | de286b94e09db477e3d920a9eff1a299474baf20 | #!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
# Pre-simulated incident beam (Ei = 30 meV, 1e9 neutrons).
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
# ARCS instrument geometry (distances given as unit-bearing strings).
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'
# Sample rotation angle -- units not stated here; TODO confirm (radians?).
psi = -0.005846744654920276
# Matrix mapping (h, k, l) to momentum transfer Q.
hkl2Q = array([[-0.65520642, 0.93819023, 0. ],
[ 0.66340068, 0.4633009 , -0.80916512],
[-0.66340068, -0.4633009 , -0.80916512]])
# Detector pixel position; note the axis reordering in the call below.
pp = array([-0.88585691, 2.86622706, -0.61241657])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
# Moderator-to-pixel time -- presumably seconds; TODO confirm.
t_m2p = 0.0071883434093180376
Q = array([ 4.75696626, -3.03446862, 0.64836415])
E = 8.4494171829103024
hkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])
# Run the resolution computation for this (Q, E) point.
urc.run(
    beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
    Q, E, hkl_projection, Nbuffer=100000)
|
2,615 | 80969de6924ae5fe6bb8e7f1211e7aca28c63989 | # a little more thing to be done.
def eval_loop():
    """Repeatedly read a line, eval() it and print the result; 'done' exits.

    NOTE(review): eval() executes arbitrary Python, so this is only
    acceptable for an interactive, trusted user.
    """
    while True:
        expression = input('Please input: ')
        if expression == 'done':
            break
        print(eval(expression))
eval_loop()
2,616 | ee58ed68d2f3c43f9611f6c6e4cd2b99adcb43d2 | import bz2
import json
import os
from pyspark.context import SparkContext
from pyspark.accumulators import AccumulatorParam
import numpy as np
from scipy import spatial
import pandas as pd
import re
import operator
import csv
# Local cache of raw tweet files plus the output CSV paths.
CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL"
EDU_DATA = 'merged.csv'
TRAIN_FEAT_CSV = 'testFeat.csv'
TRAIN_LABS_CSV = 'testLabs.csv'
TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv'
FEATURE_NAMES_CSV = 'featureNames.csv'
# Single local Spark context shared by the whole pipeline.
sc = SparkContext('local', 'test')
# location_data = pd.read_csv('new_merged.csv')
class WordsSetAccumulatorParam(AccumulatorParam):
    """Spark accumulator that merges partial word sets via set union."""
    def zero(self, v):
        """The identity value: an empty set."""
        return set()
    def addInPlace(self, acc1, acc2):
        """Combine two partial sets into one."""
        return acc1 | acc2
# An accumulator used to build the word vocabulary
# An accumulator used to build the word vocabulary
class WordsDictAccumulatorParam(AccumulatorParam):
    """Spark accumulator merging word-frequency dicts by summing counts."""
    def zero(self, v):
        """The identity value: an empty frequency dict."""
        return dict()
    def addInPlace(self, acc1, acc2):
        """Fold acc2's counts into acc1 and return acc1."""
        for word, count in acc2.items():
            acc1[word] = acc1.get(word, 0) + count
        return acc1
# An accumulator used to build the word vocabulary
# vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam())
# Global word -> frequency accumulator, filled while combining tweets.
vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam())
# load Education census data
location_data = pd.read_csv(EDU_DATA)
# city name -> [fips, without_hsd, with_hsd, somecollege, bachelors]
area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))
# county name -> same education record
county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))
# (lat, lng) -> county name, for nearest-neighbour lookups
coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values}
# create a KD tree of known county center locations to be used to map a tweet coordinate to a county
latlon = list()
for index, row in location_data.iterrows():
    latlon.append([location_data['lat'][index], location_data['lng'][index]])
latlon = np.array(latlon)
latlonKDT = spatial.KDTree(latlon)
# function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education
# level distribution of that county
def mapToCounty(place, location, coordinates):
    """Resolve a tweet's place/location/coordinates to a county census record.

    Returns [fips, without_hsd, with_hsd, somecollege, bachelors] or None.
    Tries, in order: the place's city token, the free-text location's first
    token, then a nearest-neighbour lookup of the coordinates.
    """
    # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values}
    if place:
        place = (place.split(",")[0]).lower()
        # country = (place.split(",")[1]).lower()
        try:
            if area_dict[place]: return area_dict[place]
        except: None
    if location:
        location = (location.split(",")[0]).lower()
        try:
            if area_dict[location]: return area_dict[location]
        except: None
    if coordinates:
        # Index of the nearest county centre within 9 units.
        # NOTE(review): the KD tree is rebuilt on every call; the module-level
        # latlonKDT could be reused instead.
        closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1]
        try:
            closest = latlon[closestLoc]
        except:
            # query() returns len(latlon) when nothing is within the bound,
            # which makes the indexing above raise -- treat as "no county".
            return None
        # closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)
        # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.:
        # print(coordinates, closest, latlon[closest[1]])
        # return closest[0], closest[1]
        if coord_dict[closest[0], closest[1]]:
            county_k = coord_dict[(closest[0], closest[1])]
            return county_dict[county_k]
    return None
# Load Tweets from each file (.bz2 or .json)
def load_bz2_json(filename):
    """Parse tweets (one JSON object per line) from a .bz2 or plain file.

    Unparseable or empty lines are skipped; returns the decoded tweet dicts.
    """
    if '.bz2' in filename:
        with bz2.open(filename, 'rt') as f:
            lines = str(f.read()).split('\n')
    else:
        with open(filename) as f:
            # NOTE(review): str(f.readlines()) stringifies the *list* repr and
            # then splits on a literal backslash-n; this looks wrong for plain
            # text files -- confirm against the actual input format.
            lines = str(f.readlines()).split('\\n')
    num_lines = len(lines)
    tweets = []
    for line in lines:
        try:
            if line == "":
                num_lines -= 1
                continue
            tweets.append(json.loads(line))
        except:
            continue
    # print(filename, len(tweets))
    return tweets
# strip each tweet object and keep only whats necessary in a dictonary
def load_tweet(tweet, tweets_saved):
    """Strip a raw tweet dict down to the fields the pipeline needs.

    Returns (data, tweets_saved): `data` is the reduced dict (empty when a
    required field is missing) and `tweets_saved` the incremented counter.
    """
    try:
        # tweet_id = tweet['id']
        tweet_text = tweet['text']
        tweet_user_id = tweet['user']['id']
        tweet_user_location = tweet['user']['location']
        tweet_user_lang = tweet['user']['lang']
        # coordinates / place are optional -- default to None when absent
        try: tweet_coordinates = tweet['coordinates']['coordinates']
        except: tweet_coordinates = None
        try: tweet_place = tweet['place']['full_name']
        except: tweet_place = None
        # Resolve to a county FIPS code and its education distribution.
        map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates)
        if map_to_county:
            tweet_county = int(map_to_county[0])
            tweet_education_level = tuple(map_to_county[1:])
        else:
            tweet_county = None
            tweet_education_level = None
        # created_at = tweet['created_at']
    except KeyError:
        return {}, tweets_saved
    data = {'tweet_text': tweet_text,
            # 'tweet_id': tweet_id,
            'tweet_user_id': tweet_user_id,
            # 'tweet_user_location': tweet_user_location,
            'tweet_user_lang': tweet_user_lang,
            # 'tweet_place': tweet_place,
            # 'tweet_coordinates': tweet_coordinates,
            'tweet_county': tweet_county,
            'tweet_education_level': tweet_education_level}
    # 'date_loaded': datetime.datetime.now(),
    # 'tweet_json': json.dumps(tweet)}
    tweets_saved += 1
    return data, tweets_saved
# Word tokens: runs of letters/underscores plus simple punctuation.
wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE)
# Retweet prefixes, @mentions, and URLs to strip before tokenising.
httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE)
# Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text
# into meaningful words
def parseTweetText(tweet):
    """Strip retweet markers/mentions/URLs from tweet_text, then replace it
    in place with the list of extracted word tokens."""
    stripped = httpPattern.sub(r"", tweet['tweet_text'])
    tweet['tweet_text'] = wordPattern.findall(stripped)
    return tweet
# function to combine word lists and count frequency of each word locally
# function to combine word lists and count frequency of each word locally
def combineWordLists(x ,y):
    """reduceByKey combiner: fold two word lists (or a partially-combined
    dict and a list) into one word -> count dict.

    Side effect: every word seen is also counted into the global
    `vocabulary` accumulator.
    """
    global vocabulary
    if isinstance(x, dict):
        # x is already a combined partial result from a previous reduce step
        wordDict = x
        xny = y
    else:
        wordDict = dict()
        xny = x + y
    for w in xny:
        # vocabulary +=[w]
        vocabulary += {w: 1}
        try:
            wordDict[w] += 1
        except:
            wordDict[w] = 1
    return wordDict
# function to add words to the vocabulary and count frequency of each word globally
# function to add words to the vocabulary and count frequency of each word globally
def genVocabulary(x):
    """Map a (key, word-list) pair to (key, word -> count dict).

    Pairs whose value is already a dict (combined by reduceByKey) pass
    through unchanged; otherwise the words are counted locally and into the
    global `vocabulary` accumulator.
    """
    global vocabulary
    arr = x[1]
    if isinstance(arr, dict):
        return x
    else:
        wordDict = dict()
        for w in arr:
            vocabulary += {w: 1}
            try:
                wordDict[w] += 1
            except:
                wordDict[w] = 1
        x = (x[0],wordDict)
        return x
# read tweets from each file and parse them into dictionaries with only relevant data
def handle_file(filename):
    """Read one raw tweet file and return the list of stripped tweet dicts."""
    saved_count = 0
    records = []
    for raw_tweet in load_bz2_json(filename):
        record, saved_count = load_tweet(raw_tweet, saved_count)
        if record:
            records.append(record)
    return records
# filter only tweets that have text, land, education and are written in english
def filterTweets(tweet):
    """Return True only for tweets worth keeping downstream.

    A tweet passes when it has non-empty (raw or tokenised) text, its
    author's language is English, and both the county and the education
    level were resolved from the census data.
    """
    if not tweet['tweet_text']:
        return False
    if tweet['tweet_user_lang'] != 'en':
        return False
    if tweet['tweet_education_level'] is None or tweet['tweet_county'] is None:
        return False
    return True
# store all data into CSV files
# store all data into CSV files
def storeResults(traindata, vocab):
    """Write feature vectors and education-level labels to the CSV files.

    traindata: list of ((county, education_tuple), word -> count dict) rows.
    vocab: ordered list of (word, global_count); positions define the
        feature-vector columns.
    """
    # word -> column position in the feature vector
    columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))}
    with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile:
        trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        for row in traindata:
            edu = row[0][1]
            featDict = row[1]
            feats = np.zeros(len(columnIdx))
            for key in featDict:
                try:
                    feats[columnIdx[key]] = featDict[key]
                except:
                    # word was filtered out of the vocabulary (count <= 1)
                    continue
            trainFeatwriter.writerow(feats.tolist())
            trainLabswriter.writerow(list(edu))
            # third file: labels followed by the feature vector
            combList = list(edu) + feats.tolist()
            trainFeatLabswriter.writerow(combList)
# main function with all the Spark code
# main function with all the Spark code
def main():
    """Drive the Spark pipeline: load tweets, build features, write CSVs."""
    fileNames = sc.parallelize([])
    # generate a list of all files in the data directory
    for root, dirs, files in os.walk(CACHE_DIR):
        subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file))
        fileNames = sc.union([fileNames, subFileNames])
    # load all tweets and filter
    tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet))
    # clean, parse and filter tweets and map each to county and education level
    wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet))
    # set county and education level as the key for each tweet and keep only the text as value
    countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text']))
    # aggregate tweets based on county level and generate vocabulary
    countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z))
    tempRes = countyEduRdd.collect()
    # print(tempRes)
    print(len(tempRes))
    vocabRDD = sc.parallelize(vocabulary.value.items())
    # filter out words that only occur once in the entire dataset (mainly noise)
    vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)
    # print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1)))
    # most frequent words first
    vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True)
    # print("vocabulary = ", vocab)
    print("vocabulary size = ", len(vocab))
    storeResults(tempRes, vocab)
# Run the Spark pipeline only when executed as a script.
if __name__ == "__main__":
    main()
2,617 | f3644b42d1a6c87c6169f8d123dadf6cd209270c | name = ''
# Keep prompting until the user literally types "your name" (either case
# of the leading letter).
while name not in ('your name', 'Your name'):
    print('Please type your name.')
    name = input()
print('Thanks!')
|
2,618 | 497f56891670f635feff983058e86055e54be493 | """
- Define a new class Student which is derived from Human and has:
grade field.
do_hobby - print 'dancing' or some another hobby
"""
import andy.Lesson_7.exercise_1
class Student(andy.Lesson_7.exercise_1.Human):
    """A Human with a grade and a do_hobby method (per the module docstring)."""
    def __init__(self, firstname, lastname, grade):
        super().__init__(firstname, lastname)
        # e.g. a school year / class name
        self.grade = grade
    def do_hobby(self):
        """Return a sentence naming this student's hobby.

        Fix: the previous body returned an inappropriate hard-coded phrase
        instead of a hobby as required by the exercise spec above.
        """
        return self.full_name + " is dancing"
# Demo: create a student and show the hobby and grade.
a = Student("Artem", "Nizhnik", "Shkolnik")
print(a.do_hobby())
print(a.grade)
|
2,619 | 231a07e63e40f2e4d204cde76c52e64b922da1b8 | import socket
import time
class FileTransProgram(object):
    """Minimal client for a simple length-prefixed file-transfer protocol.

    recv(): client sends "Connect", server replies with the payload size in
    ASCII followed by the payload, client acknowledges with "Success".
    stor(): client sends the size, pauses briefly, sends the data, then
    waits for the "Success" acknowledgement.
    """
    def __init__(self, ADDR, file_name):
        self.ADDR = ADDR
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect(ADDR)
        self.file_name = file_name

    def recv(self):
        """Download the remote file into self.file_name, then close."""
        self.sock.send(bytes("Connect", "utf8"))
        # Bug fix: recv() returns bytes; convert to int before the numeric
        # comparisons below (bytes > int raises TypeError on Python 3).
        file_size = int(self.sock.recv(1024).strip())
        with open(self.file_name, "wb") as f:
            while file_size > 0:
                if file_size < 1024:
                    # Final (partial) chunk. NOTE(review): recv may return
                    # fewer bytes than requested, truncating the file --
                    # acceptable for this toy protocol.
                    f.write(self.sock.recv(1024))
                    break
                else:
                    f.write(self.sock.recv(1024))
                    file_size -= 1024
        self.sock.send(bytes("Success", "utf-8"))
        self.close()

    def stor(self):
        """Upload self.file_name: size first, then the raw data, then await ACK."""
        try:
            with open(self.file_name, "rb") as f:
                data = f.read()
                self.sock.send(bytes(str(len(data)), "utf-8"))
                # brief pause so the size and payload arrive as separate reads
                time.sleep(0.2)
                self.sock.send(data)
        except FileNotFoundError as e:
            raise e
        ACK = str(self.sock.recv(1024).strip(), "utf-8")
        if ACK == "Success":
            self.close()

    def close(self):
        """Close the underlying socket."""
        self.sock.close()
|
2,620 | df5c79c79d827b6b3de7ceb4b1e3c652c8956346 | input = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
output = """
t(Z) :- t0(Z).
t(Z) :- g(X,Y,Z), t(X), not t(Y).
t0(2).
g(5,1,3).
g(1,2,4).
g(3,4,5).
"""
|
2,621 | bee7f3acdb103f3c20b6149407854c83ad367a6b | #!/usr/bin/python3
"""
This module add a better setattr function
"""
def add_attribute(obj, name, value):
    """Bind obj.name = value, refusing objects that lack a __dict__."""
    if not hasattr(obj, "__dict__"):
        raise TypeError("can't add new attribute")
    setattr(obj, name, value)
|
2,622 | 92b71c67130cd37b2143fbd9ad71fe9a18b3f7e8 | import requests
from bs4 import BeautifulSoup
import time
# Ask which skill should be excluded from the matched listings.
print("Put some unfamiliar skills")
unfamilar_skills = input(">")
print(f"Filtering result for {unfamilar_skills}...\n")
def find_jobs():
    """Scrape timesjobs.com python listings and print recent ones that do
    not mention the user's unfamiliar skill (module-level `unfamilar_skills`)."""
    html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=').text
    soup = BeautifulSoup(html_text,'lxml')
    jobs = soup.find_all('li',class_='clearfix job-bx wht-shd-bx')
    for job in jobs:
        posted = job.find('span',class_='sim-posted').span.text
        # keep only listings posted "a few days ago"
        if("few" in posted):
            company_name = job.find('h3',class_='joblist-comp-name').text.replace(" ",'')
            skills = job.find('span',class_='srp-skills').text.replace(' ','')
            more_info = job.header.a['href']
            # skip listings that require the unfamiliar skill
            if unfamilar_skills not in skills:
                print(f'Company Name: {company_name.strip()}')
                print(f'Skills: {skills.strip()}')
                print(f"More Info: {more_info}")
                print("")
if __name__ == '__main__':
    # Scrape immediately, then repeat every `filter_time` minutes.
    # Fix: find_jobs() was previously also called once before the loop,
    # so the first scrape ran twice back to back.
    filter_time = 10
    while True:
        find_jobs()
        print(f"Waiting for {filter_time} minute")
        time.sleep(filter_time*60)
|
2,623 | d04e69c234f2887f5301e4348b4c4ec2ad3af7a2 | '''
Write the necessary code calculate the volume and surface area
of a cylinder with a radius of 3.14 and a height of 5. Print out the result.
'''
pi = 3.14159
r = 3.14
h = 5
# Volume of a cylinder: V = pi * r^2 * h
volume = pi*r**2*h
# Bug fix: total surface area of a closed cylinder is
# 2*pi*r^2 (both caps) + 2*pi*r*h (side); the lateral term
# was previously just r*h.
surface_area = 2*pi*r**2 + 2*pi*r*h
print(volume,surface_area)
2,624 | d61151859390ab1c907ac3753143312da434981e | '''4. Write a Python program to filter the positive numbers from a list.'''
lst = [1, -3, 4, -56, 7, 3, -8, -5, 2, 4, 9]
# Keep only the values strictly greater than zero.
New_list = [num for num in lst if num > 0]
print(New_list)
|
2,625 | fbb1254c7166fa2aa9cd8a0b9c6525dbe5b652a0 | #!/usr/bin/env python3
import argparse
import datetime
import io
import math
import os
import pathlib
import re
import struct
import subprocess
import sys
import xml.sax.saxutils
from typing import (Optional, List, Iterable)
_DEFAULT_TRACK_TYPE = 'Dashcam track'
class Arguments(object):
    """Parsed and validated command-line arguments for papago2gpx.

    All parsing and cross-option validation happens in ``__init__``;
    invalid combinations print an error and terminate the process with
    exit status 1.  Every value is exposed through a read-only property.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(
            prog='papago2gpx', description='Extract GPS data from MP4 video\
 files created by PAPAGO! dashcams, and format them into a GPX file.')
        parser.add_argument('input_paths', nargs='+',
                            help='The path to an input file or directory.',
                            metavar='INPUT_PATH')
        parser.add_argument('--name', help='The name of the GPX file to\
 output. Default to 16 deciaml digits representing the first GPS record time.',
                            metavar='NAME')
        parser.add_argument('--description', help='The description of the GPX\
 file to output.', metavar='DESCRIPTION')
        parser.add_argument('--author-name', help='The name of the author of\
 the GPX file to output.', metavar='AUTHOR_NAME')
        parser.add_argument('--author-email', help='The Email address of the\
 author of the GPX file to output.', metavar='AUTHOR_EMAIL')
        parser.add_argument('--copyright', help="The copyright holder of the\
 GPX file to output. Default to `AUTHOR_NAME'.", metavar='COPYRIGHT')
        parser.add_argument('--copyright-year', help="The copyright year of\
 the GPX file to output. Default to the year the file is created.",
                            metavar='COPYRIGHT_YEAR')
        parser.add_argument('--copyright-license', help='A link to an external\
 file containing license text.', metavar='LICENSE')
        parser.add_argument('--keywords', help='Keywords associated with the\
 GPX file to output.', metavar='KEYWORDS')
        parser.add_argument('--track-name', help='The name of the track.',
                            metavar='TRACK_NAME')
        parser.add_argument(
            '--track-comment', help='The comment of the track.',
            metavar='TRACK_COMMENT')
        parser.add_argument('--track-description', help="The description of\
 the track.", metavar='TRACK_DESCRIPTION')
        parser.add_argument(
            '--track-type', default=_DEFAULT_TRACK_TYPE,
            help=f"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.")
        parser.add_argument('--uniq', choices=['first', 'last'],
                            help='How to process different coordinates\
 recorded at the same timestamp. Default to an error.')
        parser.add_argument('--overwrite', action='store_true',
                            help='Allow to overwrite an existing file.')
        args = parser.parse_args()
        # Every input path must exist up front; fail fast otherwise.
        self._input_paths = []
        for input_path in args.input_paths:
            input_path = pathlib.Path(input_path)
            if not input_path.exists():
                print(f"{input_path}: File does not exist.", file=sys.stderr)
                sys.exit(1)
            self._input_paths.append(input_path)
        self._name = args.name
        self._description = args.description
        self._author_name = args.author_name
        self._author_email = args.author_email
        # Copyright holder falls back to the author name when present.
        self._copyright = args.copyright
        if self._copyright is None and self._author_name is not None:
            self._copyright = self._author_name
        self._copyright_year = args.copyright_year
        if self._copyright_year is not None and self._copyright is None:
            print("`--copyright-year' is specified, but `--copyright' is not.",
                  file=sys.stderr)
            sys.exit(1)
        # Copyright year defaults to the current local year.
        if self._copyright_year is None and self._copyright is not None:
            utc_now = datetime.datetime.now(datetime.timezone.utc)
            local_aware_now = utc_now.astimezone()
            self._copyright_year = local_aware_now.year
        self._copyright_license = args.copyright_license
        if self._copyright_license is not None and self._copyright is None:
            print("`--copyright-license' is specified, but `--copyright' is\
 not.", file=sys.stderr)
            sys.exit(1)
        self._keywords = args.keywords
        self._track_name = args.track_name
        self._track_comment = args.track_comment
        self._track_description = args.track_description
        # An explicitly empty `--track-type=' means "omit the element".
        self._track_type = args.track_type
        if self._track_type is None:
            self._track_type = _DEFAULT_TRACK_TYPE
        if self._track_type == '':
            self._track_type = None
        self._how_to_unique = args.uniq
        self._overwrite = args.overwrite

    @property
    def input_paths(self) -> List[pathlib.Path]:
        return self._input_paths

    @property
    def name(self) -> Optional[str]:
        return self._name

    @property
    def description(self) -> Optional[str]:
        return self._description

    @property
    def author_name(self) -> Optional[str]:
        return self._author_name

    @property
    def author_email(self) -> Optional[str]:
        return self._author_email

    @property
    def copyright(self) -> Optional[str]:
        return self._copyright

    @property
    def copyright_year(self) -> Optional[int]:
        return self._copyright_year

    @property
    def copyright_license(self) -> Optional[str]:
        return self._copyright_license

    @property
    def keywords(self) -> Optional[str]:
        return self._keywords

    @property
    def track_name(self) -> Optional[str]:
        return self._track_name

    @property
    def track_comment(self) -> Optional[str]:
        return self._track_comment

    @property
    def track_description(self) -> Optional[str]:
        return self._track_description

    @property
    def track_type(self) -> Optional[str]:
        return self._track_type

    @property
    def how_to_unique(self) -> Optional[str]:
        # `--uniq` is optional, so argparse leaves this as None when absent.
        return self._how_to_unique

    @property
    def overwrite(self) -> bool:
        return self._overwrite
class BrokenMp4FileError(RuntimeError):
    """Raised when the MP4 box structure itself is truncated or corrupt."""
    def __init__(self, message: str):
        super().__init__(message)
class GpsDataError(RuntimeError):
    """Raised when the embedded GPS data is missing or malformed."""
    def __init__(self, message: str):
        super().__init__(message)
class GpsDataBlockIndex(object):
    """One entry of the GPS index: the byte offset and size of a single
    `freeGPS ' data block inside the MP4 file.  Both must be positive."""

    def __init__(self, position: int, size: int):
        for label, value in (('position', position), ('size', size)):
            if value <= 0:
                raise ValueError(f"An invalid {label}: `{value}'.")
        self._position = position
        self._size = size

    @property
    def position(self) -> int:
        """Byte offset of the data block from the start of the file."""
        return self._position

    @property
    def size(self) -> int:
        """Size of the data block in bytes."""
        return self._size
def get_gps_data_block_indices(mp4_file: io.FileIO) -> List[GpsDataBlockIndex]:
    """Locate the `moov`/`gps ` box in *mp4_file* and return the index of
    GPS data blocks stored in it.

    The file position is advanced past the index.  Raises
    BrokenMp4FileError when the MP4 box structure is truncated, and
    GpsDataError when the GPS index is missing or malformed.
    """
    target_box_path = ['moov', 'gps ']
    # Walk top-level boxes; descend into a box only when it matches the
    # next component of the target path, otherwise skip over it.
    while True:
        box_size = mp4_file.read(4)
        if len(box_size) == 0:
            raise GpsDataError(
                f'{mp4_file.name}: Could not find any GPS data block index.')
        if len(box_size) < 4:
            error_position = format(mp4_file.tell() - len(box_size), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
 Expect the size of a box, but got EOF.')
        box_size = int.from_bytes(box_size, 'big')
        box_type = mp4_file.read(4)
        if len(box_type) < 4:
            error_position = format(mp4_file.tell() - len(box_type), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
 Expect the type of a box, but got EOF.')
        box_type = box_type.decode('UTF-8')
        # ISO BMFF: size 0 means "to end of file", size 1 means a 64-bit
        # size follows the type field.
        if box_size == 0:
            box_size = None
            next_position = None
        elif box_size == 1:
            box_size = mp4_file.read(8)
            if len(box_size) < 8:
                error_position = format(mp4_file.tell() - len(box_size),
                                        '#010x')
                raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
 Expect the size of a box, but got EOF.')
            box_size = int.from_bytes(box_size, 'big')
            next_position = mp4_file.tell() + box_size - 16
        else:
            next_position = mp4_file.tell() + box_size - 8
        if box_type == target_box_path[0]:
            target_box_path.pop(0)
            if len(target_box_path) == 0:
                break
        else:
            if next_position is None:
                raise GpsDataError(f'{mp4_file.name}: Could not find any GPS'
                                   ' data block index.')
            mp4_file.seek(next_position)
            if mp4_file.tell() != next_position:
                raise BrokenMp4FileError(f'{mp4_file.name}: The size of a box\
 is not equal to the actual one.')
    # The `gps ` box payload: a constant 257, a block count, then
    # (position, size) pairs of big-endian 32-bit integers.
    unknown = mp4_file.read(4)
    if len(unknown) < 4:
        error_position = format(mp4_file.tell() - len(unknown), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    unknown = int.from_bytes(unknown, 'big')
    if unknown != 257:
        error_position = format(mp4_file.tell() - 4, '#010x')
        raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect a\
 big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.")
    gps_data_block_count = mp4_file.read(4)
    if len(gps_data_block_count) < 4:
        error_position = format(mp4_file.tell() - len(gps_data_block_count),
                                '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')
    gps_data_block_indices = []
    for i in range(gps_data_block_count):
        position = mp4_file.read(4)
        if len(position) < 4:
            error_position = format(mp4_file.tell() - len(position), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' position of a GPS data block, but got EOF.')
        position = int.from_bytes(position, 'big')
        if position < 0:
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the\
 position of a GPS data block, but got an invalid value `{position}'.")
        size = mp4_file.read(4)
        if len(size) < 4:
            error_position = format(mp4_file.tell() - len(size), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' size of a GPS data block, but got EOF.')
        size = int.from_bytes(size, 'big')
        if size < 0:
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the\
 size of a GPS data block, but got an invalid value `{size}'.")
        # A zeroed pair is an unrecorded entry; warn and skip it.
        if position == 0 or size == 0:
            print(f'{mp4_file.name}: Warning: The index of GPS data blocks is\
 not recorded.', file=sys.stderr)
        else:
            gps_data_block_index = GpsDataBlockIndex(position, size)
            gps_data_block_indices.append(gps_data_block_index)
    if mp4_file.tell() != next_position:
        error_position = format(mp4_file.tell(), '#010x')
        # Bug fix: this previously referenced the undefined name
        # `mp4_file_path', raising NameError instead of GpsDataError.
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect EOF, but'
                           ' find additional data.')
    return gps_data_block_indices
def read_little_endian_single(mp4_file: io.FileIO) -> float:
    """Read 4 bytes from *mp4_file* and decode them as a little-endian
    IEEE 754 single-precision floating point number.

    Raises GpsDataError when fewer than 4 bytes remain.
    """
    data = mp4_file.read(4)
    if len(data) < 4:
        error_position = format(mp4_file.tell() - len(data), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a\
 little-endian single-precision floating point number, but got EOF.')
    # `struct` decodes every IEEE 754 bit pattern correctly.  The previous
    # hand-rolled decoder unconditionally ORed in the implicit leading
    # mantissa bit, so it mis-decoded 0.0, subnormals, infinities and NaN.
    return struct.unpack('<f', data)[0]
class Time(object):
    """A timestamp that is always stored timezone-aware, normalized to UTC."""

    def __init__(self, time: datetime.datetime):
        if time.tzinfo is None:
            raise ValueError(
                "Expect an aware `datetime' object, but got naive one.")
        self._time = time.astimezone(datetime.timezone.utc)

    def as_local_time(self) -> datetime.datetime:
        """Return this timestamp converted to the local timezone."""
        return self._time.astimezone()

    def __repr__(self) -> str:
        # strftime's %z yields `+HHMM'; insert the colon for ISO 8601.
        stamp = self._time.strftime("%Y-%m-%dT%H:%M:%S%z")
        return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', stamp)

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        return self._time == other._time
class Latitude(object):
    """A latitude in decimal degrees, restricted to [-90, 90]."""

    def __init__(self, degree: float):
        if degree < -90 or 90 < degree:
            # Bug fix: the original message was not an f-string, so the
            # literal text "{degree}" was raised instead of the value.
            raise ValueError(f"An invalid latitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Six decimal places (~0.1 m resolution), as GPX consumers expect.
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Longitude(object):
    """A longitude in decimal degrees, restricted to [-180, 180]."""

    def __init__(self, degree: float):
        if degree < -180 or 180 < degree:
            # Bug fix: the original message was not an f-string, so the
            # literal text "{degree}" was raised instead of the value.
            raise ValueError(f"An invalid longitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Six decimal places (~0.1 m resolution), as GPX consumers expect.
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Speed(object):
    """A speed expressed in meters per second (no range restriction)."""

    def __init__(self, meter_per_second: float):
        self._meter_per_second = meter_per_second

    def __repr__(self) -> str:
        return f'{self._meter_per_second:.2F}'
class Azimuth(object):
    """A compass bearing in decimal degrees, 0 <= degree < 360."""

    def __init__(self, degree: float):
        # Range check kept in this exact form (not De Morganized) so NaN
        # input behaves identically to the original implementation.
        if degree < 0 or 360 <= degree:
            raise ValueError(f"An invalid azimuth degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        return f'{self._degree:.2F}'
class TrackPoint(object):
    """One GPS fix extracted from a PAPAGO! MP4 file.

    Invariant enforced by the constructor: latitude/longitude are present
    exactly when status is 'A' (valid fix); they are None when status is
    'V' (no fix) or None (no date recorded).  Ordering and equality use
    only time/latitude/longitude, not the motion fields.
    """

    def __init__(self, time: Time, status: str, latitude: Optional[Latitude],
                 longitude: Optional[Longitude], speed: Speed,
                 azimuth: Azimuth, x_acceleration: int, y_acceleration: int,
                 z_acceleration: int):
        if (status == 'V' or status is None) != (latitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, latitude = {latitude}')
        if (status == 'V' or status is None) != (longitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, longitude = {longitude}')
        self._time = time
        self._status = status
        self._latitude = latitude
        self._longitude = longitude
        self._speed = speed
        self._azimuth = azimuth
        self._x_acceleration = x_acceleration
        self._y_acceleration = y_acceleration
        self._z_acceleration = z_acceleration

    @property
    def time(self) -> Time:
        return self._time

    @property
    def status(self) -> str:
        return self._status

    @property
    def latitude(self) -> Optional[Latitude]:
        return self._latitude

    @property
    def longitude(self) -> Optional[Longitude]:
        return self._longitude

    @property
    def speed(self) -> Speed:
        return self._speed

    @property
    def azimuth(self) -> Azimuth:
        return self._azimuth

    @property
    def x_acceleration(self) -> int:
        return self._x_acceleration

    @property
    def y_acceleration(self) -> int:
        return self._y_acceleration

    @property
    def z_acceleration(self) -> int:
        return self._z_acceleration

    @property
    def name(self) -> str:
        # Local timestamp compacted to 14 digits; used as the default GPX
        # file name.
        local_time = self._time.as_local_time()
        return local_time.strftime('%Y%m%d%H%M%S')

    def format_as_csv(self) -> str:
        """Render this point as one CSV row; missing fields stay empty."""
        if self._time is not None:
            local_time = self._time.as_local_time()
            result = local_time.strftime('%Y/%m/%d %H:%M:%S')
        else:
            result = ''
        status = self._status if self._status is not None else ''
        result += f',{status}'
        latitude = str(self._latitude) if self._latitude is not None else ''
        result += f',{latitude}'
        longitude = str(self._longitude) if self._longitude is not None else ''
        result += f',{longitude}'
        result += f',{self._speed}'
        result += f',{self._azimuth}'
        result += f',{self._x_acceleration}'
        result += f',{self._y_acceleration}'
        result += f',{self._z_acceleration}'
        return result

    def __repr__(self) -> str:
        latitude = str(self._latitude) if self._latitude is not None else ''
        longitude = str(self._longitude) if self._longitude is not None else ''
        return f'{self._time},{latitude},{longitude}'

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        return self._time == other._time and self._latitude == other._latitude\
            and self._longitude == other._longitude
class TrackSegment(object):
    """An ordered collection of TrackPoint objects forming one GPX
    <trkseg> element."""

    def __init__(self):
        # Points are kept in insertion order.
        self._track_points = []

    def append_track_point(self, track_point: TrackPoint) -> None:
        """Append *track_point* at the end of the segment."""
        self._track_points.append(track_point)

    def __len__(self) -> int:
        return len(self._track_points)

    def __iter__(self) -> Iterable[TrackPoint]:
        return iter(self._track_points)
# A 112-byte pattern of unknown meaning that may appear inside the padding
# area of a GPS data block; parse_mp4_file() below skips it when present.
_UNKNOWN_BYTES\
    = b'\x00\x21\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xBC\xC7\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x3C\xDB\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x18\xB5\x18\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xA0\xFE\x19\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x20\xF9\x1B\x00\x00\x00\x00\x00\x80\x01\x00\x00\x01\x00\x00\x00\
\xAC\xB3\x1C\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00'
def parse_mp4_file(mp4_file_path: pathlib.Path) -> List[TrackPoint]:
    """Parse one PAPAGO! MP4 file and return the track points recorded in
    its GPS data blocks.

    Each block is located through the index returned by
    `get_gps_data_block_indices` and validated field by field; any
    structural problem raises GpsDataError.  Block layout: a 4-byte
    big-endian size, the `freeGPS ' signature, an 88-byte little-endian
    record (timestamp, status, coordinates, speed, azimuth, 3-axis
    acceleration), then zero padding up to the indexed size.
    """
    track_points = []
    with open(mp4_file_path, 'rb') as mp4_file:
        gps_data_block_indices = get_gps_data_block_indices(mp4_file)
        for gps_data_block_index in gps_data_block_indices:
            mp4_file.seek(gps_data_block_index.position)
            if mp4_file.tell() != gps_data_block_index.position:
                error_position = gps_data_block_index.position
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' a GPS data block, but got EOF.')
            # The in-block size must agree with the index entry.
            large_block_size = mp4_file.read(4)
            if len(large_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(large_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 the size of a GPS data block, but got EOF.')
            large_block_size = int.from_bytes(large_block_size, 'big')
            if large_block_size != gps_data_block_index.size:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f'{mp4_file_path}:{error_position}: The\
 size of a GPS data block is not equal to the one stored in the index.')
            large_block_end = mp4_file.tell() - 4 + large_block_size
            signature = mp4_file.read(8)
            if len(signature) < 8:
                error_position = format(mp4_file.tell() - len(signature),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 the signature of a GPS data block, but got EOF.')
            signature = signature.decode('UTF-8')
            if signature != 'freeGPS ':
                error_position = format(mp4_file.tell() - 8, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 `freeGPS ' as the signature of a GPS data block, but got `{signature}'.")
            small_block_size = mp4_file.read(4)
            if len(small_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(small_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 the size of a GPS data block, but got EOF.')
            small_block_size = int.from_bytes(small_block_size, 'little')
            if small_block_size != 88:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 `88' as the size of a GPS data block, but got `{small_block_size}'.")
            small_block_end = mp4_file.tell() + small_block_size
            padding = mp4_file.read(32)
            if len(padding) < 32:
                error_position = format(mp4_file.tell() - len(padding),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' zero padding, but got EOF.')
            for j, b in enumerate(padding):
                if b != 0:
                    error_position = format(mp4_file.tell() - 32 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
            # Timestamp fields: little-endian 32-bit ints.  An all-zero
            # date means the dashcam had no GPS fix / clock yet.
            hour = mp4_file.read(4)
            if len(hour) < 4:
                error_position = format(mp4_file.tell() - len(hour), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the hour of time, but got EOF.')
            hour = int.from_bytes(hour, 'little')
            if hour < 0 or 24 <= hour:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 the hour of time, but got an invalid value `{hour}'.")
            minute = mp4_file.read(4)
            if len(minute) < 4:
                error_position = format(mp4_file.tell() - len(minute), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the minute of time, but got EOF.')
            minute = int.from_bytes(minute, 'little')
            if minute < 0 or 60 <= minute:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 the minute of time, but got an invalid value `{minute}'.")
            second = mp4_file.read(4)
            if len(second) < 4:
                error_position = format(mp4_file.tell() - len(second), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the second of time, but got EOF.')
            second = int.from_bytes(second, 'little')
            if second < 0 or 60 <= second:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 the second of time, but got an invalid value `{second}'.")
            year = mp4_file.read(4)
            if len(year) < 4:
                error_position = format(mp4_file.tell() - len(year), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the year of time, but got EOF.')
            year = int.from_bytes(year, 'little')
            if year == 0:
                error_position = format(mp4_file.tell() - 4, '#010x')
                if hour != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `hour != 0'.")
                if minute != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `minute != 0'.")
                if second != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `second != 0'.")
            else:
                # The year is stored as an offset from 2000.
                year += 2000
            month = mp4_file.read(4)
            if len(month) < 4:
                error_position = format(mp4_file.tell() - len(month), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the month of time, but got EOF.')
            month = int.from_bytes(month, 'little')
            if month == 0:
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `month == 0'.")
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif month < 1 or 12 < month:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 the month of time, but got an invalid value `{month}'.")
            day = mp4_file.read(4)
            if len(day) < 4:
                error_position = format(mp4_file.tell() - len(day), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the day of time, but got EOF.')
            day = int.from_bytes(day, 'little')
            if day == 0:
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `day == 0'.")
                assert(month == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif day < 1 or 31 < day:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 the day of time, but got an invalid value `{day}'.")
            if year == 0:
                assert(month == 0)
                assert(day == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
                time = None
            else:
                # NOTE(review): the recorded wall clock is interpreted in
                # the machine's local timezone — confirm against the
                # dashcam's clock settings.
                time = datetime.datetime.now(datetime.timezone.utc)
                time = time.astimezone()
                time = time.replace(
                    year=year, month=month, day=day, hour=hour, minute=minute,
                    second=second, microsecond=0)
                time = Time(time)
            # Status/latitude-type/longitude-type characters; when there is
            # no timestamp the block carries plain zero padding instead.
            if time is None:
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero-padding, but got EOF.')
                padding = int.from_bytes(padding, 'little')
                if padding != 0:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect"
                        f" zero-padding, but got `{padding}'.")
                status = None
                latitude_type = '0'
                longitude_type = '0'
            else:
                status = mp4_file.read(1)
                if len(status) < 1:
                    error_position = format(mp4_file.tell() - len(status),
                                            '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a status'
                        ' character, but got EOF.')
                status = status.decode('UTF-8')
                if status not in ('A', 'V'):
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect `A' or `V' as a status character, but got an invalid character\
 `{status}'.")
                latitude_type = mp4_file.read(1)
                if len(latitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(latitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
 Expect a latitude type, but got EOF.')
                latitude_type = latitude_type.decode('UTF-8')
                if status == 'A':
                    if latitude_type not in ('N', 'S'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `N' or\
 `S' as a latitude type, but got an invalid character `{latitude_type}'.")
                else:
                    assert(status == 'V')
                    if latitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect `0' as a latitude type, but got an invalid character\
 `{latitude_type}'.")
                longitude_type = mp4_file.read(1)
                if len(longitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(longitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
 Expect a longitude type, but got EOF.')
                longitude_type = longitude_type.decode('UTF-8')
                if status == 'A':
                    if longitude_type not in ('E', 'W'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `E' or\
 `W' as a longitude type, but got an invalid character `{longitude_type}'.")
                else:
                    assert(status == 'V')
                    if longitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect `0' as a longitude type, but got an invalid character\
 `{longitude_type}'.")
                padding = mp4_file.read(1)
                if len(padding) < 1:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                if padding[0] != 0:
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    byte = format(padding[0], '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
            # Coordinates are stored as floats in DMM (degree*100 + minute)
            # format when there is a valid fix; zero padding otherwise.
            if status == 'A':
                latitude_dmm = read_little_endian_single(mp4_file)
                latitude_degree = math.floor(latitude_dmm / 100)
                if latitude_degree < 0 or 90 < latitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_minute = latitude_dmm - latitude_degree * 100
                if latitude_minute < 0 or 60 <= latitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_degree += latitude_minute / 60
                latitude = Latitude(latitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
                latitude = None
            if status == 'A':
                longitude_dmm = read_little_endian_single(mp4_file)
                longitude_degree = math.floor(longitude_dmm / 100)
                if longitude_degree < 0 or 180 < longitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect a longitude in DMM format, but got an invalid value\
 `{longitude_dmm}'.")
                longitude_minute = longitude_dmm - longitude_degree * 100
                if longitude_minute < 0 or 60 <= longitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect a longitude in DMM format, but got an invalid value\
 `{longitude_dmm}'.")
                longitude_degree += longitude_minute / 60
                longitude = Longitude(longitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
                longitude = None
            speed = read_little_endian_single(mp4_file)
            # Presume that speed is recorded in knots.
            speed *= (1852 / 3600)
            speed = Speed(speed)
            azimuth = read_little_endian_single(mp4_file)
            if azimuth < 0 or 360 <= azimuth:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
 azimuth degree, but got an invalid value `{azimuth}'.")
            azimuth = Azimuth(azimuth)
            x_acceleration = mp4_file.read(4)
            if len(x_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(x_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' X-axis acceleration, but got EOF.')
            x_acceleration = int.from_bytes(
                x_acceleration, 'little', signed=True)
            y_acceleration = mp4_file.read(4)
            if len(y_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(y_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Y-axis acceleration, but got EOF.')
            y_acceleration = int.from_bytes(
                y_acceleration, 'little', signed=True)
            z_acceleration = mp4_file.read(4)
            if len(z_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(z_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Z-axis acceleration, but got EOF.')
            z_acceleration = int.from_bytes(
                z_acceleration, 'little', signed=True)
            if mp4_file.tell() != small_block_end:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 the end of a GPS data block, but got additional data.')
            # Validate the trailing padding; a known (but undeciphered)
            # byte pattern may appear at offset 420 and is tolerated.
            padding_size = large_block_end - small_block_end
            if padding_size < 532:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 more than or equal to 532-byte padding, but got only {padding_size}-byte\
 padding.')
            padding = mp4_file.read(padding_size)
            if len(padding) < padding_size:
                error_position = format(
                    mp4_file.tell() - len(padding), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
 {padding_size}-byte padding, but got EOF.')
            for j, b in enumerate(padding[:420]):
                if b != 0:
                    error_position = format(small_block_end + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
            # `_UNKNOWN_BYTES` may appear in the zero padding. However,
            # what this means is unknown. Therefore, just skip it if it
            # appears.
            if padding[420:532] != _UNKNOWN_BYTES:
                for j, b in enumerate(padding[420:532]):
                    if b != 0:
                        error_position = format(small_block_end + 420 + j,
                                                '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
            for j, b in enumerate(padding[532:]):
                if b != 0:
                    error_position = format(small_block_end + 532 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
 Expect zero padding, but got an invalid byte `{byte}'.")
            track_point = TrackPoint(
                time, status, latitude, longitude, speed, azimuth,
                x_acceleration, y_acceleration, z_acceleration)
            track_points.append(track_point)
    return track_points
def read_input_paths(input_paths: List[pathlib.Path]) -> List[TrackPoint]:
    """Collect track points from every given path.

    A directory is walked recursively and its MP4 files are parsed in
    sorted order; a plain file is parsed directly.  Returns the
    concatenated track points.
    """
    track_points = []
    for input_path in input_paths:
        if input_path.is_dir():
            file_paths = []
            for dirpath, _dirnames, filenames in os.walk(input_path):
                dirpath = pathlib.Path(dirpath)
                for filename in filenames:
                    file_path = dirpath / filename
                    # Match the extension case-insensitively; the original
                    # only accepted `.mp4' / `.MP4' and silently skipped
                    # mixed-case names such as `.Mp4'.
                    if file_path.suffix.lower() != '.mp4':
                        continue
                    file_paths.append(file_path)
            file_paths.sort()
            for file_path in file_paths:
                track_points.extend(parse_mp4_file(file_path))
        else:
            track_points.extend(parse_mp4_file(input_path))
    return track_points
def write_csv_file(args: Arguments,
                   track_points: List[TrackPoint]) -> pathlib.Path:
    """Write *track_points* as CSV rows to `<args.name>.csv` and return
    the path.  Exits with status 1 when no name was given, or when the
    file exists and overwriting is not allowed."""
    if args.name is None:
        print("`--name' is required to output a CSV file.", file=sys.stderr)
        sys.exit(1)
    csv_file_path = pathlib.Path(f'{args.name}.csv')
    if csv_file_path.exists() and not args.overwrite:
        print(f"{csv_file_path}: File already exists.", file=sys.stderr)
        sys.exit(1)
    with open(csv_file_path, 'w') as csv_file:
        for track_point in track_points:
            print(track_point.format_as_csv(), file=csv_file)
    return csv_file_path
def create_track_segments(
        args: Arguments, track_points: List[TrackPoint]) -> List[TrackSegment]:
    """Filter, sort and de-duplicate *track_points* into track segments.

    Only points with a valid fix (status 'A') are kept.  Points sharing a
    timestamp are collapsed according to `args.how_to_unique` ('first' or
    'last'); if it is None and same-timestamp points disagree on
    coordinates, a RuntimeError is raised.  All surviving points go into
    a single segment.
    """
    # Drop every point without a valid GPS fix.
    new_track_points = []
    for track_point in track_points:
        if track_point.status != 'A':
            assert(track_point.latitude is None)
            assert(track_point.longitude is None)
            continue
        assert(track_point.latitude is not None)
        assert(track_point.longitude is not None)
        new_track_points.append(track_point)
    track_points = new_track_points
    # Stable sort by timestamp only (TrackPoint.__lt__ compares time).
    track_points.sort()
    if len(track_points) == 0:
        return []
    # Collapse runs of equal timestamps.  Note: with 'last', the
    # representative is only replaced when the coordinates differ, so the
    # survivor is the earliest object whose coordinates match the run's
    # final coordinates — not necessarily the final object itself.
    unique_track_points = []
    it = iter(track_points)
    representative_track_point = next(it)
    while True:
        track_point = next(it, None)
        if track_point is None:
            unique_track_points.append(representative_track_point)
            break
        if track_point.time != representative_track_point.time:
            unique_track_points.append(representative_track_point)
            representative_track_point = track_point
            continue
        if track_point.latitude == representative_track_point.latitude\
                and track_point.longitude == representative_track_point.longitude:
            continue
        if args.how_to_unique == 'first':
            continue
        elif args.how_to_unique == 'last':
            representative_track_point = track_point
        else:
            raise RuntimeError("There exist track points with the same\
 timestamp but different coordinates. Use `--uniq' option.")
    track_segments = []
    track_segments.append(TrackSegment())
    for track_point in unique_track_points:
        track_segments[0].append_track_point(track_point)
    return track_segments
def as_xml_attribute(data: str) -> str:
    """Return *data* escaped and wrapped in quotes for use as an XML
    attribute value (quotes included in the result)."""
    quote = xml.sax.saxutils.quoteattr
    return quote(data)
def as_xml_data(data: str) -> str:
    """Return *data* with the XML-special characters &, <, > escaped,
    suitable for use as XML character data."""
    escaped = xml.sax.saxutils.escape(data)
    return escaped
def get_local_time_in_iso8601() -> str:
    """Return the current local time as an ISO 8601 string with offset.

    Example: ``2020-01-02T03:04:05+09:00``.
    """
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    local_aware_now = utc_now.astimezone()
    # datetime.isoformat() already renders the UTC offset as "+HH:MM";
    # the original had to patch a colon into strftime('%z')'s "+HHMM"
    # output with a regex substitution.
    return local_aware_now.isoformat(timespec='seconds')
def write_gpx_file(args: Arguments,
                   track_segments: List[TrackSegment]) -> pathlib.Path:
    """Serialize *track_segments* to `<name>.gpx` and validate the result.

    The base name comes from ``args.name`` or, when that is unset, from
    the name of the chronologically first track point.  Exits with
    status 1 when the target file exists and ``--overwrite`` was not
    given.  After writing, the file is validated with the external
    ``xmllint`` tool against ``gpx.xsd``; a validation failure is only
    reported on stderr, not fatal.  Returns the path written.
    """
    all_track_points = []
    for track_segment in track_segments:
        for track_point in track_segment:
            all_track_points.append(track_point)
    name = args.name
    if name is None:
        if len(all_track_points) == 0:
            raise ValueError(
                "`--name' is not specified, and there is no track point.")
        # Fall back to the name carried by the earliest track point.
        all_track_points.sort()
        name = all_track_points[0].name
    gpx_file_path = pathlib.Path(f'{name}.gpx')
    bounds = None
    if len(all_track_points) > 0:
        # (min lat, min lon, max lat, max lon) for the <bounds> element.
        latitudes = [t.latitude for t in all_track_points]
        longitudes = [t.longitude for t in all_track_points]
        bounds = (min(latitudes), min(longitudes),
                  max(latitudes), max(longitudes))
    if gpx_file_path.exists():
        if not args.overwrite:
            print(f'{gpx_file_path}: Error: File already exists.',
                  file=sys.stderr)
            sys.exit(1)
    with open(gpx_file_path, 'w') as gpx_file:
        print('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
              file=gpx_file)
        print('<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1"'
              ' creator="papago2gpx">', file=gpx_file)
        print('  <metadata>', file=gpx_file)
        print(f'    <name>{as_xml_data(name)}</name>', file=gpx_file)
        if args.description is not None:
            description = as_xml_data(args.description)
            print(f'    <desc>{description}</desc>', file=gpx_file)
        if args.author_name is not None or args.author_email is not None:
            print('    <author>', file=gpx_file)
            if args.author_name is not None:
                author_name = as_xml_data(args.author_name)
                print(f'      <name>{author_name}</name>', file=gpx_file)
            if args.author_email is not None:
                author_email_parts = args.author_email.split('@', 1)
                if len(author_email_parts) != 2:
                    raise RuntimeError(
                        f'An invalid E-mail address: {args.author_email}')
                # quoteattr() already wraps the values in quotes.
                author_email_id = as_xml_attribute(author_email_parts[0])
                author_email_domain = as_xml_attribute(author_email_parts[1])
                # BUG FIX: the original built this tag with a backslash
                # line continuation inside the f-string and a flush-left
                # continuation line, so no whitespace separated the `id`
                # and `domain` attributes and the XML was invalid.
                print(f'      <email id={author_email_id}'
                      f' domain={author_email_domain}/>', file=gpx_file)
            print('    </author>', file=gpx_file)
        if args.copyright is not None:
            # Renamed from `copyright` to avoid shadowing the builtin.
            copyright_author = as_xml_attribute(args.copyright)
            print(f'    <copyright author={copyright_author}', end='',
                  file=gpx_file)
            copyright_year = args.copyright_year
            copyright_license = args.copyright_license
            if copyright_year is not None or copyright_license is not None:
                print('>', file=gpx_file)
                if copyright_year is not None:
                    copyright_year = as_xml_data(str(copyright_year))
                    print(f'      <year>{copyright_year}</year>',
                          file=gpx_file)
                if copyright_license is not None:
                    copyright_license = as_xml_data(copyright_license)
                    print(f'      <license>{copyright_license}</license>',
                          file=gpx_file)
                print('    </copyright>', file=gpx_file)
            else:
                # No children: close the element in place.
                print('/>', file=gpx_file)
        print(f'    <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)
        if args.keywords is not None:
            keywords = as_xml_data(args.keywords)
            print(f'    <keywords>{keywords}</keywords>', file=gpx_file)
        if bounds is not None:
            # Same fix as for <email>: keep a space between attributes.
            print(f'    <bounds minlat="{bounds[0]}" minlon="{bounds[1]}"'
                  f' maxlat="{bounds[2]}" maxlon="{bounds[3]}"/>',
                  file=gpx_file)
        print('  </metadata>', file=gpx_file)
        print('  <trk>', file=gpx_file)
        if args.track_name is not None:
            track_name = as_xml_data(args.track_name)
            print(f'    <name>{track_name}</name>', file=gpx_file)
        if args.track_comment is not None:
            track_comment = as_xml_data(args.track_comment)
            print(f'    <cmt>{track_comment}</cmt>', file=gpx_file)
        if args.track_description is not None:
            track_description = as_xml_data(args.track_description)
            print(f'    <desc>{track_description}</desc>', file=gpx_file)
        if args.track_type is not None:
            track_type = as_xml_data(args.track_type)
            print(f'    <type>{track_type}</type>', file=gpx_file)
        for track_segment in track_segments:
            print('    <trkseg>', file=gpx_file)
            for track_point in track_segment:
                # Same fix as for <email>: separate lat and lon.
                print(f'      <trkpt lat="{track_point.latitude}"'
                      f' lon="{track_point.longitude}">', file=gpx_file)
                print(f'        <time>{track_point.time}</time>',
                      file=gpx_file)
                print('      </trkpt>', file=gpx_file)
            print('    </trkseg>', file=gpx_file)
        print('  </trk>', file=gpx_file)
        print('</gpx>', file=gpx_file)
    # Best-effort schema validation; requires `xmllint` on PATH and
    # `gpx.xsd` in the working directory.
    proc = subprocess.run(
        ['xmllint', '--schema', 'gpx.xsd', str(gpx_file_path)],
        stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, encoding='UTF-8')
    if proc.returncode != 0:
        print(f"""Failed to validate the GPX file `{gpx_file_path}'.
command: {proc.args}
stdout: {proc.stdout}
stderr: {proc.stderr}
returncode: {proc.returncode}""", file=sys.stderr)
    return gpx_file_path
if __name__ == '__main__':
    # Command-line entry point: parse arguments, read the recorder input
    # files, and emit both a CSV dump and a GPX track.
    args = Arguments()
    track_points = read_input_paths(args.input_paths)
    csv_file_path = write_csv_file(args, track_points)
    print(f"Succeeded! The result is output to `{csv_file_path}'.")
    track_segments = create_track_segments(args, track_points)
    if args.name is None and len(track_segments) == 0:
        # write_gpx_file() could neither derive a file name nor write any
        # data in this case, so bail out early.
        print("`--name' is not specified, and there is no track segment.",
              file=sys.stderr)
        sys.exit(1)
    if len(track_segments) == 0:
        print('WARNING: There is no track segment.', file=sys.stderr)
    gpx_file_path = write_gpx_file(args, track_segments)
    print(f"Succeeded! The result is output to `{gpx_file_path}'.")
    sys.exit(0)
|
2,626 | c331802cf5a09bc8db8ddbfa37636a01cf73684e | # coding: utf-8
import sys
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from flask_assets import ManageAssets
from app import app
from assets import assets
from db import db
from .changepassword import ChangePassword
from .create_superuser import SuperUserCommand
from .scaffold import ScaffoldingCommand
from .upload_static_files import UploadStaticFiles
class MyMan(Manager):
    """Manager subclass whose run() converts SystemExit from handle()
    into an explicit exit status."""

    def run(self, commands=None, default_command=None):
        """Dispatch command-line input to the registered commands.

        Usually called inside an ``if __name__ == "__main__"`` block in a
        Python script.

        :param commands: optional dict of commands, merged into those
            registered via add_command().
        :param default_command: name of the command to run when the
            script is invoked with no arguments.
        """
        if commands:
            self._commands.update(commands)
        argv = sys.argv
        if default_command is not None and len(argv) == 1:
            argv.append(default_command)
        try:
            status = self.handle(argv[0], argv[1:])
        except SystemExit as exc:
            status = exc.code
        sys.exit(status or 0)
# NOTE(review): two manager instances are created here.  `man` receives
# only the `db` command and is never used again in this view, while
# `manager` gets the full command set — presumably `man` is leftover
# legacy wiring; confirm before removing.
man = MyMan(app)
man.add_command('db', MigrateCommand)
# Bind Flask-Migrate to the app and expose its CLI under `db`.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
manager.add_command('create_superuser', SuperUserCommand())
manager.add_command('upload_static_files', UploadStaticFiles())
manager.add_command('change_password', ChangePassword())
manager.add_command('scaffold', ScaffoldingCommand())
manager.add_command("assets", ManageAssets(assets))
|
class Background(object):
    """A character background: a name plus role-play suggestion lists
    (personality traits, ideals, bonds, and flaws)."""

    def __init__(self, name):
        # Display name of the background, e.g. 'Acolyte'.
        self.name = name
        # Free-form description paragraph; set via setBackDesc().
        self.description = ''
        # Suggestion lists, filled one entry at a time by the add* methods.
        self.prTraits, self.ideals, self.bonds, self.flaws = [], [], [], []

    def getBackName(self):
        """Return the background's name."""
        return self.name

    def setBackDesc(self, desc):
        """Replace the description text."""
        self.description = desc

    def getBackDesc(self):
        """Return the description text."""
        return self.description

    def addPrTrait(self, trait):
        """Append a personality-trait suggestion."""
        self.prTraits.append(trait)

    def getPrTraits(self):
        """Return the list of personality traits."""
        return self.prTraits

    def addIdeal(self, ideal):
        """Append an ideal suggestion."""
        self.ideals.append(ideal)

    def getIdeals(self):
        """Return the list of ideals."""
        return self.ideals

    def addBond(self, bond):
        """Append a bond suggestion."""
        self.bonds.append(bond)

    def getBonds(self):
        """Return the list of bonds."""
        return self.bonds

    def addFlaw(self, flaw):
        """Append a flaw suggestion."""
        self.flaws.append(flaw)

    def getFlaws(self):
        """Return the list of flaws."""
        return self.flaws
# Acolyte background data.  Typos and missing '\n' terminators in the
# display strings are fixed; content is otherwise unchanged.
acolyte = Background('Acolyte')
acolyte.setBackDesc('You have spent your life in the service of a temple or to a specific god or pantheon of gods. You act as an intermediary between the realm of the holy and the mortal world, performing sacred rites and offering sacrifices in order to conduct worshipers into the presence of the divine.\n')
acolyte.addPrTrait('I idolize a particular hero of my faith and constantly refer to that person\'s deeds and example.\n')
acolyte.addPrTrait('I can find common ground between the fiercest enemies, empathizing with them and always working toward peace.\n')
acolyte.addPrTrait('I see omens in every event and action. The gods try to speak to us, we just need to listen.\n')
acolyte.addPrTrait('Nothing can shake my optimistic attitude.\n')
acolyte.addPrTrait('I quote (or misquote) sacred texts and proverbs in almost every situation.\n')
acolyte.addPrTrait('I am tolerant (or intolerant) of other faiths and respect (or condemn) the worship of other gods.\n')
acolyte.addPrTrait('I\'ve enjoyed fine food, drink, and high society among my temple\'s elite. Rough living grates on me.\n')
acolyte.addPrTrait('I\'ve spent so long in the temple that I have little practical experience dealing with people in the outside world.\n')
acolyte.addIdeal('The ancient traditions of worship and sacrifice must be preserved and upheld.\n')
acolyte.addIdeal('I always try to help those in need, no matter what the personal cost.\n')
acolyte.addIdeal('We must help bring about the changes the gods are constantly working in the world.\n')
acolyte.addIdeal('I hope to one day rise to the top of my faith\'s religious hierarchy.\n')
acolyte.addIdeal('I trust that my deity will guide my actions. I have faith that if I work hard, things will go well.\n')
acolyte.addIdeal('I seek to prove myself worthy of my god\'s favor by matching my actions against his or her teachings.\n')
acolyte.addBond('I would die to recover an ancient relic of my faith that was lost long ago.\n')
acolyte.addBond('I will someday get revenge on the corrupt temple hierarchy who branded me a heretic.\n')
acolyte.addBond('I owe my life to the priest who took me in when my parents died.\n')
acolyte.addBond('Everything I do is for the common people.\n')
acolyte.addBond('I will do anything to protect the temple where I served.\n')
acolyte.addBond('I seek to preserve a sacred text that my enemies consider heretical and seek to destroy.\n')
acolyte.addFlaw('I judge others harshly, and myself even more severely.\n')
acolyte.addFlaw('I put too much trust in those who wield power within my temple\'s hierarchy.\n')
acolyte.addFlaw('My piety sometimes leads me to blindly trust those that profess faith in my god.\n')
acolyte.addFlaw('I am inflexible in my thinking.\n')
acolyte.addFlaw('I am suspicious of strangers and expect the worst of them.\n')
acolyte.addFlaw('Once I pick a goal, I become obsessed with it to the detriment of everything else in my life.\n')
# Charlatan background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
charlatan = Background('Charlatan')
charlatan.setBackDesc('You have always had a way with people. You know what makes them tick, you can tease out their hearts\' desires after a few minutes of conversation, and with a few leading questions you can read them like they were children\'s books. It\'s a useful talent, and one that you\'re perfectly willing to use for your advantage.\n')
charlatan.addPrTrait('I fall in and out of love easily, and am always pursuing someone.\n')
charlatan.addPrTrait('I have a joke for every occasion, especially occasions where humor is inappropriate.\n')
charlatan.addPrTrait('Flattery is my preferred trick for getting what I want.\n')
charlatan.addPrTrait('I\'m a born gambler who can\'t resist taking a risk for a potential payoff.\n')
charlatan.addPrTrait('I lie about almost everything, even when there\'s no good reason to.\n')
charlatan.addPrTrait('Sarcasm and insults are my weapons of choice.\n')
charlatan.addPrTrait('I keep multiple holy symbols on me and invoke whatever deity might come in useful at any given moment.\n')
charlatan.addPrTrait('I pocket anything I see that might have some value.\n')
charlatan.addIdeal('I am a free spirit—no one tells me what to do.\n')
charlatan.addIdeal('I never target people who can\'t afford to lose a few coins.\n')
charlatan.addIdeal('I distribute the money I acquire to the people who really need it.\n')
charlatan.addIdeal('I never run the same con twice.\n')
charlatan.addIdeal('Material goods come and go. Bonds of friendship last forever.\n')
charlatan.addIdeal('I\'m determined to make something of myself.\n')
charlatan.addBond('I fleeced the wrong person and must work to ensure that this individual never crosses paths with me or those I care about.\n')
charlatan.addBond('I owe everything to my mentor—a horrible person who\'s probably rotting in jail somewhere.\n')
charlatan.addBond('Somewhere out there, I have a child who doesn\'t know me. I\'m making the world better for him or her.\n')
charlatan.addBond('I came from a noble family, and one day I\'ll reclaim my lands and title from those who stole them from me.\n')
charlatan.addBond('A powerful person killed someone I love. Some day soon, I\'ll have my revenge...\n')
charlatan.addBond('I swindled and ruined a person who didn\'t deserve it. I seek to atone for my misdeeds but might never be able to forgive myself.\n')
charlatan.addFlaw('I can\'t resist a pretty face.\n')
charlatan.addFlaw('I\'m always in debt. I spend my ill-gotten gains on decadent luxuries faster than I bring them in.\n')
charlatan.addFlaw('I\'m convinced that no one could ever fool me the way I fool others.\n')
charlatan.addFlaw('I\'m too greedy for my own good. I can\'t resist taking a risk if there\'s money involved.\n')
charlatan.addFlaw('I can\'t resist swindling people who are more powerful than me.\n')
charlatan.addFlaw('I hate to admit it and will hate myself for it, but I\'ll run and preserve my own hide if the going gets tough.\n')
# Criminal background data.  Typos and capitalization slips in the
# display strings are fixed; content is otherwise unchanged.
criminal = Background('Criminal')
criminal.setBackDesc('You are an experienced criminal with a history of breaking the law. You have spent a lot of time among other criminals and still have contacts with the criminal underworld. You\'re far closer than most people to the world of murder, theft, and violence that pervades the underbelly of civilization, and you have survived up to this point by flouting the rules and regulations of society.\n')
criminal.addPrTrait('I always have a plan for what to do when things go wrong.\n')
criminal.addPrTrait('I am always calm, no matter what the situation. I never raise my voice or let my emotions control me.\n')
criminal.addPrTrait('The first thing I do in a new place is note the locations of everything valuable—or where such things could be hidden.\n')
criminal.addPrTrait('I would rather make a new friend than a new enemy.\n')
criminal.addPrTrait('I am incredibly slow to trust. Those who seem the fairest often have the most to hide.\n')
criminal.addPrTrait('I don\'t pay attention to the risks in a situation. Never tell me the odds.\n')
criminal.addPrTrait('The best way to get me to do something is to tell me I can\'t do it.\n')
criminal.addPrTrait('I blow up at the slightest insult.\n')
criminal.addIdeal('I don\'t steal from others in the trade.\n')
criminal.addIdeal('Chains are meant to be broken, as are those who would forge them.\n')
criminal.addIdeal('I steal from the wealthy so that I can help people in need.\n')
criminal.addIdeal('I will do whatever it takes to become wealthy.\n')
criminal.addIdeal('I\'m loyal to my friends, not to any ideals, and everyone else can take a trip down the Styx for all I care.\n')
criminal.addIdeal('There\'s a spark of good in everyone.\n')
criminal.addBond('I\'m trying to pay off an old debt I owe to a generous benefactor.\n')
criminal.addBond('My ill-gotten gains go to support my family.\n')
criminal.addBond('Something important was taken from me, and I aim to steal it back.\n')
criminal.addBond('I will become the greatest thief that has ever lived.\n')
criminal.addBond('I\'m guilty of a terrible crime. I hope I can redeem myself for it.\n')
criminal.addBond('Someone I loved died because of a mistake I made. That will never happen again.\n')
criminal.addFlaw('When I see something valuable, I can\'t think about anything but how to steal it.\n')
criminal.addFlaw('When faced with a choice between money and my friends, I usually choose the money.\n')
criminal.addFlaw('If there\'s a plan, I\'ll forget it. If I don\'t forget it, I\'ll ignore it.\n')
criminal.addFlaw('I have a "tell" that reveals when I\'m lying.\n')
criminal.addFlaw('I turn tail and run when things look bad.\n')
criminal.addFlaw('An innocent person is in prison for a crime that I committed. I\'m ok with that.\n')
# Entertainer background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
entertainer = Background('Entertainer')
entertainer.setBackDesc('You thrive in front of an audience. You know how to entrance them, entertain them, and even inspire them. Your poetics can stir the hearts of those who hear you, awakening grief or joy, laughter or anger. Your music raises the spirits or captures their sorrow. Your dance steps captivate, your humor cuts to the quick. Whatever techniques you use, your art is your life.\n')
entertainer.addPrTrait('I know a story relevant to almost every situation.\n')
entertainer.addPrTrait('Whenever I come to a new place, I collect local rumors and spread gossip.\n')
entertainer.addPrTrait('I’m a hopeless romantic, always searching for that “special someone.”\n')
entertainer.addPrTrait('Nobody stays angry at me or around me for long, since I can defuse any amount of tension.\n')
entertainer.addPrTrait('I love a good insult, even one directed at me.\n')
entertainer.addPrTrait('I get bitter if I’m not the center of attention.\n')
entertainer.addPrTrait('I’ll settle for nothing less than perfection.\n')
entertainer.addPrTrait('I change my mood or my mind as quickly as I change key in a song.\n')
entertainer.addIdeal('When I perform, I make the world better than it was.\n')
entertainer.addIdeal('The stories, legends, and songs of the past must never be forgotten, for they teach us who we are.\n')
entertainer.addIdeal('The world is in need of new ideas and bold action.\n')
entertainer.addIdeal('I\'m only in it for the money and fame.\n')
entertainer.addIdeal('I like seeing the smiles on people\'s faces when I perform. That\'s all that matters.\n')
entertainer.addIdeal('Art should reflect the soul; it should come from within and reveal who we really are.\n')
entertainer.addBond('My instrument is my most treasured possession, and it reminds me of someone I love.\n')
entertainer.addBond('Someone stole my precious instrument, and someday I\'ll get it back.\n')
entertainer.addBond('I want to become famous, whatever it takes.\n')
entertainer.addBond('I idolize a hero of the old tales and measure my deeds against that person\'s.\n')
entertainer.addBond('I will do anything to prove myself superior to my hated rival.\n')
entertainer.addBond('I would do anything for the other members of my old troupe.\n')
entertainer.addFlaw('I\'ll do anything to win fame and renown.\n')
entertainer.addFlaw('I\'m a sucker for a pretty face.\n')
entertainer.addFlaw('A scandal prevents me from ever going home again. That kind of trouble seems to follow me around.\n')
entertainer.addFlaw('I once satirized a noble who still wants my head. It was a mistake that I will likely repeat.\n')
entertainer.addFlaw('I have trouble keeping my feelings hidden. My sharp tongue lands me in trouble.\n')
entertainer.addFlaw('Despite my best efforts, I am unreliable to my friends.\n')
# Folk Hero background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
folkHero = Background('Folk Hero')
folkHero.setBackDesc('You come from a humble social rank, but you are destined for so much more. Already the people of your home village regard you as their champion, and your destiny calls you to stand against the tyrants and monsters that threaten the common folk everywhere.\n')
folkHero.addPrTrait('I judge people by their actions, not their words.\n')
folkHero.addPrTrait('If someone is in trouble, I’m always ready to lend help.\n')
folkHero.addPrTrait('When I set my mind to something, I follow through no matter what gets in my way.\n')
folkHero.addPrTrait('I have a strong sense of fair play and always try to find the most equitable solution to arguments.\n')
folkHero.addPrTrait('I\'m confident in my own abilities and do what I can to instill confidence in others.\n')
folkHero.addPrTrait('Thinking is for other people. I prefer action.\n')
folkHero.addPrTrait('I misuse long words in an attempt to sound smarter.\n')
folkHero.addPrTrait('I get bored easily. When am I going to get on with my destiny?\n')
folkHero.addIdeal('People deserve to be treated with dignity and respect.\n')
folkHero.addIdeal('No one should get preferential treatment before the law, and no one is above the law.\n')
folkHero.addIdeal('Tyrants must not be allowed to oppress the people.\n')
folkHero.addIdeal('If I become strong, I can take what I want—what I deserve.\n')
folkHero.addIdeal('There\'s no good in pretending to be something I\'m not.\n')
folkHero.addIdeal('Nothing and no one can steer me away from my higher calling.\n')
folkHero.addBond('I have a family, but I have no idea where they are. One day, I hope to see them again.\n')
folkHero.addBond('I worked the land, I love the land, and I will protect the land.\n')
folkHero.addBond('A proud noble once gave me a horrible beating, and I will take my revenge on any bully I encounter.\n')
folkHero.addBond('My tools are symbols of my past life, and I carry them so that I will never forget my roots.\n')
folkHero.addBond('I protect those who cannot protect themselves.\n')
folkHero.addBond('I wish my childhood sweetheart had come with me to pursue my destiny.\n')
folkHero.addFlaw('The tyrant who rules my land will stop at nothing to see me killed.\n')
folkHero.addFlaw('I\'m convinced of the significance of my destiny, and blind to my shortcomings and the risk of failure.\n')
folkHero.addFlaw('The people who knew me when I was young know my shameful secret, so I can never go home again.\n')
folkHero.addFlaw('I have a weakness for the vices of the city, especially hard drink.\n')
folkHero.addFlaw('Secretly, I believe that things would be better if I were a tyrant lording over the land.\n')
folkHero.addFlaw('I have trouble trusting my allies.\n')
# Guild Artisan background data.  Typos in the display strings are
# fixed; content is otherwise unchanged.
guildArtisan = Background('Guild Artisan')
guildArtisan.setBackDesc('You are a member of an artisan\'s guild, skilled in a particular field and closely associated with other artisans. You are a well-established part of the mercantile world, freed by talent and wealth from the constraints of a feudal social order. You learned your skills as an apprentice to a master artisan, under the sponsorship of your guild, until you became a master in your own right.\n')
guildArtisan.addPrTrait('I believe that anything worth doing is worth doing right. I can\'t help it—I\'m a perfectionist.\n')
guildArtisan.addPrTrait('I\'m a snob who looks down on those who can\'t appreciate fine art.\n')
guildArtisan.addPrTrait('I always want to know how things work and what makes people tick.\n')
guildArtisan.addPrTrait('I\'m full of witty aphorisms and have a proverb for every occasion.\n')
guildArtisan.addPrTrait('I\'m rude to people who lack my commitment to hard work and fair play.\n')
guildArtisan.addPrTrait('I like to talk at length about my profession.\n')
guildArtisan.addPrTrait('I don\'t part with my money easily and will haggle tirelessly to get the best deal possible.\n')
guildArtisan.addPrTrait('I\'m well known for my work, and I want to make sure everyone appreciates it. I\'m always taken aback when people haven\'t heard of me.\n')
guildArtisan.addIdeal('It is the duty of all civilized people to strengthen the bonds of community and the security of civilization.\n')
guildArtisan.addIdeal('My talents were given to me so that I could use them to benefit the world.\n')
guildArtisan.addIdeal('Everyone should be free to pursue his or her own livelihood.\n')
guildArtisan.addIdeal('I\'m only in it for the money.\n')
guildArtisan.addIdeal('I\'m committed to the people I care about, not to ideals.\n')
guildArtisan.addIdeal('I work hard to be the best there is at my craft.\n')
guildArtisan.addBond('The workshop where I learned my trade is the most important place in the world to me.\n')
guildArtisan.addBond('I created a great work for someone, and then found them unworthy to receive it. I\'m still looking for someone worthy.\n')
guildArtisan.addBond('I owe my guild a great debt for forging me into the person I am today.\n')
guildArtisan.addBond('I pursue wealth to secure someone\'s love.\n')
guildArtisan.addBond('One day I will return to my guild and prove that I am the greatest artisan of them all.\n')
guildArtisan.addBond('I will get revenge on the evil forces that destroyed my place of business and ruined my livelihood.\n')
guildArtisan.addFlaw('I\'ll do anything to get my hands on something rare or priceless.\n')
guildArtisan.addFlaw('I\'m quick to assume that someone is trying to cheat me.\n')
guildArtisan.addFlaw('No one must ever learn that I once stole money from guild coffers.\n')
guildArtisan.addFlaw('I\'m never satisfied with what I have—I always want more.\n')
guildArtisan.addFlaw('I would kill to acquire a noble title.\n')
guildArtisan.addFlaw('I\'m horribly jealous of anyone who can outshine my handiwork. Everywhere I go, I\'m surrounded by rivals.\n')
# Hermit background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
hermit = Background('Hermit')
hermit.setBackDesc('You lived in seclusion—either in a sheltered community such as a monastery, or entirely alone—for a formative part of your life. In your time apart from the clamor of society, you found quiet, solitude, and perhaps some of the answers you were looking for.\n')
hermit.addPrTrait('I\'ve been isolated for so long that I rarely speak, preferring gestures and the occasional grunt.\n')
hermit.addPrTrait('I am utterly serene, even in the face of disaster.\n')
hermit.addPrTrait('The leader of my community had something wise to say on every topic, and I am eager to share that wisdom.\n')
hermit.addPrTrait('I feel tremendous empathy for all who suffer.\n')
hermit.addPrTrait('I\'m oblivious to etiquette and social expectations.\n')
hermit.addPrTrait('I connect everything that happens to me to a grand, cosmic plan.\n')
hermit.addPrTrait('I often get lost in my own thoughts and contemplation, becoming oblivious to my surroundings.\n')
hermit.addPrTrait('I am working on a grand philosophical theory and love sharing my ideas.\n')
hermit.addIdeal('My gifts are meant to be shared with all, not used for my own benefit.\n')
hermit.addIdeal('Emotions must not cloud our sense of what is right and true, or our logical thinking.\n')
hermit.addIdeal('Inquiry and curiosity are the pillars of progress.\n')
hermit.addIdeal('Solitude and contemplation are paths toward mystical or magical power.\n')
hermit.addIdeal('Meddling in the affairs of others only causes trouble.\n')
hermit.addIdeal('If you know yourself, there\'s nothing left to know.\n')
hermit.addBond('Nothing is more important to me than the other members of my hermitage, order, or association.\n')
hermit.addBond('I entered seclusion to hide from the ones who might still be hunting me. I must someday confront them.\n')
hermit.addBond('I\'m still seeking the enlightenment I pursued in my seclusion, and it still eludes me.\n')
hermit.addBond('I entered seclusion because I loved someone I could not have.\n')
hermit.addBond('Should my discovery come to light, it could bring ruin to the world.\n')
hermit.addBond('My isolation gave me great insight into a great evil that only I can destroy.\n')
hermit.addFlaw('Now that I\'ve returned to the world, I enjoy its delights a little too much.\n')
hermit.addFlaw('I harbor dark, bloodthirsty thoughts that my isolation and meditation failed to quell.\n')
hermit.addFlaw('I am dogmatic in my thoughts and philosophy.\n')
hermit.addFlaw('I let my need to win arguments overshadow friendships and harmony.\n')
hermit.addFlaw('I\'d risk too much to uncover a lost bit of knowledge.\n')
hermit.addFlaw('I like keeping secrets and won\'t share them with anyone.\n')
# Noble background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
noble = Background('Noble')
noble.setBackDesc('You understand wealth, power, and privilege. You carry a noble title, and your family owns land, collects taxes, and wields significant political influence. You might be a pampered aristocrat unfamiliar with work or discomfort, a former merchant just elevated to the nobility, or a disinherited scoundrel with a disproportionate sense of entitlement. Or you could be an honest, hard-working landowner who cares deeply about the people who live and work on your land, keenly aware of your responsibility to them.\n')
noble.addPrTrait('My eloquent flattery makes everyone I talk to feel like the most wonderful and important person in the world.\n')
noble.addPrTrait('The common folk love me for my kindness and generosity.\n')
noble.addPrTrait('No one could doubt by looking at my regal bearing that I am a cut above the unwashed masses.\n')
noble.addPrTrait('I take great pains to always look my best and follow the latest fashions.\n')
noble.addPrTrait('I don\'t like to get my hands dirty, and I won\'t be caught dead in unsuitable accommodations.\n')
noble.addPrTrait('Despite my noble birth, I do not place myself above other folk. We all have the same blood.\n')
noble.addPrTrait('My favor, once lost, is lost forever.\n')
noble.addPrTrait('If you do me an injury, I will crush you, ruin your name, and salt your fields.\n')
noble.addIdeal('Respect is due to me because of my position, but all people regardless of station deserve to be treated with dignity.\n')
noble.addIdeal('It is my duty to respect the authority of those above me, just as those below me must respect mine.\n')
noble.addIdeal('I must prove that I can handle myself without the coddling of my family.\n')
noble.addIdeal('If I can attain more power, no one will tell me what to do.\n')
noble.addIdeal('Blood runs thicker than water.\n')
noble.addIdeal('It is my duty to protect and care for the people beneath me.\n')
noble.addBond('I will face any challenge to win the approval of my family.\n')
noble.addBond('My house\'s alliance with another noble family must be sustained at all costs.\n')
noble.addBond('Nothing is more important than the other members of my family.\n')
noble.addBond('I am in love with the heir of a family that my family despises.\n')
noble.addBond('My loyalty to my sovereign is unwavering.\n')
noble.addBond('The common folk must see me as a hero of the people.\n')
noble.addFlaw('I secretly believe that everyone is beneath me.\n')
noble.addFlaw('I hide a truly scandalous secret that could ruin my family forever.\n')
noble.addFlaw('I too often hear veiled insults and threats in every word addressed to me, and I\'m quick to anger.\n')
noble.addFlaw('I have an insatiable desire for carnal pleasures.\n')
noble.addFlaw('In fact, the world does revolve around me.\n')
noble.addFlaw('By my words and actions, I often bring shame to my family.\n')
# Outlander background data.  Typos in the display strings are fixed;
# content is otherwise unchanged.
outlander = Background('Outlander')
outlander.setBackDesc('You grew up in the wilds, far from civilization and the comforts of town and technology. You\'ve witnessed the migration of herds larger than forests, survived weather more extreme than any city-dweller could comprehend, and enjoyed the solitude of being the only thinking creature for miles in any direction. The wilds are in your blood, whether you were a nomad, an explorer, a recluse, a hunter-gatherer, or even a marauder. Even in places where you don\'t know the specific features of the terrain, you know the ways of the wild.\n')
outlander.addPrTrait('I\'m driven by a wanderlust that led me away from home.\n')
outlander.addPrTrait('I watch over my friends as if they were a litter of newborn pups.\n')
outlander.addPrTrait('I once ran twenty-five miles without stopping to warn my clan of an approaching orc horde. I\'d do it again if I had to.\n')
outlander.addPrTrait('I have a lesson for every situation, drawn from observing nature.\n')
outlander.addPrTrait('I place no stock in wealthy or well-mannered folk. Money and manners won\'t save you from a hungry owlbear.\n')
outlander.addPrTrait('I\'m always picking things up, absently fiddling with them, and sometimes accidentally breaking them.\n')
outlander.addPrTrait('I feel far more comfortable around animals than people.\n')
outlander.addPrTrait('I was, in fact, raised by wolves.\n')
outlander.addIdeal('Life is like the seasons, in constant change, and we must change with it.\n')
outlander.addIdeal('It is each person\'s responsibility to make the most happiness for the whole tribe.\n')
outlander.addIdeal('If I dishonor myself, then I dishonor my whole clan.\n')
outlander.addIdeal('The strongest are meant to rule.\n')
outlander.addIdeal('The natural world is more important than all the constraints of civilization.\n')
outlander.addIdeal('I must earn glory in battle, for myself and my clan.\n')
outlander.addBond('My family, clan, or tribe is the most important thing in my life, even when they are far from me.\n')
outlander.addBond('An injury to the unspoiled wilderness of my home is an injury to me.\n')
outlander.addBond('I will bring terrible wrath down on the evildoers who destroyed my homeland.\n')
outlander.addBond('I am the last of my tribe, and it is up to me to ensure their names enter legend.\n')
outlander.addBond('I suffer awful visions of a coming disaster and will do anything to prevent it.\n')
outlander.addBond('It is my duty to provide children to sustain my tribe.\n')
outlander.addFlaw('I am too enamored of ale, wine, and other intoxicants.\n')
outlander.addFlaw('There\'s no room for caution in a life lived to the fullest.\n')
outlander.addFlaw('I remember every insult I\'ve received and nurse a silent resentment toward anyone who\'s ever wronged me.\n')
outlander.addFlaw('I am slow to trust members of other races, tribes, and societies.\n')
outlander.addFlaw('Violence is my answer to almost any challenge.\n')
outlander.addFlaw('Don\'t expect me to save those who can\'t save themselves. It is nature\'s way that the strong thrive and the weak perish.\n')
# Sage background (typo fixes: "studied", "impression", "Nothing", "existence";
# the first bond was missing its terminating '.\n' that every other entry has).
sage = Background('Sage')
sage.setBackDesc('You spent years learning the lore of the multiverse. You scoured manuscripts, studied scrolls, and listened to the greatest experts on the subjects that interest you. Your efforts have made you a master in your fields of study.\n')
sage.addPrTrait('I use polysyllabic words that convey the impression of great erudition.\n')
sage.addPrTrait('I\'ve read every book in the world\'s greatest libraries—or I like to boast that I have.\n')
sage.addPrTrait('I\'m used to helping out those who aren\'t as smart as I am, and I patiently explain anything and everything to others.\n')
sage.addPrTrait('There\'s nothing I like more than a good mystery.\n')
sage.addPrTrait('I\'m willing to listen to every side of an argument before I make my own judgment.\n')
sage.addPrTrait('I . . . speak . . . slowly . . . when talking . . . to idiots, . . . which . . . almost . . . everyone . . . is . . . compared . . . to me.\n')
sage.addPrTrait('I am horribly, horribly awkward in social situations.\n')
sage.addPrTrait('I\'m convinced that people are always trying to steal my secrets.\n')
sage.addIdeal('The path to power and self-improvement is through knowledge.\n')
sage.addIdeal('What is beautiful points us beyond itself toward what is true.\n')
sage.addIdeal('Emotions must not cloud our logical thinking.\n')
sage.addIdeal('Nothing should fetter the infinite possibility inherent in all existence.\n')
sage.addIdeal('Knowledge is the path to power and domination.\n')
sage.addIdeal('The goal of a life of study is the betterment of oneself.\n')
sage.addBond('It is my duty to protect my students.\n')
sage.addBond('I have an ancient text that holds terrible secrets that must not fall into the wrong hands.\n')
sage.addBond('I work to preserve a library, university, scriptorium, or monastery.\n')
sage.addBond('My life\'s work is a series of tomes related to a specific field of lore.\n')
sage.addBond('I\'ve been searching my whole life for the answer to a certain question.\n')
sage.addBond('I sold my soul for knowledge. I hope to do great deeds and win it back.\n')
sage.addFlaw('I am easily distracted by the promise of information.\n')
sage.addFlaw('Most people scream and run when they see a demon, I stop and take notes on its anatomy.\n')
sage.addFlaw('Unlocking an ancient mystery is worth the price of a civilization.\n')
sage.addFlaw('I overlook obvious solutions in favor of complicated ones.\n')
sage.addFlaw('I speak without really thinking through my words, invariably insulting others.\n')
sage.addFlaw('I can\'t keep a secret to save my life, or anyone else\'s.\n')
# Sailor background (typo fix: the creature is spelled "otyugh").
sailor = Background('Sailor')
sailor.setBackDesc('You sailed on a seagoing vessel for years. In that time, you faced down mighty storms, monsters of the deep, and those who wanted to sink your craft to the bottomless depths. Your first love is the distant line of the horizon, but the time has come to try your hand at something new.\n')
sailor.addPrTrait('My friends know they can rely on me, no matter what.\n')
sailor.addPrTrait('I work hard so that I can play hard when the work is done.\n')
sailor.addPrTrait('I enjoy sailing into new ports and making new friends over a flagon of ale.\n')
sailor.addPrTrait('I stretch the truth for the sake of a good story.\n')
sailor.addPrTrait('To me, a tavern brawl is a nice way to get to know a new city.\n')
sailor.addPrTrait('I never pass up a friendly wager.\n')
sailor.addPrTrait('My language is as foul as an otyugh nest.\n')
sailor.addPrTrait('I like a job well done, especially if I can convince someone else to do it.\n')
sailor.addIdeal('The thing that keeps a ship together is mutual respect between captain and crew.\n')
sailor.addIdeal('We all do the work, so we all share in the rewards.\n')
sailor.addIdeal('The sea is freedom—the freedom to go anywhere and do anything.\n')
sailor.addIdeal('I\'m a predator, and the other ships on the sea are my prey.\n')
sailor.addIdeal('I\'m committed to my crewmates, not to ideals.\n')
sailor.addIdeal('Someday I\'ll own my own ship and chart my own destiny.\n')
sailor.addBond('I\'m loyal to my captain first, everything else second.\n')
sailor.addBond('The ship is most important—crewmates and captains come and go.\n')
sailor.addBond('I\'ll always remember my first ship.\n')
sailor.addBond('In a harbor town, I have a paramour whose eyes nearly stole me from the sea.\n')
sailor.addBond('I was cheated out of my fair share of the profits, and I want to get my due.\n')
sailor.addBond('Ruthless pirates murdered my captain and crewmates, plundered our ship, and left me to die. Vengeance will be mine.\n')
sailor.addFlaw('I follow orders, even if I think they\'re wrong.\n')
sailor.addFlaw('I\'ll say anything to avoid having to do extra work.\n')
sailor.addFlaw('Once someone questions my courage, I never back down no matter how dangerous the situation.\n')
sailor.addFlaw('Once I start drinking, it\'s hard for me to stop.\n')
sailor.addFlaw('I can\'t help but pocket loose coins and other trinkets I come across.\n')
sailor.addFlaw('My pride will probably lead to my destruction.\n')
# Soldier background (typo fix: "member").
soldier = Background('Soldier')
soldier.setBackDesc('War has been your life for as long as you care to remember. You trained as a youth, studied the use of weapons and armor, learned basic survival techniques, including how to stay alive on the battlefield. You might have been part of a standing national army or a mercenary company, or perhaps a member of a local militia who rose to prominence during a recent war.\n')
# Urchin background, plus the master list of all backgrounds.
# (Removed a stray trailing " |" artifact from the backgroundList line.)
urchin = Background('Urchin')
urchin.setBackDesc('You grew up on the streets alone, orphaned, and poor. You had no one to watch over you or to provide for you, so you learned to provide for yourself. You fought fiercely over food and kept a constant watch out for other desperate souls who might steal from you. You slept on rooftops and in alleyways, exposed to the elements, and endured sickness without the advantage of medicine or a place to recuperate. You\'ve survived despite all odds, and did so through cunning, strength, speed, or some combination of each.\n')
#urchin.addPrTrait()
backgroundList = [acolyte,charlatan,criminal,entertainer,folkHero,guildArtisan,hermit,noble,outlander,sage,sailor,soldier,urchin]
from django.db import models
from django.utils import timezone
class User(models.Model):
    """An ENA (Webin) submission account mirrored into the EMG backlog database."""
    class Meta:
        db_table = "User"
        app_label = "backlog"
    # The Webin account id doubles as the primary key.
    webin_id = models.CharField(
        "ENA's submission account id", max_length=15, unique=True, primary_key=True
    )
    registered = models.BooleanField(
        "A copy of ENA's ROLE_METAGENOME_SUBMITTER flag. Set to True if submitter is registered with EMG.",
        default=False,
    )
    consent_given = models.BooleanField(
        "A copy of ENA's ROLE_METAGENOME_ANALYSIS flag. Set to True if submitter gave permission to access and analyse their private data.",
        default=False,
    )
    email_address = models.CharField("Submitters email address.", max_length=200)
    first_name = models.CharField(max_length=30, null=True)
    surname = models.CharField(max_length=50, null=True)
    first_created = models.DateTimeField(auto_now_add=True, null=True)
class Submission(models.Model):
    """A submission made to ENA on behalf of a user (study accessions + client uuid)."""
    class Meta:
        db_table = "Submission"
        app_label = "backlog"
    primary_accession = models.CharField(max_length=20, unique=True, null=True)
    secondary_accession = models.CharField(max_length=20, unique=True, null=True)
    uuid = models.CharField(max_length=100, blank=True, unique=True, null=True)
    created = models.DateTimeField(default=timezone.now)
    submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
class Biome(models.Model):
    """A node in the biome classification hierarchy."""
    class Meta:
        db_table = "Biome"
        app_label = "backlog"
    biome_id = models.IntegerField(primary_key=True, unique=True)
    biome_name = models.CharField(max_length=60)
    # lft/rgt/depth look like nested-set tree bounds — TODO confirm against the
    # code that populates this table.
    lft = models.IntegerField()
    rgt = models.IntegerField()
    depth = models.IntegerField()
    # Full path of the node, e.g. "root:Environmental:..." — presumably; verify.
    lineage = models.CharField(max_length=500)
class StudyError(models.Model):
    """A named category of study error, referenced by Blacklist entries."""
    class Meta:
        # Note: the table is named StudyErrorType although the model is StudyError.
        db_table = "StudyErrorType"
        app_label = "backlog"
    name = models.CharField(max_length=100, unique=True)
    description = models.TextField()
class Pipeline(models.Model):
    """An analysis pipeline release, keyed by its version number."""
    class Meta:
        db_table = "Pipeline"
        app_label = "backlog"
    # NOTE(review): a float primary key is unusual (equality on floats); kept as-is
    # since the DB schema depends on it.
    version = models.FloatField(primary_key=True)
class Blacklist(models.Model):
    """Record of a study being blacklisted for a pipeline version, with reason."""
    class Meta:
        db_table = "Blacklist"
        app_label = "backlog"
    date_blacklisted = models.DateField(auto_now_add=True)
    pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)
    error = models.ForeignKey(StudyError, on_delete=models.CASCADE)
    # Free-text identifier of whoever blacklisted the study (not a FK to User).
    user = models.CharField(max_length=16)
    comment = models.TextField(null=False)
class Study(models.Model):
    """An ENA study tracked by the backlog."""
    class Meta:
        db_table = "Study"
        app_label = "backlog"
        unique_together = ("primary_accession", "secondary_accession")
    primary_accession = models.CharField(max_length=20)
    secondary_accession = models.CharField(max_length=20)
    title = models.CharField(max_length=4000, null=True)
    description = models.CharField(max_length=4000, null=True, blank=True)
    scientific_name = models.CharField(max_length=200, null=True, blank=True)
    public = models.BooleanField(default=True)
    hold_date = models.DateField(null=True)
    first_created = models.DateTimeField(auto_now_add=True, null=True)
    last_updated = models.DateTimeField(auto_now=True, null=True)
    ena_last_update = models.DateField(null=True)
    # NOTE(review): NullBooleanField is deprecated since Django 3.1 and removed in
    # 4.0; BooleanField(null=True) is the drop-in replacement once the project's
    # Django version allows it (>= 2.1).
    mixs_compliant = models.NullBooleanField()
    pubmed = models.TextField(null=True)
    webin = models.CharField(max_length=100, null=True)
    blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE, null=True)
    submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
class Run(models.Model):
    """A sequencing run belonging to a Study."""
    class Meta:
        db_table = "Run"
        app_label = "backlog"
    study = models.ForeignKey(Study, on_delete=models.CASCADE)
    primary_accession = models.CharField(max_length=20, unique=True)
    sample_primary_accession = models.CharField(max_length=20, blank=True, null=True)
    compressed_data_size = models.BigIntegerField(
        help_text="Sum of filesizes of compressed input. (bytes)", null=True, blank=True
    )
    # Biome as declared at submission time.
    biome = models.ForeignKey(
        Biome,
        to_field="biome_id",
        db_column="biome_id",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    # NOTE(review): unlike `biome` above (and Assembly.inferred_biome below) this
    # FK sets no to_field/db_column, so Django derives the column name — confirm
    # that matches the actual schema.
    inferred_biome = models.ForeignKey(
        Biome,
        related_name="inferred_run_biome",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    base_count = models.BigIntegerField(null=True, blank=True)
    read_count = models.BigIntegerField(null=True, blank=True)
    instrument_platform = models.CharField(max_length=4000)
    instrument_model = models.CharField(max_length=4000)
    library_strategy = models.CharField(max_length=150, null=True, db_index=True)
    library_layout = models.CharField(max_length=20)
    library_source = models.CharField(max_length=20, null=True)
    ena_last_update = models.DateField(null=True)
    last_updated = models.DateTimeField(auto_now=True, null=True)
    public = models.BooleanField(default=True)
class UserRequest(models.Model):
    """A user-initiated request (tracked via an RT ticket) that jobs reference."""
    class Meta:
        db_table = "UserRequest"
        app_label = "backlog"
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column="user_id")
    first_created = models.DateTimeField(auto_now_add=True, null=True)
    last_updated = models.DateTimeField(auto_now=True, null=True)
    priority = models.IntegerField(default=0)
    # Request-tracker ticket number; one ticket per request.
    rt_ticket = models.IntegerField(unique=True)
class AssemblyType(models.Model):
    """Lookup table for assembly types (e.g. metagenome vs metatranscriptome)."""
    class Meta:
        db_table = "AssemblyType"
        app_label = "backlog"
    assembly_type = models.CharField(max_length=80, unique=True, null=False)
    def __str__(self):
        return self.assembly_type
# Assemblies received from ENA
class Assembly(models.Model):
    """An assembly accessioned by ENA, linked to its Study and biome."""
    class Meta:
        db_table = "Assembly"
        app_label = "backlog"
    study = models.ForeignKey(Study, on_delete=models.CASCADE)
    primary_accession = models.CharField(max_length=20, unique=True)
    # Biome as declared at submission time.
    biome = models.ForeignKey(
        Biome,
        to_field="biome_id",
        db_column="biome_id",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    # Biome inferred after the fact; separate related_name avoids the clash
    # with `biome`.
    inferred_biome = models.ForeignKey(
        Biome,
        db_column="inferred_biome_id",
        to_field="biome_id",
        related_name="inferred_assembly_biome",
        on_delete=models.DO_NOTHING,
        null=True,
        blank=True,
    )
    public = models.BooleanField(default=True)
    ena_last_update = models.DateField(null=True)
    assembly_type = models.ForeignKey(
        "AssemblyType",
        db_column="assembly_type_id",
        on_delete=models.DO_NOTHING,
        blank=True,
        null=True,
    )
class Assembler(models.Model):
    """An assembler program at a specific version (e.g. metaSPAdes 3.x)."""
    class Meta:
        db_table = "Assembler"
        app_label = "backlog"
    name = models.CharField(max_length=20)
    version = models.CharField(max_length=20)
class AssemblyJobStatus(models.Model):
    """Lookup table of assembly-job states (pending, running, done, ...)."""
    class Meta:
        db_table = "AssemblyJobStatus"
        app_label = "backlog"
    description = models.CharField(max_length=100)
class AssemblyJobResult(models.Model):
    """Metrics produced by a completed assembly job."""
    class Meta:
        db_table = "AssemblyJobResult"
        app_label = "backlog"
    execution_time = models.BigIntegerField(
        help_text="Total execution time (including restarts) of the assembler, in seconds."
    )
    peak_mem = models.BigIntegerField(
        help_text="Peak memory usage of the assembler, in megabytes."
    )
    # Standard assembly quality metrics.
    n50 = models.IntegerField()
    l50 = models.IntegerField()
    num_contigs = models.IntegerField()
    assembly_length = models.BigIntegerField()
    largest_contig = models.BigIntegerField()
    coverage = models.FloatField()
    # average depth of coverage of the assembly
    coverage_depth = models.FloatField()
class AssemblyJob(models.Model):
    """A scheduled/executed run of an assembler over one or more Runs."""
    class Meta:
        db_table = "AssemblyJob"
        app_label = "backlog"
    assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)
    status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)
    submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING, null=True)
    request_id = models.ForeignKey(
        UserRequest,
        on_delete=models.DO_NOTHING,
        null=True,
        db_column="request_id",
    )
    directory = models.CharField(max_length=255, null=True, blank=True)
    input_size = models.BigIntegerField(
        help_text="Sum of filesizes of compressed input. (bytes)"
    )
    reason = models.TextField(
        null=True,
        help_text="Filled iff assembly will not be submitted to ENA, specifies the reason why.",
    )
    requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
    priority = models.IntegerField(
        choices=[(1, "Low"), (2, "Medium"), (3, "High")], null=True
    )
    result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE, null=True)
    estimated_peak_mem = models.BigIntegerField(
        help_text="Estimated peak memory usage of the assembler, in megabytes.",
        null=True,
    )
    # NOTE(review): NullBooleanField is deprecated since Django 3.1 (removed in
    # 4.0); BooleanField(null=True) is the replacement when upgrading.
    uploaded_to_ena = models.NullBooleanField()
    bam_uploaded = models.NullBooleanField()
    new_ena_assembly = models.CharField(max_length=20, null=True)
    runs = models.ManyToManyField(
        Run, through="RunAssemblyJob", related_name="assemblyjobs", blank=True
    )
# Assembly instances for runs
class RunAssemblyJob(models.Model):
    """Through-table linking Runs to the AssemblyJobs that process them."""
    class Meta:
        db_table = "RunAssemblyJob"
        app_label = "backlog"
        unique_together = (("run", "assembly_job"),)
    run = models.ForeignKey(Run, on_delete=models.CASCADE)
    assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)
# Show all runs used to create an assembly
class RunAssembly(models.Model):
    """Link between a Run and an ENA Assembly derived from it.

    NOTE(review): unlike RunAssemblyJob there is no unique_together here, so
    duplicate (run, assembly) rows are possible — confirm intended.
    """
    class Meta:
        db_table = "RunAssembly"
        app_label = "backlog"
    run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)
    assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)
class AnnotationJobStatus(models.Model):
    """Lookup table of annotation-job states."""
    class Meta:
        db_table = "AnnotationJobStatus"
        app_label = "backlog"
    description = models.CharField(max_length=20)
class AnnotationJob(models.Model):
    """A pipeline annotation run over one or more Runs (or an Assembly via
    AssemblyAnnotationJob)."""
    PRIORITY_LOW = 1
    PRIORITY_MEDIUM = 2
    PRIORITY_HIGH = 3
    PRIORITIES = [
        (PRIORITY_LOW, "Low"),
        (PRIORITY_MEDIUM, "Medium"),
        (PRIORITY_HIGH, "High"),
    ]
    # Pipeline execution result status.
    # For example the pipeline may find no CDS so most steps
    # aren't going to be executed for this data set.
    RESULT_NO_TAX = "no_tax"
    RESULT_NO_QC = "no_qc"
    RESULT_NO_CDS = "no_cds"
    RESULT_NO_CDS_TAX = "no_cds_tax"
    # pipeline completed all the stages
    RESULT_FULL = "full"
    RESULT_CHOICES = (
        (RESULT_NO_TAX, "No Taxonomy results"),
        (RESULT_NO_QC, "Failed QC"),
        (RESULT_NO_CDS, "No CDS found"),
        (RESULT_FULL, "No problems"),
        (RESULT_NO_CDS_TAX, "No CDS or taxonomy found"),
    )
    pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)
    status = models.ForeignKey(
        AnnotationJobStatus, on_delete=models.DO_NOTHING, db_index=True
    )
    priority = models.IntegerField(choices=PRIORITIES)
    request = models.ForeignKey(
        UserRequest, on_delete=models.DO_NOTHING, null=True, db_column="request_id"
    )
    directory = models.CharField(max_length=255, null=True, blank=True)
    last_updated = models.DateTimeField(auto_now=True, null=True)
    runs = models.ManyToManyField(
        Run, through="RunAnnotationJob", related_name="annotationjobs", blank=True
    )
    # Number of (re)execution attempts made for this job.
    attempt = models.IntegerField(default=0)
    result_status = models.CharField(
        max_length=10, choices=RESULT_CHOICES, blank=True, null=True
    )
    class Meta:
        db_table = "AnnotationJob"
        app_label = "backlog"
# Annotation instance for a run
class RunAnnotationJob(models.Model):
    """Through-table linking Runs to AnnotationJobs."""
    class Meta:
        db_table = "RunAnnotationJob"
        app_label = "backlog"
        unique_together = (("run", "annotation_job"),)
    run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)
    annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)
class AssemblyAnnotationJob(models.Model):
    """Link between an Assembly and an AnnotationJob annotating it."""
    class Meta:
        db_table = "AssemblyAnnotationJob"
        app_label = "backlog"
    assembly = models.ForeignKey(
        Assembly, on_delete=models.DO_NOTHING, related_name="assemblyannotationjobs"
    )
    annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)
class AssemblyProteinDB(models.Model):
    """Tracks import of an Assembly's proteins into the protein database,
    including a coded failure reason when the import did not complete."""
    STATUS_COMPLETED = 1
    STATUS_FAIL = 0
    STATUS = ((STATUS_COMPLETED, "Completed"), (STATUS_FAIL, "Failed"))
    # Coded reasons an import can fail; stored in `fail_reason`.
    FAIL_FASTA_MISSING = 1
    FAIL_PIPELINE_VERSION = 2
    FAIL_FASTA_DIR = 3
    FAIL_SUPRESSED = 4
    FAIL_MGYC = 5
    FAIL_MGYP = 6
    FAIL_METADATA = 7
    FAIL_MGYC_MGYP = 8
    FAIL_MGYC_METADATA = 9
    FAIL_MGYP_METADATA = 10
    FAIL_MGYC_MGYP_METADATA = 11
    FAIL_LEGACY = 12
    FAIL_REASONS = (
        (FAIL_FASTA_MISSING, "Missing protein fasta file"),
        (FAIL_PIPELINE_VERSION, "Assembly was added with higher version of pipeline"),
        (FAIL_FASTA_DIR, "Assembly results directory is missing"),
        (FAIL_SUPRESSED, "Suppressed assembly"),
        (FAIL_MGYC, "Incorrect number of sequences for MGYC.fasta"),
        (FAIL_MGYP, "Incorrect number of sequences for MGYP.fasta"),
        (FAIL_METADATA, "Incorrect number of records for metadata"),
        (FAIL_MGYC_MGYP, "Incorrect MGYC and MGYP but metadata is OK"),
        (FAIL_MGYC_METADATA, "Incorrect number of sequences for MGYC.fasta and metadata table/file"),
        (FAIL_MGYP_METADATA, "Incorrect number of sequences for MGYP.fasta and metadata table/file"),
        (FAIL_MGYC_MGYP_METADATA, "Incorrect number of sequences for MGYC, MGYP and metadata"),
        (FAIL_LEGACY, "Assembly marked as legacy"),
    )
    assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)
    status = models.IntegerField("status", choices=STATUS)
    fail_reason = models.IntegerField(
        "fail_reason", choices=FAIL_REASONS, null=True, blank=True
    )
    pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.DO_NOTHING)
    last_updated = models.DateTimeField("Last updated", auto_now=True)
    # Id of this assembly inside the protein DB, once imported.
    assembly_id_pdb = models.IntegerField("id_pdb", null=True)
    legacy = models.IntegerField("assembly_id for new accession for legacy assembly", null=True, blank=True)
    class Meta:
        app_label = "backlog"
        db_table = "AssemblyProteinDB"
|
import pandas as pd
df = pd.read_csv("search.csv")
df0 = df[df['re_0']<df['re_1']]
df1 = df[df['re_0']>df['re_1']].ix[:, ['re_1', 'im_1', 're_0', 'im_0']]
df1.columns = ['re_0', 'im_0', 're_1', 'im_1']
df = pd.concat([df0, df1]).sort_values(by=["re_0"])
eps = pow(10.0, -4.0)
first = True
res = []
val_old = None
for (k, val) in df.iterrows():
z0 = val['re_0']+1.0j*val['im_0']
z1 = val['re_1']+1.0j*val['im_1']
if (first):
res.append([z0, z1])
first = False
else:
z0_old = val_old['re_0']+1.0j*val_old['im_0']
z1_old = val_old['re_1']+1.0j*val_old['im_1']
print k, z0, z1, abs(z0_old-z0)+ abs(z1_old-z1)
if(abs(z0_old-z0) + abs(z1_old-z1) >eps):
res.append([z0, z1])
val_old = val
f = open('filtered.csv', 'w')
for [z0, z1] in res:
print >>f, "{0},{1},{2},{3}".format(z0.real, z0.imag, z1.real, z1.imag)
"""
for i in range(len(df)-1):
print i
z0 = df.ix[i,:]['re_0'] + 1.0j * df.ix[i,:]['im_0']
z1 = df.ix[i,:]['re_1'] + 1.0j * df.ix[i,:]['im_1']
z0p = df.ix[i+1,:]['re_0'] + 1.0j * df.ix[i+1,:]['im_0']
z1p = df.ix[i+1,:]['re_1'] + 1.0j * df.ix[i+1,:]['im_1']
if(abs(z0-z0p)>eps and abs(z1-z1p)>eps):
res.append([z0p, z1p])
print res
print len(df)
"""
|
import json
import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dangjianyun.settings")# project_name 项目名称
django.setup()
from dangjiansite.djfuncs import *
import os
import datetime
import requests
import time
import urllib3
import base64
import csv
import random
from bs4 import BeautifulSoup
from dangjiansite.models import *
class Runner():
# def __init__(self, appid='TJZHDJ01', username='024549', password='Aa1234'):
    # def __init__(self, appid='TJZHDJ01', username='024549', password='Aa1234'):
    def __init__(self, appid='TJZHDJ01', username='', password=''):
        """Log in and pre-fetch the page lists used by the daily-credit tasks.

        Construction performs network I/O: it obtains a session token and then
        downloads the thumb/help/study page lists from the API.
        """
        urllib3.disable_warnings()  # suppress SSL warnings (verify=False is used throughout)
        self.currentTime = datetime.datetime.now().strftime("%H:%M:%S")
        self.username = username
        self.password = password
        # NOTE(review): these templates contain no '{}', so .format(username) is a
        # no-op and all users share './lib/', './log/', './err/' — confirm whether
        # per-user directories were intended.
        self.thumbedFilePath = './lib/'.format(username)
        self.logFilePath = './log/'.format(username)
        self.errFilePath = './err/'.format(username)
        # self.thumbedFileList = self.getThumbFromFile()
        self.thumbedFileList = []
        self.debug = True
        self.session = requests.session()
        self.appid = appid  # id of the installed client app; presumably device-specific — unverified
        self.headers = {
            'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10)',
            'header_version': '80',
            'system': 'android',
            'Connection': 'Keep-Alive',
            'Host': 'mapi.dangjianwang.com',
        }
        self.token = self.getToken()
        time.sleep(0.1)
        # Pages that can receive a comment/"thumb".
        self.thumbPageList = self.getPages(urls=[
            'https://mapi.dangjianwang.com/v3_1/Learn/List',
            'https://mapi.dangjianwang.com/v3_1/Activities/List',
            'https://mapi.dangjianwang.com/v3_1/Hotspots/Hotlist'
        ])
        self.thumbPages = [i[1] for i in self.thumbPageList]
        time.sleep(0.1)
        # Help-square questions that can be answered.
        self.helpPageList = self.getPages(urls=['https://mapi.dangjianwang.com/v3_1/Help/List', ])
        self.helpPages = [i[1] for i in self.helpPageList]
        self.helpResults = {}
        time.sleep(0.1)
        # Study materials (different endpoint shape, see getPagesII).
        self.studyPageList = self.getPagesII(urls=['https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList'])
        self.studyPages = [i[1] for i in self.studyPageList]
        time.sleep(0.1)
        self.studyRsults = {}
        self.thumbedPages = []
        self.thumbResults = {}
        self.helpedPages = []
        self.multiThumbed = []  # ids thumbed more than once; intended to be persisted to file eventually
        self.viewsResults = []
        self.examC19Info = []
        self.examlist = []
        self.qaList = []
def getCurrentTime(self):
return datetime.datetime.now().strftime("%H:%M:%S")
    def writeErr2File(self, err):
        """Append a timestamped error line to this user's err.txt file."""
        # NOTE(review): writes under self.logFilePath although self.errFilePath
        # exists — confirm which directory was intended.
        path = self.logFilePath
        fullPath = '{}{}err.txt'.format(path, self.username)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(fullPath, 'a') as f:
            f.write('{}:{}\n'.format(self.currentTime, err))
        print('err已经写入{}'.format(fullPath))
def writeLog2File(self, log):
path = self.logFilePath
fullPath = '{}{}logs.txt'.format(path, self.username)
if not os.path.exists(path):
os.mkdir(path)
with open(fullPath, 'a') as f:
f.write('{}:{}\n'.format(self.currentTime, log))
print('log已经写入{}'.format(fullPath))
def writeThumb2File(self, id):
path = self.thumbedFilePath
fullPath = '{}{}thumbs.txt'.format(path, self.username)
if not os.path.exists(path):
os.mkdir(path)
with open(fullPath, 'a') as f:
f.write(',{}'.format(id))
print('点赞记录已经写入{}'.format(fullPath))
def getThumbFromFile(self):
'''
:return: 文件中id组成的列表
'''
path = self.thumbedFilePath
inFileList = []
fullPath = '{}{}thumbs.txt'.format(path, self.username)
if not os.path.exists(fullPath):
return fullPath
with open(fullPath, 'r') as f:
inFileList.extend(list(set(f.readlines()[0].split(','))))
# print('getThumbFormFile', inFileList)
with open(fullPath, 'w') as f1:
f1.write(','.join(sorted(inFileList)))
return inFileList
    def getExcuteTimes(self):
        """Derive how many times each daily task still needs to run today.

        Uses getCredItinfo()'s (score, detail) tuple, where each detail value
        is an 'earned/maximum' string.  Returns a dict with keys 'thumb',
        'help', 'view', 'exam' and 'study'.
        """
        excuteTimes = {}
        credInfo = self.getCredItinfo()
        print(credInfo)
        currentScore = credInfo[0]
        # Thumbs ("信息评论"): 1 credit each, max 10/day.
        thumbScore = credInfo[1]['信息评论'].split('/')[0]
        thumbExcuteTimes = 10 - int(thumbScore)
        excuteTimes.update({'thumb': thumbExcuteTimes})
        # Help answers ("互助广场回答"): 1 credit each, max 2/day.
        helpScore = credInfo[1]['互助广场回答'].split('/')[0]
        helpExctuteTimes = 2 - int(helpScore)
        excuteTimes.update({'help': helpExctuteTimes})
        # Viewpoint posts ("党员视角发布"): 2 credits each, max 4/day.
        viewScore = credInfo[1]['党员视角发布'].split('/')[0]
        viewExcuteTimes = int((4 - int(viewScore)) / 2)
        excuteTimes.update({'view': viewExcuteTimes})
        # Online quiz ("在线知识竞答"): 2 credits each, max 4/day.
        examScore = credInfo[1]['在线知识竞答'].split('/')[0]
        examExcuteTimes = int((4 - int(examScore)) / 2)
        excuteTimes.update({'exam': examExcuteTimes})
        # Study: run once while either reading or write-up credit is not maxed.
        # NOTE(review): examExcuteTimes is reused for the study flag — works,
        # but a separate variable would be clearer.
        flag = int(credInfo[1]['在线阅读学习资料'].split('/')[1]) - int(credInfo[1]['在线阅读学习资料'].split('/')[0])
        flag1 = int(credInfo[1]['学习资料写体会'].split('/')[1]) - int(credInfo[1]['学习资料写体会'].split('/')[0])
        examExcuteTimes = 1 if flag != 0 or flag1 != 0 else 0
        excuteTimes.update({'study': examExcuteTimes})
        return excuteTimes
def getToken(self):
'''
获得一个连接的token
每个连接都需要使用到
:return:
'''
data = {
'appid': self.appid,
'username': self.username,
'password': self.password,
}
longinurl = 'https://mapi.dangjianwang.com/v3_1/login'
r = self.session.post(url=longinurl, data=data, verify=False)
rjson = r.json()
# print(type(rjson))
# print(rjson)
if rjson['code'] == '200':
return rjson['token']
else:
print('token 获得失败')
return None
def getRJson(self, url):
data={
'token': self.token,
'appid': self.appid
}
return self.session.post(url=url, data=data, verify=False).json()
def getUserInfo(self):
'''
获得一大串用户的信息,暂时没用
:return:
'''
infoUrl = 'https://mapi.dangjianwang.com/v3_1/User/UserInfo'
return self.getRJson(url=infoUrl)
def getCredItinfoToday(self):
'''
获得人员当前的得分等级参数
:return:
'''
creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'
info = self.getRJson(url=creditInfourl)
fullScore = info['data']['full']
gainScore = info['data']['gain']
currentLevel = info['data']['level']
username = info['data']['name']
ret = {
'fullScore': fullScore,
'gainScore': gainScore,
'currentLevel': currentLevel,
'username': username,
}
return ret
    def getCredItinfo(self):
        """Return today's credit status as (haved_credit, credit_detail).

        Example shape:
            ('35', {'连续登录': '3/3', '手机端登录': '2/2', '信息评论': '10/10', ...})
        where each detail value is an 'earned/maximum' string.  Useful to
        decide whether the daily tasks still need to run.
        """
        creditInfourl = 'https://mapi.dangjianwang.com/v3_1/User/CreditInfo'
        haved_credit = 0
        credit_detail = {}
        info = self.getRJson(url=creditInfourl)
        for k, v in info.items():
            if k == 'data':
                for k2, v2 in v.items():
                    if k2 == 'haved_credit':
                        haved_credit = v2
                    if k2 == 'credit_detail':
                        # credit_detail is a list of {'title': ..., 'score': ...} dicts
                        for i in v2:
                            credit_detail.update({i['title']: i['score']})
        return (haved_credit, credit_detail)
def getPages(self, urls):
pages = []
for url in urls:
data = self.getRJson(url=url)
for k, v in data.items():
if k == 'data':
for i in v:
# pages.append({'pageId': i['id'], 'pageTitle': i['title']})
# pages.append(i['id'])
pages.append((i['title'], i['id']))
return pages
    def getPagesII(self, urls):
        """Collect (name, id) tuples from the study-material list endpoints.

        Unlike getPages, these endpoints require extra POST fields (a
        hard-coded type_id of '791' and the first results page) and return
        entries keyed by 'name' instead of 'title'.
        """
        def getRJson(url):
            # Local variant of self.getRJson carrying the extra study-list fields.
            data = {
                'token': self.token,
                'appid': self.appid,
                'type_id': '791',
                'page_index': '1',
            }
            return self.session.post(url=url, data=data, verify=False).json()
        pages = []
        for url in urls:
            data = getRJson(url=url)
            for k, v in data.items():
                # print(k, v)
                if k == 'data':
                    for i in v:
                        # pages.append({'pageId': i['id'], 'pageTitle': i['title']})
                        # pages.append(i['id'])
                        pages.append((i['name'], i['id']))
        return pages
    def doThumb(self, id):
        """Post a canned comment ("thumb") on the page with the given id.

        On success records the id in memory and on disk and returns
        (detail, thumbInfo); on token expiry (code 1003) refreshes the token;
        on rate-limit (code 500) sleeps 20s.  Returns None on all non-success
        paths.
        """
        contents = [
            '关注',
            '关注!',
            '关注!!']
        data = {
            'id': id,
            'comment': random.choice(contents),
            'token': self.token,
            'appid': self.appid,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Activities/CommentAct'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        print(rjson)
        if rjson['code'] == '1003':
            # 1003: token expired — fetch a fresh one; the caller may retry.
            self.token = self.getToken()
        elif rjson['code'] == '200':
            result = rjson['msg']
            if result == '操作成功':
                self.thumbedPages.append(id)
                # print(self.thumbPageList)
                # print(len(self.thumbPageList), len(list(set(self.thumbPageList))))
                # Find the page's title so the result can be logged with it.
                for i in list(set(self.thumbPageList)):
                    if id == i[1]:
                        temp = {'title': i[0]}
                        self.thumbResults.update(temp)
                        log = '信息点赞:\n主题: {}\n提交:{}'.format(i[0], data['comment'])
                        detail = '{} 主题:{}\n回复:{}\n'.format(self.getCurrentTime(), i[0], data['comment'])
                        write2File(self, './results/', 'result.txt', log)
                        thumbInfo = {'title': i[0], 'reply': data['comment']}
                        self.thumbPages.remove(id)
                        self.writeThumb2File(id=id)
                        return (detail, thumbInfo)
        elif rjson['code'] == '500' and rjson['msg'] == '评论过快,请求休息一会':
            # Server-side rate limit: back off before the caller retries.
            print('因评论过快,等待一段时间')
            time.sleep(20)
        else:
            print('rjson', rjson)
            # self.multiThumbed.append(id)
            # NOTE(review): on this failure path id was never appended to
            # thumbedPages, so this remove() raises ValueError unless a prior
            # call added it — was self.thumbPages.remove(id) intended?
            self.thumbedPages.remove(id)  # drop it so it is not selected again
            self.writeThumb2File(id=id)
        log = '点赞:{}'.format(rjson)
        self.writeLog2File(log)
        print(log)
        time.sleep(10)
    def doHelp(self, id, callback=None):
        """Post a canned answer on the help-square question with the given id.

        Returns (detail, log, helpInfo); detail stays '' and helpInfo None when
        the post does not succeed.  *callback* is accepted but unused.
        """
        detail = ''
        helpInfo = None
        log = ''
        content = [
            '把党的政治建设摆在首位!',
            '不忘初心,牢记使命!',
            '发展史第一要务,人才是第一资源,创新是第一动力。',
            '要把党的领导贯彻到依法治国全过程和各方面',
            '毫不动摇坚持中国共产党领导',]
        data = {
            'id': id,
            'content': random.choice(content),
            'token': self.token,
            'appid': self.appid,
        }
        print(data)
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Help/PostComment'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        if rjson['code'] == '200':
            result = rjson['msg']
            if result == '操作成功':
                self.helpedPages.append(id)
                self.helpPages.remove(id)
                # Record the success against the question's title.
                for i in self.helpPageList:
                    if id == i[1]:
                        curTime = self.getCurrentTime()
                        # print('('*88)
                        # print(curTime)
                        # NOTE(review): id[0] is the first character of the id
                        # string; i[0] (the page title, as used below) looks
                        # intended here.
                        self.helpResults.update({'title': id[0]})
                        # NOTE(review): rjson['comment'] — the reply text sent
                        # was data['content']; confirm the API echoes a
                        # 'comment' field, otherwise this raises KeyError.
                        log = '互助:\n主题: {}\n提交内容: {}'.format(i[0], rjson['comment'])
                        write2File(self, './results/', 'result.txt', log)
                        # # write to the database
                        detail = '{} 主题: {}\n提交内容: {}\n'.format(curTime, i[0], rjson['comment'].strip())
                        helpInfo = {'title': i[0], 'reply': rjson['comment']}
            else:
                pass
        else:
            pass
        log = '帮助:{}'.format(rjson)
        self.writeLog2File(log)
        print(log)
        return (detail, log, helpInfo)
    def doView(self):
        """Publish a canned "party member viewpoint" post.

        Returns (detail, publicContent).  NOTE(review): the return expressions
        index rjson['data'] unconditionally, so a non-200 response raises
        KeyError here rather than returning gracefully.
        """
        content = [
            '全面的小康,覆盖的人口要全面,是惠及全体人民的小康。',
            '不忘初心,牢记使命,坚持终身学习!']
        data = {
            'content': random.choice(content),
            'token': self.token,
            'appid': self.appid,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Viewpoint/Create'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        if rjson['code'] == '200':
            result = rjson['msg']
            if result == '操作成功':
                # Only a success counter; the posted text is recovered below.
                self.viewsResults.append(1)
                # self.viewsResults.append(id)
            else:
                pass
        log = '党员视角:{}'.format(rjson)
        detail = '{} 党员视角:\n发布内容:{}\n'.format(self.getCurrentTime(), rjson['data']['content'])
        publicContent = rjson['data']['content']
        # print(detail)
        # self.writeLog2File(log)
        # print('党员视角'*12)
        # print(id)
        # print(log)
        # print('党员视角' * 12)
        return (detail, publicContent)
def doStudy(self, mid):
    """Run one full "study" session for material `mid`.

    The first three POSTs mirror the requests the app fires when a
    material is opened; the GET scrapes paragraph text that is submitted
    as the "study feeling"; the final POST reports the elapsed reading
    time — when it succeeds, the session counts as completed.

    :param mid: material id to study.
    :return: None (results are accumulated in self.studyRsults).
    """
    # Required reading time in seconds: 5 minutes plus a small slack.
    interval = 60 * 5 + 5
    def post1():
        # Mirrors the app's CheckCollStatus request (note the doubled
        # slash in the path — kept as captured).
        data = {
            'mid': mid,
            'token': self.token,
            'appid': self.appid,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        # print(rjson)
        log = '学习post1:{}'.format(rjson)
        self.writeLog2File(log)
        print(log)
    def post2():
        # Token validity check, as the app does on material open.
        data = {
            'token': self.token,
            'appid': self.appid,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Login/CheckToken'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        # print(rjson)
        log = '学习post2:{}'.format(rjson)
        self.writeLog2File(log)
        print(log)
    def post3():
        # Fetch the number of "feelings" already recorded for this material.
        data = {
            'mid': mid,
            'token': self.token,
            'appid': self.appid,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        # print(rjson)
        log = '学习post3:{}'.format(rjson)
        self.writeLog2File(log)
        print(log)
    def get1():
        # Scrape the material page and return a random paragraph to use
        # as the feeling text.
        url = 'https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={}'.format(self.token, mid)
        rjson = self.session.get(url=url)
        text = rjson.content
        soup = BeautifulSoup(text, 'html.parser')
        retContents = []
        for div in soup.find_all('p'):
            p = div.text.strip()
            # NOTE(review): the chained comparison `100 > len(p) < 200`
            # is equivalent to len(p) < 100 — probably meant
            # `100 < len(p) < 200`; left unchanged here.
            retContents.append(p if 100 > len(p) < 200 else p[0:200])
        return random.choice(retContents)
    def recordFeeling(content=None):
        # Submit the study feeling; on code '1120' (duplicate comment)
        # retries recursively with a random sentence appended.
        if not content:
            content = '伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,' \
                      '是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。' \
                      '邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。'
        data = {
            'mid': mid,
            'token': self.token,
            'appid': self.appid,
            'content': content
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        # print(rjson)
        log = '学习recordFeeling:{}'.format(rjson)
        self.writeLog2File(log)
        print('in recordFeeling')
        print(log)
        if rjson['code'] == '200':
            return {'content': content}
        elif rjson['code'] == '1120':
            addtion = [
                '我们必须坚定不移,任何时候任何情况下都不能动摇',
                '人民有信心,国家才有未来,国家才有力量。',
                '新时代,属于自强不息、勇于创造的奋斗者。',
                '民主政治建设有序推进,依法治市迈出新步伐。',
                '一切公职人员,都必须牢记始终为人民利益和幸福而努力工作。',
            ]
            return recordFeeling(content= '{}\n{}'.format(content, random.choice(addtion)))
        else:
            # NOTE(review): returning None makes the later
            # self.studyRsults.update(...) raise TypeError — confirm
            # whether other codes can occur in practice.
            return None
    # Record the submitted feeling / report reading time.
    def readTime():
        # Final request: report `interval` seconds of reading time.
        data = {
            'mid': mid,
            'token': self.token,
            'appid': self.appid,
            'time': interval,
        }
        commitUrl = 'https://mapi.dangjianwang.com/v3_1/Study/ReadTime'
        rjson = self.session.post(url=commitUrl,
                                  data=data,
                                  verify=False).json()
        # print(rjson)
        log = '学习readTime:{}'.format(rjson)
        # self.studyRsults.update({'学习readTime', rjson})
        self.writeLog2File(log)
        print(log)
    # Replay the app's request sequence with small pauses in between.
    post1()
    time.sleep(1)
    post2()
    time.sleep(1)
    post3()
    time.sleep(1)
    content = get1()
    time.sleep(1)
    # time.sleep(interval)
    count = 0
    print('开始学习请稍后')
    # Wait out the required reading time, logging progress every 30s.
    for i in range(interval):
        count += 1
        # print(i + 1)
        if count % 30 == 0:
            print('已用时{}秒'.format(count))
        time.sleep(1)
    # time.sleep(5)
    print('填写的学习体会', content)
    self.studyRsults.update(recordFeeling(content=content))
    time.sleep(1)
    readTime()
    time.sleep(1)
    pass
def doExam(self):
    """Take one randomized exam end-to-end.

    Fetches the exam and question-bank listings, requests a random paper
    from bank 6, looks up each question's answer in the local Qa table,
    and hands the paper in.

    :return: None (results are printed and kept in self.qaList et al.).
    """
    ids = []
    data = {
        'page': '1',
        'page_size': '20',
        'token': self.token,
        'appid': self.appid,
    }
    # Step 1: exam list (response currently unused beyond the request).
    examlistUrl = 'https://mapi.dangjianwang.com/v3_1/quora/examlist'
    rjson = self.session.post(url=examlistUrl,
                              data=data,
                              verify=False).json()
    # print(rjson)
    # for i in rjson['data']:
    #     print(i)
    time.sleep(0.3)
    #########################################################
    print('*' * 99)
    data = {
        'page': '1',
        'page_size': '20',
        'token': self.token,
        'appid': self.appid,
    }
    # Step 2: question-bank list; remember every bank and capture the
    # "19th Congress report, 100 single-choice questions" bank details.
    banklistUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'
    rjson = self.session.post(url=banklistUrl,
                              data=data,
                              verify=False).json()
    # print(rjson)
    for i in rjson['data']:
        tem = (i['bank_name'], i['id'])
        self.examlist.append(tem)
        if i['bank_name'] == '十九大报告100题(单选)':
            # if i['bank_num'] == '65':
            temp = {
                'title': i['bank_name'],
                'detail': i['detail'],
                'id': i['id'],
            }
            self.examC19Info.append(temp)
    # print(self.examC19Info)
    # print(self.examlist)
    time.sleep(0.3)
    #########################################################
    print('*' * 99)
    data = {
        'bank': '6',
        'token': self.token,
        'appid': self.appid,
    }
    # Step 3: request a random paper from bank 6; keep the paper id and
    # the (question id, content) pairs for answering.
    commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/randexam'
    rjson = self.session.post(url=commitUrl,
                              data=data,
                              verify=False).json()
    # print(rjson)
    aa = rjson['data']
    paper = aa['id']
    for i in aa['questions']:
        temp = {'id': i['id'], 'content': i['content']}
        ids.append(temp)
    #########################################################
    print('*' * 99)
    time.sleep(0.5)
    # Step 4: answer the questions and hand the paper in.
    answers = []
    # Resolve each question's answer first.
    for i in ids:
        # Look the answer up in the local database (Django ORM).
        correctAnswer = Qa.objects.filter(question__contains=i['content'])[0]
        answerText = correctAnswer.answerText
        answer = correctAnswer.answer
        # Alternative: resolve the answer from a file.
        # answerText = getAnswer(i['content'])[2]
        # answer = getAnswer(i['content'])[1]
        temp = {'index': i['id'], 'answer': answer}
        qa = {'index': i['id'], 'answer': answer, 'answerText': answerText}
        self.qaList.append(qa)
        print(qa, i['content'])
        answers.append(temp)
    time.sleep(1)
    hdata = {
        'token': self.token,
        'appid': self.appid,
        'paper': paper,
        'answers': json.dumps(answers),
        # 'answers': [{'answer': 'A', 'index': '639'}, {'answer': 'A', 'index': '639'}],
    }
    # print('hdata:', hdata)
    commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/handpaper'
    rjson = self.session.post(url=commitUrl,
                              data=hdata,
                              verify=False).json()
    print(rjson)
    print(self.examlist)
    print(self.examC19Info)
    print(self.qaList)
def getAnswerInfo(self):
    """Fetch the answering results and accuracy listing.

    BUG FIXES: 'page_index' was sent as the literal string 'page_index'
    instead of a page number, and the request targeted /exam/randexam;
    per the captured-traffic notes kept below in this file, the
    answer-info listing endpoint is /exam/banklist with page_index 1.
    """
    data = {
        'token': self.token,
        'appid': self.appid,
        'page_size': '20',
        'page_index': '1',
    }
    commitUrl = 'https://mapi.dangjianwang.com/v3_1/exam/banklist'
    rjson = self.session.post(url=commitUrl,
                              data=data,
                              verify=False).json()
    print(rjson)
'''
https://mapi.dangjianwang.com/v3_1/exam/randexam 答题地址 主id是交卷的paper 这里要获取到questions里的id 等于回答问题中的index
appid TJZHDJ01
bank 6
token 5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_
https://mapi.dangjianwang.com/v3_1/exam/handpaper 交卷的连接
appid TJZHDJ01
answers [{"index":"635","answer":"D"},{"index":"640","answer":"C"},{"index":"641","answer":"B"},{"index":"665","answer":"B"},{"index":"670","answer":"B"},{"index":"673","answer":"B"},{"index":"677","answer":"C"},{"index":"682","answer":"B"},{"index":"684","answer":"C"},{"index":"690","answer":"A"}]
token 5jTY47PbPZ0KdUprwmfJVfH4cX23tyDcV25XrEYkWVvElH3YjJpIb1JCDwq_
paper 4565894
https://mapi.dangjianwang.com/v3_1/exam/banklist 获得答题情况的连接
appid TJZHDJ01
page_size 20
token 5jTY47PbPZxXeRxlkzScAPWidyvssy3TBD5Y9UYiCQnMmCfa2pRNb1JCDwq_
page_index 1
--------------------------------------------------
https://mapi.dangjianwang.com/v3_1/Study/MaterialCollList 学习的id列表
appid TJZHDJ01
page_size 20
type_id 791
token 5jTY47PbPZJbeh9ixjfOUvaoI3604SrSAz5Zokt3DAmfz3qIis4Yb1JCDwq_
page_index 1
下面是针对791id列表中的访问地址
https://mapi.dangjianwang.com/v3_1//Study/CheckCollStatus
post1:
appid TJZHDJ01
mid 9729
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
post2:
https://mapi.dangjianwang.com/v3_1/Login/CheckToken
appid TJZHDJ01
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
post3:
https://mapi.dangjianwang.com/v3_1/Study/GetFeelingsNum
appid TJZHDJ01
mid 9729
token 5jTY47PbPZoOKEUwlDCaAKWqICGwt3_OVzlVpk5yW1bMyS_M3J5Db1JCDwq_
get1 https://mapi.dangjianwang.com/v3_1/Study/MaterialDetail?token={}&mid={} 获得页面
post 发表体会
https://mapi.dangjianwang.com/v3_1/Study/RecordFeeling
appid TJZHDJ01
content 伟大的时代造就伟大的人物。邓小平同志就是从中国人民和中华民族近代以来伟大斗争中产生的伟人,是我们大家衷心热爱的伟人。我们很多同志都曾经在他的领导和指导下工作过,他的崇高风范对我们来说是那样熟悉、那样亲切。邓小平同志崇高鲜明又独具魅力的革命风范,将激励我们在实现“两个一百年”奋斗目标、实现中华民族伟大复兴中国梦的征程上奋勇前进。
mid 9729
token 5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_
post 结束学习
https://mapi.dangjianwang.com/v3_1/Study/ReadTime
appid TJZHDJ01
time 362
mid 9729
token 5jTY47PbPckOdUlllmfOCaCvcy7ls3rSVmxRoE0gDg3EmyrYi5Ucb1JCDwq_
---------------------------------------
https://mapi.dangjianwang.com/v3_1/Help/List 这里获得帮助id
https://mapi.dangjianwang.com/v3_1/Help/PostComment 提交评论的地址
appid TJZHDJ01
content 不忘初心,牢记使命!
id 55984
token 5jTY47PbPcpZe0s1xDLKAqKoIimx6SnSVjcApB92DF3Nmy/djZ1Nb1JCDwq_
把党的政治建设摆在首位!
不忘初心,牢记使命!
-------------------------------
发布的内容
https://mapi.dangjianwang.com/v3_1/Viewpoint/Create
appid TJZHDJ01
content 不忘初心牢记使命
token 5jTY47PbPZ9deR5rkTXIB/b/fymw5HvbAj9R900gDArNnXqE1s9Kb1JCDwq_
不忘初心,牢记使命,坚持终身学习!
全面的小康,覆盖的人口要全面,是惠及全体人民的小康。
-----------------------------
点赞错误
{'msg': '重复评论过多,请您修改后重新提交。', 'code': '500'}
''' |
2,631 | fe7fb9a4a5ca2bb8dab0acf440eb2fac127264ce | #!/usr/bin/env python
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
import logging
import os.path
import webapp2
import json
from webapp2_extras import auth
from webapp2_extras import sessions
from webapp2_extras.auth import InvalidAuthIdError
from webapp2_extras.auth import InvalidPasswordError
import tip
def user_required(handler):
    """
    Decorator that checks if there's a user associated with the current session.
    Will also fail if there's no session present.
    """
    import functools

    # functools.wraps preserves the wrapped handler's __name__/__doc__,
    # which keeps logs and debugging output meaningful.
    @functools.wraps(handler)
    def check_login(self, *args, **kwargs):
        auth = self.auth
        if not auth.get_user_by_session():
            # No session user: bounce to the login page and abort dispatch.
            self.redirect(self.uri_for('login'), abort=True)
        else:
            return handler(self, *args, **kwargs)
    return check_login
class BaseHandler(webapp2.RequestHandler):
    """Common request handler: auth/session shortcuts and template helpers."""

    @webapp2.cached_property
    def auth(self):
        """Shortcut to access the auth instance as a property."""
        return auth.get_auth()

    @webapp2.cached_property
    def user_info(self):
        """Shortcut to access a subset of the user attributes that are stored
        in the session.
        The list of attributes to store in the session is specified in
        config['webapp2_extras.auth']['user_attributes'].
        :returns
          A dictionary with most user information
        """
        return self.auth.get_user_by_session()

    @webapp2.cached_property
    def user(self):
        """Shortcut to access the current logged in user.
        Unlike user_info, it fetches information from the persistence layer and
        returns an instance of the underlying model.
        :returns
          The instance of the user model associated to the logged in user.
        """
        u = self.user_info
        return self.user_model.get_by_id(u['user_id']) if u else None

    @webapp2.cached_property
    def user_model(self):
        """Returns the implementation of the user model.
        It is consistent with config['webapp2_extras.auth']['user_model'], if set.
        """
        return self.auth.store.user_model

    @webapp2.cached_property
    def session(self):
        """Shortcut to access the current session."""
        # Sessions are persisted in the datastore (webapp2_extras backend).
        return self.session_store.get_session(backend="datastore")

    def render_template(self, view_filename, params=None):
        # Render a template from ./views, always exposing `user`.
        if not params:
            params = {}
        user = self.user_info
        params['user'] = user
        path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
        self.response.out.write(template.render(path, params))

    def send_json(self, message):
        # Serialize `message` as a JSON response body.
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(message))

    def display_message(self, message):
        """Utility function to display a template with a simple message."""
        params = {
            'message': message
        }
        self.render_template('message.html', params)

    # this is needed for webapp2 sessions to work
    def dispatch(self):
        # Get a session store for this request.
        self.session_store = sessions.get_store(request=self.request)
        try:
            # Dispatch the request.
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Save all sessions.
            self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
    """Landing page: about page for visitors, home page for signed-in users."""

    def get(self):
        current = self.user
        if not current:
            # Anonymous visitor — show the static about page.
            self.render_template('about.html')
            return
        self.render_template('home.html', {'balance': current.balance})
class AboutHandler(BaseHandler):
    """Serves the static about page."""

    def get(self):
        template_name = 'about.html'
        self.render_template(template_name)
class TrendingHandler(BaseHandler):
    """Serves the trending page."""

    def get(self):
        template_name = 'trending.html'
        self.render_template(template_name)
class TipHandler(BaseHandler):
    """Tip form: lets a logged-in user send credits to another user."""

    @user_required
    def get(self):
        self._serve_page()

    @user_required
    def post(self):
        """Process a tip; on any failure re-render the form flagged failed."""
        failed = False
        user = self.user
        try:
            # ROBUSTNESS: recipient lookup and amount conversion are inside
            # the try, so an unknown recipient or malformed amount shows the
            # 'failed' page instead of an unhandled 500.
            tipReceiver = self.user_model.get_by_auth_id(self.request.get('tipReceiver'))
            amount = float(self.request.get('tip'))
            tip.tip(user, tipReceiver, amount)
        except Exception:
            # FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            failed = True
        self._serve_page(failed)

    def _serve_page(self, failed=False):
        params = {
            'failed': failed
        }
        self.render_template('tip.html', params)

    def serve_profile_page(self):
        # NOTE(review): this is also invoked as a bare function elsewhere in
        # this file (`serve_profile_page(self)` in AddCreditsHandler and
        # ProfileHandler), which raises NameError — it probably belongs at
        # module level.
        user = self.user
        params = {
            'auth_id': user.auth_ids[0],
            'first_name': user.name,
            'last_name': user.last_name,
            'email_address': user.email_address,
            'balance': user.balance,
        }
        self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
    """Lets a logged-in user add credits to their own balance."""

    @user_required
    def get(self):
        self._serve_page()

    @user_required
    def post(self):
        user = self.user
        credits = float(self.request.get('credits'))
        user.balance += credits
        user.put()
        # BUG FIX: `serve_profile_page(self)` referenced a non-existent
        # module-level function (it is a TipHandler method) and raised
        # NameError at runtime. Call the method explicitly; a redirect to
        # the 'profile' route would be the cleaner long-term fix.
        TipHandler.serve_profile_page(self)

    def _serve_page(self):
        params = {
        }
        self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
    """Returns the current user's tip transaction log as JSON."""

    @user_required
    def get(self):
        shard_keys = tip.TipTransactionLogShardConfig.all_keys(self.user)
        # Only the first shard is consulted, matching the write side.
        log_entity = shard_keys[0].get()
        payload = {'logs': log_entity.logs} if log_entity else None
        self.send_json(payload)
class ProfileHandler(BaseHandler):
    """Shows the logged-in user's profile."""

    @user_required
    def get(self):
        # BUG FIX: `serve_profile_page(self)` referenced a non-existent
        # module-level function (it is defined as a TipHandler method), so
        # this handler raised NameError; call the method explicitly.
        TipHandler.serve_profile_page(self)
class SignupHandler(BaseHandler):
    """Creates an account and displays the email-verification link."""

    def get(self):
        self.render_template('signup.html')

    def post(self):
        user_name = self.request.get('username')
        email = self.request.get('email')
        name = self.request.get('name')
        password = self.request.get('password')
        last_name = self.request.get('lastname')
        # Properties that must be unique across users.
        unique_properties = ['email_address']
        # create_user returns (ok, user) on success or (False, duplicate
        # property names) on failure.
        user_data = self.user_model.create_user(user_name,
            unique_properties,
            email_address=email, name=name, password_raw=password,
            last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
        if not user_data[0]: #user_data is a tuple
            # NOTE(review): the message says "email %s" but interpolates
            # the username — confirm which value is intended.
            self.display_message('Unable to create user for email %s because of \
          duplicate keys %s' % (user_name, user_data[1]))
            return
        user = user_data[1]
        user_id = user.get_id()
        # Signup token embedded in the verification URL ('v' = verify).
        token = self.user_model.create_signup_token(user_id)
        verification_url = self.uri_for('verification', type='v', user_id=user_id,
            signup_token=token, _full=True)
        msg = 'Send an email to user in order to verify their address. \
          They will be able to do so by visiting <a href="{url}">{url}</a>'
        self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
    """Starts the password-reset flow by issuing a reset link."""

    def get(self):
        self._serve_page()

    def post(self):
        username = self.request.get('username')
        user = self.user_model.get_by_auth_id(username)
        if not user:
            logging.info('Could not find any user entry for username %s', username)
            self._serve_page(not_found=True)
            return
        user_id = user.get_id()
        # Reuses the signup-token mechanism for reset links ('p' = password).
        token = self.user_model.create_signup_token(user_id)
        verification_url = self.uri_for('verification', type='p', user_id=user_id,
            signup_token=token, _full=True)
        msg = 'Send an email to user in order to reset their password. \
          They will be able to do so by visiting <a href="{url}">{url}</a>'
        self.display_message(msg.format(url=verification_url))

    def _serve_page(self, not_found=False):
        # Re-render the form, keeping the typed username.
        username = self.request.get('username')
        params = {
            'username': username,
            'not_found': not_found
        }
        self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
    """Handles signup ('v') and password-reset ('p') verification links."""

    def get(self, *args, **kwargs):
        user = None
        user_id = kwargs['user_id']
        signup_token = kwargs['signup_token']
        verification_type = kwargs['type']

        # it should be something more concise like
        # self.auth.get_user_by_token(user_id, signup_token)
        # unfortunately the auth interface does not (yet) allow to manipulate
        # signup tokens concisely
        user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
            'signup')

        if not user:
            logging.info('Could not find any user with id "%s" signup token "%s"',
                user_id, signup_token)
            self.abort(404)

        # store user data in the session
        self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)

        if verification_type == 'v':
            # remove signup token, we don't want users to come back with an old link
            self.user_model.delete_signup_token(user.get_id(), signup_token)

            if not user.verified:
                user.verified = True
                user.put()

            self.display_message('User email address has been verified.')
            return
        elif verification_type == 'p':
            # supply user to the page
            params = {
                'user': user,
                'token': signup_token
            }
            self.render_template('resetpassword.html', params)
        else:
            logging.info('verification type not supported')
            self.abort(404)
class SetPasswordHandler(BaseHandler):
    """Sets a new password for the logged-in user (reset flow)."""

    @user_required
    def post(self):
        new_password = self.request.get('password')
        reset_token = self.request.get('t')
        if not new_password or new_password != self.request.get('confirm_password'):
            self.display_message('passwords do not match')
            return
        account = self.user
        account.set_password(new_password)
        account.put()
        # Invalidate the reset token so the old link cannot be reused.
        self.user_model.delete_signup_token(account.get_id(), reset_token)
        self.display_message('Password updated')
class LoginHandler(BaseHandler):
    """Username/password login form."""

    def get(self):
        self._serve_page()

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        try:
            self.auth.get_user_by_password(username, password, remember=True,
                save_session=True)
            # Fold any pending tip-shard balance into the account.
            tip.coalesce_balance(self.user)
            self.redirect(self.uri_for('home'))
        except (InvalidAuthIdError, InvalidPasswordError) as e:
            logging.info('Login failed for user %s because of %s', username, type(e))
            self._serve_page(True)

    def _serve_page(self, failed=False):
        self.render_template('login.html', {
            'username': self.request.get('username'),
            'failed': failed,
        })
class LogoutHandler(BaseHandler):
    """Clears the session and returns to the landing page."""

    def get(self):
        # Drop the auth session, then bounce home.
        self.auth.unset_session()
        self.redirect(self.uri_for('home'))
# webapp2 framework configuration.
config = {
  'webapp2_extras.auth': {
    'user_model': 'models.User',
    'user_attributes': ['name']
  },
  'webapp2_extras.sessions': {
    # NOTE(review): placeholder secret — must be replaced with a real
    # random value before deployment; sessions are forgeable otherwise.
    'secret_key': 'YOUR_SECRET_KEY'
  }
}

# URL routes. NOTE(review): '/' and '/home' both use name='home', so
# uri_for('home') resolves to only one of them; the verification route's
# '\d+' pattern is in a non-raw string (works, but r'...' is safer).
app = webapp2.WSGIApplication([
    webapp2.Route('/', MainHandler, name='home'),
    webapp2.Route('/home', MainHandler, name='home'),
    webapp2.Route('/about', AboutHandler, name='about'),
    webapp2.Route('/trending', TrendingHandler, name='trending'),
    webapp2.Route('/tip', TipHandler, name='tip'),
    webapp2.Route('/add_credits', AddCreditsHandler, name='add_credits'),
    webapp2.Route('/get_logs', LogHandler, name='get_logs'),
    webapp2.Route('/profile', ProfileHandler, name='profile'),
    webapp2.Route('/signup', SignupHandler),
    webapp2.Route('/<type:v|p>/<user_id:\d+>-<signup_token:.+>',
        handler=VerificationHandler, name='verification'),
    webapp2.Route('/password', SetPasswordHandler),
    webapp2.Route('/forgot', ForgotPasswordHandler, name='forgot'),
    webapp2.Route('/login', LoginHandler, name='login'),
    webapp2.Route('/logout', LogoutHandler, name='logout'),
], debug=True, config=config)

logging.getLogger().setLevel(logging.DEBUG)
|
2,632 | 8dcd4914c58a7ecafdfdd70b698ef3b7141386a6 | alunos = list()
# Grade book: read students with two grades each, list the averages, then
# look up individual students' grades until 999 is entered.
while True:
    nome = str(input('Nome: '))
    nota1 = float(input('Nota 1: '))
    nota2 = float(input('Nota 2: '))
    media = (nota1 + nota2) / 2
    alunos.append([nome, [nota1, nota2], media])
    # ROBUSTNESS FIX: `[0]` on the raw answer crashed with IndexError when
    # the user just pressed Enter; `[:1]` yields '' there (treated as "yes").
    pergunta = str(input('Quer continuar [S/N]? ')).upper()[:1]
    if pergunta == 'N':
        break
print('-=' * 30)
print(f'{"Nº":<4}{"Nome":<10}{"Média":>8}')
print('-' * 30)
for i, v in enumerate(alunos):
    print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
    print('-' * 30)
    notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
    if notas_aluno == 999:
        print('Fim do Boletim.')
        break
    # BOUNDS FIX: also reject negative numbers (they silently indexed from
    # the end of the list) and report invalid choices instead of ignoring
    # them without feedback.
    if 0 <= notas_aluno < len(alunos):
        print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')
    else:
        print('Aluno inexistente, tente novamente.')
2,633 | 46fd4b976526a1bc70cf902bdb191feea8b84ad9 | import pygame
import time as time_
import random
import os
from pygame.locals import *
from math import sin, cos, pi
from sys import exit
# ---------------------------
from unzip import *
unzip()
# ---------------------------
from others import *
from gaster_blaster import *
from board import *
from bone import *
from sans import *
from player import *
from functions import *
# ----------------------------------------------------------------
'''初始化'''
# Initialization: window position, pygame, display surfaces and clock.
os.environ["SDL_VIDEO_WINDOW_POS"] = "100,100"
pygame.init()
if FULL_SCREEN:
    display = pygame.display.set_mode((1920, 1080), FULLSCREEN)
else:
    display = pygame.display.set_mode(SCREEN_SIZE)
# Off-screen drawing surface blitted to `display` each frame.
screen = pygame.Surface(SCREEN_SIZE).convert_alpha()
mask_surface_blue = pygame.Surface(SCREEN_SIZE).convert_alpha() # mask for blue attacks
mask_surface_orange = pygame.Surface(SCREEN_SIZE).convert_alpha() # mask for orange attacks
mask_surface_normal = pygame.Surface(SCREEN_SIZE).convert_alpha() # mask for normal attacks
pygame.display.set_caption("UPPERTALE") # window title
pygame.display.set_icon(pygame.image.load("res/icon-32.png")) # window icon
fps = pygame.time.Clock() # frame-rate clock
frames = 60
'''因为需要修改全局变量
所以不得不写在主文件里的函数'''
def players_turn(text):
    """Queue a hand-over to the player: when the queued attack runs, it
    switches control to the player, shows `text` in the battle box and
    clears every projectile from the field."""
    def hand_over():
        global is_players_turn, battle_text, shown_index
        is_players_turn = True
        battle_text = text
        shown_index = 0
        # Remove all remaining attack objects.
        for group in (bones, blasters, boards):
            group.clear()
    attacks.append(hand_over)
def set_turn_time(time):
    # Schedule the end of the current attack turn after `time` frames:
    # the queued task clears the global `stop` flag so the game advances.
    def next_turn(screen):
        global stop
        stop = False
    tasks.append(Task(next_turn, time))
def add_attack(func):
    # Decorator: register `func` as the next attack turn, in file order.
    attacks.append(func)
    return func
def shake(screen):
    # Task callback: start shaking the screen.
    global screen_shaking
    screen_shaking = True
def unshake(screen):
    # Task callback: stop shaking the screen.
    global screen_shaking
    screen_shaking = False
def set_screen_angle(angle):
    # Set the absolute rotation angle of the battle screen.
    global screen_angle
    screen_angle = angle
def start_testing():
    # Debug helper: drop all scheduled attack turns.
    attacks.clear()
# -------------------------------------
'''回合'''
# 吟唱
@add_attack
def yinchang_1():
    # Opening chant turn: set the battle box and have sans deliver a line.
    global BOX_POS, BOX_SIZE
    BOX_POS = [230, 230]
    BOX_SIZE = [170, 160]
    if DEBUG:
        # test area start
        pass
        # test area end
    sans.say("准备好了?")
# 开头杀
@add_attack
def first_round1():
set_turn_time(50)
sans.hand_direction = DOWN
player.type = BLUE_SOUL
player.direction = DOWN
player.falling_speed = 10
player.falling = True
tasks.append(Task(shake,
(BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
tasks.append(Task(unshake,
((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
tasks.append(Task(lambda screen : slam_sound.play(),
(BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
for x in range(BOX_POS[0], BOX_POS[0] + BOX_SIZE[0], 10):
bones.append(
Bone(
pos=[x, BOX_POS[1] + BOX_SIZE[1] - 7],
speed=[0, -5],
direction=UP,
time1=8,
time2=40,
length=1000,
type_=1
)
)
bones.append(
Bone(
pos=[x, BOX_POS[1] + BOX_SIZE[1] - 47],
speed=[0, 0],
direction=UP,
time1=200,
time2=48,
length=1000,
type_=1
)
)
bones.append(
Bone(
pos=[x, BOX_POS[1] + BOX_SIZE[1] - 47],
speed=[0, 5],
direction=UP,
time1=8,
time2=248,
length=1000,
type_=1
)
)
@add_attack
def first_round2():
    # Slam the blue soul against the left wall, then grow/hold/shrink a
    # row of wall bones (3-element speed: [vx, vy, growth-per-frame]).
    set_turn_time(50)
    sans.hand_direction = LEFT
    player.type = BLUE_SOUL
    player.direction = LEFT
    player.falling_speed = 10
    player.falling = True
    tasks.append(Task(shake,
        (player.pos[0] - BOX_POS[0]) // 10))
    tasks.append(Task(unshake,
        ((player.pos[0] - BOX_POS[0]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (player.pos[0] - BOX_POS[0]) // 10))
    for y in range(BOX_POS[1], BOX_POS[1] + BOX_SIZE[1], 10):
        bones.append(
            Bone(
                pos=[BOX_POS[0] - 7, y],
                speed=[0, 0, 5],
                direction=LEFT,
                time1=8,
                time2=30,
                length=0,
                type_=2
            )
        )
        bones.append(
            Bone(
                pos=[BOX_POS[0] - 7, y],
                speed=[0, 0, 0],
                direction=LEFT,
                time1=150,
                time2=38,
                length=40,
                type_=2
            )
        )
        bones.append(
            Bone(
                pos=[BOX_POS[0] - 7, y],
                speed=[0, 0, -5],
                direction=LEFT,
                time1=8,
                time2=188,
                length=40,
                type_=2
            )
        )
@add_attack
def first_round3():
    # Red-soul corridor: a sine-shaped gap between floor and ceiling bones
    # scrolling across the box.
    set_turn_time(450)
    player.type = RED_SOUL
    for _ in range(0, 300, 2):
        bones.append(
            Bone(
                pos=BOX_POS,
                length=40 + sin(_ / 20) * 40,
                direction=UP,
                speed=[7, 0],
                time1=1000,
                time2=_,
            )
        )
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] + 25 + (sin(_ / 20) * 40) + 60],
                length=1000,
                direction=UP,
                speed=[7, 0],
                time1=1000,
                time2=_,
            )
        )
@add_attack
def first_round4():
    # Taunt line between waves.
    sans.headtype = SANS_LOOK_LEFT
    sans.say("只是第一个回合而已,何必用尽全力?")
@add_attack
def first_round5():
    # Reset sans' face, start the background music, hand over to player.
    set_turn_time(1)
    sans.headtype = SANS_NORMAL
    pygame.mixer.music.play(-1)
players_turn("* ...")
@add_attack
def zjj_1():
    # Setup turn: resize the box and slam the blue soul to the floor.
    set_turn_time(60)
    global BOX_POS, BOX_SIZE
    BOX_POS = [200, 230]
    BOX_SIZE = [200, 150]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    tasks.append(Task(shake,
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
        ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
@add_attack
def zjj_2():
    # Ten 100-frame rounds: a blaster fires at the player from a random
    # angle above, while bone walls close in from both sides.
    set_turn_time(11 * 100)
    def zjj(screen):
        # Spawn a blue blaster 200px from the player at a random angle
        # in [240, 300] degrees, aimed back at the player.
        angle = random.randint(240, 300)
        blasters.append(GasterBlaster(
            pos=[
                player.pos[0] + math.cos(math.radians(angle)) * 200,
                player.pos[1] + math.sin(math.radians(angle)) * 200],
            angle=angle - 180,
            time1=10,
            time2=30,
            width=30,
            color=BLUE
        ))
    for _ in range(10):
        tasks.append(Task(zjj, _ * 100))
        bones.append(
            Bone(
                pos=[BOX_POS[0] - 20, BOX_POS[1] - 8],
                length=BOX_SIZE[1] - 30 - 16,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[2, 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0] + BOX_SIZE[0] + 20, BOX_POS[1] - 8],
                length=BOX_SIZE[1] - 30 - 16,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[-2, 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0] - 20, BOX_POS[1] + BOX_SIZE[1] - 10 - 8],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[2, 0],
                type_=1
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0] + BOX_SIZE[0] + 20, BOX_POS[1] + BOX_SIZE[1] - 10 - 8],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[-2, 0],
                type_=1
            ))
players_turn("* ...")
@add_attack
def blue_bone():
    # Wide box; waves of short gap bones plus full-height blue bones
    # (blue bones only hurt a moving soul) sweeping rightwards.
    set_turn_time(700)
    global BOX_POS, BOX_SIZE
    BOX_POS = [150, 250]
    BOX_SIZE = [350, 120]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    tasks.append(Task(shake,
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
        ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    for _ in range(10):
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] - 8],
                length=BOX_SIZE[1] - 30 - 16,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60,
                speed=[4, 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] + BOX_SIZE[1] - 10 - 8],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60,
                speed=[4, 0],
                type_=1
            ))
        bones.append(
            Bone(
                pos=BOX_POS,
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60 + 16,
                speed=[4, 0],
                type_=1,
                color=BLUE
            ))
@add_attack
def orange_bone():
    # Slam the blue soul UP to the ceiling; orange bones (only hurt a
    # stationary soul) sweep through while the whole screen spins 180°
    # and back.
    def start_spinning(screen):
        global spinning_left
        spinning_left = True
    def stop_spinning(screen):
        global spinning_left
        spinning_left = False
    tasks.append(Task(start_spinning, 0))
    tasks.append(Task(stop_spinning, 180))
    tasks.append(Task(lambda screen:set_screen_angle(180), 181))
    tasks.append(Task(start_spinning, 520))
    tasks.append(Task(stop_spinning, 700))
    tasks.append(Task(lambda screen:set_screen_angle(0), 701))
    set_turn_time(700)
    sans.hand_direction = UP
    player.type = BLUE_SOUL
    player.direction = UP
    player.falling_speed = 10
    tasks.append(Task(shake,
        (player.pos[1] - BOX_POS[1]) // 10))
    tasks.append(Task(unshake,
        ((player.pos[1] - BOX_POS[1]) // 10) + 5))
    # BUG FIX: the slam sound used the downward-slam delay formula
    # (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10, copied from the
    # downward turns, although this turn slams UP; use the same upward
    # travel time as the shake/unshake tasks above so the sound plays on
    # impact.
    tasks.append(Task(lambda screen : slam_sound.play(),
        (player.pos[1] - BOX_POS[1]) // 10))
    for _ in range(10):
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] - 8],
                length=10,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60,
                speed=[8, 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] + 30 + 16],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60,
                speed=[8, 0],
                type_=1
            ))
        bones.append(
            Bone(
                pos=BOX_POS,
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 60 + 60 + 8,
                speed=[8, 0],
                type_=1,
                color=ORANGE
            ))
players_turn("* ...")
@add_attack
def bone_gap():
    # Ten rounds: a blue vertical + orange horizontal bone flash at a
    # random (x, y), then side bones converge so that the only safe spot
    # is the gap at that intersection.
    set_turn_time(1000)
    global BOX_POS, BOX_SIZE
    BOX_POS = [150, 230]
    BOX_SIZE = [300, 150]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    tasks.append(Task(shake,
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
        ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    for _ in range(10):
        # Random gap column within the box.
        x = BOX_POS[0] + random.randint(100, BOX_SIZE[0] - 100)
        bones.append(Bone(
            pos=[x, BOX_POS[1]],
            time1=10,
            time2=_ * 100,
            speed=[0, 0, BOX_SIZE[1] / 10],
            length=0,
            direction=DOWN,
            color=BLUE
        ))
        bones.append(Bone(
            pos=[x, BOX_POS[1]],
            time1=10,
            time2=_ * 100 + 10,
            speed=[0, 0, -BOX_SIZE[1] / 10],
            length=BOX_SIZE[1],
            direction=DOWN,
            color=BLUE
        ))
        tasks.append(Task(shake,_ * 100 + 10))
        tasks.append(Task(unshake,_ * 100 + 15))
        tasks.append(Task(lambda screen : slam_sound.play(),
            _ * 100 + 15))
        # Random gap row within the box.
        y = BOX_POS[1] + random.randint(70, BOX_SIZE[1] - 30)
        bones.append(Bone(
            pos=[BOX_POS[0], y],
            time1=10,
            time2=_ * 100,
            speed=[0, 0, BOX_SIZE[0] / 10],
            length=0,
            direction=RIGHT,
            color=ORANGE
        ))
        bones.append(Bone(
            pos=[BOX_POS[0], y],
            time1=10,
            time2=_ * 100 + 10,
            speed=[0, 0, -BOX_SIZE[0] / 10],
            length=BOX_SIZE[0],
            direction=RIGHT,
            color=ORANGE
        ))
        # Converging bones from both sides, above and below the gap row;
        # speeds are chosen so both arrive at column x after 30 frames.
        bones.append(
            Bone(
                pos=[BOX_POS[0], BOX_POS[1] - 8],
                length=y - BOX_POS[1] - 16,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[(x - BOX_POS[0]) / 30, 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] - 8],
                length=y - BOX_POS[1] - 16,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[-((BOX_SIZE[0] + BOX_POS[0] - x) / 30), 0],
                type_=2
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0], y + 8],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[(x - BOX_POS[0]) / 30, 0],
                type_=1
            ))
        bones.append(
            Bone(
                pos=[BOX_POS[0] + BOX_SIZE[0], y + 8],
                length=1000,
                direction=DOWN,
                time1=1000,
                time2=_ * 100 + 60,
                speed=[-((BOX_SIZE[0] + BOX_POS[0] - x) / 30), 0],
                type_=1
            ))
players_turn("* ...")
@add_attack
def board_1():
    # Setup turn for the platform ("board") sequence: wide flat box,
    # slam the blue soul to the floor.
    set_turn_time(10)
    global BOX_POS, BOX_SIZE
    BOX_POS = [50, 240]
    BOX_SIZE = [500, 140]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    tasks.append(Task(shake,
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
        ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
@add_attack
def board_2():
    # Two blasters rake the box, a moving platform carries the player
    # over a bone floor while low bones sweep in and the screen spins.
    set_turn_time(600)
    tasks.append(Task(shake, 70))
    tasks.append(Task(unshake, 75))
    blasters.append(
        GasterBlaster(
            pos=[10, BOX_POS[1] + BOX_SIZE[1]],
            angle=0,
            time1=10,
            time2=70,
            time3=10,
            width=70
        )
    )
    blasters.append(
        GasterBlaster(
            pos=[10, BOX_POS[1]],
            angle=0,
            time1=10,
            time2=70,
            time3=10,
            width=30
        )
    )
    # Bone floor (from below) and short ceiling spikes, every 12px.
    for x in range(BOX_POS[0], BOX_POS[0] + BOX_SIZE[0], 12):
        bones.append(
            Bone(
                pos=[x, BOX_POS[1] + BOX_SIZE[1] - 30],
                length=1000,
                direction=UP,
                time1=1000,
                time2=100,
                speed=[0, 0],
                type_=1
            )
        )
        bones.append(
            Bone(
                pos=[x, BOX_POS[1] - 8],
                length=5,
                direction=DOWN,
                time1=100,
                time2=100,
                speed=[0, 0],
                type_=2
            )
        )
    # Safe platform that crosses the box above the bone floor.
    boards.append(
        Board(
            pos=[BOX_POS[0],BOX_POS[1] + BOX_SIZE[1] - 40],
            length=40,
            speed=[1, 0],
            time1=BOX_SIZE[0],
            time2=100,
            direction=UP
        )
    )
    # Low sweeping bones the player must hop over from the platform.
    for _ in range(0, 20, 4):
        bones.append(
            Bone(
                pos=[BOX_POS[0] + BOX_SIZE[0],
                    BOX_POS[1] + BOX_SIZE[1] - 40 - 25],
                length=1000,
                direction=UP,
                time1=BOX_SIZE[0] // 4,
                time2=150 + (_ * 30),
                speed=[-4, 0]
            )
        )
    def start_spinning(screen):
        global spinning_left
        spinning_left = True
    def stop_spinning(screen):
        global spinning_left
        spinning_left = False
    # Two spin phases; angle snapped back to 0 afterwards.
    tasks.append(Task(start_spinning, 200))
    tasks.append(Task(stop_spinning, 380))
    tasks.append(Task(start_spinning, 500))
    tasks.append(Task(stop_spinning, 680))
    tasks.append(Task(lambda screen:set_screen_angle(0), 682))
@add_attack
def board_3():
    # Slam the blue soul against the left wall, then fire a vertical
    # blaster straight down that wall.
    set_turn_time(100)
    sans.hand_direction = LEFT
    player.type = BLUE_SOUL
    player.direction = LEFT
    player.falling_speed = 10
    tasks.append(Task(shake,
        (player.pos[0] - BOX_POS[0]) // 10))
    tasks.append(Task(unshake,
        ((player.pos[0] - BOX_POS[0]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
        (player.pos[0] - BOX_POS[0]) // 10))
    tasks.append(Task(shake, 60))
    tasks.append(Task(unshake, 65))
    blasters.append(
        GasterBlaster(
            pos=[BOX_POS[0], 10],
            angle=90,
            time1=10,
            time2=50,
            time3=0,
            width=50
        )
    )
@add_attack
def board_4():
    # Clean-up turn: remove leftover bones, then hand over to the player.
    set_turn_time(0)
    bones.clear()
players_turn("* ...")
@add_attack
def board_2_1():
    """Re-shape the battle box wide and low, then slam the player DOWN."""
    set_turn_time(10)
    global BOX_POS, BOX_SIZE
    BOX_POS = [50, 240]
    BOX_SIZE = [500, 140]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    # shake/slam timed to when the player hits the box floor
    tasks.append(Task(shake,
                      (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
                      ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
                      (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
@add_attack
def board_2_2():
    """Long platforming round (600 frames): two timed blasters along the
    floor, four rideable boards drifting left at different speeds/delays,
    and two waves of floor bones the player must ride the boards over."""
    set_turn_time(600)
    tasks.append(Task(shake, 70))
    tasks.append(Task(unshake, 75))
    blasters.append(
        GasterBlaster(
            pos=[10, BOX_POS[1] + BOX_SIZE[1]],
            angle=0,
            time1=10,
            time2=70,
            time3=10,
            width=70
        )
    )
    tasks.append(Task(shake, 250))
    tasks.append(Task(unshake, 255))
    blasters.append(
        GasterBlaster(
            pos=[10, BOX_POS[1] + BOX_SIZE[1] - 20],
            angle=0,
            time1=10,
            time2=70,
            time3=250,
            width=70
        )
    )
    # boards enter from the right at staggered delays and decreasing speeds
    boards.append(
        Board(
            pos=[BOX_POS[0] + BOX_SIZE[0],
                 BOX_POS[1] + BOX_SIZE[1] - 30 - 10],
            time1=1000,
            time2=0,
            speed=[-2, 0],
            length=40
        )
    )
    boards.append(
        Board(
            pos=[BOX_POS[0] + BOX_SIZE[0],
                 BOX_POS[1] + BOX_SIZE[1] - 30 - 10],
            time1=1000,
            time2=100,
            speed=[-1.5, 0],
            length=40
        )
    )
    boards.append(
        Board(
            pos=[BOX_POS[0] + BOX_SIZE[0],
                 BOX_POS[1] + BOX_SIZE[1] - 30 - 10],
            time1=1000,
            time2=200,
            speed=[-1, 0],
            length=40
        )
    )
    boards.append(
        Board(
            pos=[BOX_POS[0] + BOX_SIZE[0],
                 BOX_POS[1] + BOX_SIZE[1] - 30 - 30],
            time1=1000,
            time2=300,
            speed=[-3, 0],
            length=80
        )
    )
    # two bone waves across the floor: a short early one, then a lasting one
    for x in range(BOX_POS[0], BOX_POS[0] + BOX_SIZE[0], 12):
        bones.append(
            Bone(
                pos=[x, BOX_POS[1] + BOX_SIZE[1] - 30],
                length=1000,
                direction=UP,
                time1=400,
                time2=100,
                speed=[0, 0],
                type_=1
            )
        )
        bones.append(
            Bone(
                pos=[x, BOX_POS[1] + BOX_SIZE[1] - 30],
                length=1000,
                direction=UP,
                time1=1000,
                time2=500,
                speed=[0, 0],
                type_=1
            )
        )
players_turn("* ...")
@add_attack
def bone_lid1():
    """Slam the player DOWN into a narrow box, then close a bone 'lid':
    two 45-degree bones slide in from the bottom corners."""
    set_turn_time(70)
    global BOX_SIZE, BOX_POS
    BOX_POS = [200, 240]
    BOX_SIZE = [200, 150]
    sans.hand_direction = DOWN
    player.type = BLUE_SOUL
    player.direction = DOWN
    player.falling_speed = 10
    # shake/slam timed to when the player hits the box floor
    tasks.append(Task(shake,
                      (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    tasks.append(Task(unshake,
                      ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10) + 5))
    tasks.append(Task(lambda screen : slam_sound.play(),
                      (BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10))
    bones.append(
        RotatableBone(
            pos=[BOX_POS[0] - 70, BOX_POS[1] + BOX_SIZE[1]],
            time1=1000,
            length=130,
            angle=45,
            speed=[5, 0, 0, 0]
        )
    )
    bones.append(
        RotatableBone(
            pos=[BOX_POS[0] + BOX_SIZE[0] + 70, BOX_POS[1] + BOX_SIZE[1]],
            time1=1000,
            length=130,
            angle=-45,
            speed=[-5, 0, 0, 0]
        )
    )
@add_attack
def bone_lid2():
    """Slam the player UP to the ceiling, then close a bone 'lid': two
    45-degree bones slide in from the top corners (mirror of bone_lid1)."""
    set_turn_time(60)
    sans.hand_direction = UP
    player.type = BLUE_SOUL
    player.direction = UP
    player.falling_speed = 10
    player.falling = True
    # Frames until the player reaches the ceiling: distance / 10 px per frame.
    impact_delay = (player.pos[1] - BOX_POS[1]) // 10
    tasks.append(Task(shake, impact_delay))
    tasks.append(Task(unshake, impact_delay + 5))
    # BUGFIX: the slam sound previously used the distance to the box FLOOR
    # ((BOX_POS[1] + BOX_SIZE[1] - player.pos[1]) // 10 — copy-pasted from
    # the downward slams), so it played out of sync with the shake. This
    # round slams UP, so it must use the ceiling distance like shake/unshake.
    tasks.append(Task(lambda screen : slam_sound.play(), impact_delay))
    bones.append(
        RotatableBone(
            pos=[BOX_POS[0] - 20, BOX_POS[1]],
            time1=1000,
            length=130,
            angle=-45,
            speed=[5, 0, 0, 0]
        )
    )
    bones.append(
        RotatableBone(
            pos=[BOX_POS[0] + BOX_SIZE[0] + 20, BOX_POS[1]],
            time1=1000,
            length=130,
            angle=45,
            speed=[-5, 0, 0, 0]
        )
    )
@add_attack
def bone_lid3():
    """Long round (1300 frames): 20 staggered waves of angled bones scissor
    in vertically from all four corners of the box."""
    set_turn_time(1300)
    player.type = RED_SOUL
    for _ in range(20):
        # left pair: top corner moving down, bottom corner moving up
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0], BOX_POS[1] - 20],
                time1=1000,
                time2=_ * 60,
                length=260,
                angle=-45,
                speed=[0, 2, 0, 0]
            )
        )
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0], BOX_POS[1] + BOX_SIZE[1] + 20],
                time1=1000,
                time2=_ * 60,
                length=260,
                angle=45,
                speed=[0, -2, 0, 0]
            )
        )
        # right pair, offset by 30 frames relative to the left pair
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] - 20],
                time1=1000,
                time2=_ * 60 + 30,
                length=260,
                angle=45,
                speed=[0, 2, 0, 0]
            )
        )
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] + BOX_SIZE[1] + 20],
                time1=1000,
                time2=_ * 60 + 30,
                length=260,
                angle=-45,
                speed=[0, -2, 0, 0]
            )
        )
players_turn("* ...")
@add_attack
def mercy1():
    # Pause the battle music while Sans offers mercy.
    pygame.mixer.music.pause()
    # "Alright, I'm tired too — how about we take a break?"
    sans.say("好了,我也累了,不如我们休息一下?")
@add_attack
def mercy2():
    # "This is also a chance to turn over a new leaf,"
    sans.say("这也是一个改过自新的机会,")
@add_attack
def mercy3():
    # "Hurry up and press MERCY,"
    sans.say("赶紧按下饶恕,")
@add_attack
def mercy4():
    # Sans' eyes go dark for the threat.
    sans.headtype = SANS_NO_EYES
    # "...or you definitely won't want to see the next turn."
    sans.say("否则你绝对不想见到下一个回合")
@add_attack
def mercy5():
    # Zero-length round: restore Sans' normal face before the menu.
    set_turn_time(0)
    sans.headtype = SANS_NORMAL
players_turn("* ...")
@add_attack
def before_flash():
    # "Well then — it seems you have made your choice."
    sans.say("好吧,看来你已经做出了自己的选择。")
@add_attack
def flash_round():
    """Blackout transition (10 frames): flash to black, clear every
    projectile, then flash back and resume the music."""
    set_turn_time(10)
    global blackout
    flash_sound.play()
    blackout = True
    bones.clear()
    blasters.clear()
    boards.clear()
    def flash(screen):
        # Task callback: end the blackout after 10 frames.
        global blackout
        blackout = False
        flash_sound.play()
        pygame.mixer.music.unpause()
    tasks.append(Task(flash, 10))
def flash_round_1():
    """Random-pool blackout round: drop the player into a small box, fire
    stacked horizontal blasters plus a vertical pair on a random side, and
    ring the box with a circle of bones (some launched from the centre)."""
    set_turn_time(150)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    player.type = BLUE_SOUL
    player.direction = DOWN
    # snap both the target and the displayed box (no interpolation)
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [230, 230]
    # spawn far above the box so the player falls straight in
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  100000]
    direction = random.randint(0, 1)
    blasters.append(
        GasterBlaster(
            pos=[BOX_POS[0] - 30, BOX_POS[1] + BOX_SIZE[1] - 30],
            angle=0,
            time1=0,
            time2=30,
            time3=10,
            width=90
        )
    )
    blasters.append(
        GasterBlaster(
            pos=[BOX_POS[0] - 30, BOX_POS[1] - 30],
            angle=0,
            time1=0,
            time2=30,
            time3=60,
            width=90
        )
    )
    # the vertical pair fires left-then-right or right-then-left at random
    if direction:
        blasters.append(
            GasterBlaster(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] - 30],
                angle=90,
                time1=0,
                time2=30,
                time3=10,
                width=90
            )
        )
        blasters.append(
            GasterBlaster(
                pos=[BOX_POS[0], BOX_POS[1] - 30],
                angle=90,
                time1=0,
                time2=30,
                time3=60,
                width=90
            )
        )
    else:
        blasters.append(
            GasterBlaster(
                pos=[BOX_POS[0], BOX_POS[1] - 30],
                angle=90,
                time1=0,
                time2=30,
                time3=10,
                width=90
            )
        )
        blasters.append(
            GasterBlaster(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] - 30],
                angle=90,
                time1=0,
                time2=30,
                time3=60,
                width=90
            )
        )
    # ring of small bones around the box edge; every 30 degrees an extra
    # bone is launched outward from the centre
    for angle in range(0, 360, 10):
        bones.append(RotatableBone(
            pos=[BOX_POS[0] + BOX_SIZE[0] / 2 + cos(radians(angle)) * BOX_SIZE[0] / 2,
                 BOX_POS[1] + BOX_SIZE[1] / 2 + 25 + sin(radians(angle)) * BOX_SIZE[1] / 2],
            length=25,
            angle=angle,
            time1=150
        )
        )
        if angle % 30 == 0:
            bones.append(RotatableBone(
                pos=[BOX_POS[0] + BOX_SIZE[0] / 2,
                     BOX_POS[1] + BOX_SIZE[1] / 2 + 25],
                length=40,
                angle=angle,
                speed=[0, 0, 0, 5],
                time1=130,
                time2=20
            )
            )
def flash_round_2():
    """Random-pool blackout round: 50 blasters spawned over 25 frames at
    random points on an arc above the player, each aimed back at them."""
    set_turn_time(100)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [230, 230]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    def zjj(screen):
        # Task callback: pick a random point above the player (angle in
        # [-140, -40] degrees, distance 10..200) and fire a blaster from
        # there back towards the player (angle - 180).
        angle = random.randint(-140, -40)
        d = random.randint(10, 200)
        blasters.append(GasterBlaster(
            pos=[
                player.pos[0] + math.cos(math.radians(angle)) * d,
                player.pos[1] + math.sin(math.radians(angle)) * d],
            angle=angle - 180,
            time1=0,
            time2=20,
            width=50
        ))
    # two spawns per frame for 25 frames (delays 0, 0.5, 1.0, ...)
    for _ in range(0, 50):
        tasks.append(Task(zjj, _ / 2))
def flash_round_3():
    """Random-pool blackout round: a cross of fire — one vertical and one
    horizontal Gaster Blaster through the centre of the box."""
    set_turn_time(100)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    # snap both the target and the displayed box (no interpolation)
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [200, 230]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    # (muzzle position, firing angle) for each beam of the cross
    volley = (
        ([BOX_POS[0] + BOX_SIZE[0] / 2, 50], 90),
        ([50, BOX_POS[1] + BOX_SIZE[1] / 2], 0),
    )
    for origin, deg in volley:
        blasters.append(
            GasterBlaster(pos=origin, angle=deg,
                          time1=10, time2=70, time3=0, width=60)
        )
def flash_round_4():
    """Random-pool blackout round: two diagonal Gaster Blasters fired at
    +45 and -45 degrees from the box's left corners."""
    set_turn_time(100)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    # snap both the target and the displayed box (no interpolation)
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [230, 230]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    # (muzzle position, firing angle) for each diagonal beam
    volley = (
        ([BOX_POS[0] - 10, BOX_POS[1] - 10], 45),
        ([BOX_POS[0] - 10, BOX_POS[1] + BOX_SIZE[1] + 10], -45),
    )
    for origin, deg in volley:
        blasters.append(
            GasterBlaster(pos=origin, angle=deg,
                          time1=10, time2=70, time3=0, width=60)
        )
def flash_round_5():
    """Random-pool blackout round: vertical blasters down both box edges
    plus a wide horizontal beam across the upper half of the box."""
    set_turn_time(100)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    # snap both the target and the displayed box (no interpolation)
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [230, 230]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    # (muzzle position, firing angle, beam width) for each shot
    volley = (
        ([BOX_POS[0], 50], 90, 60),
        ([BOX_POS[0] + BOX_SIZE[0], 50], 90, 60),
        ([50, BOX_POS[1] + 50], 0, 100),
    )
    for origin, deg, beam_width in volley:
        blasters.append(
            GasterBlaster(pos=origin, angle=deg,
                          time1=10, time2=70, time3=0, width=beam_width)
        )
def flash_round_6():
    """Random-pool blackout round: vertical blasters down both box edges
    plus a wide horizontal beam across the lower half of the box
    (mirror of flash_round_5)."""
    set_turn_time(100)
    global _boxsize, _boxpos, BOX_POS, BOX_SIZE
    # snap both the target and the displayed box (no interpolation)
    BOX_SIZE = _boxsize = [150, 150]
    BOX_POS = _boxpos = [230, 230]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    # (muzzle position, firing angle, beam width) for each shot
    volley = (
        ([BOX_POS[0], 50], 90, 60),
        ([BOX_POS[0] + BOX_SIZE[0], 50], 90, 60),
        ([50, BOX_POS[1] + BOX_SIZE[1] - 50], 0, 100),
    )
    for origin, deg, beam_width in volley:
        blasters.append(
            GasterBlaster(pos=origin, angle=deg,
                          time1=10, time2=70, time3=0, width=beam_width)
        )
def flash_round_7():
    """Random-pool blackout round: three staggered waves of slightly tilted
    bones scissor in vertically from all four corners of the box."""
    set_turn_time(150)
    global BOX_SIZE, BOX_POS, _boxpos, _boxsize
    BOX_POS = _boxpos = [230, 230]
    BOX_SIZE = _boxsize = [150, 150]
    player.type = RED_SOUL
    player.pos = [BOX_POS[0] + BOX_SIZE[0] / 2,
                  BOX_POS[1] + BOX_SIZE[1] / 2]
    for _ in range(3):
        # left pair, delayed _ * 50 + 20 frames
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0], BOX_POS[1] - 20],
                time1=1000,
                time2=_ * 50 + 20,
                length=150,
                angle=-20,
                speed=[0, 4, 0, 0]
            )
        )
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0], BOX_POS[1] + BOX_SIZE[1] + 20],
                time1=1000,
                time2=_ * 50 + 20,
                length=150,
                angle=20,
                speed=[0, -4, 0, 0]
            )
        )
        # right pair, 30 frames after the left pair
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] - 20],
                time1=1000,
                time2=_ * 50 + 50,
                length=150,
                angle=20,
                speed=[0, 4, 0, 0]
            )
        )
        bones.append(
            RotatableBone(
                pos=[BOX_POS[0] + BOX_SIZE[0], BOX_POS[1] + BOX_SIZE[1] + 20],
                time1=1000,
                time2=_ * 50 + 50,
                length=150,
                angle=-20,
                speed=[0, -4, 0, 0]
            )
        )
# Pool of interchangeable blackout rounds: five are drawn at random (repeats
# possible), each immediately followed by the blackout transition round.
random_attacks = [flash_round_1,
                  flash_round_2,
                  flash_round_3,
                  flash_round_4,
                  flash_round_5,
                  flash_round_6,
                  flash_round_7]
for _ in range(5):
    attacks.append(random.choice(random_attacks))
    attacks.append(flash_round)
# NOTE(review): appears to enqueue a player menu round after the barrage;
# confirm against players_turn's definition.
players_turn("* ...")
@add_attack
def windmill():
    """Windmill round (1200 frames): four Gaster Blasters at 90-degree
    spacing around the box centre rotate for five full revolutions; any hit
    Sans takes during this round deals exactly 1 damage."""
    set_turn_time(1200)
    global BOX_POS, BOX_SIZE, before_strike, after_strike
    def before_strike():
        # Override the strike hook so a player attack this round scores 1.
        global sans_damage
        sans_damage = 1
    # after_strike becomes a no-op for this round
    after_strike = lambda : ...
    BOX_POS = [150, 240]
    BOX_SIZE = [150, 150]
    def movegb(screen):
        # Task callback: rotate each of the four windmill blasters by 1 degree.
        # NOTE(review): assumes the windmill blasters occupy indices 0-3 of
        # `blasters` — any leftover blaster from a previous round would break
        # this; confirm the list is empty when this round starts.
        for i in range(4):
            blasters[i].angle += 1
            blasters[i].end_angle += 1
            blasters[i].radian += radians(-1)
            blasters[i].back_speed = 0
    # 5 revolutions at 1 degree per step, starting after a 100-frame warm-up
    for angle in range(360 * 5):
        tasks.append(Task(movegb, angle * 0.4 + 100))
    def enablerecoil(screen):
        # Task callback: re-enable blaster recoil near the end of the round.
        for b in blasters:
            b.norecoil = False
    tasks.append(Task(enablerecoil, 800))
    for angle in range(0, 360, 90):
        blasters.append(GasterBlaster(
            pos=[150 + 150 / 2, 240 + 150 / 2],
            angle=angle,
            time1=10,
            time2=1000,
            width=30,
            time3=0,
            norecoil=True
        ))
players_turn("* ...")
@add_attack
def gameend():
    # Placeholder terminal attack: does nothing. Once the round counter
    # passes it, the main loop's `time >= len(attacks)` check exits the game.
    ...
# ------------------------------------
"""主程序"""
# Main program: the outer loop restarts the whole fight (after death or F2);
# each iteration re-imports globals and re-creates all battle state.
while True:
    # ---------------------------------------------------------
    '''实例化'''
    # Instantiation: reset per-run state. Star-import re-loads shared
    # globals (constants, images, sounds, attacks list, flags, ...).
    from locals_ import *
    time = 0                     # index of the next attack round to run
    _boxpos = [0, 0]             # displayed (interpolated) battle-box corner
    _boxsize = SCREEN_SIZE[:]    # displayed (interpolated) battle-box size
    rightdown = SCREEN_SIZE[:]   # interpolated bottom-right corner of the box
    time1 = 0                    # frame start timestamp (for FPS)
    time2 = 0                    # frame end timestamp
    delta = 1                    # last frame duration in seconds
    blasters = []
    bones = []
    tasks = []
    warns = []
    texts = []
    boards = []
    before_strike = None         # hook run when the player's attack lands
    after_strike = None          # hook run when the strike round ends
    sans = Sans([280, 80])
    player = Player([0, 0])
    # ACT submenu entries; the heal label is filled in with uses remaining
    actions = {
        "* check" : CHECK_SANS,
        "* heal ({} time(s) left)" : HEAL_SANS
    }
    # MERCY submenu entries
    mc_actions = {
        "* spare" : MERCY_SANS_SPARE,
        "* flee" : MERCY_SANS_FLEE
    }
    pygame.mixer.music.stop()
    if FULL_SCREEN:
        display = pygame.display.set_mode((1920, 1080), FULLSCREEN)
    else:
        display = pygame.display.set_mode(SCREEN_SIZE)
    # Per-frame battle loop; exits via break on death/restart or exit().
    while True:
        time1 = time_.time()
        # screen shake: jitter the blit offset while shaking is active
        if screen_shaking:
            screen_offset[0] = random.randint(-5, 5)
            screen_offset[1] = random.randint(-5, 5)
        else:
            screen_offset = [0, 0]
        # screen rotation (left)
        if spinning_left:
            screen_angle -= 1
        # screen rotation (right)
        if spinning_right:
            screen_angle += 1
        # debug hook (no-op)
        if DEBUG:...
        # battle-box position: ease the displayed corner towards the target
        # by 1/5 of the remaining distance per frame, snapping when close
        if _boxpos[0] != BOX_POS[0]:
            if abs(BOX_POS[0] - _boxpos[0]) < 0.1:
                _boxpos[0] = BOX_POS[0]
            else:
                _boxpos[0] += (BOX_POS[0] - _boxpos[0]) / 5
        if _boxpos[1] != BOX_POS[1]:
            if abs(BOX_POS[1] - _boxpos[1]) < 0.1:
                _boxpos[1] = BOX_POS[1]
            else:
                _boxpos[1] += (BOX_POS[1] - _boxpos[1]) / 5
        # battle-box size: same easing applied to the bottom-right corner
        if rightdown[0] != BOX_POS[0] + BOX_SIZE[0]:
            if abs(BOX_POS[0] + BOX_SIZE[0] - rightdown[0]) < 0.1:
                rightdown[0] = BOX_POS[0] + BOX_SIZE[0]
            else:
                rightdown[0] += (BOX_POS[0] + BOX_SIZE[0] - rightdown[0]) / 5
        if rightdown[1] != BOX_POS[1] + BOX_SIZE[1]:
            if abs(BOX_POS[1] + BOX_SIZE[1] - rightdown[1]) < 0.1:
                rightdown[1] = BOX_POS[1] + BOX_SIZE[1]
            else:
                rightdown[1] += (BOX_POS[1] + BOX_SIZE[1] - rightdown[1]) / 5
        _boxsize = [
            rightdown[0] - _boxpos[0],
            rightdown[1] - _boxpos[1]
        ]
        # all rounds exhausted -> quit
        if time >= len(attacks):
            exit()
        # start the next attack round once the previous one released `stop`
        if not stop and not is_players_turn:
            attacks[time]()
            time += 1
            stop = True
        screen.fill((0, 0, 0, 255))
        display.fill((0, 0, 0))
        # per-frame collision mask layers (blue / orange / normal attacks)
        mask_surface_blue.fill((0, 0, 0, 0))
        mask_surface_orange.fill((0, 0, 0, 0))
        mask_surface_normal.fill((0, 0, 0, 0))
        # input handling
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    pygame.quit()
                    exit()
                # confirm (Z / Enter)
                if event.key in (K_z, K_RETURN):
                    if sans.show_index >= len(sans.text) and sans.show_text == True:
                        # dismiss Sans' finished dialogue and resume rounds
                        sans.show_text = False
                        stop = False
                    elif page in (CHECK_SANS, HEAL_SANS, HEAL_SANS_CANT) and shown_index >= len(battle_text):
                        # leave a finished text page and start the next round
                        is_players_turn = False
                        stop = False
                        page = MAIN_PAGE
                        player.pos = [
                            BOX_POS[0] + BOX_SIZE[0] / 2,
                            BOX_POS[1] + BOX_SIZE[1] / 2
                        ]
                        player.select_sound.play()
                    else:
                        # menu confirm (only meaningful during player's turn)
                        player.choose = is_players_turn
                        if is_players_turn and page != FIGHT_SANS:
                            player.select_sound.play()
                # cancel (X / RShift): also fast-forwards any scrolling text
                if event.key in (K_x, K_RSHIFT):
                    sans.show_index = len(sans.text)
                    shown_index = len(battle_text)
                    player.back = True
                    player.choice = 0
                if event.key == K_UP:
                    player.going_up = True
                if event.key == K_DOWN:
                    player.going_down = True
                if event.key == K_LEFT:
                    player.going_left = True
                if event.key == K_RIGHT:
                    player.going_right = True
                # F4 toggles fullscreen
                if event.key == K_F4:
                    if FULL_SCREEN:
                        display = pygame.display.set_mode(SCREEN_SIZE)
                        FULL_SCREEN = 0
                    else:
                        display = pygame.display.set_mode((1920, 1080), FULLSCREEN)
                        FULL_SCREEN = 1
                # F2 restarts the whole fight
                if event.key == K_F2:
                    restarting = True
                # debug keys: N clears projectiles, +/- adjust FPS cap
                if DEBUG:
                    if event.key == K_n:
                        bones.clear()
                        boards.clear()
                        blasters.clear()
                        stop = False
                    if event.key == K_EQUALS:
                        frames += 1
                    if event.key == K_MINUS:
                        frames -= 1
            if event.type == KEYUP:
                if event.key == K_UP:
                    player.going_up = False
                if event.key == K_DOWN:
                    player.going_down = False
                if event.key == K_LEFT:
                    player.going_left = False
                if event.key == K_RIGHT:
                    player.going_right = False
                if event.key == K_ESCAPE:
                    pygame.quit()
                    exit()
                if event.key in (K_z, K_RETURN):
                    player.choose = False
                if event.key in (K_x, K_RSHIFT):
                    player.back = False
        '''检测&更新'''
        # Update & draw the battle world.
        # battle box: white border rect, then black interior
        pygame.draw.rect(screen, (255, 255, 255, 255), pygame.Rect((_boxpos[0] - 5, _boxpos[1] - 5),
                                                                   (_boxsize[0] + 10, _boxsize[1] + 10)))
        pygame.draw.rect(screen, (0, 0, 0, 255), pygame.Rect(_boxpos, _boxsize)) # inner mask
        # bones
        # NOTE(review): removing from the list while iterating it skips the
        # element after each removal — cleanup of expired sprites can lag a
        # frame; same pattern below for warns/boards/blasters/tasks.
        for b in bones:
            b.show(screen,
                   mask_surface_blue,
                   mask_surface_orange,
                   mask_surface_normal)
            if b.stop:
                bones.remove(b)
        # warning boxes
        for w in warns:
            w.show(screen)
            if w.stop:
                warns.remove(w)
        # boards (rideable platforms): carry the player and stop their fall
        for b in boards:
            b.show(screen)
            if b.stop:
                boards.remove(b)
            if b.rect.colliderect(player.rect) and player.falling:
                player.pos[0] += b.speed[0]
                player.pos[1] += b.speed[1]
                # snap the player flush against the board on the slam side
                if player.direction == DOWN:
                    player.pos[1] = b.rect.top - 7
                elif player.direction == UP:
                    player.pos[1] = b.rect.bottom - 1
                elif player.direction == RIGHT:
                    player.pos[0] = b.rect.left - 7
                elif player.direction == LEFT:
                    player.pos[0] = b.rect.right - 1
                player.falling = False
        """外遮挡"""
        # Outer masking: black out everything drawn outside the battle box.
        pygame.draw.rect(screen, (0, 0, 0, 255), pygame.Rect((0, 0), (SCREEN_SIZE[0], _boxpos[1] - 5)))
        pygame.draw.rect(screen, (0, 0, 0, 255), pygame.Rect((0, _boxpos[1] - 5), (_boxpos[0] - 5, _boxsize[1] + 10)))
        pygame.draw.rect(screen, (0, 0, 0, 255), pygame.Rect((0, _boxpos[1] + _boxsize[1] + 5),
                                                             (SCREEN_SIZE[0], SCREEN_SIZE[1] - (_boxpos[1] + _boxsize[1]) - 5)))
        pygame.draw.rect(screen, (0, 0, 0, 255), pygame.Rect((_boxpos[0] + _boxsize[0] + 5, _boxpos[1] - 5),
                                                             (SCREEN_SIZE[0] - (_boxpos[0] + _boxsize[0]) - 5, _boxsize[1] + 10)))
        '''显示UI(外面)'''
        # HP bar: red background, magenta KR (karma) segment, yellow HP
        pygame.draw.rect(screen, (191, 0, 0, 255), pygame.Rect((275, 400), (92, 20)))
        if player.KR:
            pygame.draw.rect(screen, (255, 0, 255, 255), pygame.Rect((275 + player.HP, 400), (round(player.KR), 20)))
        pygame.draw.rect(screen, (255, 255, 0, 255), pygame.Rect((275, 400), (player.HP, 20)))
        # numeric HP readout, magenta while KR damage is ticking
        screen.blit(
            font2.render(
                "{:0>2.0f} / 92".format(player.HP + player.KR),
                True,
                (255, 255, 255) if not round(player.KR) else (255, 0, 255)
            ),
            (
                415,
                400
            )
        )
        screen.blit(hp_image, (240, 405))
        screen.blit(kr_image, (375, 405))
        screen.blit(
            font2.render(
                "Chara LV 19", True, (255, 255, 255)
            ), (30, 400)
        )
        # floating texts: each entry is (position, string)
        for text in texts:
            screen.blit(
                font.render(
                    text[1], True, (255, 255, 255)
                ), text[0]
            )
        if DEBUG:
            screen.blit(
                font2.render(
                    "DEBUG", True, (0, 0, 255)
                ), (200, 0)
            )
        # FPS counter (from last frame's measured delta)
        screen.blit(
            font2.render(
                "FPS:{:0>3d}".format(round(1 / delta)), True, (0, 0, 255)
            ), (0, 0)
        )
        # menu buttons, highlighted according to the current selection
        if fight:
            screen.blit(fight_highlight_image, fight_pos)
        else:
            screen.blit(fight_default_image, fight_pos)
        if act:
            screen.blit(act_highlight_image, act_pos)
        else:
            screen.blit(act_default_image, act_pos)
        if item:
            screen.blit(item_highlight_image, item_pos)
        else:
            screen.blit(item_default_image, item_pos)
        if mercy:
            screen.blit(mercy_highlight_image, mercy_pos)
        else:
            screen.blit(mercy_default_image, mercy_pos)
        # Sans (drawn outside the box mask)
        sans.show(screen)
        if show_sans_damage:
            if sans_damage == MISS:
                screen.blit(miss_image, (250, 60))
        # Gaster Blasters (outside the box mask)
        # NOTE(review): same remove-while-iterating caveat as the bone loop.
        for t in blasters:
            t.show(screen,
                   mask_surface_blue,
                   mask_surface_orange,
                   mask_surface_normal)
            if t.stop:
                blasters.remove(t)
        # scheduled tasks (delayed callbacks), also drawn/ticked here
        for t in tasks:
            t.show(screen)
            if t.stop:
                tasks.remove(t)
        if is_players_turn: # player's turn
            # widen the box into the menu/text area
            BOX_POS = [30, 250]
            BOX_SIZE = [570, 130]
            if page == MAIN_PAGE:
                # typewriter effect: reveal one more character per frame
                if shown_index < len(battle_text):
                    shown_index += 1
                    text_sound.play()
                x = 40
                y = 250
                for char in battle_text[:shown_index]:
                    if char != '\n':
                        screen.blit(
                            battle_font.render(char, True, (255, 255, 255)),
                            (x, y)
                        )
                        x += 12
                    # wrap at the box edge or on an explicit newline
                    if x > BOX_POS[0] + BOX_SIZE[0] or char == "\n":
                        y += 16
                        x = 40
                # soul acts as a cursor over the four menu buttons
                player.type = CURSOR_SOUL
                player.options = (
                    (fight_pos[0] + 10, fight_pos[1] + 15),
                    ( act_pos[0] + 10, act_pos[1] + 15),
                    ( item_pos[0] + 10, item_pos[1] + 15),
                    (mercy_pos[0] + 10, mercy_pos[1] + 15)
                )
                if player.choice == 0:
                    fight = True
                    act = False
                    item = False
                    mercy = False
                if player.choice == 1:
                    fight = False
                    act = True
                    item = False
                    mercy = False
                if player.choice == 2:
                    fight = False
                    act = False
                    item = True
                    mercy = False
                if player.choice == 3:
                    fight = False
                    act = False
                    item = False
                    mercy = False
                if player.choose:
                    # ITEM (index 2) has no page and maps to 0
                    page = [FIGHT, ACT, 0, MERCY][player.choice]
                    player.choose = False
                    player.choice = 0
                    fight = False
                    act = False
                    item = False
                    mercy = False
            # ACT target list (only Sans)
            if page == ACT:
                player.options = [(40, 255)]
                screen.blit(
                    battle_font.render("* sans", True, (255, 255, 255)),
                    (40, 250)
                )
                if player.choose:
                    page = [ACT_SANS][player.choice]
                    player.choose = False
                    player.choice = 0
                if player.back:
                    page = MAIN_PAGE
            # ACT submenu: check / heal
            if page == ACT_SANS:
                player.options = []
                y = 250
                for _ in actions.keys():
                    # fill the heal label with the remaining use count
                    if actions[_] == HEAL_SANS:
                        _ = _.format(heal_times_left)
                    screen.blit(
                        battle_font.render(_, True, (255, 255, 255)),
                        (40, y)
                    )
                    player.options.append((40, y + 5))
                    y += 20
                if player.choose:
                    page = list(actions.values())[player.choice]
                    # apply the heal immediately if uses remain
                    if page == HEAL_SANS:
                        if heal_times_left > 0:
                            heal(player, 92)
                            heal_times_left -= 1
                        else:
                            page = HEAL_SANS_CANT
                    player.choose = False
                    player.choice = 0
                if player.back:
                    page = ACT
            # CHECK result page (typewriter text, soul parked off-screen)
            if page == CHECK_SANS:
                player.type = RED_SOUL
                player.pos = [
                    -100,
                    -100
                ]
                battle_text = "* Sans\n The TRUE HERO.\n ATK:1\n DEF:1\n Nothing to say."
                if shown_index < len(battle_text):
                    shown_index += 1
                    text_sound.play()
                x = 40
                y = 250
                for char in battle_text[:shown_index]:
                    if char != '\n':
                        screen.blit(
                            battle_font.render(char, True, (255, 255, 255)),
                            (x, y)
                        )
                        x += 12
                    if x > BOX_POS[0] + BOX_SIZE[0] or char == "\n":
                        y += 20
                        x = 40
            # HEAL success page
            if page == HEAL_SANS:
                player.type = RED_SOUL
                player.pos = [
                    -100,
                    -100
                ]
                battle_text = "* You are healthy again now.\n* {} time(s) left.".format(heal_times_left)
                if shown_index < len(battle_text):
                    shown_index += 1
                    text_sound.play()
                x = 40
                y = 250
                for char in battle_text[:shown_index]:
                    if char != '\n':
                        screen.blit(
                            battle_font.render(char, True, (255, 255, 255)),
                            (x, y)
                        )
                        x += 12
                    if x > BOX_POS[0] + BOX_SIZE[0] or char == "\n":
                        y += 20
                        x = 40
            # HEAL refused page (no uses left)
            if page == HEAL_SANS_CANT:
                player.type = RED_SOUL
                player.pos = [
                    -100,
                    -100
                ]
                battle_text = "* No more times for you to heal!"
                if shown_index < len(battle_text):
                    shown_index += 1
                    text_sound.play()
                x = 40
                y = 250
                for char in battle_text[:shown_index]:
                    if char != '\n':
                        screen.blit(
                            battle_font.render(char, True, (255, 255, 255)),
                            (x, y)
                        )
                        x += 12
                    if x > BOX_POS[0] + BOX_SIZE[0] or char == "\n":
                        y += 20
                        x = 40
            # FIGHT target list (only Sans)
            if page == FIGHT:
                player.options = [(40, 255)]
                screen.blit(
                    battle_font.render("* sans", True, (255, 255, 255)),
                    (40, 250)
                )
                if player.choose:
                    page = [FIGHT_SANS][player.choice]
                    player.choose = False
                    player.choice = 0
                    # reset the attack-bar cursor to the left edge
                    choice_pos = [50, 250]
                if player.back:
                    page = MAIN_PAGE
            # attack minigame: a cursor sweeps across the target bar; the
            # player presses confirm to strike
            if page == FIGHT_SANS:
                player.type = RED_SOUL
                player.pos = [
                    -100,
                    -100
                ]
                target_img.set_alpha(target_alpha)
                if not choice_blink:
                    # fade the target bar in, then start the sweep
                    if target_alpha >= 255:
                        choice_going = True
                    else:
                        target_alpha += 10
                screen.blit(target_img, [BOX_POS[0] + 10, BOX_POS[1] + 5])
                # cursor blinks (alternating images) after a strike
                screen.blit([choice_img, choice_blink_img][choice_ani_index // 5 % 2], choice_pos)
                choice_ani_index += choice_blink
                choice_pos[0] += choice_going * 8
                # strike lands on confirm, or automatically at the bar's end
                if choice_going and (player.choose or choice_pos[0] > BOX_POS[0] + BOX_SIZE[0]):
                    choice_going = False
                    choice_blink = True
                    tasks.append(Strike(sans.pos[:]))
                    # pre-strike hook (rounds may override, e.g. windmill)
                    if not before_strike:
                        sans.target_pos = [100, 80]
                    else:
                        before_strike()
                if choice_blink:
                    blink_time += 1
                    if blink_time > 60:
                        # strike animation over: reset state, end the turn
                        show_sans_damage = False
                        choice_going = False
                        choice_blink = False
                        choice_ani_index = 0
                        target_alpha = 0
                        blink_time = 0
                        is_players_turn = False
                        stop = False
                        page = MAIN_PAGE
                        if not after_strike:
                            sans.target_pos = [250, 80]
                        else:
                            after_strike()
                        player.pos = [
                            BOX_POS[0] + BOX_SIZE[0] / 2,
                            BOX_POS[1] + BOX_SIZE[1] / 2
                        ]
                    elif blink_time > 30:
                        # second half: fade the bar out, show the damage
                        target_alpha -= 10
                        show_sans_damage = True
            # MERCY target list (only Sans)
            if page == MERCY:
                player.options = [(40, 255)]
                screen.blit(
                    battle_font.render("* sans", True, (255, 255, 255)),
                    (40, 250)
                )
                if player.choose:
                    page = [MERCY_SANS][player.choice]
                    player.choose = False
                    player.choice = 0
                if player.back:
                    page = MAIN_PAGE
            # MERCY submenu: spare / flee
            if page == MERCY_SANS:
                player.options = []
                y = 250
                for _ in mc_actions.keys():
                    screen.blit(
                        battle_font.render(_, True, (255, 255, 255)),
                        (40, y)
                    )
                    player.options.append((40, y + 5))
                    y += 20
                if player.choose:
                    page = list(mc_actions.values())[player.choice]
                    player.choose = False
                    player.choice = 0
                if player.back:
                    page = MERCY
            if page == MERCY_SANS_SPARE: # you spared — presumably done playing :)
                exit()
            if page == MERCY_SANS_FLEE: # you fled — presumably done playing :)
                exit()
        # you died: total effective HP (HP plus pending KR drain) hit zero
        if player.HP + player.KR <= 0:
            DEAD = True
        if DEAD or restarting:
            break
        # damage detection via pixel-mask collision against the three layers
        blue_mask = pygame.mask.from_surface(mask_surface_blue)
        orange_mask = pygame.mask.from_surface(mask_surface_orange)
        normal_mask = pygame.mask.from_surface(mask_surface_normal)
        # blue attacks hurt only while moving
        if mask_collide(blue_mask, player.mask, [0, 0], player.mask_pos):
            if any([player.going_up, player.going_down, player.going_left, player.going_right, player.falling]):
                damage(player)
        # orange attacks hurt only while standing still
        if mask_collide(orange_mask, player.mask, [0, 0], player.mask_pos):
            if not any([player.going_up, player.going_down, player.going_left, player.going_right, player.falling]):
                damage(player)
        # normal attacks always hurt
        if mask_collide(normal_mask, player.mask, [0, 0], player.mask_pos):
            damage(player)
        # player soul
        player.show(screen, _boxpos, _boxsize)
        # blackout attack: everything hidden behind black
        if blackout:
            screen.fill(0x000000)
        """将screen的图像加工后放入display"""
        # Post-process `screen` (scale for fullscreen, rotate, shake-offset)
        # and blit the result onto the real display surface.
        if not FULL_SCREEN:
            rotated_screen = pygame.transform.rotate(screen, screen_angle)
        else:
            screen_rect = screen.get_rect()
            rotated_screen = pygame.transform.rotate(
                pygame.transform.scale(
                    screen,
                    (
                        round(screen_rect.size[1] / screen_rect.size[0] * 1920),
                        1080
                    )
                ),
                screen_angle
            )
        rotated_rect = rotated_screen.get_rect()
        if not FULL_SCREEN:
            rotated_rect.center = [SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2]
        else:
            rotated_rect.center = [960, 540]
        display.blit(rotated_screen,
                     (rotated_rect.x + screen_offset[0],
                      rotated_rect.y + screen_offset[1]))
        fps.tick(frames)
        pygame.display.update()
        time2 = time_.time()
        delta = time2 - time1
    # Death animation (skipped when restarting via F2): heart shakes, splits,
    # then the outer loop restarts the fight.
    if not restarting:
        ticks = 0
        heart_offset = [0, 0]
        while True:
            '''死后的'''
            # after-death sequence, timed by `ticks`
            pygame.mixer.music.stop()
            ticks += 1
            screen.fill((0, 0, 0, 255))
            if ticks >= 200:
                break
            if ticks >= 160:
                # heart restored just before the restart
                screen.blit(alive_img, player.rect)
                if ticks == 160:
                    split_sound.play()
            elif ticks >= 100:
                # broken heart trembling in place
                screen.blit(dead_img,
                            (player.rect.x + heart_offset[0],
                             player.rect.y + heart_offset[1]))
                heart_offset = [random.randint(-2, 2), random.randint(-2, 2)]
            elif ticks >= 60:
                # heart splits with a crack sound
                screen.blit(dead_img, player.rect)
                if ticks == 60:
                    split_sound.play()
            else:
                screen.blit(alive_img, player.rect)
            # same present-to-display pipeline as the battle loop
            if not FULL_SCREEN:
                rotated_screen = pygame.transform.rotate(screen, screen_angle)
            else:
                screen_rect = screen.get_rect()
                rotated_screen = pygame.transform.rotate(
                    pygame.transform.scale(
                        screen,
                        (
                            round(screen_rect.size[1] / screen_rect.size[0] * 1920),
                            1080
                        )
                    ),
                    screen_angle
                )
            rotated_rect = rotated_screen.get_rect()
            if not FULL_SCREEN:
                rotated_rect.center = [SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2]
            else:
                rotated_rect.center = [960, 540]
            display.blit(rotated_screen,
                         (rotated_rect.x + screen_offset[0],
                          rotated_rect.y + screen_offset[1]))
            fps.tick(frames)
            pygame.display.update()
import json
import os
import numpy as np
import pandas as pd
import py4design.py2radiance as py2radiance
import py4design.py3dmodel.calculate as calculate
from py4design import py3dmodel
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Kian Wee Chen"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
from cea.constants import HOURS_IN_YEAR
from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry
from cea import suppress_3rd_party_debug_loggers
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
    """Write the sensor points/normals of one chunk to a Radiance ``.pts`` file.

    The file is written to ``rad.data_folder_path`` as ``points_<chunk_n>.pts``
    and its path is recorded on ``rad.sensor_file_path`` for later use.

    :param rad: py2radiance object carrying ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals``.
    :param chunk_n: integer chunk index used in the file name.
    """
    sensor_file_path = os.path.join(rad.data_folder_path, "points_" + str(chunk_n) + ".pts")
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)
    # with-statement guarantees the handle is closed even if the write fails
    # (the original open()/close() pair leaked the handle on error)
    with open(sensor_file_path, "w") as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is nudged 0.01 along its normal so sensors sit just off the
    surface, then gridded with the roof or wall cell size depending on
    ``srf_type``. Per-patch properties are returned as parallel lists.

    :return: (directions, midpoints, surface types, areas, orientations,
        intersection fractions), one entry per grid patch.
    """
    centre = py3dmodel.calculate.face_midpt(occface)
    offset_pt = py3dmodel.modify.move_pt(centre, normal, 0.01)
    moved_face = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(centre, offset_pt, occface))
    # square grid cells: roof spacing for roofs, wall spacing otherwise
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    sensor_surfaces = py3dmodel.construct.grid_face(moved_face, grid_dim, grid_dim)
    n_patches = len(sensor_surfaces)
    sensor_intersection = [intersection] * n_patches
    sensor_dir = [normal] * n_patches
    sensor_cord = [py3dmodel.calculate.face_midpt(srf) for srf in sensor_surfaces]
    sensor_type = [srf_type] * n_patches
    sensor_orientation = [orientation] * n_patches
    # usable area is reduced by the intersected fraction of each patch
    sensor_area = [calculate.face_area(srf) * (1.0 - frac)
                   for srf, frac in zip(sensor_surfaces, sensor_intersection)]
    return sensor_dir, sensor_cord, sensor_type, sensor_area, sensor_orientation, sensor_intersection
def calc_sensors_building(building_geometry, grid_size):
    """Collect sensor-patch properties for every surface of one building.

    Iterates the building's walls, windows and roofs, grids each face via
    generate_sensor_surfaces, and concatenates the per-patch results.

    :param building_geometry: BuildingGeometry with per-surface-type lists
        (faces, orientations, normals, wall intersection fractions).
    :param grid_size: dict with "walls_grid" and "roof_grid" cell sizes.
    :return: six parallel lists — directions, coordinates, surface types,
        areas, orientations, intersection fractions.
    """
    sensor_dir_list = []
    sensor_cord_list = []
    sensor_type_list = []
    sensor_area_list = []
    sensor_orientation_list = []
    sensor_intersection_list = []
    surfaces_types = ['walls', 'windows', 'roofs']
    sensor_vertical_grid_dim = grid_size["walls_grid"]
    sensor_horizontal_grid_dim = grid_size["roof_grid"]
    for srf_type in surfaces_types:
        occface_list = getattr(building_geometry, srf_type)
        if srf_type == 'roofs':
            # roofs always face straight up and are never intersected
            orientation_list = ['top'] * len(occface_list)
            normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)
            interesection_list = [0] * len(occface_list)
        elif srf_type == 'windows':
            # windows carry orientation/normal data but no intersections
            orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
            normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
            interesection_list = [0] * len(occface_list)
        else:
            # walls carry orientation, normal and intersection data
            orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
            normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
            interesection_list = getattr(building_geometry, "intersect_{srf_type}".format(srf_type=srf_type))
        for orientation, normal, face, intersection in zip(orientation_list, normals_list, occface_list,
                                                           interesection_list):
            sensor_dir, \
            sensor_cord, \
            sensor_type, \
            sensor_area, \
            sensor_orientation, \
            sensor_intersection = generate_sensor_surfaces(face,
                                                           sensor_vertical_grid_dim,
                                                           sensor_horizontal_grid_dim,
                                                           srf_type,
                                                           orientation,
                                                           normal,
                                                           intersection)
            sensor_intersection_list.extend(sensor_intersection)
            sensor_dir_list.extend(sensor_dir)
            sensor_cord_list.extend(sensor_cord)
            sensor_type_list.extend(sensor_type)
            sensor_area_list.extend(sensor_area)
            sensor_orientation_list.extend(sensor_orientation)
    return sensor_dir_list, sensor_cord_list, sensor_type_list, sensor_area_list, sensor_orientation_list, sensor_intersection_list
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Compute Daysim sensor points for every building in the zone.

    Loads each building's pickled geometry, grids its surfaces into sensor
    patches, writes a per-building metadata CSV via the locator, and
    accumulates the coordinates/directions to send to Daysim.

    :param building_names: iterable of building identifiers to process.
    :param locator: CEA input locator providing get_radiation_metadata().
    :param grid_size: dict with "walls_grid" and "roof_grid" cell sizes.
    :param geometry_pickle_dir: directory holding the pickled zone geometry.
    :return: (all coordinates, all directions, per-building sensor counts,
        building names, per-building sensor codes, per-building
        intersection lists).
    """
    sensors_coords_zone = []
    sensors_dir_zone = []
    sensors_total_number_list = []
    names_zone = []
    sensors_code_zone = []
    sensor_intersection_zone = []
    for building_name in building_names:
        building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name))
        # get sensors in the building
        sensors_dir_building, \
        sensors_coords_building, \
        sensors_type_building, \
        sensors_area_building, \
        sensor_orientation_building, \
        sensor_intersection_building = calc_sensors_building(building_geometry, grid_size)
        # get the total number of sensors and store in lst
        sensors_number = len(sensors_coords_building)
        sensors_total_number_list.append(sensors_number)
        # per-building surface codes: srf0, srf1, ...
        sensors_code = ['srf' + str(x) for x in range(sensors_number)]
        sensors_code_zone.append(sensors_code)
        # get the total list of coordinates and directions to send to daysim
        sensors_coords_zone.extend(sensors_coords_building)
        sensors_dir_zone.extend(sensors_dir_building)
        # get total list of intersections (kept per-building, not flattened)
        sensor_intersection_zone.append(sensor_intersection_building)
        # get the name of all buildings
        names_zone.append(building_name)
        # save sensors geometry result to disk
        pd.DataFrame({'BUILDING': building_name,
                      'SURFACE': sensors_code,
                      'orientation': sensor_orientation_building,
                      'intersection': sensor_intersection_building,
                      'Xcoor': [x[0] for x in sensors_coords_building],
                      'Ycoor': [x[1] for x in sensors_coords_building],
                      'Zcoor': [x[2] for x in sensors_coords_building],
                      'Xdir': [x[0] for x in sensors_dir_building],
                      'Ydir': [x[1] for x in sensors_dir_building],
                      'Zdir': [x[2] for x in sensors_dir_building],
                      'AREA_m2': sensors_area_building,
                      'TYPE': sensors_type_building}).to_csv(locator.get_radiation_metadata(building_name), index=None)
    return sensors_coords_zone, sensors_dir_zone, sensors_total_number_list, names_zone, sensors_code_zone, sensor_intersection_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,
                     max_global, weatherfile, geometry_pickle_dir):
    """Run one Daysim solar-irradiation simulation for a chunk of buildings.

    Creates a per-chunk Daysim project, sends the zone's sensor points to it,
    executes gen_dc/ds_illum, clips the hourly results to ``max_global``,
    drops a leap day if present and writes aggregated (and optionally raw
    per-sensor) results for every building of the chunk.

    Args:
        chunk_n: Index of the building chunk; used to name the Daysim project.
        cea_daysim: Daysim wrapper used to initialize the project.
        building_names: Buildings simulated in this chunk.
        locator: Project locator used to resolve output paths.
        radiance_parameters: Dict with radiance settings (rad_ab, rad_ad, ...).
        write_sensor_data: If True, also dump raw per-sensor results as JSON.
        grid_size: Sensor grid spacing.
        max_global: Upper clip bound for irradiation values (from weather data).
        weatherfile: Weather data providing the 'date' column for aggregation.
        geometry_pickle_dir: Directory with pickled building geometries.
    """
    # initialize daysim project
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))
    # calculate sensors
    print("Calculating and sending sensor points")
    sensors_coords_zone, \
    sensors_dir_zone, \
    sensors_number_zone, \
    names_zone, \
    sensors_code_zone, \
    sensor_intersection_zone = calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir)
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, "w/m2")
    print("Starting Daysim simulation for buildings: {buildings}".format(buildings=names_zone))
    print("Total number of sensors: {num_sensors}".format(num_sensors=num_sensors))
    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(radiance_parameters["rad_ab"], radiance_parameters["rad_ad"],
                                             radiance_parameters["rad_as"], radiance_parameters["rad_ar"],
                                             radiance_parameters["rad_aa"], radiance_parameters["rad_lr"],
                                             radiance_parameters["rad_st"], radiance_parameters["rad_sj"],
                                             radiance_parameters["rad_lw"], radiance_parameters["rad_dj"],
                                             radiance_parameters["rad_ds"], radiance_parameters["rad_dr"],
                                             radiance_parameters["rad_dp"])
    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()
    print('Reading results...')
    # solar_res rows are sensors, columns are hours (see leap-day check below)
    solar_res = daysim_project.eval_ill()
    # check inconsistencies and replace by max value of weather file
    print('Fixing inconsistencies, if any')
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)
    # Check if leap year and remove extra day
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        # hours 1416-1439 correspond to Feb 29 — TODO confirm against weather convention
        leap_day_hours = range(1416, 1440)
        solar_res = np.delete(solar_res, leap_day_hours, axis=1)
    print("Writing results to disk")
    index = 0
    for building_name, \
        sensors_number_building, \
        sensor_code_building, \
        sensor_intersection_building in zip(names_zone,
                                            sensors_number_zone,
                                            sensors_code_zone,
                                            sensor_intersection_zone):
        # select sensors data
        selection_of_results = solar_res[index:index + sensors_number_building]
        # zero out sensors flagged as intersecting other geometry
        selection_of_results[np.array(sensor_intersection_building) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_code_building, selection_of_results.tolist()))
        index = index + sensors_number_building
        # create summary and save to disk
        write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name, items_sensor_name_and_result, locator)
    # erase daysim folder to avoid conflicts after every iteration
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump the per-sensor hourly results of one building to its JSON file."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as sensor_file:
        json.dump(items_sensor_name_and_result, sensor_file)
def write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):
    """Aggregate per-sensor hourly results into per-surface-class totals.

    Reads the sensor metadata CSV written by ``calc_sensors_zone``, groups
    sensors by TYPE/orientation (e.g. 'windows_east_kW'), computes the
    area-weighted hourly sum per group plus its total area, and writes the
    resulting table (indexed by the weather file dates) to
    ``locator.get_radiation_building(building_name)``.
    """
    geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
    geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'
    solar_analysis_fields = ['windows_east_kW',
                             'windows_west_kW',
                             'windows_south_kW',
                             'windows_north_kW',
                             'walls_east_kW',
                             'walls_west_kW',
                             'walls_south_kW',
                             'walls_north_kW',
                             'roofs_top_kW']
    solar_analysis_fields_area = ['windows_east_m2',
                                  'windows_west_m2',
                                  'windows_south_m2',
                                  'windows_north_m2',
                                  'walls_east_m2',
                                  'walls_west_m2',
                                  'walls_south_m2',
                                  'walls_north_m2',
                                  'roofs_top_m2']
    dict_not_aggregated = {}
    for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
        select_sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
        area_m2 = select_sensors['AREA_m2'].sum()
        # area-weighted sum of the hourly series of all sensors in this group.
        # NOTE(review): if no sensor matches, np.array([]).sum(axis=0) yields
        # the scalar 0.0 rather than an hourly series — confirm downstream
        # handling.
        array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *
                                np.array(items_sensor_name_and_result[surface])
                                for surface in select_sensors.index]).sum(axis=0)
        dict_not_aggregated[field] = array_field / 1000  # in kWh
        dict_not_aggregated[field_area] = area_m2
    data_aggregated_kW = (pd.DataFrame(dict_not_aggregated)).round(2)
    data_aggregated_kW["Date"] = weatherfile["date"]
    data_aggregated_kW.set_index('Date', inplace=True)
    data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
|
2,635 | 365d031a31f3596df6fb71e620c293382d6ead1f | import os
import numpy as np
import networkx as nx
from matplotlib import colors, cm
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import Axes3D, art3d
from typing import Union, Sequence, List, Tuple, Optional
import wknml
from wkskel.types import Nodes, Parameters
class Skeleton:
    """The Skeleton class facilitates scientific analysis and manipulation of webKnossos tracings.
    It is designed as a high-level interface for working with nml files generated e.g with webKnossos. It makes use of
    the (low-level) `wknml` package mostly as an I/O interface to nml files.
    Class Attributes:
        DEFAULTS (dict): Global default parameters which are passed to each skeleton object instance
    """
    # Fallback node radius/comment and tree color applied when the caller does
    # not provide explicit values.
    DEFAULTS = {
        'node': {
            'radius': 100,
            'comment': ''
        },
        'tree': {
            'color': (0.0, 0.0, 0.0, 1.0)
        }
    }
def __init__(self, nml_path: str = None, parameters: Parameters = None, strict = True):
""" The Skeleton constructor expects either a path to a nml file or a Parameters object as input arguments
Args:
nml_path: Path to nml file. If constructed via an nml file, the skeleton object is populated with all the
trees and additional properties specified in the .nml file
parameters (optional): Parameters (wkskel.types.Parameters) specifying the most rudimentary properties
of the skeleton.
strict (optional): Controls assertions ensuring that resulting skeleton objects are compatible with
webKnossos. Default: True
Examples:
Using nml_path:
nml_path = '/path/to/example.nml'
skel = Skeleton(nml_path)
Using parameters:
parameters = Skeleton.define_parameters(name="2017-01-12_FD0156-2", scale=(11.24, 11.24, 32))
skel = Skeleton(parameters=parameters)
"""
assert (nml_path is not None) ^ (parameters is not None), \
'To construct a skeleton object, either a path to a nml file or the skeleton parameters need to passed'
self.nodes = list()
self.edges = list()
self.names = list()
self.colors = list()
self.tree_ids = list()
self.group_ids = list()
self.groups = list()
self.branchpoints = list()
self.parameters = Parameters()
self.nml_path = str()
self.strict = strict
self.defaults = self.DEFAULTS
# Construct from nml file
if nml_path is not None:
assert os.path.exists(nml_path), \
'not a valid path: {}'.format(nml_path)
try:
with open(nml_path, "rb") as f:
nml = wknml.parse_nml(f)
except IOError:
print('not a valid nml file: {}'.format(nml_path))
self._nml_to_skeleton(nml)
# Construct from parameters
else:
assert type(parameters) is Parameters, \
'provided parameters must be of type wkskel.types.Parameters'
self._parameters_to_skeleton(parameters)
def add_tree(self,
nodes: Nodes = Nodes(),
edges: Union[List[Tuple[int, int]], np.ndarray] = None,
tree_id: int = None,
name: str = '',
group_id: int = None,
color: Tuple[float, float, float, float] = None):
""" Appends new tree to skeleton.
Args:
nodes (optional): Nodes representing tree to be added
edges (optional): Edges representing tree to be added
tree_id (optional): Tree id to be used for new tree. Default: Highest current tree id + 1
name (optional): Name to be used for new tree. Default: Empty str
group_id (optional): Group id to be used for new tree. If passed group id does not exist, it is created.
Default: None
color (optional): Color to be used for new tree specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
"""
if edges is None:
edges = np.empty((0, 2), dtype=np.uint32)
elif type(edges) is list:
edges = np.asarray(edges)
if self.strict & (len(nodes) > 1):
assert Skeleton._num_conn_comp(Skeleton._get_graph(nodes, edges)) == 1, \
'Added tree consists of more than one connected component'
if tree_id is None:
tree_id = self.max_tree_id() + 1
if (group_id is not None) & (group_id not in self.groups_ids()):
self.add_group(id=group_id)
if color is None:
color = self.defaults['tree']['color']
self.nodes.append(nodes)
self.edges.append(edges)
self.tree_ids.append(tree_id)
self.group_ids.append(group_id)
self.names.append(name)
self.colors.append(color)
    def add_tree_from_skel(self,
                           skel: 'Skeleton',
                           tree_idx: int,
                           group_id: int = None,
                           name: str = None):
        """ Appends a specific tree contained in a different skeleton object to the skeleton.
        NOTE: mutates ``skel`` in place — all node and tree ids of the source
        skeleton are shifted so they do not collide with this skeleton's ids.
        Args:
            skel: Source skeleton object (different from the one calling this method) to be added
            tree_idx: Source tree index of tree to be added
            group_id (optional): Target group id to which the added tree should be assigned. Default: None
            name (optional): Target name for the added tree
        Returns:
            self, to allow chaining
        """
        if group_id not in self.groups_ids():
            self.add_group(id=group_id)
        if name is None:
            name = skel.names[tree_idx]
        skel._reset_node_ids(self.max_node_id() + 1)
        skel._reset_tree_ids(self.max_tree_id() + 1)
        self.nodes = self.nodes + [skel.nodes[tree_idx]]
        self.edges = self.edges + [skel.edges[tree_idx]]
        self.tree_ids = self.tree_ids + [skel.tree_ids[tree_idx]]
        self.group_ids = self.group_ids + [group_id]
        self.names = self.names + [name]
        self.colors = self.colors + [skel.colors[tree_idx]]
        return self
def add_trees_from_skel(self, skel: 'Skeleton'):
""" Appends all trees contained in a different skeleton object to the skeleton.
This method attempts to preserve the relative group structure found in the skeleton object to be added
Args:
skel: Source skeleton object (different from the one calling this method) to be added
"""
skel._reset_node_ids(self.max_node_id() + 1)
skel._reset_tree_ids(self.max_tree_id() + 1)
max_group_id = self.max_group_id()
if max_group_id is not None:
skel._reset_group_ids(max_group_id + 1)
self.nodes = self.nodes + skel.nodes
self.edges = self.edges + skel.edges
self.tree_ids = self.tree_ids + skel.tree_ids
self.group_ids = self.group_ids + skel.group_ids
self.groups = self.groups + skel.groups
self.names = self.names + skel.names
self.colors = self.colors + skel.colors
return self
    def add_nodes_as_trees(self,
                           nodes: Nodes,
                           tree_ids: List[int] = None,
                           group_ids: List[int] = None,
                           names: List[str] = None,
                           colors: List[Tuple[float, float, float, float]] = None):
        """ Appends each of the specified nodes as separate trees to the skeleton (1 node each).
        NOTE(review): indexes the default lists with the Nodes row index, so
        ``nodes`` is assumed to have a 0..n-1 RangeIndex — confirm for callers
        passing sliced tables.
        Args:
            nodes: Nodes representing the trees to be added
            tree_ids (optional): Tree ids to be assigned to the newly added trees. Default: Global max + [1, n]
            group_ids (optional): Group ids to be assigned to the newly added trees. Default: None
            names (optional): Names to be assigned to the newly added trees.
            colors (optional): Colors to be used for the new trees specified as (r, g, b, alpha). Default: (0, 0, 0, 1)
        """
        if tree_ids is None:
            tree_id_start = self.max_tree_id() + 1
            tree_id_end = tree_id_start + len(nodes)
            tree_ids = list(range(tree_id_start, tree_id_end))
        if group_ids is None:
            group_ids = [None for x in range(len(nodes))]
        if names is None:
            names = ['' for x in range(len(nodes))]
        if colors is None:
            colors = [(0.0, 0.0, 0.0, 1.0) for x in range(len(nodes))]
        # one single-row Nodes slice per new tree
        for node_idx, _ in nodes.iterrows():
            self.add_tree(
                nodes=nodes[node_idx:node_idx+1],
                tree_id=tree_ids[node_idx],
                group_id=group_ids[node_idx],
                name=names[node_idx],
                color=colors[node_idx]
            )
def delete_tree(self, idx: int = None, id: int = None):
""" Deletes tree with specified idx or id.
Args:
idx: Linear index of tree to be deleted
id: Id of tree to be deleted
"""
if id is not None:
idx = self.tree_ids.index(id)
self.nodes.pop(idx)
self.edges.pop(idx)
self.names.pop(idx)
self.colors.pop(idx)
self.tree_ids.pop(idx)
self.group_ids.pop(idx)
def add_group(self, parent_id: int = None, id: int = None, name: str = None):
""" Adds a new group to skeleton object.
Args:
parent_id: Parent group id to which new group is added as a child. Default: None (root group)
id: Id of new group to be added. Default: Current max group id + 1
name: Name of new group to be added. Default: 'Group {}'.format(id)
Returns:
id: Id of added group
name: Name of added group
"""
if parent_id is not None:
assert (parent_id in self.group_ids), ('Parent id does not exist')
if id is None:
id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)
else:
assert (id not in self.groups_ids()), ('Id already exists')
if name is None:
name = 'Group {}'.format(id)
new_group = wknml.Group(id, name, [])
if parent_id is None:
self.groups.append(new_group)
else:
self.groups = Skeleton._group_append(self.groups, parent_id, new_group)
return id, name
    def delete_group(self, id, target_id):
        """ Deletes a group. Not yet implemented — currently a no-op placeholder."""
        # TODO
        pass
    def define_nodes(self,
                     position_x: List[int],
                     position_y: List[int],
                     position_z: List[int],
                     id: List[int] = None,
                     radius: Optional[List[int]] = None,
                     rotation_x: Optional[List[float]] = None,
                     rotation_y: Optional[List[float]] = None,
                     rotation_z: Optional[List[float]] = None,
                     inVP: Optional[List[int]] = None,
                     inMag: Optional[List[int]] = None,
                     bitDepth: Optional[List[int]] = None,
                     interpolation: Optional[List[bool]] = None,
                     time: Optional[List[int]] = None,
                     comment: Optional[List[str]] = None) -> Nodes:
        """ Generates new nodes table from data.
        Args:
            position_x: Node position x
            position_y: Node position y
            position_z: Node position z
            id (optional): (Globally unique) Node id. Default: New unique ids are generated
            radius (optional): Node radius
            rotation_x (optional): Node rotation x
            rotation_y (optional): Node rotation y
            rotation_z (optional): Node rotation z
            inVP (optional): Viewport index in which node was placed
            inMag (optional): (De-)Magnification factor in which node was placed
            bitDepth (optional): Bit (Color) Depth in which node was placed
            interpolation (optional): Interpolation state in which node was placed
            time (optional): Time stamp at which node was placed
            comment (optional): Comment associated with node
        Returns:
            nodes: Nodes object
        """
        if id is None:
            # generate fresh ids following the current global maximum
            id_max = self.max_node_id()
            id = list(range(id_max+1, id_max+len(position_x)+1))
        nodes = Nodes.from_list(id, position_x, position_y, position_z, radius, rotation_x, rotation_y,
                                rotation_z, inVP, inMag, bitDepth, interpolation, time, comment)
        return nodes
def define_nodes_from_positions(self, positions: np.ndarray) -> Nodes:
""" Generates new nodes table from positions only (node ids are generated automatically).
Args:
positions (N x 3): Numpy array holding the (x,y,z) positions to be returned as nodes in a Nodes table
Returns:
nodes: Nodes object
"""
id_max = self.max_node_id()
id = np.array(range(id_max + 1, id_max + positions.shape[0] + 1)).reshape(-1, 1)
nodes = Nodes.from_numpy(np.append(id, positions, axis=1))
return nodes
    def get_distances_to_node(self,
                              positions: Union[Sequence[Tuple[int, int, int]], np.ndarray],
                              node_id: int = None,
                              tree_idx: int = None,
                              node_idx: int = None,
                              unit: str = 'um') -> np.ndarray:
        """ Get the (euclidean) distances from the specified node to the provided (x,y,z) positions
        Args:
            positions (N x 3): Target (x,y,z) positions to which the distances should be computed
            node_id: Node id of the node for which the distances should be computed
            tree_idx: Tree idx of the node for which the distances should be computed
            node_idx: Node idx of the node for which the distances should be computed
            unit (optional): Unit flag specifying in which unit the distances should be returned.
                Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
        Returns:
            distances: Array holding distances
        """
        # exactly one addressing mode: node_id, or the (tree_idx, node_idx) pair
        assert (node_id is not None) ^ ((tree_idx is not None) & (node_idx is not None)), \
            'Either provide node_id or both tree_idx and node_idx'
        if type(positions) is not np.ndarray:
            positions = np.array(positions)
        if node_id is not None:
            node_idx, tree_idx = self.node_id_to_idx(node_id)
        unit_factor = self._get_unit_factor(unit)
        distances = Skeleton.get_distance(positions, np.array(self.nodes[tree_idx].position.values[node_idx]), unit_factor)
        return distances
def get_distance_to_nodes(self,
position: Union[Tuple[int, int, int], np.ndarray],
tree_idx: int,
unit: str = 'um') -> List[np.ndarray]:
""" Get the (euclidean) distances from the nodes of the specified tree to the provided (x,y,z) position
Args:
position (1 x 3): Target (x,y,z) position to which the node distances should be computed
tree_idx: Tree idx for which node distances should be computed
unit (optional): Unit flag specifying in which unit the distances should be returned.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer). Default: 'um' (micrometer)
Returns:
distances: Array holding distances
"""
if type(position) is not np.ndarray:
position = np.array(position)
unit_factor = self._get_unit_factor(unit)
distances = Skeleton.get_distance(np.array(self.nodes[tree_idx].position.values), position, unit_factor)
return distances
def get_graph(self, tree_idx):
""" Returns the networkx graph representation of a tree.
Args:
tree_idx: Linear index of the tree to be returned as graph object
Returns:
graph: Graph object
"""
nodes = self.nodes[tree_idx]
edges = self.edges[tree_idx]
graph = Skeleton._get_graph(nodes, edges)
return graph
def get_shortest_path(self, node_id_start: int, node_id_end: int) -> List[int]:
""" Returns the shortest path between two nodes of a tree.
Args:
node_id_start: Node id of start node
node_id_end: Node id of end node
Returns:
shortest_path: Node indices comprising the shortest path
"""
_, tree_idx_start = self.node_id_to_idx(node_id_start)
_, tree_idx_end = self.node_id_to_idx(node_id_end)
assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'
graph = self.get_graph(tree_idx_start)
shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)
return shortest_path
def plot(self,
tree_inds: Union[int, List[int]] = None,
view: str = None,
colors: Union[Tuple[float, float, float, float], List[Tuple[float, float, float, float]], str] = None,
unit: str = 'um',
show: bool = True,
ax: plt.axes = None):
""" Generates a (3D) line plot of the trees contained in the skeleton object.
Args:
tree_inds (optional): Tree indices to be plotted.
Default: All trees are plotted
view (optional): Plot as 2D projection on orthonormal plane.
Options: 'xy', 'xz', 'yz'
Default: Plot as 3D projection
colors (optional): Colors in which trees should be plotted. If only one RGBA tuple is specified, it is
broadcasted over all trees. Alternatively, a list providing RGBA tuples for each tree can be passed.
Lastly, the name of a mnatplotlib colormap (https://matplotlib.org/tutorials/colors/colormaps.html) can
be passed as a str.
Default: Skeleton colors (self.colors) are used
unit (optional): Specifies in which unit the plot should be generated.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer).
Default: 'um' (micrometer)
show (optional): Displays the plot in an interactive window. For repeatedly plotting on the same axes, set
to False. Default: True
ax: Axes to be plotted on.
Returns:
ax: Axes which was plotted on
"""
if tree_inds is None:
tree_inds = list(range(len(self.nodes)))
elif tree_inds is int:
tree_inds = [tree_inds]
if colors is None:
colors = self.colors
elif type(colors) is str:
cmap = cm.get_cmap(colors)
colors = [cmap(x) for x in np.linspace(0, 1, self.num_trees())]
elif type(colors[0]) is not Sequence:
colors = [colors] * self.num_trees()
unit_factor = self._get_unit_factor(unit)
allowed_views = ['xy', 'xz', 'yz']
if view is not None:
assert (view in allowed_views), \
'The passed view argument: {} is not among the allowed views: {}'.format(view, allowed_views)
if ax is None:
fig = plt.figure()
if view is None:
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.add_subplot(111, projection='rectilinear')
else:
if view is None:
assert (ax.name == '3d'), \
'To generate a 3D skeleton plot, the projection type of the passed axes must be 3D'
else:
assert (ax.name != '3d'), \
'To generate a 2D skeleton plot, the projection type of the passed axes must be rectilinear'
lims_min = []
lims_max = []
for tree_idx in tree_inds:
edges = self.edges[tree_idx].copy()
nodes = self.nodes[tree_idx].copy()
if len(nodes) > 0:
nodes['position'] = nodes['position'].multiply(unit_factor)
if view == 'xy':
nodes = nodes.drop([('position', 'z')], axis=1)
elif view == 'xz':
nodes = nodes.drop([('position', 'y')], axis=1)
elif view == 'yz':
nodes = nodes.drop([('position', 'x')], axis=1)
lims_min.append(np.min(nodes['position'].values, axis=0))
lims_max.append(np.max(nodes['position'].values, axis=0))
segments = []
for edge in edges:
n0 = nodes['position'][nodes.id == edge[0]].values[0]
n1 = nodes['position'][nodes.id == edge[1]].values[0]
segment = [[c for c in n0], [c for c in n1]]
segments.append(segment)
if view is None:
line_collection = art3d.Line3DCollection(segments=segments, colors=colors[tree_idx])
ax.add_collection3d(line_collection)
else:
line_collection = LineCollection(segments=segments, colors=colors[tree_idx])
ax.add_collection(line_collection)
lim_min = np.min(np.array(lims_min), axis=0)
lim_max = np.max(np.array(lims_max), axis=0)
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
if view is None:
ax.set_zlim(lim_min[2], lim_max[2])
else:
ax.set_aspect('equal')
if show:
plt.show()
return ax
    def write_nml(self, nml_write_path):
        """ Writes the present state of the skeleton object to a .nml file.
        NOTE: a skeleton without trees gets one empty placeholder tree added
        (mutating self) so that webKnossos can import the resulting file.
        Args:
            nml_write_path: Path to which .nml file should be written
        """
        # If the object does not have any trees, construct an empty tree before writing to enable webKnossos import
        if self.num_trees() == 0:
            self.add_tree()
        nml = self._skeleton_to_nml()
        with open(nml_write_path, "wb") as f:
            wknml.write_nml(f, nml)
# Convenience Methods
def node_id_to_idx(self, node_id: int) -> (int, int):
""" Returns the linear tree and node indices for the provided node id."""
node_idx = None
for tree_idx, nodes in enumerate(self.nodes):
index_list = nodes[nodes['id'] == node_id].index.tolist()
if index_list:
node_idx = index_list[0]
break
assert (node_idx is not None), \
'node id {} does not exist'.format(node_id)
return node_idx, tree_idx
def node_idx_to_id(self, node_idx: int, tree_idx: int) -> int:
""" Returns the node id for the provided tree and node idx."""
node_id = self.nodes[tree_idx].loc[node_idx, 'id'].values[0]
return node_id
def min_group_id(self) -> int:
""" Returns lowest group id. If no groups are defined, return None"""
group_ids = np.asarray(self.group_ids, dtype=np.float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmin(group_ids))
return group_id
def max_group_id(self) -> int:
""" Returns highest group id. If no groups are defined, return None"""
group_ids = np.asarray(self.group_ids, dtype=np.float)
if np.all(np.isnan(group_ids)):
group_id = None
else:
group_id = int(np.nanmax(group_ids))
return group_id
def min_node_id(self) -> int:
""" Returns lowest global node id."""
if len(self.nodes) > 0:
min_node_id = min([min(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])
else:
min_node_id = 0
return min_node_id
def max_node_id(self) -> int:
""" Returns highest global node id."""
if len(self.nodes) > 0:
max_node_id = max([max(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])
else:
max_node_id = 0
return max_node_id
def min_tree_id(self) -> int:
""" Returns lowest global tree id."""
return min(self.tree_ids) if len(self.tree_ids)>0 else 0
def max_tree_id(self) -> int:
""" Returns highest global tree id."""
return max(self.tree_ids) if len(self.tree_ids)>0 else 0
def num_trees(self) -> int:
"""Returns number of trees contained in skeleton object."""
return len(self.nodes)
    def groups_ids(self) -> List[int]:
        """ Returns all ids defined in groups tree"""
        # _group_get_ids returns (groups, ids); only the flat id list is needed
        _, groups_ids = Skeleton._group_get_ids(self.groups)
        return groups_ids
# Private Methods
def _get_unit_factor(self, unit: str) -> np.ndarray:
""" Returns factor for unit conversion
Args:
unit: Unit for which to return the conversion factor.
Options: 'vx' (voxels), 'nm' (nanometer), 'um' (micrometer)
Returns:
unit_factor (shape=(3,)): Unit conversion factors
"""
unit_factors = {
'vx': np.array((1, 1, 1)),
'nm': np.array(self.parameters.scale),
'um': np.array(self.parameters.scale)/1000
}
assert unit in unit_factors.keys(), 'Invalid unit'
unit_factor = unit_factors[unit]
return unit_factor
def _reset_node_ids(self, start_id: int):
""" Resets node ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest node id should be set.
"""
add_id = start_id - self.min_node_id()
for tree_idx, _ in enumerate(self.nodes):
self.nodes[tree_idx].nodes['id'] += add_id
self.edges[tree_idx] += add_id
def _reset_tree_ids(self, start_id: int):
""" Resets tree ids of skeleton to begin with start value.
Args:
start_id: Start value to which the lowest tree id should be set.
"""
add_id = start_id - self.min_tree_id()
self.tree_ids = [tree_id + add_id for tree_id in self.tree_ids]
    def _reset_group_ids(self, start_id: int):
        """ Resets group ids of skeleton to begin with start value.
        No-op when the skeleton defines no groups.
        Args:
            start_id: Start value to which the lowest group id should be set.
        """
        min_group_id = self.min_group_id()
        if min_group_id is not None:
            add_id = start_id - min_group_id
            # shift per-tree group assignments (None means "no group")
            self.group_ids = [i + add_id if i is not None else i for i in self.group_ids]
            # shift ids recursively throughout the whole group tree
            self.groups = [Skeleton._group_modify_id(group, id_modifier=lambda x: x + add_id) for group in self.groups]
    def _parameters_to_skeleton(self, parameters):
        """ Generates bare skeleton object from parameters (no trees or groups)."""
        self.parameters = parameters
    def _nml_to_skeleton(self, nml):
        """ Converts wknml to skeleton data structures.
        Populates groups, branchpoints, parameters and one tree (nodes +
        edges) per nml tree via add_tree.
        """
        self.groups = nml.groups
        self.branchpoints = nml.branchpoints
        self.parameters = Parameters(**nml.parameters._asdict())
        for tree in nml.trees:
            self.add_tree(
                nodes=Skeleton._nml_nodes_to_nodes(nml_nodes=tree.nodes, nml_comments=nml.comments),
                edges=np.array([(edge.source, edge.target) for edge in tree.edges]),
                group_id=tree.groupId,
                name=tree.name,
                color=tree.color
            )
    def _skeleton_to_nml(self):
        """ Converts skeleton to wknml data structures.
        Returns:
            nml: wknml.NML object mirroring this skeleton's trees, comments,
                branchpoints, groups and parameters.
        """
        trees = []
        for tree_idx, tree_id in enumerate(self.tree_ids):
            nml_nodes = Skeleton._nodes_to_nml_nodes(self.nodes[tree_idx])
            nml_edges = Skeleton._edges_to_nml_edges(self.edges[tree_idx])
            tree = wknml.Tree(
                id=tree_id,
                color=self.colors[tree_idx],
                name=self.names[tree_idx],
                groupId=self.group_ids[tree_idx],
                nodes=nml_nodes,
                edges=nml_edges
            )
            trees.append(tree)
        nml = wknml.NML(
            parameters=wknml.NMLParameters(**self.parameters._asdict()),
            trees=trees,
            branchpoints=self.branchpoints,
            comments=self._skeleton_to_nml_comments(),
            groups=self.groups
        )
        return nml
    def _skeleton_to_nml_comments(self):
        """ Converts skeleton to wknml comments (one Comment per node with a non-null comment)."""
        nml_comments = []
        for nodes in self.nodes:
            comment_nodes = nodes[nodes['comment'].notnull()]
            for _, row in comment_nodes.iterrows():
                # .values[0] unwraps the single value behind the table's
                # MultiIndex columns (cf. ('comment', '') in _nml_nodes_to_nodes)
                nml_comment = wknml.Comment(
                    node=row['id'].values[0],
                    content=row['comment'].values[0]
                )
                nml_comments.append(nml_comment)
        return nml_comments
# Static Methods
    @staticmethod
    def define_parameters(
            name: str,
            scale: Tuple[float, float, float],
            offset: Tuple[float, float, float] = (0, 0, 0),
            time: int = 0,
            editPosition: Tuple[float, float, float] = (1.0, 1.0, 1.0),
            editRotation: Tuple[float, float, float] = (0.0, 0.0, 0.0),
            zoomLevel: float = 1.0,
            taskBoundingBox: Tuple[int, int, int, int, int, int] = None,
            userBoundingBox: Tuple[int, int, int, int, int, int] = None) -> Parameters:
        """ Creates a Parameters object from the provided tracing properties.
        Args:
            name: Dataset name
            scale: Voxel size as (x, y, z)
            offset (optional): Dataset offset
            time (optional): Time stamp
            editPosition (optional): Initial editing position
            editRotation (optional): Initial editing rotation
            zoomLevel (optional): Initial zoom level
            taskBoundingBox (optional): Task bounding box
            userBoundingBox (optional): User bounding box
        Returns:
            parameters: Parameters object
        """
        parameters = Parameters(
            name=name,
            scale=scale,
            offset=offset,
            time=time,
            editPosition=editPosition,
            editRotation=editRotation,
            zoomLevel=zoomLevel,
            taskBoundingBox=taskBoundingBox,
            userBoundingBox=userBoundingBox
        )
        return parameters
# Static Methods
    @staticmethod
    def get_distance(positions: np.ndarray, position: np.ndarray, unit_factor: np.ndarray = None):
        """ Get the (euclidean) distances between positions and a target position
        Args:
            positions (N x 3): Array holding (multiple) x, y, z positions
            position (1 x 3): Array holding x, y, z position to which the distances should be computed
            unit_factor (1 x 3 Array, optional): Conversion factors with which distances are multiplied. Default (1,1,1)
        Returns:
            distances: Arrays holding distances
        """
        if unit_factor is None:
            unit_factor = np.array([1, 1, 1])
        # per-axis scaling happens before the norm so anisotropic voxels work
        distances = np.sqrt(np.sum(((positions - position) * unit_factor.reshape(1, 3)) ** 2, axis=1))
        return distances
# Static Private Methods
    @staticmethod
    def _nml_nodes_to_nodes(nml_nodes, nml_comments):
        """ Converts wknml nodes (list of named tuples) to skeleton nodes (DataFrame subclass)."""
        # last tuple field is the comment, initialized to NaN and filled below
        data = [(node.id, node.position[0], node.position[1], node.position[2], node.radius, node.rotation[0],
                 node.rotation[1], node.rotation[2], node.inVp, node.inMag, node.bitDepth, node.interpolation,
                 node.time, np.nan) for node in nml_nodes]
        nodes = Nodes(data=data)
        # Add comments to nodes table
        comment_node_ids = [comment.node for comment in nml_comments]
        comment_strings = [comment.content for comment in nml_comments]
        nodes_ids_comments = nodes.id[nodes.id.isin(comment_node_ids)]
        for id in nodes_ids_comments:
            id_comment = comment_strings[comment_node_ids.index(id)]
            # ('comment', '') addresses the comment column of the MultiIndex
            nodes.loc[nodes.id == id, ('comment', '')] = id_comment
        return nodes
    @staticmethod
    def _nodes_to_nml_nodes(nodes):
        """ Converts skeleton nodes (DataFrame subclass) to wknml nodes (list of named tuples)."""
        nml_nodes = []
        for idx, row in nodes.iterrows():
            # NOTE(review): interpolation uses bool(row.interpolation.values)
            # — truthiness of a length-1 array, unlike the scalar conversions
            # on the other fields; confirm intent.
            nml_node = wknml.Node(
                id=int(row.id),
                position=tuple(row.position.values),
                radius=float(row.radius),
                rotation=tuple(row.rotation.values),
                inVp=int(row.inVp),
                inMag=int(row.inMag),
                bitDepth=int(row.bitDepth),
                interpolation=bool(row.interpolation.values),
                time=int(row.time)
            )
            nml_nodes.append(nml_node)
        return nml_nodes
@staticmethod
def _edges_to_nml_edges(edges):
""" Converts skeleton edges (numpy array) to wknml edges (list of named tuples)."""
nml_edges = []
for idx in range(edges.shape[0]):
nml_edge = wknml.Edge(
source=int(edges[idx, 0]),
target=int(edges[idx, 1]),
)
nml_edges.append(nml_edge)
return nml_edges
@staticmethod
def _group_append(groups, id, new_group):
""" Appends new group as a child of existing group with specified id. Currently only works up to depth=3."""
path_inds = []
_, _, idx = Skeleton._group_parent(groups, id)
while id is not None:
path_inds.append(idx)
id, idx, _ = Skeleton._group_parent(groups, id)
path_inds = list(reversed(path_inds))
if len(path_inds) == 1:
groups[path_inds[0]]._replace(children=new_group)
elif len(path_inds) == 2:
groups[path_inds[0]].children[path_inds[1]]._replace(children=new_group)
elif len(path_inds) == 3:
groups[path_inds[0]].children[path_inds[1]].children[path_inds[2]]._replace(children=new_group)
return groups
    @staticmethod
    def _group_parent(groups, id, parent_id=None, parent_idx=None, child_idx=None):
        """ Returns the id of the parent group for a (child) group with specified id.
        Recursively searches the group tree; returns (parent_id, parent_idx,
        child_idx), which remain at their passed-in values (None for a root
        group or an id that is not found) when no parent exists.
        """
        for group in groups:
            if id in [x.id for x in group.children]:
                parent_id = group.id
                parent_idx = groups.index(group)
                child_idx = [x.id for x in group.children].index(id)
            else:
                # descend; an unsuccessful search returns the values unchanged
                parent_id, parent_idx, child_idx = Skeleton._group_parent(group.children, id, parent_id, parent_idx, child_idx)
        return parent_id, parent_idx, child_idx
@staticmethod
def _group_modify_id(group, id_modifier):
""" Modifies group ids with the passed id_modifier (e.g. lambda) function."""
group = group._replace(id=id_modifier(group.id))
group = group._replace(children=list(map(lambda g: Skeleton._group_modify_id(g, id_modifier), group.children)))
return group
@staticmethod
def _group_get_ids(groups, ids = []):
for group in groups:
ids.append(group.id)
Skeleton._group_get_ids(group.children, ids)
return groups, ids
    @staticmethod
    def _get_graph(nodes: Nodes, edges: np.ndarray):
        """ Returns the networkx graph representation of provided nodes and edges.
        Graph nodes are keyed by node id and carry the remaining table columns
        as attributes.
        """
        graph = nx.Graph()
        graph.add_nodes_from(nodes['id'])
        attrs = nodes.set_index('id').to_dict('index')
        nx.set_node_attributes(graph, attrs)
        graph.add_edges_from(edges)
        return graph
@staticmethod
def _num_conn_comp(graph):
    """Return the number of connected components in the given networkx graph."""
    return nx.number_connected_components(graph)
|
2,636 | 2b1ec29d665aa93cd53644b62efcd1305b34e13e | print('Welcome aboard, Oleksij!')
|
2,637 | 4550ed971eef36badf46a44adcc593324a5292cf | from typing import Optional,List
from fastapi import FastAPI
from pydantic import BaseModel, Field
from redisqueue import RedisQueue,MyRedis
import random
class Award(BaseModel):
    """One prize entry inside a lottery rule."""

    name: str  # prize name
    count: int  # number of units of this prize in the pool
class Item(BaseModel):
    """A lottery rule: unique name, pool size, prize list and consolation label."""

    # FIX: the original spelled the kwarg "max_lenght"; pydantic's Field
    # silently ignored it, so the 300-char limit was never enforced.
    luckname: str = Field(..., title="抽奖规则名称", max_length=300)
    total: int = Field(..., title="抽奖总人数", gt=0)  # total number of draws
    award: Optional[List[Award]] = Field(None, title="奖品列表")
    other: str = Field(..., title="参与奖或者未中奖")  # consolation label
# Shared application instance used by every route decorator below.
app = FastAPI()
class ResSuccess(BaseModel):
    """Generic success envelope: ret == 0 plus an optional payload."""

    ret: int = 0
    # FIX: the original had a bare `data` expression here, which raises
    # NameError the moment the module is imported. Typed as an optional
    # free-form payload; NOTE(review): confirm the intended payload schema.
    data: Optional[dict] = None
@app.get('/')
def read_root():
    """Simple liveness / landing endpoint."""
    return {"Hello":"World"}
@app.post(
    '/delect',
    tags = ["抽奖接口"],
    summary = "删除抽奖规则"
)
def delect(name: str):
    """Delete (best effort) the queued prize pool for a lottery rule."""
    rq = RedisQueue(name)
    # FIX: `rq.qsize` (the bound method object) is always truthy; call it so
    # the pop only happens when the queue is non-empty, matching creat().
    if rq.qsize():
        # NOTE(review): lpop removes a single element; if deleting the whole
        # rule is intended, the queue should be drained or the key deleted.
        rq.lpop(name)
    return {
        'ret': 0,
        'msg': "删除成功"
    }
@app.post(
    '/creat',
    tags = ['抽奖接口'],
    summary="创建抽奖规则"
)
def creat(item: Item):
    """Create a lottery rule: build a shuffled prize queue and a stock hash.

    (通过该接口可以创建一个抽奖规则)
    """
    myredis = MyRedis()
    rq = RedisQueue(item.luckname)
    print("ok")
    if rq.qsize():
        return {
            "ret": 500,
            "msg": "该抽奖已经存在,请删除后重试"
        }
    # FIX: item.award is Optional and defaults to None; iterating None raised
    # TypeError. An absent prize list now just means "all consolation".
    awardlist = item.award or []
    # One queue entry per prize unit; luckdict tracks remaining stock.
    lucklist = []
    luckdict = {}
    for ward in awardlist:
        luckdict[ward.name] = ward.count
        for _ in range(ward.count):
            lucklist.append(ward.name)
    # Pad the pool with the consolation entry up to `total` draws.
    # NOTE(review): if prizes exceed item.total, othercount goes negative and
    # a negative stock is recorded while no padding happens -- confirm whether
    # that case should be rejected with an error instead.
    othercount = item.total - len(lucklist)
    if othercount:
        luckdict[item.other] = othercount
        others = [item.other] * othercount
        lucklist = lucklist + others
    random.shuffle(lucklist)
    print(lucklist)
    for luck in lucklist:
        rq.put(luck)
    myredis.hmset(item.luckname, luckdict)
    # (dead pre-assignment of `result` removed -- it was always overwritten)
    result = {
        'ret': 0,
        'msg': "succses"
    }
    return result
@app.get('/luck', tags = ["抽奖接口"], summary="抽奖接口")
def luck(id: int,luckname: str):
    """
    Draw one prize for user `id` from lottery `luckname` (开始抽奖).
    """
    rd = RedisQueue(luckname)
    myredis = MyRedis()
    # The winner hash doubles as a participation record: one draw per user.
    winner = luckname+"_winner"
    if myredis.hexists(winner,id):
        return {
            "ret":0,
            "msg":"您已经抽过了,不能再抽了"
        }
    # Pop the next pre-shuffled entry; falsy means the pool is exhausted.
    award = rd.get_nowait()
    if award:
        myredis.hset(winner,id,award)
        # Decrement remaining stock for this prize.
        myredis.hincrby(luckname,award,-1)
        result = {
            "ret":0,
            'data':{
                "flag":1,
                "msg":"恭喜你中奖了",
                "award":award
            }
        }
    else:
        result = {
            "ret":0,
            'data':{
                "flag":0,
                "msg":"奖抽完了",
            }
        }
    return result
@app.get('/luckman', tags = ["抽奖接口"], summary="查看中奖名单")
def luckman(luckname: str):
    """Return the winner map (user id -> prize) for the given lottery."""
    redis_client = MyRedis()
    winners_key = luckname + "_winner"
    winners = redis_client.hgetall(winners_key)
    print(winners)
    return {"ret": 0, "data": winners}
@app.get('/remaining', tags = ["抽奖接口"], summary="查看剩余奖品列表")
def Remaining(luckname: str):
    """Return the remaining prize stock (prize -> count) for the given lottery."""
    redis_client = MyRedis()
    remaining = redis_client.hgetall(luckname)
    print(remaining)
    return {"ret": 0, "data": remaining}
|
2,638 | 09284a96467b09c2ad7b65530c015fdb64b198a4 | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Dict, List, Optional, Sequence, Union
from correios import DATADIR, xml_utils
from correios.exceptions import PostingListSerializerError, TrackingCodesLimitExceededError
from correios.models.data import EXTRA_SERVICE_AR, EXTRA_SERVICE_MP
from correios.utils import get_wsdl_path, to_decimal, to_integer
from .models.address import ZipAddress, ZipCode
from .models.posting import (
EventStatus,
Freight,
FreightError,
NotFoundTrackingEvent,
Package,
PostingList,
ShippingLabel,
TrackingCode,
TrackingEvent
)
from .models.user import Contract, ExtraService, FederalTaxNumber, PostingCard, Service, StateTaxNumber, User
from .soap import SoapClient
KG = 1000 # g
class ValidRestrictResponse(Enum):
    """Correios freight-service error codes that indicate a restricted zip
    code rather than a hard failure (the quote is still usable)."""

    INITIAL_ZIPCODE_RESTRICTED = 9
    FINAL_ZIPCODE_RESTRICTED = 10
    INITIAL_AND_FINAL_ZIPCODE_RESTRICTED = 11

    @classmethod
    def restricted_codes(cls):
        """Return the numeric values of all restriction codes.

        Fix: the original listed FINAL_ZIPCODE_RESTRICTED twice and omitted
        INITIAL_ZIPCODE_RESTRICTED, so code 9 was treated as a hard error.
        """
        return [
            cls.INITIAL_ZIPCODE_RESTRICTED.value,
            cls.FINAL_ZIPCODE_RESTRICTED.value,
            cls.INITIAL_AND_FINAL_ZIPCODE_RESTRICTED.value,
        ]
class ModelBuilder:
    """Converts raw Correios SOAP response objects into domain model
    instances (User, Contract, PostingCard, tracking and freight models)."""

    def build_service(self, service_data):
        """Map a SIGEP service record onto a Service model."""
        service = Service(
            code=service_data.codigo,
            id=service_data.id,
            description=service_data.descricao,
            category=service_data.servicoSigep.categoriaServico
        )
        return service

    def build_posting_card(self, contract: Contract, posting_card_data):
        """Build a PostingCard for `contract`, copy its status fields and
        attach every service advertised for it."""
        posting_card = PostingCard(
            contract=contract,
            number=posting_card_data.numero,
            administrative_code=posting_card_data.codigoAdministrativo,
        )
        posting_card.start_date = posting_card_data.dataVigenciaInicio
        posting_card.end_date = posting_card_data.dataVigenciaFim
        posting_card.status = posting_card_data.statusCartaoPostagem
        posting_card.status_code = posting_card_data.statusCodigo
        posting_card.unit = posting_card_data.unidadeGenerica
        for service_data in posting_card_data.servicos:
            service = self.build_service(service_data)
            posting_card.add_service(service)
        return posting_card

    def build_contract(self, user: User, contract_data):
        """Build a Contract for `user`; its posting cards are attached as a
        side effect of build_posting_card."""
        contract = Contract(
            user=user,
            number=contract_data.contratoPK.numero,
            regional_direction=contract_data.codigoDiretoria,
        )
        contract.customer_code = contract_data.codigoCliente
        contract.status_code = contract_data.statusCodigo
        contract.start_date = contract_data.dataVigenciaInicio
        contract.end_date = contract_data.dataVigenciaFim
        for posting_card_data in contract_data.cartoesPostagem:
            self.build_posting_card(contract, posting_card_data)
        return contract

    def build_user(self, user_data):
        """Top-level entry: build the User and all nested contracts/cards."""
        user = User(
            name=user_data.nome,
            federal_tax_number=FederalTaxNumber(user_data.cnpj),
            state_tax_number=StateTaxNumber(user_data.inscricaoEstadual),
            status_number=user_data.statusCodigo,
        )
        for contract_data in user_data.contratos:
            self.build_contract(user, contract_data)
        return user

    def build_zip_address(self, zip_address_data):
        """Map a consultaCEP response onto a ZipAddress."""
        zip_address = ZipAddress(
            id=zip_address_data.id,
            zip_code=zip_address_data.cep,
            state=zip_address_data.uf,
            city=zip_address_data.cidade,
            district=zip_address_data.bairro,
            address=zip_address_data.end,
            complements=[zip_address_data.complemento, zip_address_data.complemento2]
        )
        return zip_address

    def build_posting_card_status(self, response):
        """Translate the service's status string; anything other than the
        literal "normal" (case-insensitive) counts as cancelled."""
        if response.lower() != "normal":
            return PostingCard.CANCELLED
        return PostingCard.ACTIVE

    def build_tracking_codes_list(self, response):
        """Expand a "first,last" response into the full tracking-code range."""
        codes = response.split(",")
        return TrackingCode.create_range(codes[0], codes[1])

    def _load_invalid_event(self, tracking_code: TrackingCode, tracked_object):
        """Attach a NotFound event carrying the service's error message."""
        event = NotFoundTrackingEvent(
            timestamp=datetime.now(),
            comment=tracked_object.erro,
        )
        tracking_code.add_event(event)

    def _load_events(self, tracking_code: TrackingCode, events):
        """Convert raw events into TrackingEvent objects; optional fields
        missing from the response default to empty strings."""
        for event in events:
            timestamp = datetime.strptime("{} {}".format(event.data, event.hora), TrackingEvent.timestamp_format)
            event = TrackingEvent(
                timestamp=timestamp,
                status=EventStatus(event.tipo, event.status),
                location_zip_code=getattr(event, "codigo", ""),
                location=getattr(event, "local", ""),
                city=getattr(event, "cidade", ""),
                state=getattr(event, "uf", ""),
                receiver=getattr(event, "recebedor", ""),
                document=getattr(event, "documento", ""),
                comment=getattr(event, "comentario", ""),
                description=getattr(event, "descricao", ""),
                details=getattr(event, "detalhes", ""),
            )
            tracking_code.add_event(event)

    def load_tracking_events(self, tracking_codes: Dict[str, TrackingCode], response):
        """Distribute the tracking response over the requested codes; objects
        flagged with 'erro' get a NotFound event instead of event data."""
        result = []
        for tracked_object in response.objeto:
            tracking_code = tracking_codes[tracked_object.numero]
            if 'erro' in tracked_object:
                self._load_invalid_event(tracking_code, tracked_object)
            else:
                tracking_code.name = tracked_object.nome
                tracking_code.initials = tracked_object.sigla
                tracking_code.category = tracked_object.categoria
                self._load_events(tracking_code, tracked_object.evento)
            result.append(tracking_code)
        return result

    def build_freights_list(self, response):
        """Build one Freight (or FreightError) per service entry."""
        result = []
        for service_data in response.cServico:
            freight = self.build_freight(service_data=service_data)
            result.append(freight)
        return result

    def build_freight(self, service_data):
        """Build a Freight from a CalcPrecoPrazo service entry.

        A non-zero error code yields a FreightError, except for the
        "restricted zip code" codes, which still carry a usable quote.
        """
        data = {
            'service': Service.get(service_data.Codigo),
            'error_code': to_integer(service_data.Erro),
            'delivery_time': int(service_data.PrazoEntrega),
            'value': to_decimal(service_data.ValorSemAdicionais),
            'declared_value': to_decimal(service_data.ValorValorDeclarado),
            'ar_value': to_decimal(service_data.ValorAvisoRecebimento),
            'mp_value': to_decimal(service_data.ValorMaoPropria),
            'saturday': service_data.EntregaSabado or "",
            'home': service_data.EntregaDomiciliar or "",
            'error_message': service_data.MsgErro or None
        }
        if (
            data['error_code'] and
            not data['error_code'] in ValidRestrictResponse.restricted_codes()
        ):
            return FreightError(**data)
        return Freight(**data)
class PostingListSerializer:
    """Serializes a PostingList into the XML (PLP) document expected by the
    Correios fechaPlpVariosServicos endpoint.

    Element order follows the service's XSD; do not reorder SubElement calls.
    """

    def _get_posting_list_element(self, posting_list):
        """Build the <plp> root block; empty children are filled in by the
        Correios service after submission."""
        element = xml_utils.Element("plp")
        xml_utils.SubElement(element, "id_plp")
        xml_utils.SubElement(element, "valor_global")
        xml_utils.SubElement(element, "mcu_unidade_postagem")
        xml_utils.SubElement(element, "nome_unidade_postagem")
        xml_utils.SubElement(element, "cartao_postagem", text=str(posting_list.posting_card))
        return element

    def _get_sender_info_element(self, posting_list):
        """Build the <remetente> block: contract, posting card and the
        sender's address/contact data (free-text fields go in CDATA)."""
        sender = posting_list.sender
        posting_card = posting_list.posting_card
        contract = posting_list.contract
        sender_info = xml_utils.Element("remetente")
        xml_utils.SubElement(sender_info, "numero_contrato", text=str(contract.number))
        xml_utils.SubElement(sender_info, "numero_diretoria", text=str(contract.regional_direction_number))
        xml_utils.SubElement(sender_info, "codigo_administrativo", text=str(posting_card.administrative_code))
        xml_utils.SubElement(sender_info, "nome_remetente", cdata=sender.name)
        xml_utils.SubElement(sender_info, "logradouro_remetente", cdata=sender.street)
        xml_utils.SubElement(sender_info, "numero_remetente", cdata=sender.number)
        xml_utils.SubElement(sender_info, "complemento_remetente", cdata=sender.complement)
        xml_utils.SubElement(sender_info, "bairro_remetente", cdata=sender.neighborhood)
        xml_utils.SubElement(sender_info, "cep_remetente", cdata=str(sender.zip_code))
        # City is truncated to 30 chars per the service's field limit.
        xml_utils.SubElement(sender_info, "cidade_remetente", cdata=str(sender.city)[:30])
        xml_utils.SubElement(sender_info, "uf_remetente", cdata=str(sender.state))
        xml_utils.SubElement(sender_info, "telefone_remetente", cdata=sender.phone.short)
        xml_utils.SubElement(sender_info, "fax_remetente", cdata="")
        xml_utils.SubElement(sender_info, "email_remetente", cdata=sender.email)
        return sender_info

    def _get_shipping_label_element(self, shipping_label: ShippingLabel):
        """Build one <objeto_postal> block: tracking code, receiver address,
        invoice data, extra services and package dimensions."""
        item = xml_utils.Element("objeto_postal")
        xml_utils.SubElement(item, "numero_etiqueta", text=str(shipping_label.tracking_code))
        xml_utils.SubElement(item, "codigo_objeto_cliente")
        xml_utils.SubElement(item, "codigo_servico_postagem", text=str(shipping_label.service))
        # Decimal values use the Brazilian comma separator.
        xml_utils.SubElement(item, "cubagem", text=str(shipping_label.posting_weight).replace(".", ","))
        xml_utils.SubElement(item, "peso", text=str(shipping_label.package.weight))
        xml_utils.SubElement(item, "rt1")
        xml_utils.SubElement(item, "rt2")
        receiver = shipping_label.receiver
        address = xml_utils.SubElement(item, "destinatario")
        xml_utils.SubElement(address, "nome_destinatario", cdata=str(receiver.name))
        xml_utils.SubElement(address, "telefone_destinatario", cdata=receiver.phone.short)
        xml_utils.SubElement(address, "celular_destinatario", cdata=receiver.cellphone.short)
        xml_utils.SubElement(address, "email_destinatario", cdata=str(receiver.email))
        xml_utils.SubElement(address, "logradouro_destinatario", cdata=str(receiver.street))
        xml_utils.SubElement(address, "complemento_destinatario", cdata=str(receiver.complement))
        xml_utils.SubElement(address, "numero_end_destinatario", text=str(receiver.number))
        national = xml_utils.SubElement(item, "nacional")
        xml_utils.SubElement(national, "bairro_destinatario", cdata=str(receiver.neighborhood))
        xml_utils.SubElement(national, "cidade_destinatario", cdata=str(receiver.city)[:30])
        xml_utils.SubElement(national, "uf_destinatario", text=str(receiver.state))
        xml_utils.SubElement(national, "cep_destinatario", cdata=str(receiver.zip_code))
        xml_utils.SubElement(national, "codigo_usuario_postal")
        xml_utils.SubElement(national, "centro_custo_cliente")
        xml_utils.SubElement(national, "numero_nota_fiscal", text=str(shipping_label.invoice_number))
        xml_utils.SubElement(national, "serie_nota_fiscal", text=str(shipping_label.invoice_series))
        xml_utils.SubElement(national, "valor_nota_fiscal", text=str(shipping_label.value).replace(".", ","))
        xml_utils.SubElement(national, "natureza_nota_fiscal", text=str(shipping_label.invoice_type))
        # Object description is limited to 20 chars.
        xml_utils.SubElement(national, "descricao_objeto", cdata=str(shipping_label.text)[:20])
        xml_utils.SubElement(national, "valor_a_cobrar", text=str(shipping_label.billing).replace(".", ","))
        extra_services = xml_utils.SubElement(item, "servico_adicional")
        for extra_service in shipping_label.extra_services:
            # Extra service numbers are zero-padded to 3 digits.
            xml_utils.SubElement(extra_services, "codigo_servico_adicional",
                                 text="{!s:>03}".format(extra_service.number))
        xml_utils.SubElement(extra_services, "valor_declarado", text=str(shipping_label.value).replace(".", ","))
        dimensions = xml_utils.SubElement(item, "dimensao_objeto")
        xml_utils.SubElement(dimensions, "tipo_objeto", text="{!s:>03}".format(shipping_label.package.package_type))
        xml_utils.SubElement(dimensions, "dimensao_altura", text=str(shipping_label.package.height))
        xml_utils.SubElement(dimensions, "dimensao_largura", text=str(shipping_label.package.width))
        xml_utils.SubElement(dimensions, "dimensao_comprimento", text=str(shipping_label.package.length))
        xml_utils.SubElement(dimensions, "dimensao_diametro", text=str(shipping_label.package.diameter))
        xml_utils.SubElement(item, "data_postagem_sara")
        xml_utils.SubElement(item, "status_processamento", text="0")
        xml_utils.SubElement(item, "numero_comprovante_postagem")
        xml_utils.SubElement(item, "valor_cobrado")
        return item

    def get_document(self, posting_list: PostingList):
        """Build the full XML tree for an open, non-empty posting list.

        Raises PostingListSerializerError when the list is empty or closed.
        """
        if not posting_list.shipping_labels:
            raise PostingListSerializerError("Cannot serialize an empty posting list")
        if posting_list.closed:
            raise PostingListSerializerError("Cannot serialize a closed posting list")
        root = xml_utils.Element("correioslog")
        root.append(xml_utils.Element("tipo_arquivo", text="Postagem"))
        root.append(xml_utils.Element("versao_arquivo", text="2.3"))
        root.append(self._get_posting_list_element(posting_list))
        root.append(self._get_sender_info_element(posting_list))
        root.append(xml_utils.Element("forma_pagamento"))
        for shipping_label in posting_list.shipping_labels.values():
            root.append(self._get_shipping_label_element(shipping_label))
        return root

    def validate(self, document):
        """Assert `document` against the bundled posting-list XSD schema."""
        with open(os.path.join(DATADIR, "posting_list_schema.xsd")) as xsd:
            xsd_document = xml_utils.parse(xsd)
        schema = xml_utils.XMLSchema(xsd_document)
        return schema.assert_(document)

    def get_xml(self, document) -> bytes:
        """Render the tree as ISO-8859-1 bytes with an XML declaration;
        characters outside the charset are dropped (errors='ignore')."""
        xmlstring = str(xml_utils.tostring(document, encoding="unicode"))
        encoded_xmlstring = xmlstring.encode("iso-8859-1", errors='ignore')
        return b'<?xml version="1.0" encoding="ISO-8859-1"?>' + encoded_xmlstring
class Correios:
    """High-level client for the Correios SOAP services: SIGEP (account,
    labels, posting lists), WebSRO (tracking) and CalcPrecoPrazo (freight)."""

    PRODUCTION = "production"
    TEST = "test"
    MAX_TRACKING_CODES_PER_REQUEST = 50  # limit imposed by the Correios API

    # 'environment': ('url', 'ssl_verification')
    sigep_urls = {
        'production': (get_wsdl_path('AtendeCliente-production.wsdl'), True),
        'test': (get_wsdl_path('AtendeCliente-test.wsdl'), False),
    }
    websro_url = get_wsdl_path('Rastro.wsdl')
    freight_url = get_wsdl_path('CalcPrecoPrazo.asmx')

    def __init__(self, username, password, timeout=8, environment="production"):
        """Create SOAP clients for all three services.

        environment selects the SIGEP endpoint ("production" or "test"; the
        test endpoint skips SSL verification).
        """
        self.username = username
        self.password = password
        self.timeout = timeout
        url, verify = self.sigep_urls[environment]
        self.sigep_url = url
        self.sigep_verify = verify
        self.sigep_client = SoapClient(self.sigep_url, verify=self.sigep_verify, timeout=self.timeout)
        self.sigep = self.sigep_client.service
        self.websro_client = SoapClient(self.websro_url, timeout=self.timeout)
        self.websro = self.websro_client.service
        self.freight_client = SoapClient(self.freight_url, timeout=self.timeout)
        self.freight = self.freight_client.service
        self.model_builder = ModelBuilder()

    def _auth_call(self, method_name, *args, **kwargs):
        """Invoke a SIGEP method, injecting the account credentials."""
        kwargs.update({
            "usuario": self.username,
            "senha": self.password,
        })
        return self._call(method_name, *args, **kwargs)

    def _call(self, method_name, *args, **kwargs):
        """Dispatch a raw SIGEP SOAP call by method name."""
        method = getattr(self.sigep, method_name)
        return method(*args, **kwargs)  # TODO: handle errors

    def get_user(self, contract_number: Union[int, str], posting_card_number: Union[int, str]) -> User:
        """Fetch and build the User (with contracts and posting cards)."""
        contract_number = str(contract_number)
        posting_card_number = str(posting_card_number)
        user_data = self._auth_call("buscaCliente", contract_number, posting_card_number)
        return self.model_builder.build_user(user_data)

    def find_zipcode(self, zip_code: Union[ZipCode, str]) -> ZipAddress:
        """Look up the address registered for a zip code."""
        zip_address_data = self._call("consultaCEP", str(zip_code))
        return self.model_builder.build_zip_address(zip_address_data)

    def verify_service_availability(self,
                                    posting_card: PostingCard,
                                    service: Service,
                                    from_zip_code: Union[ZipCode, str],
                                    to_zip_code: Union[ZipCode, str]) -> bool:
        """Check whether `service` is available between two zip codes."""
        from_zip_code = ZipCode.create(from_zip_code)
        to_zip_code = ZipCode.create(to_zip_code)
        result = self._auth_call("verificaDisponibilidadeServico",
                                 posting_card.administrative_code, str(service),
                                 str(from_zip_code), str(to_zip_code))
        return result

    def get_posting_card_status(self, posting_card: PostingCard) -> bool:
        """Return the ACTIVE/CANCELLED status of a posting card."""
        result = self._auth_call("getStatusCartaoPostagem", posting_card.number)
        return self.model_builder.build_posting_card_status(result)

    def request_tracking_codes(self, user: User, service: Service, quantity=1, receiver_type="C") -> list:
        """Request a batch of new tracking codes for `user` and `service`."""
        result = self._auth_call("solicitaEtiquetas",
                                 receiver_type, str(user.federal_tax_number),
                                 service.id, quantity)
        return self.model_builder.build_tracking_codes_list(result)

    def generate_verification_digit(self, tracking_codes: Sequence[str]) -> List[int]:
        """Ask the service for the check digit of each tracking code."""
        tracking_codes = [TrackingCode(tc).nodigit for tc in tracking_codes]
        result = self._auth_call("geraDigitoVerificadorEtiquetas",
                                 tracking_codes)
        return result

    def _generate_xml_string(self, posting_list: PostingList) -> str:
        """Serialize, validate and decode the posting-list XML document."""
        posting_list_serializer = PostingListSerializer()
        document = posting_list_serializer.get_document(posting_list)
        posting_list_serializer.validate(document)
        xml = posting_list_serializer.get_xml(document)
        return xml.decode("ISO-8859-1")

    def close_posting_list(self, posting_list: PostingList, posting_card: PostingCard) -> PostingList:
        """Submit the posting list to Correios and mark it closed with the
        id returned by the service."""
        xml = self._generate_xml_string(posting_list)
        tracking_codes = posting_list.get_tracking_codes()
        id_ = self._auth_call("fechaPlpVariosServicos", xml,
                              posting_list.custom_id, posting_card.number, tracking_codes)
        posting_list.close_with_id(id_)
        return posting_list

    def get_tracking_code_events(self, tracking_list):
        """Fetch tracking events for up to MAX_TRACKING_CODES_PER_REQUEST
        codes; accepts a single code or a list.

        Raises TrackingCodesLimitExceededError above the limit.
        """
        if isinstance(tracking_list, (str, TrackingCode)):
            tracking_list = [tracking_list]
        if len(tracking_list) > Correios.MAX_TRACKING_CODES_PER_REQUEST:
            msg = '{} tracking codes requested exceeds the limit of {} stabilished by the Correios'
            msg = msg.format(len(tracking_list), Correios.MAX_TRACKING_CODES_PER_REQUEST)
            raise TrackingCodesLimitExceededError(msg)
        tracking_codes = {}
        for tracking_code in tracking_list:
            tracking_code = TrackingCode.create(tracking_code)
            tracking_codes[tracking_code.code] = tracking_code
        # "L"/"T"/"101" are fixed WebSRO query parameters (query mode, event
        # scope, language); presumably per the Rastro WSDL -- confirm there.
        response = self.websro.buscaEventosLista(self.username, self.password, "L", "T", "101",
                                                 tuple(tracking_codes.keys()))
        return self.model_builder.load_tracking_events(tracking_codes, response)

    def calculate_freights(self,
                           posting_card: PostingCard,
                           services: List[Union[Service, int]],
                           from_zip: Union[ZipCode, int, str], to_zip: Union[ZipCode, int, str],
                           package: Package,
                           value: Union[Decimal, float] = 0.00,
                           extra_services: Optional[Sequence[Union[ExtraService, int]]] = None):
        """Quote freight for each service between two zip codes.

        The package weight is divided by KG (grams -> kilograms) as the
        service expects. MP/AR extra services are passed as "S"/"N" flags.
        """
        administrative_code = posting_card.administrative_code
        services = [Service.get(s) for s in services]
        from_zip = ZipCode.create(from_zip)
        to_zip = ZipCode.create(to_zip)
        if extra_services is None:
            extra_services = []
        else:
            extra_services = [ExtraService.get(es) for es in extra_services]
        response = self.freight.CalcPrecoPrazo(
            administrative_code,
            self.password,
            ",".join(str(s) for s in services),
            str(from_zip),
            str(to_zip),
            package.weight / KG,
            package.package_type,
            package.length,
            package.height,
            package.width,
            package.diameter,
            "S" if EXTRA_SERVICE_MP in extra_services else "N",
            value,
            "S" if EXTRA_SERVICE_AR in extra_services else "N",
        )
        return self.model_builder.build_freights_list(response)

    def calculate_delivery_time(self,
                                service: Union[Service, int],
                                from_zip: Union[ZipCode, int, str],
                                to_zip: Union[ZipCode, int, str]):
        """Return the estimated delivery time (in days) for one service."""
        service = Service.get(service)
        from_zip = ZipCode.create(from_zip)
        to_zip = ZipCode.create(to_zip)
        response = self.freight.CalcPrazo(str(service), str(from_zip), str(to_zip))
        return response.cServico[0].PrazoEntrega
|
2,639 | a945d7f673d009a59e597cd3c99a886094ea9e57 | import sys
import json
import eventlet
import datetime
import flask
from flask import Flask
from flask import render_template
__version__ = 0.1

# Server configuration. NOTE(review): values are hard-coded; consider
# environment variables for deployment.
PORT = 8000
HOST = '0.0.0.0'  # binds on all interfaces
DEBUG = False
RELDR = False  # use_reloader flag passed to socketio.run below

app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key -- load from the environment
# for any real deployment.
app.config['SECRET_KEY'] = 'secretkey'
@app.route('/login/')
def login():
    """Render the login page."""
    return render_template('login.html', name=None)
@app.route('/chat/')
def chat():
    """Render the chat page."""
    return render_template('chat.html', name=None)
@app.route('/messages/')
def msg_search():
    """Render the message-search page."""
    return render_template('search.html', name=None)
# NOTE(review): neither `socketio` nor `LOG` is defined anywhere visible in
# this module (there is no flask_socketio import). Unless the star-import
# below injects them into this namespace, the run() call raises NameError at
# startup -- confirm against event_handlers.
from .event_handlers import *
socketio.run(app, host=HOST, port=PORT, use_reloader=RELDR, debug=DEBUG, log_output=LOG)
|
2,640 | e3ee00efa0e929b87ca33b79dc6a6064b8758d4a | from django.conf.urls import url
from . import views
from .HouseView import CreateHouseView
# URL configuration for the `voronoi` app.
app_name = 'voronoi'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^search/$', views.search, name='search'),
    url(r'^house/create/$', CreateHouseView.as_view(), name='create'),
    #url(r'^get_search_results/$', views.get_search_results, name='get_search_results'),
    # JSON/AJAX endpoints used by the search page.
    url(r'^get_search_json/$', views.get_search_json, name='get_search_json'),
    url(r'^get_search_suggestions/$', views.get_search_suggestions, name='get_search_suggestions'),
    # ex: /polls/5/
    url(r'^(?P<house_id>[0-9]+)/$', views.detail, name='detail'),
    # ex: /polls/5/results/
    url(r'^(?P<house_id>[0-9]+)/ratings/$', views.ratings, name='ratings'),
]
2,641 | 1552d862d3b9df45eda8c08256e8b4437ab08740 | from flask import Flask,request,Response
from spamapp.spam import SpamIdentify
from json import dumps,loads
app = Flask(__name__)
# The classifier is loaded once at import time and shared across requests.
spam = SpamIdentify()
@app.route("/",methods=['GET'])
def home():
    """Liveness endpoint."""
    return Response(response=dumps({"msg":"App successfull"}), status=200, mimetype='application/json')
@app.route("/spamapi/",methods=['GET','POST'])
def apicall():
    """Classify the request body text as spam/ham.

    Expects a JSON body of the form {"input": <text>}.
    """
    try:
        predTxt = loads(request.data)
        predTxt = predTxt['input']
        response = spam.predict_data(predTxt)
        return Response(response=dumps(response), status=200, mimetype='application/json')
    except Exception as e:
        # Best-effort fallback: any failure (bad JSON, missing "input" key,
        # model error) returns sentinel result 6 -- still with HTTP 200.
        # NOTE(review): a 4xx status would let clients distinguish errors.
        print("Error",e)
        return Response(response=dumps({"result": 6}), status=200, mimetype='application/json')
if __name__ == "__main__":
    # NOTE(review): hard-coded LAN IP and debug=True are dev-only settings.
    app.run(
        host="192.168.2.240",
        port=5000,
        debug=True
    )
2,642 | 2a4f57cd0fc1c50cba06c285849432c6f71f28e2 | import json
import os
import sys
"""
Course: cmps 4883
Assignemt: A03
Date: 2/10/19
Github username: acdczlc
Repo url: https://github.com/acdczlc/4883-SWTools-Conley
Name: Zac Conley
Description:
Calculates all stats for questions about stats
"""
##############################################################
# MostTeams(dict of off and def players)
# gets player who played for most teams
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeams(OffAndDef):
    """Return [[name, team_count], ...] for the player(s) who played for the
    most teams overall; ties are all included. Placeholder records (empty
    name) are skipped."""
    leaders = []
    best = 0
    for record in OffAndDef.values():
        if record['name'] == '':
            continue
        n_teams = len(record['Teams'])
        if n_teams > best:
            best = n_teams
            leaders = [[record['name'], n_teams]]
        elif n_teams == best:
            leaders.append([record['name'], n_teams])
    return leaders
##############################################################
# MostTeamsOneYear(dict of off and def players)
# gets player who played for most teams in one year
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeamsOneYear(OffAndDef):
    """Return {team_count: {player_name: year}} for the player(s) who played
    for the most teams within a single season.

    Fix: the tie branch previously called .append() on a dict (and with two
    positional arguments); the resulting TypeError was swallowed by the bare
    except, so tied players were silently dropped. Ties are now recorded.
    """
    teams = {}
    count = 0
    # Player-record keys that are NOT season/year entries.
    non_year_keys = ('Drops', 'NegRushYards', 'NegRush', 'Teams', 'PassForLoss', 'name')
    for playerid, playerdata in OffAndDef.items():
        if playerdata['name'] != '':
            for years in playerdata:
                if years not in non_year_keys:
                    try:  # skip values without len() (non-season fields)
                        n = len(playerdata[years])
                        if n > count:
                            if n not in teams.keys():
                                teams.clear()  # drop previous, smaller leaders
                                teams[n] = {}
                            teams[n][playerdata['name']] = years
                            count = n
                        elif n == count:
                            # record the tie alongside the current leader(s)
                            teams[n][playerdata['name']] = years
                    except:
                        pass
    return teams
##############################################################
# NegativeRushingYards(dict of off and def players)
# gets player with most negative rushing yards
#
# Params:
# dict of players
# Returns:
# player with most negative rushing yards
def NegativeRushingYards(OffAndDef):
    """Return [[name, yards], ...] for the player(s) with the most rushing
    yards lost (most negative total); ties are all included."""
    worst = 0
    leaders = []
    for record in OffAndDef.values():
        yards = record['NegRushYards']
        if yards < worst:
            worst = yards
            leaders = [[record['name'], yards]]
        elif yards == worst:
            leaders.append([record['name'], yards])
    return leaders
##############################################################
# NegativeRushes(dict of off and def players)
# gets player with most negative rushes
#
# Params:
# dict of players
# Returns:
# player with most negative rushes
def NegativeRushes(OffAndDef):
    """Return [[name, count], ...] for the player(s) with the most rushes
    for a loss; ties are all included."""
    best = 0
    leaders = []
    for record in OffAndDef.values():
        rushes_for_loss = record['NegRush']
        if rushes_for_loss > best:
            best = rushes_for_loss
            leaders = [[record['name'], rushes_for_loss]]
        elif rushes_for_loss == best:
            leaders.append([record['name'], rushes_for_loss])
    return leaders
##############################################################
# MostPassForLoss(dict of off and def players)
# gets player with most negative rushes
#
# Params:
# dict of players
# Returns:
# player with most negative rushes
def MostPassForLoss(OffAndDef):
    """Return [[name, count], ...] for the player(s) with the most completed
    passes for a loss; ties are all included."""
    best = 0
    leaders = []
    for record in OffAndDef.values():
        passes_for_loss = record['PassForLoss']
        if passes_for_loss > best:
            best = passes_for_loss
            leaders = [[record['name'], passes_for_loss]]
        elif passes_for_loss == best:
            leaders.append([record['name'], passes_for_loss])
    return leaders
##############################################################
# MostPenalties(dict of team penalties)
# gets team with most penalties
#
# Params:
# dict of teams
# Returns:
# player with most negative rushes
def MostPenalties(penalties):
    """Return [[team, penalty_count], ...] for the most-penalized team(s);
    ties are all included."""
    worst = []
    high = 0
    for team, stats in penalties.items():
        if stats['Penalties'] > high:
            high = stats['Penalties']
            worst = [[team, stats['Penalties']]]
        elif stats['Penalties'] == high:
            worst.append([team, stats['Penalties']])
    return worst
##############################################################
# TeamPenaltyYards(dict of team penalties)
# gets team with most penaltiy yards
#
# Params:
# dict of teams
# Returns:
# team with most penalty yards
def TeamPenaltyYards(penalties):
    """Return [[team, yards], ...] for the team(s) with the most penalty
    yards; ties are all included."""
    worst = []
    high = 0
    for team, stats in penalties.items():
        if stats['PenaltyYards'] > high:
            high = stats['PenaltyYards']
            worst = [[team, stats['PenaltyYards']]]
        elif stats['PenaltyYards'] == high:
            worst.append([team, stats['PenaltyYards']])
    return worst
##############################################################
# PenaltyWins(most penalized team,dict of team penalties)
# shows correlation between penalty and record
#
# Params:
# dict of teams, most penalized team
# Returns:
# team with most penaltys and least
def PenaltyWins(penalties):
    """Compare the W/L records of the most- and least-penalized teams.

    Returns [[most_penalized_team, wins, losses],
             [least_penalized_team, wins, losses]].

    Fix: the loop over MostPenalties' result referenced the undefined name
    `team` instead of the loop variable `temp`, so the function always
    raised NameError.
    """
    x = MostPenalties(penalties)  # most penalized team(s)
    mostPenalized = []
    for temp in x:
        mostPenalized.append(temp[0])
    least = penalties[mostPenalized[0]]['Penalties']
    # Seed the result with the most-penalized team's record.
    mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'], penalties[mostPenalized[0]]['Losses']]]
    leastTeam = []
    for teamname, teamdata in penalties.items():
        if teamdata['Penalties'] < least:
            least = teamdata['Penalties']
            leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]
        elif teamdata['Penalties'] == least:
            leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])
    mostandleast.append(leastTeam[0])  # least-penalized team's record
    return mostandleast
##############################################################
# AverageNumberOfPlays()
# shows average number of plays
#
# Params:
# none
# Returns:
# avg number of plays
def AverageNumberOfPlays():
    """Return the average number of plays per game across every JSON file in
    the ./stats directory next to this script.

    Files that fail to parse are skipped silently (the bare except); the
    bookkeeping keys "nextupdate" and "crntdrv" are excluded from the counts.
    """
    games=0
    plays=0
    for filename in os.listdir(os.path.dirname(os.path.abspath(__file__))+'/stats'): # sets path to all stats
        with open(os.path.dirname(os.path.abspath(__file__))+"/stats/"+filename,"r") as json_file:
            try: #gets all stats and stores each game in a dict
                data=json.load(json_file)
            except:
                pass
            else:
                for gameid, gamedata in data.items():
                    if(gameid!="nextupdate"):
                        games+=1 #increment number of games
                        for driveid, drivedata in gamedata['drives'].items():
                            if(driveid!="crntdrv"):
                                plays+=drivedata['numplays'] #increment number of plays
    # NOTE(review): raises ZeroDivisionError if the stats directory is empty
    # or no file parses -- confirm whether 0 should be returned instead.
    avgplays=plays/games
    return avgplays
##############################################################
# LongestFG(dict of fgs)
# longest field goal
#
# Params:
# dict of fgs
# Returns:
# longest field goal and kicker
def LongestFG(fg):
    """Return [[kicker, distance], ...] for the longest made field goal(s);
    ties are all included."""
    best = 0
    leaders = []
    for stats in fg.values():
        dist = stats['Long']
        if dist > best:
            best = dist
            leaders = [[stats['Name'], dist]]
        elif dist == best:
            leaders.append([stats['Name'], dist])
    return leaders
##############################################################
# MostFG(dict of fgs)
# most made field goals
#
# Params:
# dict of fgs
# Returns:
# most made field goals and kicker
def MostFG(fg):
    """Return [[kicker, made], ...] for the kicker(s) with the most made
    field goals; ties are all included."""
    best = 0
    leaders = []
    for stats in fg.values():
        made = stats['FG']
        if made > best:
            best = made
            leaders = [[stats['Name'], made]]
        elif made == best:
            leaders.append([stats['Name'], made])
    return leaders
##############################################################
# MostMFG(dict of fgs)
# most missed field goals
#
# Params:
# dict of fgs
# Returns:
# most missed field goals and kicker
def MostMFG(fg):
    """Return [[kicker, missed], ...] for the kicker(s) with the most missed
    field goals; ties are all included."""
    best = 0
    leaders = []
    for stats in fg.values():
        missed = stats['MFG']
        if missed > best:
            best = missed
            leaders = [[stats['Name'], missed]]
        elif missed == best:
            leaders.append([stats['Name'], missed])
    return leaders
##############################################################
# MostDrops(dict of players)
# most drops
#
# Params:
# dict of players
# Returns:
# most drops
def MostDrops(OffAndDef):
    """Return [[name, drops], ...] for the player(s) with the most drops.

    Ties share the lead; players matching the running maximum are appended
    in iteration order.  Note the data uses lowercase 'name' here.
    """
    leaders = []
    best = 0
    for stats in OffAndDef.values():
        drop_count = stats['Drops']
        if drop_count > best:
            best = drop_count
            leaders = [[stats['name'], drop_count]]
        elif drop_count == best:
            leaders.append([stats['name'], drop_count])
    return leaders
# ---------------------------------------------------------------------------
# Report script: load the pre-split stat files and print the answer to each
# assignment question.  Fixes vs. original: files are opened with context
# managers, the bare `print` (a no-op expression in Python 3) is now
# `print()`, and the unused `count`/`temp` locals are gone.
# ---------------------------------------------------------------------------
path = os.path.dirname(os.path.abspath(__file__))  # directory holding this script and the JSON data

with open(path + '/OffAndDef.json', 'r') as f:
    OffAndDef = json.load(f)
with open(path + '/Penalties.json', 'r') as f:
    penalties = json.load(f)
with open(path + '/FG.json', 'r') as f:
    fg = json.load(f)

print("\n")
print("Name: Zac Conley")
print("Assignment: A03 - Nfl Stats")
print("Date: 2/10/19")
print("==================================================================================")
print("Question 1: Find the player(s) that played for the most teams.")
playerlist = MostTeams(OffAndDef)
for p in playerlist:
    print(str(p[0]) + ": "+ str(p[1]) +" teams\n")
print("==================================================================================")
print("Question 2: Find the player(s) that played for multiple teams in one year.")
ans = MostTeamsOneYear(OffAndDef)
for numteams in ans.items():
    for player in numteams[1].items():
        print(player[1]+": " +player[0]+" "+str(numteams[0])+" teams." )
print()  # was a bare `print`, which does nothing in Python 3
print("==================================================================================")
print("Question 3: Find the player(s) that had the most yards rushed for a loss.")
ans = NegativeRushingYards(OffAndDef)
for player in ans:
    print(player[0]+": "+str(player[1])+" rushing yards.\n")
print("==================================================================================")
print("Question 4: Find the player(s) that had the most rushes for a loss.")
ans = NegativeRushes(OffAndDef)
for player in ans:
    print(player[0]+": "+str(player[1])+" negative rushes.\n")
print("==================================================================================")
print("Question 5: Find the player(s) with the most number of passes for a loss.")
ans = MostPassForLoss(OffAndDef)
for player in ans:
    print(player[0]+": "+str(player[1])+" negative passes.\n")
print("==================================================================================")
print("Question 6: Find the team with the most penalties.")
ans = MostPenalties(penalties)
for team in ans:
    print(str(team[0])+" had "+str(team[1])+" penalties.\n")
print("==================================================================================")
print("Question 7: Find the team with the most yards in penalties.")
ans = TeamPenaltyYards(penalties)
for team in ans:
    print(team[0]+": "+str(int(team[1]))+" penalty yards.\n")
print("==================================================================================")
print("Question 8: Find the correlation between most penalized teams and games won / lost.")
ans = PenaltyWins(penalties)
print("Most Penalties: "+ans[0][0]+": "+str(ans[0][1]) +"-" +str(ans[0][2]))
print("Least Penalties: "+ans[1][0]+" : "+str(ans[1][1])+"-" +str(ans[1][2])+"\n")
print("==================================================================================")
print("Question 9: Average number of plays in a game. (This may take up to a minute.)")
ans = AverageNumberOfPlays()
print("On average, there are " +str(ans) +" plays each game. \n")
print("==================================================================================")
print("Question 10: Longest field goal.")
ans = LongestFG(fg)
for player in ans:
    print(player[0]+": "+str(player[1])+" yards.\n")
print("==================================================================================")
print("Question 11: Most field goals.")
ans = MostFG(fg)
for player in ans:
    print(player[0]+": "+str(player[1])+" FGs.\n")
print("==================================================================================")
print("Question 12: Most missed field goals.")
ans = MostMFG(fg)
for player in ans:
    print(player[0]+": "+str(player[1])+" missed FGs.\n")
print("==================================================================================")
print("Question 13: Most dropped passes.")
ans = MostDrops(OffAndDef)
for player in ans:
    print(player[0]+": "+str(player[1])+" drops.")
import cv2

# Decode every frame of the stream and dump each to a numbered PNG.
# Fix vs. original: when cap.read() fails (end of stream or decode error)
# the loop used to print forever; it now breaks, and the capture is released.
cap = cv2.VideoCapture("./vStream.h264")
count = 0
while True:
    ret, frame = cap.read()
    if not ret:
        print("Couldn\'t decoded frame")
        break
    print("Decoded frame")
    # cv2.imshow("frame", frame)
    cv2.imwrite("fr_" + str(count) + ".png", frame)
    count += 1
cap.release()
2,644 | de3eaa5823fb396050527c148273c30bed6ce8ca |
def drive(carspeed):
    """Print a speed category for *carspeed* and return the message.

    Fixes vs. original: speeds in [80, 100] previously fell through to
    "below speed limit", and the function returned None (so
    ``print(drive(...))`` printed a spurious ``None`` — see the question in
    the comment below the original).  Returning the message is backward
    compatible: bare calls behave exactly as before.
    """
    if carspeed > 200:
        message = "very fast"
    elif carspeed > 100:
        message = "toofast"
    elif carspeed > 80:
        # previously-unhandled band (80, 100]: still above the limit
        message = "toofast"
    elif carspeed > 70:
        message = "optimal speed"
    else:
        message = "below speed limit"
    print(message)
    return message
# Exercise drive(): it prints its category itself, so wrapping a call in
# print() additionally prints drive's return value.
print(drive(234))
print(drive(34))
drive(134)
# NOTE(review): as written, drive() returns None, so the two print(...) calls
# above also print "None"; returning the message from drive() would remove it.
def compare(a):
    """Print "big" for values above 11, "reallybig" for exactly 10.

    Any other value prints nothing.  Always returns None.
    """
    message = None
    if a > 11:
        message = "big"
    elif a == 10:
        message = "reallybig"
    if message is not None:
        print(message)


compare(10)
|
2,645 | 0774bad4082e0eb04ae3f7aa898c0376147e9779 | from models.bearing_registry import BearingRegistry
from models.faction import Faction
from models.maneuver import Maneuver
import time
class Activation:
    """The Activation phase of a turn.

    Each pilot, in the order given by the game's skill ordering, executes
    its chosen maneuver and may then perform one action.
    """

    def __init__(self, game):
        """game: the game under way."""
        self._game = game

    def execute(self):
        """Run the Activation phase for every pilot."""
        game = self._game
        for pilot in game.pilots_by_skill():
            self._activate_pilot(game, pilot)

    @staticmethod
    def _activate_pilot(game, pilot):
        """Activate one pilot: apply its maneuver, then an optional action."""
        pilot.active = True
        pilot.chosen_maneuver.apply(pilot)
        if pilot.can_perform_action():
            action = game.player(pilot.faction).choose_action(pilot)
            # TODO: Do something with this
        pilot.active = False
2,646 | 5dfe86d654e4184bab4401f8b634326996e42e9c | """
Naive Bayes Class
- Bernoulli Naive Bayes
- Multinomial Naive Bayes
- Gaussian Naive Bayes
Arthor: Zhenhuan(Steven) Sun
"""
import numpy as np
class BernoulliNB:
    """Bernoulli naive Bayes for binary features.

    k: Laplace smoothing factor.
    binarize: threshold above which a raw feature value maps to 1.
    """

    def __init__(self, k=1.0, binarize=0.0):
        # Laplace smoothing factor
        self.K = k
        # the degree of binarization
        self.binarize = binarize

    def fit(self, X, y):
        """Estimate class priors and per-feature Bernoulli parameters."""
        # Bernoulli NB assumes binary data, so threshold X first.
        X = self._binarize(X)
        # Group training rows by target class (class order = np.unique(y)).
        X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
        self.n_classes = len(np.unique(y))
        self.n_examples, self.n_features = X.shape
        # Class priors P(y) and their logs.
        prior_numerator = np.array([len(x) for x in X_separated_by_class])
        self.prior_prob = prior_numerator / self.n_examples
        self.log_prior_prob = np.log(self.prior_prob)
        # Smoothed per-class feature probabilities: pretend each feature was
        # seen K extra times in each of its two states (hence 2*K below).
        numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
        denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)
        self.conditional_prob = numerator / denominator
        return self

    def predict(self, X):
        """Return the class index with the highest posterior for each row."""
        X = self._binarize(X)
        log_p = np.log(self.conditional_prob)
        log_not_p = np.log(1 - self.conditional_prob)
        # Unnormalized log posterior: log P(y) + sum_j log P(x_j | y).
        # The per-row normalizing constant is identical for every class, so it
        # cannot change the argmax and is deliberately not computed.  (The
        # original subtracted a per-row *sum* of log posteriors, which is not
        # a valid normalizer anyway — that would require logsumexp — and had
        # no effect on the argmax; its own comment suggested this removal.)
        joint = np.array([(x * log_p + (1 - x) * log_not_p).sum(axis=1) +
                          self.log_prior_prob for x in X])
        return np.argmax(joint, axis=1)

    def _binarize(self, X):
        # convert the values in X to binary values (0 or 1)
        return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
    """Multinomial naive Bayes for count-valued features.

    k: Laplace smoothing factor.
    """

    def __init__(self, k=1.0):
        # Laplace smoothing factor
        self.K = k

    def fit(self, X, y):
        """Estimate class priors and smoothed per-class feature frequencies."""
        classes = np.unique(y)
        self.n_classes = len(classes)
        # Rows grouped per class, in np.unique order.
        grouped = [[row for row, label in zip(X, y) if label == c] for c in classes]
        # Prior P(y) = rows-in-class / rows-total, kept in log form as well.
        self.prior_prob = np.array([len(rows) for rows in grouped]) / X.shape[0]
        self.log_prior_prob = np.log(self.prior_prob)
        # Smoothed per-class feature counts: assume every feature was seen at
        # least K times so no zero probabilities appear.
        counts = np.array([np.array(rows).sum(axis=0) + self.K for rows in grouped])
        totals = np.expand_dims(counts.sum(axis=1), axis=1)
        self.conditional_prob = counts / totals
        return self

    def predict(self, X):
        """Return the class index with the highest posterior for each row."""
        log_likelihood = np.array([(row * np.log(self.conditional_prob)).sum(axis=1)
                                   for row in X])
        scores = log_likelihood + self.log_prior_prob
        return np.argmax(scores, axis=1)
class GaussianNB:
    """Gaussian naive Bayes with a smoothed diagonal covariance per class.

    k: smoothing constant added to each summed squared deviation so that no
    variance can be zero.
    """

    def __init__(self, k=1.0):
        # Laplace smoothing factor
        self.K = k

    def fit(self, X, y):
        """Estimate per-class priors, means, and diagonal covariances."""
        classes = np.unique(y)
        self.n_classes = len(classes)
        grouped = [[row for row, label in zip(X, y) if label == c] for c in classes]
        self.prior_prob = np.array([len(rows) / X.shape[0] for rows in grouped])
        self.mean_vector = np.array([np.array(rows).sum(axis=0) / len(rows)
                                     for rows in grouped])
        # One diagonal covariance matrix per class.  Adding K to every summed
        # squared deviation guarantees strictly positive variances, avoiding
        # division by zero / log(0) at prediction time.
        matrices = []
        for idx, rows in enumerate(grouped):
            squared_dev = 0
            for row in rows:
                diff = row - self.mean_vector[idx]
                squared_dev += diff ** 2
            matrices.append(((squared_dev + self.K) / len(rows)) * np.identity(X.shape[1]))
        self.covariance_diagonal_matrices = np.asarray(matrices)
        return self

    def log_gaussian_distribution(self, x, mean, variance):
        """Sum over features of log N(x | mean, variance)."""
        log_norm = -np.log(np.sqrt((2 * np.pi) * variance))
        log_exp = -(x - mean) ** 2 / (2 * variance)
        return sum(log_norm + log_exp)

    def predict(self, X):
        """Return the class index with the highest posterior for each row."""
        variances = np.array([m.diagonal() for m in self.covariance_diagonal_matrices])
        log_prior = np.log(self.prior_prob)
        scores = []
        for x in X:
            log_likelihoods = [self.log_gaussian_distribution(x, mean, variance)
                               for mean, variance in zip(self.mean_vector, variances)]
            scores.append(np.array(log_likelihoods) + log_prior)
        return np.argmax(np.array(scores), axis=1)
2,647 | 6cc56f73e58366a3906da537cc27fdd5a066ee34 | from django.conf.urls import url
#from .views import CommandReceiveView
from .views import index, send_message
# URL routes for the bot app.  The commented-out route is kept for reference:
# it forwarded Telegram webhook callbacks to CommandReceiveView.
urlpatterns = [
    #url(r'^bot/(?P<bot_token>.+)/$', CommandReceiveView.as_view(), name='command'),
    url(r'^send_message$', send_message, name='send_message'),  # message-send endpoint
    url(r'^$', index, name='index'),  # landing page
]
|
'''
The Tchau phone company charges:
 - below 200 minutes: R$ 0.20 per minute
 - between 200 and 400 minutes: R$ 0.18 per minute
 - above 400 minutes: R$ 0.15 per minute
 - bonus: above 800 minutes: R$ 0.08 per minute
Compute the phone bill.
'''


def calcula_conta(minutos):
    """Return the bill amount (R$) for *minutos* minutes of usage."""
    if minutos > 800:
        return minutos * 0.08
    if minutos > 400:
        # "and minutos <= 800" from the original is implied by the branch above
        return minutos * 0.15
    if minutos < 200:
        return minutos * 0.2
    # 200 <= minutos <= 400
    return minutos * 0.18


if __name__ == '__main__':
    # Guarded so the pricing logic is importable/testable without prompting.
    minutos = int(input('Minutos utilizados: '))
    total = calcula_conta(minutos)
    print('Valor da conta: R$ %.2f' %total)
2,649 | 02bc97b963b970993fc947cfa41c73230dd4d9e4 | import swipe
def scheduleMultipoint(driver):
    """Open the calendar tab and tap the add button to start a new appointment."""
    driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
    driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):
    """Pick the visit time: 23:00, 00 minutes, 15-minute duration, then confirm."""
    driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()  # open visit-time picker
    driver.find_element_by_name('23:00').click()  # hour
    driver.find_element_by_name('00').click()  # minutes
    driver.find_element_by_name('15分钟').click()  # duration: "15 minutes"
    driver.find_element_by_name('完成').click()  # "Done"
def data(driver):
    """Open the visit-date picker and accept the default date."""
    driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()  # visit date
    driver.find_element_by_name('完成').click()  # "Done"
def patient(driver):
    """Select the patient named "总校" from the patient list."""
    driver.find_element_by_id('com.dentist.android:id/patientLl').click()
    #driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')
    #driver.find_element_by_id('com.dentist.android:id/contactLl').click()
    # Tapped twice: once in the list, once to confirm the entry.
    driver.find_element_by_name('总校').click()
    driver.find_element_by_name('总校').click()
def site(driver):
    """Choose the clinic: city Beijing, then the Huamao branch."""
    driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()  # open location picker
    driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()
    driver.find_element_by_name('北京市').click()  # city: "Beijing"
    driver.find_element_by_name('返回').click()  # "Back"
    driver.find_element_by_name('欢乐口腔(华贸分院)').click()  # clinic branch
def project(driver):
    """Pick tooth position / treatment items, save, and submit the appointment."""
    driver.find_element_by_name('牙位/治疗项目').click()  # "tooth position / treatment item"
    driver.find_element_by_name('修复').click()  # "restoration"
    driver.find_element_by_name('备牙').click()  # "tooth preparation"
    driver.find_element_by_name('保存').click()  # "Save"
    swipe.swipeUp(driver)  # scroll down to reveal the submit button
    driver.find_element_by_name('发起预约').click()  # "Create appointment"
    driver.find_element_by_name('继续保存').click()  # "Continue saving"
def subscribe(driver):
    """Full appointment flow: patient, date, time, location, then treatment items."""
    patient(driver)
    data(driver)
    time(driver)
    site(driver)
    project(driver)
|
2,650 | fb4818e742ed3c7d131c426811f839dbe70f03de | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from customer_support.views import update_existing_subscriber, \
add_new_subscriber
from .views import (EditSubscriberView,
DeActivateSubscriberView,
ReActivateSubscriberView,
SupportSubscriberReportView,
DashboardView)
# Customer-support URL routes (legacy patterns() style).  The first two routes
# use function-based views imported from customer_support.views; the rest are
# class-based views from this package.
urlpatterns = patterns('',
                       url(
                           regex=r'new_subscriber/$',
                           view=add_new_subscriber,
                           name="support.new_subscriber"
                       ),
                       url(
                           regex=r'update_subscriber/(?P<pk>\d+)/$',
                           view=update_existing_subscriber,
                           name="support.update_subscriber"
                       ),
                       url(
                           regex=r'edit_subscriber/$',
                           view=EditSubscriberView.as_view(),
                           name="support.edit_subscriber"
                       ),
                       url(
                           regex=r'deactivate_subscriber/$',
                           view=DeActivateSubscriberView.as_view(),
                           name="support.deactivate_subscriber"
                       ),
                       url(
                           regex=r'reactivate_subscriber/$',
                           view=ReActivateSubscriberView.as_view(),
                           name="support.reactivate_subscriber"
                       ),
                       url(
                           regex=r'reports/$',
                           view=SupportSubscriberReportView.as_view(),
                           name="support.subscriber_report"
                       ),
                       url(
                           regex=r'dashboard/$',
                           view=DashboardView.as_view(),
                           name="support.dashboard"
                       ),
                       )
|
2,651 | 1c979d505b58025aae74865d6556c726ed3f0769 | # Generated by Django 2.2.15 on 2020-09-16 03:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.15).

    Tightens ForeignKey related_name/related_query_name pairs on existing
    models and introduces the Order and OperatorShift tables.

    NOTE(review): several FKs pass ``on_delete=django.db.models.fields.Empty``
    (address.street, vehicleservice.service, vehicleservice.vehicle).
    ``Empty`` is a sentinel class, not a deletion handler like CASCADE or
    PROTECT — confirm these were generated intentionally.
    """

    dependencies = [
        ('api', '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'),
    ]

    operations = [
        migrations.AlterField(
            model_name='address',
            name='city',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_addresses', related_query_name='city_address', to='api.City'),
        ),
        migrations.AlterField(
            model_name='address',
            name='district',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='district_addresses', related_query_name='district_address', to='api.District'),
        ),
        migrations.AlterField(
            model_name='address',
            name='street',
            field=models.ForeignKey(max_length=255, on_delete=django.db.models.fields.Empty, related_name='street_addresses', related_query_name='street_address', to='api.Street', verbose_name='Улица'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='courier',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='couriers', related_query_name='courier', to=settings.AUTH_USER_MODEL, verbose_name='Курьер'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shift_vehicles', related_query_name='shift_vehicle', to='api.Vehicle', verbose_name='Транспортное средство'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle_accepted_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_accepted_bys', related_query_name='vehicle_accepted_by', to=settings.AUTH_USER_MODEL, verbose_name='Принял'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle_given_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_given_bys', related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL, verbose_name='Выдал'),
        ),
        migrations.AlterField(
            model_name='district',
            name='city',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='city_districts', related_query_name='city_district', to='api.City'),
        ),
        migrations.AlterField(
            model_name='technicalservice',
            name='address',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_services', related_query_name='address_service', to='api.Address', verbose_name='Адрес СТО'),
        ),
        migrations.AlterField(
            model_name='vehicleservice',
            name='service',
            field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='service_vehicles', related_query_name='service_vehicle', to='api.TechnicalService'),
        ),
        migrations.AlterField(
            model_name='vehicleservice',
            name='vehicle',
            field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='vehicles', related_query_name='vehicle', to='api.Vehicle'),
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
                ('status', models.CharField(choices=[('new', 'Новый'), ('accepted', 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), ('in_progress', 'Выполняется')], default='new', max_length=100, verbose_name='Статус заказа')),
                ('accepted_time', models.DateTimeField(blank=True, null=True, verbose_name='Время подтверждения заказа')),
                ('start_time', models.DateTimeField(blank=True, null=True, verbose_name='Время начала выполнения заказа')),
                ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='Время завершения заказа')),
                ('reciever_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Имя получателя')),
                ('info', models.TextField(blank=True, null=True, verbose_name='Дополнительные сведения')),
                ('ransom_sum', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Сумма выкупа')),
                ('wait_time', models.TimeField(blank=True, null=True, verbose_name='Время ожидания')),
                ('delivery_cost', models.IntegerField(blank=True, null=True, verbose_name='Стоимость даставки')),
                ('delivery_time', models.TimeField(blank=True, null=True, verbose_name='Время выполнения заказа')),
                ('courier_shift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courier_orders', related_query_name='courier_order', to='api.CourierShift', verbose_name='Смена курьера')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders_created_by', related_query_name='order_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Кем создан')),
                ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers_orders', related_query_name='customer_order', to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),
                ('delivery_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_from', related_query_name='address_from', to='api.Address', verbose_name='Забрать от')),
                ('delivery_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_to', related_query_name='address_to', to='api.Address', verbose_name='Куда доставить')),
            ],
            options={
                'get_latest_by': '-created_at',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='OperatorShift',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
                ('start_time', models.DateField(auto_now_add=True, verbose_name='Начало смены')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='Конец смены')),
                ('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operator_shifts', related_query_name='operator_shift', to=settings.AUTH_USER_MODEL, verbose_name='Оператор')),
            ],
            options={
                'get_latest_by': '-created_at',
                'abstract': False,
            },
        ),
    ]
|
2,652 | 668fe3d561d94be73f2f721fac89e9e25005769b | import socket
import threading
#WebSocket Server Address
WS_ADDR = ("127.0.0.1",9876)
def ws_handler(sock,addr):
    # Per-connection handler stub (Python 2): the actual WebSocket
    # handshake/IO is not implemented yet — only progress messages print.
    print 'ws handshaking...'
    print 'connected...'
    print 'closing...'
def websocket_server():
    # Accept loop: binds WS_ADDR and spawns one ws_handler thread per client.
    # Blocks forever; intended to run on its own thread (see listen_ws).
    print 'listening for a WS connection... '
    svSock = socket.socket()
    # Allow quick restarts on the same port.
    svSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
    svSock.bind(WS_ADDR)
    svSock.listen(5)
    while (1):
        wSock,wAddr = svSock.accept()
        print 'accepted!'
        threading.Thread(target=ws_handler,args=(wSock,wAddr)).start()
# a new listen thread
def listen_ws():
    # Start the accept loop on a background thread.
    # Bug fix: the original passed target=websocket_server() — the () invoked
    # the blocking accept loop immediately on the *current* thread and handed
    # its (never-reached) return value to Thread.  Pass the callable itself.
    threading.Thread(target=websocket_server).start()
2,653 | ecc351cf95254e0bbc5021eff11c500fa0950bd3 | from bs4 import BeautifulSoup
from pprint import pprint
from scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage
from scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result
from scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS
from scraper import db
from datetime import datetime
import urllib
import json
import time
def scrape_sas():
    """Full scrape pipeline: fetch events, then their categories/stages, then
    backfill results for any stage or category that has none stored yet."""
    pprint("Scraping Events")
    get_mtb_events()
    pprint("Getting categories and stages")
    for event in db.session.query(SASEvent):
        pprint(event.event_id)
        get_categories_and_stages(event.event_reference, event.event_id)
        #time.sleep(2)
    # Event-level stages: only fetch results for stages with none stored.
    for event_stage in db.session.query(SASEventStage):
        pprint("Getting event stage results")
        base_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()
        if (base_event_stage.results):
            pprint("Event has results")
        else:
            write_stage_results(event_stage.stage_reference, event_stage.event_stage_id, "event")
    # Category-level stages, same skip-if-present rule.
    for category_stage in db.session.query(SASCategoryStage):
        pprint("Getting category stage results")
        base_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()
        if (base_category_stage.results):
            pprint("Category stage has results")
        else:
            write_stage_results(category_stage.stage_reference, category_stage.category_stage_id, "category")
    # Categories without their own stages hold results directly.
    for category in db.session.query(SASCategory):
        pprint("Getting category results")
        base_category = db.session.query(Category).filter(Category.id==category.category_id).first()
        if (base_category.results):
            pprint("Category has results")
        else:
            if (not base_category.category_stages):
                # NOTE(review): passes category.id (the SASCategory row id);
                # other lookups here use category.category_id — confirm which
                # id write_category_results expects.
                write_category_results(category.stage_reference, category.id)
            else:
                pprint("No results but has category stages")
    pprint("Scrape Complete")
def get_mtb_events():
    """Fetch the MTB event list for each configured year and insert any events
    (with their SAS reference rows) that are not already in the database.

    Bug fixes vs. original: the except clause referenced the non-existent
    ``urllib.error.ConnectionResetError`` (an AttributeError the moment an
    exception occurred), and after ``pass`` execution fell into the loop below
    with ``anchors`` unbound or stale from a previous year.
    """
    for year in YEARS:
        url = ("%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d" %
               (DESTINATION_URL, MTB_EVENT_TYPE, year))
        try:
            page = urllib.request.urlopen(url)
            content = page.read().decode("utf-8")
            json_content = json.loads(content)
            soup = BeautifulSoup(json_content['HTML'], "html.parser")
            anchors = soup.find_all('a')
        except (urllib.error.HTTPError, urllib.error.URLError, ConnectionResetError):
            # Skip this year entirely rather than reusing a stale soup.
            continue
        for anchor in anchors:
            event_reference = anchor["href"]
            event_date = None
            event_name = None
            for div in anchor.find_all('div'):
                if ("event-date" in div["class"]):
                    event_date = (div.find(text=True))
                elif ("event-title" in div["class"]):
                    event_name = (div.find(text=True))
            if event_date is None or event_name is None:
                # Malformed row: skip instead of raising on strptime below.
                continue
            db_date = datetime.strptime(event_date, '%d %b %Y')
            db_event = Event(event_name, db_date)
            # Dedupe by title before inserting.
            db_check = db.session.query(Event.title).filter(Event.title==event_name)
            if not (db.session.query(db_check.exists()).scalar()):
                db.session.add(db_event)
                db.session.flush()
                sas_event = SASEvent(db_event.id, event_reference)
                db.session.add(sas_event)
                db.session.commit()
def get_categories_and_stages(event_reference, event_id):
    """Fetch an event's page and populate its categories/stages, unless the
    event already has either recorded.

    Returns None; fetch failures are silently skipped (best-effort scrape).
    """
    event = db.session.query(Event).filter(Event.id==event_id).first()
    if (event.categories or event.event_stages):
        pprint("Event Exists")
        return
    url = (DESTINATION_URL + event_reference)
    try:
        page = urllib.request.urlopen(url)
    except (urllib.error.HTTPError, urllib.error.URLError):
        return
    soup = BeautifulSoup(page, "html.parser")
    # get_categories returns nothing useful; the original bound it to an
    # unused local (`check_stages`).
    get_categories(soup, event_id)
def get_categories(soup, event_id):
    """Parse an event page's category container and store each category;
    falls back to event-level stages when no categories exist."""
    category_div = soup.find('div', attrs={"id" : "category_container"})
    #Check to see if event has categories first
    if category_div:
        divs = category_div.find_all('div')
        for div in divs:
            if div.has_attr("data-event-category-id"):
                #Event has categories
                category_reference = div["data-event-category-id"]
                category_name = div["data-loading-text"]
                category_own_stage_reference = div["data-event-stage-id"]
                db_category = Category(category_name, event_id)
                #Check both name and event id to allow duplicate names
                db_category_check = db.session.query(Category.name).filter(
                    (Category.name==category_name) &
                    (Category.event_id==event_id))
                #Check SAS category for duplicates as well
                db_sas_category_check = db.session.query(SASCategory).filter(
                    (SASCategory.category_reference==category_reference) &
                    (SASCategory.stage_reference==category_own_stage_reference))
                if not (db.session.query(db_category_check.exists()).scalar()):
                    db.session.add(db_category)
                    db.session.flush()
                # NOTE(review): if the Category already existed, db_category was
                # never flushed, so db_category.id is None here — the SASCategory
                # insert and the recursion below would then use a None id.
                # Confirm whether pre-existing categories ever reach this path.
                if not (db.session.query(db_sas_category_check.exists()).scalar()):
                    db_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)
                    db.session.add(db_sas_category)
                    db.session.flush()
                    db.session.commit()
                if (div["data-multiple-event-stages"] == "1"):
                    #Event has stages with their own categories
                    get_category_stages(soup, db_category.id, category_reference)
    else:
        #Event does not have categories
        get_event_stages(soup, event_id)
def get_category_stages(soup, category_id, category_reference):
    """Store every stage listed under one category's stage container
    (div id "ec_<category_reference>")."""
    stage_group_div = soup.find('div', attrs={"id" : ("ec_" + category_reference)})
    stage_divs = stage_group_div.find_all('div')
    for stage_div in stage_divs:
        if stage_div.has_attr("data-stage-id"):
            category_stage_reference = stage_div["data-stage-id"]
            category_stage_name = stage_div["data-loading-text"]
            db_category_stage = CategoryStage(category_stage_name, category_id)
            #Check both name and category id to allow duplicate names
            db_category_stage_check = db.session.query(CategoryStage.name).filter(
                (CategoryStage.name==category_stage_name) &
                (CategoryStage.category_id==category_id))
            if not (db.session.query(db_category_stage_check.exists()).scalar()):
                db.session.add(db_category_stage)
                db.session.flush()
                # Mirror row linking our stage id to the SAS reference.
                db_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)
                db.session.add(db_sas_category_stage)
                db.session.flush()
                db.session.commit()
def get_event_stages(soup, event_id):
    """Store event-level stages for an event without categories; if the event
    has no stages either, synthesize a single "Overall Results" stage."""
    all_event_stage_divs = soup.find('div', class_ = "row categories_stages event-sub-types")
    #Check if event has stages
    if all_event_stage_divs:
        event_stage_divs = all_event_stage_divs.find_all ('div')
        for event_stage_div in event_stage_divs:
            if event_stage_div.has_attr("data-stage-id"):
                #Event has stages and no categories
                event_stage_reference = event_stage_div["data-stage-id"]
                event_stage_name = event_stage_div["data-loading-text"]
                db_event_stage = EventStage(event_stage_name, event_id)
                #Check if it exists by name and ID and add if it doesn't
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name==event_stage_name) &
                    (EventStage.event_id==event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.flush()
                    db.session.commit()
    else:
        #Event has no stages or categories
        #create new stage for just the overall results, unless event has no results
        event_stage_reference_div = soup.find('div', class_ = "result-row load-results")
        if event_stage_reference_div:
            if event_stage_reference_div.has_attr("data-stage"):
                event_stage_reference = event_stage_reference_div["data-stage"]
                sas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name=="Overall Results") &
                    (EventStage.event_id==sas_event.event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db_event_stage = EventStage("Overall Results", sas_event.event_id)
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.commit()
def get_results(event_reference):
    """Fetch up to 9999 result rows for a stage/category reference.

    Returns the list of row dicts from the JSON payload, or an empty list on
    fetch failure.  Bug fixes vs. original: the except clause referenced the
    non-existent ``urllib.error.ConnectionResetError`` (AttributeError at
    handler-evaluation time), and the bare ``return`` yielded None, which
    crashed callers that iterate the result unconditionally.  An empty list
    is equally falsy, so ``if results:`` callers are unaffected.
    """
    url = ("%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999" %
           (DESTINATION_URL, event_reference))
    pprint(url)
    try:
        page = urllib.request.urlopen(url)
    except (urllib.error.HTTPError, urllib.error.URLError, ConnectionResetError):
        return []
    content = page.read().decode("utf-8")
    json_content = json.loads(content)
    return json_content['rows']
def write_stage_results(stage_reference, stage_id, stage_type):
    """Persist results for an event stage or a category stage.

    stage_type must be "event" or "category"; it decides which foreign-key
    column of Result receives stage_id. Rows already in the database are
    skipped, keeping the operation idempotent.
    """
    results = get_results(stage_reference)
    if not results:
        # get_results returns None on HTTP failure and [] when there are no rows
        return
    # Exactly one of the two stage-id columns is populated.
    event_stage_id = stage_id if stage_type == "event" else None
    category_stage_id = stage_id if stage_type == "category" else None
    for result in results:
        participant_id = get_participant(result)
        db_result_check = db.session.query(Result).filter(
            (Result.position == result['overall_pos']) &
            (Result.gender_position == result['gender_pos']) &
            (Result.time == result['time_taken_seconds']) &
            (Result.event_stage_id == event_stage_id) &
            (Result.category_stage_id == category_stage_id))
        if db.session.query(db_result_check.exists()).scalar():
            continue  # already stored
        if stage_type == "category":
            db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                               result['time_taken_seconds'], None, category_stage_id, None)
        elif stage_type == "event":
            db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                               result['time_taken_seconds'], event_stage_id, None, None)
        else:
            # BUG FIX: the original left db_result unbound for an unknown
            # stage_type and crashed with NameError; skip the row explicitly.
            continue
        db.session.add(db_result)
        db.session.commit()
def write_category_results(category_reference, category_id):
    """Persist results belonging to a category (Result.category_id column)."""
    results = get_results(category_reference)
    if not results:
        # BUG FIX: get_results returns None when the HTTP request fails; the
        # original iterated over it unconditionally and raised TypeError.
        return
    for result in results:
        participant_id = get_participant(result)
        db_result_check = db.session.query(Result).filter(
            (Result.position == result['overall_pos']) &
            (Result.gender_position == result['gender_pos']) &
            (Result.time == result['time_taken_seconds']) &
            (Result.category_id == category_id)).first()
        if not db_result_check:
            db_category_result = Result(result['overall_pos'], participant_id,
                                        result['gender_pos'], result['time_taken_seconds'],
                                        None, None, category_id)
            db.session.add(db_category_result)
            db.session.commit()
def get_participant(result):
    """Return the id of the Participant matching *result*, creating it if needed.

    Matching is on first name, last name, sex and (optional) birth date.
    """
    if result['date_of_birth']:
        birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()
    else:
        birth_date = None
    # Single query instead of the original exists()+first() pair (two round-trips).
    existing = db.session.query(Participant).filter(
        (Participant.first_name == result['first_name']) &
        (Participant.last_name == result['last_name']) &
        (Participant.sex == result['person_sex']) &
        (Participant.birth_date == birth_date)).first()
    if existing:
        return existing.id
    db_participant = Participant(result['first_name'], result['last_name'],
                                 result['person_sex'], birth_date)
    db.session.add(db_participant)
    db.session.commit()
    return db_participant.id
|
2,654 | 48a4331e4b26ea81f1c52ae76db1e92a57cb378c | from django.urls import path
from .views import *
from .utils import *
# URL namespace for reversing, e.g. reverse('gymapp:clienthome').
app_name = 'gymapp'
urlpatterns = [
    ## CLIENT PATHS ##
    # general pages
    path('', ClientHomeView.as_view(), name='clienthome'),
    path('about/', ClientAboutView.as_view(), name='clientabout'),
    path('contact/', ClientContactCreateView.as_view(), name='clientcontact'),
    # path('makeanappointment/', ClientAppointmentCreateView.as_view(),
    #      name='clientappointmentcreate'),
    path('products/', ClientProductListView.as_view(), name='clientproductlist'),
    path('product/<int:pk>/detail/',ClientProductDetailView.as_view(),
         name='clientproductdetail'),
    path('trainers/', ClientTrainerListView.as_view(), name='clienttrainerlist'),
    path('trainer/<slug:slug>/detail/', ClientTrainerDetailView.as_view(),
         name='clienttrainerdetail'),
    path('services/', ClientServiceListView.as_view(),
         name='clientservicelist'),
    path('services/<slug:slug>/detail/',
         ClientServiceDetailView.as_view(), name='clientservicedetail'),
    path('schedule/<slug:slug>/detail/',
         ClientScheduleDetailView.as_view(), name='clientscheduledetail'),
    path('testimonial/',
         TestimonialListView.as_view(), name='testimoniallist'),
    # path('slider/',
    #      SliderListView.as_view(), name='sliderlist'),
    # content sections: facilities, events, notices, static pages
    path('facilities/', ClientFacilityListView.as_view(),
         name='clientfacilitylist'),
    path('facilities/<slug:slug>/details',
         ClientFacilityDetailView.as_view(), name='clientfacilitydetail'),
    path('events/', ClientEventListView.as_view(),
         name='clienteventlist'),
    path('events/<slug:slug>/details',
         ClientEventDetailView.as_view(), name='clienteventdetail'),
    path('notices/', ClientNoticeListView.as_view(), name='clientnoticelist'),
    path('notices/<slug:slug>/details',
         ClientNoticeDetailView.as_view(), name='clientnoticedetail'),
    path('pages/<slug:slug>/details',
         ClientPageDetailView.as_view(), name='clientpagedetail'),
    # media galleries and blog
    path('images/', ClientImageListView.as_view(), name='clientimagelist'),
    path('videos/', ClientVideoListView.as_view(), name='clientvideolist'),
    path('blogs/', ClientBlogListView.as_view(), name='clientbloglist'),
    path('blogs/<slug:slug>/details',
         ClientBlogDetailView.as_view(), name='clientblogdetail'),
    path('schedules/', ClientScheduleListView.as_view(), name='clientschedulelist'),
    path('404/', ClientPageNotFoundView.as_view(), name='clientpagenotfound'),
    path('subscribe/', ClientSubscriberCreateView.as_view(),
         name='clientsubscribercreate'),
    path('search/result/', SearchResultView.as_view(), name="searchresult"),
    # authentication and cart
    path('login/', ClientLoginView.as_view(), name='clientlogin'),
    path('logout/', ClientLogoutView.as_view(), name='clientlogout'),
    path('register/', ClientRegistrationView.as_view(), name='clientcreate'),
    path('cart_update',cart_update,name = 'cart_update'),
    path('carts/<int:pk>/items/total/',ClientCartTotalView.as_view(), name='clientcarttotal'),
]
|
2,655 | 753cc532e4d049bacff33c97de4d80bb9ab8ece8 | # Head start.
# ask me for this solution: 6cb9ce6024b5fd41aebb86ccd40d8080
# this line is not needed, just for better output:
from pprint import pprint
# just remove the top line
def count_or_add_trigrams(trigram, trigrams_so_far):
    '''
    Record one occurrence of *trigram* in *trigrams_so_far*.

    Each entry of the list is a [trigram, count] pair.  If a matching pair
    already exists its count is incremented; otherwise a new pair with
    count 1 is appended.  The (mutated) list is returned.
    '''
    for pair in trigrams_so_far:
        if pair[0] == trigram:
            pair[1] += 1
            return trigrams_so_far
    trigrams_so_far.append([trigram, 1])
    return trigrams_so_far
test_trigrams = [
    ['a', 'b', 'c'],
    ['d', 'e', 'f'],
    ['b', 'd', 'e'],
    ['d', 'e', 'f'],
    ['a', 'a', 'a'],
    ['d', 'e', 'f']
]
# Feed each sample trigram through the counter, narrating every step.
trigram_count = []
for sample in test_trigrams:
    print('I have been given this trigram:', end=' ')
    pprint(sample)
    trigram_count = count_or_add_trigrams(sample, trigram_count)
    print('After finishing this operation, my data looks like:')
    pprint(trigram_count)
    print('-------------------------------------------------------------')
print('After doing all test trigrams, this is what I have:')
pprint(trigram_count)
|
2,656 | 4b255b648f67e6bcc30eecc7975bbb1a356b2499 | #到达终点的最小步数 leetcode原题 754 https://leetcode.com/problems/reach-a-number/solution/
# 分情况讨论:到target与到abs(target)的情况是一样的
# 1. total = 1+2+...+k,求total刚好大于等于n的k,可知到达target至少要用k步,此时超出d=total-k
# 2. 如果d为偶数,则只需将d/2步反向即可,k步即可到达target
# 3. 如果d为奇数,则k步不可能到达,因为任何反转都会改变偶数距离,不可能消去d,则再走一步判断d+k+1是否为偶数
# 4. 如果为偶数,说明k+1步可到
# 5. 如果d+k+1为奇数,且已知d为奇数,说明k+1为偶数,不可能在k+1步走到,再走一步,d+k+1+k+2必为偶数,k+2步可到
class Solution(object):
    def reachNumber(self, target):
        """Minimum number of moves to reach *target* on the number line.

        Move i travels exactly i units left or right (LeetCode 754).
        Walk forward until the running total passes abs(target); the parity
        of the overshoot decides how many extra steps are required.
        """
        distance = abs(target)
        steps = 0
        while distance > 0:
            steps += 1
            distance -= steps
        # distance <= 0 here; an even overshoot can be cancelled by flipping
        # the direction of some earlier step(s).
        if distance % 2 == 0:
            return steps
        # Odd overshoot: one extra step fixes parity when steps is even,
        # otherwise two extra steps are needed.
        return steps + 1 + steps % 2
if __name__ == '__main__':
    # BUG FIX: the original called the non-existent method `solution` and
    # passed the raw input string; reachNumber needs an int.
    target = int(input())
    solver = Solution()
    print(solver.reachNumber(target))
2,657 | 6efd22feb4f96de74633276b1ec8550f8d853075 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_register_new_accont(self):
    """Fill in and submit the Wizzair registration form with dummy data.

    Assumes self.cos is a live selenium WebDriver — TODO confirm fixture.
    """
    cos = self.cos
    cos.get("https://wizzair.com/pl-pl#/")
    # BUG FIX: find_element_by_class_name cannot take compound (space-separated)
    # class names, and find_elements_* (plural) returns a list which has no
    # .click(); use a CSS selector on a single element instead.
    cos.find_element_by_css_selector('.navigation__button.navigation__button--simple').click()
    cos.find_element_by_class_name('content__link1').click()

    def fill(field_name, value):
        # Click, clear and type into the input identified by its name attribute.
        # (The original misspelled the API as `find_elemebts_by_name`.)
        field = cos.find_element_by_name(field_name)
        field.click()
        field.clear()
        field.send_keys(value)

    fill('firstName', "Jonasz")
    fill('lastName', "Zsanoj")
    cos.find_element_by_class_name('rf-switch__label').click()
    fill('mobilePhone', '71661234567')
    fill('email', 'Jonasz.Zsanoj@gmail.cooooom')
    fill('password', 'zaq1@WSX')
    cos.find_element_by_css_selector('.rf-input__input.rf-input__input--empty').click()
|
2,658 | 5ee667e8394ccacf83bfe4baec228373619b4edb | import AVFoundation
from PyObjCTools.TestSupport import TestCase
class TestAVAssetSegmentReport(TestCase):
    """Sanity checks for the AVAssetSegment* symbols exposed through PyObjC."""
    def test_enum_types(self):
        # AVAssetSegmentType must be bridged as a proper enum type.
        self.assertIsEnumType(AVFoundation.AVAssetSegmentType)
    def test_constants(self):
        # Pin the exact numeric values of the bridged constants.
        self.assertEqual(AVFoundation.AVAssetSegmentTypeInitialization, 1)
        self.assertEqual(AVFoundation.AVAssetSegmentTypeSeparable, 2)
|
2,659 | 278f0ece7cc2c7bb2ec1a3a2a7401bf3bc09611d | #!/usr/bin/env python
# Copyright (c) 2016, SafeBreach
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import urllib2
import argparse
import time
import datetime
import email.utils
import binascii
import csv
import multiprocessing.pool
####################
# Global Variables #
####################
# Module metadata for the cachetalk tool.
__version__ = "1.0"
__author__ = "Itzik Kotler"
__copyright__ = "Copyright 2016, SafeBreach"
#############
# Functions #
#############
def __wait_till_next_minute():
    """Block until the next UTC minute boundary (used to synchronize peers)."""
    seconds_left = 60 - datetime.datetime.utcnow().second
    time.sleep(seconds_left)
def __calc_delta(expires_field, date_field):
    """Return Expires minus Date as a timedelta (both RFC 2822 date strings)."""
    def parse(field):
        return datetime.datetime(*email.utils.parsedate(field)[:6])
    return parse(expires_field) - parse(date_field)
def __str2bits(string):
    """Return *string*'s bits as a reversed list of '0'/'1' characters.

    Accepts either a '0b...' binary literal or raw text (Python 2 str),
    which is converted via hex to its binary representation.  The list is
    reversed so callers can .pop() bits in transmission order.
    """
    if string.startswith('0b'):
        raw = string[2:]
    else:
        # text -> hex -> int -> binary string, minus the '0b' prefix
        raw = bin(int(binascii.hexlify(string), 16))[2:]
    return list(raw)[::-1]
def main(args):
    """Entry point for the cache covert channel (Python 2).

    A bit is signalled through a shared HTTP cache: the writer refreshes
    the cached object (shrinking the Expires/Date delta) for a 1 and stays
    offline for a 0; the reader polls on the agreed interval and infers the
    bit from the observed delta.  Modes: -t probe a URL, -b batch over a
    CSV of (url, delta, bit) rows, -r read LEN bits, -w write DATA.
    Returns the list of bits read (or re-derived from the written data).
    """
    parser = argparse.ArgumentParser(prog='cachetalk')
    parser.add_argument('url', metavar='URL', type=str, help='dead drop URL')
    parser.add_argument('poll_interval', metavar='SECONDS', nargs='?', type=int,
                        help='polling intervals (i.e. the delta)')
    parser.add_argument('-s', '--always-sync', action='store_true', help='always start on the top of the minute')
    parser.add_argument('-f', '--force-start', action='store_true', help='start immediately without synchronizing')
    parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
    parser.add_argument('-q', '--quiet', action='store_true', help='less output')
    parser.add_argument('-1', '--try-once', action='store_true', help='try to write once and stop')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-w', '--write', nargs=1, type=str, metavar='DATA', help='connect to URL and write DATA')
    group.add_argument('-r', '--read', nargs=1, type=int, metavar='LEN', help='monitor URL and read LEN amount of bits')
    group.add_argument('-t', '--test', action='store_true', help='print HTTP Server Expires and calculate the delta')
    group.add_argument('-b', '--batch', nargs=2, type=str, metavar=('FILE.CSV', 'R|W'), help='In batch mode you can supply a file with a list of URLs, DELTAs, and 1/0\'s')
    args = parser.parse_args(args=args[1:])
    if not args.url.startswith('http'):
        args.url = 'http://' + args.url
    if args.verbose:
        urllib2.install_opener(urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1)))
        urllib2.install_opener(urllib2.build_opener(urllib2.HTTPSHandler(debuglevel=1)))
    # Present a regular desktop-browser User-Agent to the cache.
    req_headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    req = urllib2.Request(args.url, headers=req_headers)
    if args.batch:
        # Batch mode: one recursive main() invocation per CSV row
        # (url, delta, bit), fanned out over a thread pool.
        print "START BATCH MODE"
        pool = multiprocessing.pool.ThreadPool(processes=8)
        threads = []
        batch_mode = args.batch[1].lower()
        results = []
        with open(args.batch[0], 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            for row in csvreader:
                batch_argv = [sys.argv[0], '-1', '-s']
                if batch_mode == 'r':
                    batch_argv.append('-r 1')
                else:
                    batch_argv.append('-w0b' + row[2])
                batch_argv.append(row[0])
                batch_argv.append(row[1])
                print "Calling Thread w/ %s" % (batch_argv[1:])
                threads.append(pool.apply_async(main,(batch_argv,)))
        for result in threads:
            results.append(result.get())
        # That's what happened when you commit code the night before the talk ;-)
        results = reduce(lambda x,y: x+y, map(lambda x: str(x), reduce(lambda x,y: x+y, results)))
        print "END OF BATCH MODE\n\n"
        print ">>> RESULT: %s <<<" % results
    elif args.test:
        # Test-mode
        try:
            http_response = urllib2.urlopen(req)
            http_response.read()
            print '\n' + args.url + ':'
            print "=" * (len(args.url) + 1) + '\n'
            print "Expires equal to: %s" % http_response.headers['Expires']
            print "Date equal to: %s\n" % http_response.headers['Date']
            # Every hit changes Expires? Can't use URL for cache talking ...
            if http_response.headers['Expires'] == http_response.headers['Date']:
                print "NOT GOOD!"
            else:
                print "MAYBE ... (DELTA equals %s)" % __calc_delta(http_response.headers['Expires'],
                                                                   http_response.headers['Date'])
        except TypeError:
            # expires_date = datetime.datetime(*email.utils.parsedate(expires_field)[:6])
            # TypeError: 'NoneType' object has no attribute '__getitem__'
            print "`Expires' Value is Number and not a Date! Can't calculate delta ...\n"
        except KeyError:
            # Maybe it's not Expires?
            print "Can't find `Expires' Header in HTTP Response ...\n"
        except urllib2.HTTPError as e:
            # Connection error
            print "ERROR: %s for %s" % (str(e), args.url)
    else:
        # Write/Read Mode
        first_sync = args.force_start
        bits = []
        if not args.read:
            bits = __str2bits(args.write[0])
            if not args.quiet:
                print "--- INPUT (%s) ---" % args.write[0]
                print ''.join(bits)
                print "--- INPUT = %d BITS --" % (len(bits))
        initial_poll_interval = args.poll_interval
        last_input_bit = -1
        last_poll_interval = -1
        after_fp = False
        sliding_delta = 0
        if args.read:
            # Readers poll slightly later than the agreed interval so the
            # writer has a buffer to refresh the cache first.
            if args.poll_interval < 11:
                sliding_delta = 1
            else:
                sliding_delta = 10
            args.poll_interval = args.poll_interval + sliding_delta
        while True:
            if not first_sync or args.always_sync:
                if not args.quiet:
                    print "[%s]: Synchronizing ..." % time.asctime()
                __wait_till_next_minute()
                first_sync = True
            print "[%s]: Synchronized! Need to sleep another %d second(s) ..." % (time.asctime(), args.poll_interval)
            time.sleep(args.poll_interval)
            print "[%s]: Work time!" % time.asctime()
            observed_delta = None
            if args.read:
                # Read, append bit to bits array depends on the HTTP response
                input_bit = 0
                http_response = urllib2.urlopen(req)
                http_response.read()
                # Negative delta? (Minus sliding_delta, as read interval is always + sliding_delta to give the writer a buffer)
                observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
                if observed_delta.total_seconds() < args.poll_interval - sliding_delta:
                    input_bit = 1
                print "(READING | R#: %d | E: %s | D: %s | D2: %s): BIT %d" % (
                    http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
                    observed_delta.total_seconds(), input_bit)
                if last_input_bit == 0 and input_bit == 1 and last_poll_interval == observed_delta.total_seconds():
                    args.poll_interval = observed_delta.total_seconds()
                    print "*** FALSE POSITIVE! (Ignored; Changed to 0)"
                    bits.append(0)
                    last_input_bit = 0
                    after_fp = True
                else:
                    args.poll_interval = observed_delta.total_seconds() + (sliding_delta + 1)
                    if after_fp:
                        # After False-positive and bit 1? Writer back online!
                        if input_bit == 1:
                            after_fp = False
                        else:
                            # After False-positive and bit 0? It's still False-positive ... Go back to original cycle!
                            args.poll_interval = initial_poll_interval
                    bits.append(input_bit)
                    last_input_bit = input_bit
                last_poll_interval = args.poll_interval - (sliding_delta + 1)
                if len(bits) == args.read[0]:
                    break
            else:
                # Write, pop bit form the bits array
                try:
                    output_bit = bits.pop()
                    if output_bit == '0':
                        # A zero is written by NOT touching the cache this cycle.
                        print "(WRITING | R#: =OFFLINE= | E: =OFFLINE= | D: =OFFLINE=): BIT 0"
                        if len(bits) == 0:
                            break
                        continue
                    while True:
                        http_response = urllib2.urlopen(req)
                        http_response.read()
                        observed_delta = __calc_delta(http_response.headers['Expires'], http_response.headers['Date'])
                        print "(WRITING | R#: %d | E: %s | D: %s | D2: %s): BIT 1" % (
                            http_response.getcode(), http_response.headers['Expires'], http_response.headers['Date'],
                            observed_delta.total_seconds())
                        if observed_delta.total_seconds() != args.poll_interval and not args.try_once:
                            print "*** RETRY!"
                            retry_sleep = observed_delta.total_seconds()
                            if retry_sleep == 0:
                                retry_sleep = 1
                            time.sleep(retry_sleep)
                            continue
                        # Do-while Writer is not aligned w/ Expires
                        break
                    if len(bits) == 0:
                        break
                except IndexError:
                    break
        if not args.quiet:
            print "!!! EOF !!!"
        if not bits:
            bits = __str2bits(args.write[0])
        if not args.quiet:
            print "--- OUTPUT ---"
            print ''.join(map(str, bits))
            print "--- OUTPUT = %d BITS --" % (len(bits))
            print " "
            n = int(''.join(map(str, bits)), 2)
            try:
                print binascii.unhexlify('%x' % n)
            except TypeError:
                # TypeError: Odd-length string if n = 0 or 1
                if len(bits) == 1:
                    pass
                else:
                    raise
        return bits
###############
# Entry Point #
###############
if __name__ == "__main__":
    # NOTE(review): main() returns a list of bits; sys.exit() with a non-empty
    # list prints it and exits with status 1 — confirm this is intended.
    sys.exit(main(sys.argv))
|
2,660 | ec2be72f81d260c491cdc31b68b34401fb49b91e | import copy
import numpy as np
from PySide2.QtCore import QItemSelectionModel, QObject, Signal
from PySide2.QtWidgets import (
QComboBox, QLineEdit, QSizePolicy, QTableWidgetItem
)
from hexrd.constants import chargestate
from hexrd.material import Material
from hexrd.ui.periodic_table_dialog import PeriodicTableDialog
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
from hexrd.ui.thermal_factor_editor import ThermalFactorEditor
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.utils import block_signals
# Column indices of the atom-type table in the site editor UI.
COLUMNS = {
    'symbol': 0,
    'charge': 1,
    'occupancy': 2,
    'thermal_factor': 3
}
DEFAULT_CHARGE = '0'
DEFAULT_U = Material.DFLT_U[0]
# Site occupancy is a fraction in [0, 1].
OCCUPATION_MIN = 0
OCCUPATION_MAX = 1
THERMAL_FACTOR_MIN = -1.e7
THERMAL_FACTOR_MAX = 1.e7
# Conversion between the two thermal-factor conventions: B = 8 * pi**2 * U.
U_TO_B = 8 * np.pi ** 2
B_TO_U = 1 / U_TO_B
class MaterialSiteEditor(QObject):
    """Editor for a single material site.

    Presents the site's fractional coordinates and a table of its atom
    types (symbol, charge, occupancy, thermal factor).  Thermal factors
    can be shown/edited in either U or B convention, and as scalars or
    6-component tensors.  Emits ``site_modified`` after any valid change.
    """
    site_modified = Signal()
    def __init__(self, site, parent=None):
        super().__init__(parent)
        loader = UiLoader()
        self.ui = loader.load_file('material_site_editor.ui', parent)
        self._site = site
        # Per-row widget caches, parallel to the table rows.
        self.charge_comboboxes = []
        self.occupancy_spinboxes = []
        self.thermal_factor_spinboxes = []
        self.update_gui()
        self.setup_connections()
    def setup_connections(self):
        self.ui.select_atom_types.pressed.connect(self.select_atom_types)
        self.ui.thermal_factor_type.currentIndexChanged.connect(
            self.thermal_factor_type_changed)
        for w in self.site_settings_widgets:
            w.valueChanged.connect(self.update_config)
        self.ui.table.selectionModel().selectionChanged.connect(
            self.selection_changed)
        self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
        self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)
    def select_atom_types(self):
        # Let the user pick the set of elements via the periodic table dialog.
        dialog = PeriodicTableDialog(self.atom_types, self.ui)
        if not dialog.exec_():
            return
        self.atom_types = dialog.selected_atoms
    @property
    def site(self):
        return self._site
    @site.setter
    def site(self, v):
        self._site = v
        self.update_gui()
    @property
    def atoms(self):
        return self.site['atoms']
    @property
    def total_occupancy(self):
        return sum(x['occupancy'] for x in self.atoms)
    @property
    def fractional_coords(self):
        return self.site['fractional_coords']
    @property
    def thermal_factor_type(self):
        # Either 'U' or 'B', as selected in the combo box.
        return self.ui.thermal_factor_type.currentText()
    def U(self, val):
        # Take a thermal factor from a spin box and convert it to U
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = 1
        elif type == 'B':
            multiplier = B_TO_U
        else:
            raise Exception(f'Unknown type: {type}')
        return val * multiplier
    def B(self, val):
        # Take a thermal factor from a spin box and convert it to B
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = U_TO_B
        elif type == 'B':
            multiplier = 1
        else:
            raise Exception(f'Unknown type: {type}')
        return val * multiplier
    def thermal_factor(self, atom):
        # Given an atom, return the thermal factor in either B or U
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = 1
        elif type == 'B':
            multiplier = U_TO_B
        else:
            raise Exception(f'Unknown type: {type}')
        return atom['U'] * multiplier
    @property
    def atom_types(self):
        return [x['symbol'] for x in self.site['atoms']]
    @atom_types.setter
    def atom_types(self, v):
        if v == self.atom_types:
            # No changes needed...
            return
        # Reset all the occupancies
        atoms = self.atoms
        # Keep U and charge for symbols that survive the re-selection.
        previous_u_values = {x['symbol']: x['U'] for x in atoms}
        previous_charges = {x['symbol']: x['charge'] for x in atoms}
        atoms.clear()
        for symbol in v:
            # Use previous values if available. Otherwise, use the defaults.
            atom = {
                'symbol': symbol,
                'U': previous_u_values.get(symbol, DEFAULT_U),
                'charge': previous_charges.get(symbol, DEFAULT_CHARGE),
            }
            atoms.append(atom)
        self.reset_occupancies()
        self.update_table()
        self.emit_site_modified_if_valid()
    @property
    def num_rows(self):
        return self.ui.table.rowCount()
    @property
    def selected_row(self):
        # Index of the selected table row, or None when nothing is selected.
        selected = self.ui.table.selectionModel().selectedRows()
        return selected[0].row() if selected else None
    def select_row(self, i):
        if i is None or i >= self.num_rows:
            # Out of range. Don't do anything.
            return
        # Select the row
        selection_model = self.ui.table.selectionModel()
        selection_model.clearSelection()
        model_index = selection_model.model().index(i, 0)
        command = QItemSelectionModel.Select | QItemSelectionModel.Rows
        selection_model.select(model_index, command)
    def selection_changed(self):
        self.update_enable_states()
    def update_enable_states(self):
        # Removal is only allowed when a row is selected and at least one
        # atom type would remain afterwards.
        enable_remove = self.num_rows > 1 and self.selected_row is not None
        self.ui.remove_atom_type.setEnabled(enable_remove)
    def remove_selected_atom(self):
        if self.selected_row is None:
            return
        atom_types = self.atom_types
        del atom_types[self.selected_row]
        self.atom_types = atom_types
    def create_symbol_label(self, v):
        w = QTableWidgetItem(v)
        return w
    def create_charge_combobox(self, charge, symbol):
        cb = QComboBox(self.ui.table)
        if charge not in chargestate[symbol]:
            raise Exception(f'Invalid charge {charge} for {symbol}')
        cb.addItems(chargestate[symbol])
        cb.setCurrentText(charge)
        cb.currentIndexChanged.connect(self.update_config)
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        cb.setSizePolicy(size_policy)
        self.charge_comboboxes.append(cb)
        return cb
    def create_occupancy_spinbox(self, v):
        sb = ScientificDoubleSpinBox(self.ui.table)
        sb.setKeyboardTracking(False)
        sb.setMinimum(OCCUPATION_MIN)
        sb.setMaximum(OCCUPATION_MAX)
        sb.setValue(v)
        sb.valueChanged.connect(self.update_config)
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sb.setSizePolicy(size_policy)
        self.occupancy_spinboxes.append(sb)
        return sb
    def create_thermal_factor_spinbox(self, v):
        sb = ThermalFactorSpinBox(self.ui.table)
        sb.setKeyboardTracking(False)
        sb.setMinimum(THERMAL_FACTOR_MIN)
        sb.setMaximum(THERMAL_FACTOR_MAX)
        sb.setValue(v)
        sb.valueChanged.connect(self.update_config)
        sb.setToolTip('Double-click to open tensor editor')
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sb.setSizePolicy(size_policy)
        self.thermal_factor_spinboxes.append(sb)
        return sb
    def clear_table(self):
        self.charge_comboboxes.clear()
        self.occupancy_spinboxes.clear()
        self.thermal_factor_spinboxes.clear()
        self.ui.table.clearContents()
    def update_gui(self):
        """Push the current site dict into all widgets (signals blocked)."""
        with block_signals(*self.site_settings_widgets):
            for i, w in enumerate(self.fractional_coords_widgets):
                w.setValue(self.fractional_coords[i])
            self.update_total_occupancy()
            self.update_table()
            self.reset_scalar_tensor_toggle()
    def reset_scalar_tensor_toggle(self):
        # The toggle is checked only when every thermal factor is a tensor.
        any_scalars = any(not isinstance(w.value(), np.ndarray)
                          for w in self.thermal_factor_spinboxes)
        with block_signals(self.ui.convert_u_to_tensors):
            self.ui.convert_u_to_tensors.setChecked(not any_scalars)
    def update_table(self):
        """Rebuild the atom table from the site, preserving row selection."""
        prev_selected = self.selected_row
        block_list = [
            self.ui.table,
            self.ui.table.selectionModel()
        ]
        with block_signals(*block_list):
            atoms = self.site['atoms']
            self.clear_table()
            self.ui.table.setRowCount(len(atoms))
            for i, atom in enumerate(atoms):
                w = self.create_symbol_label(atom['symbol'])
                self.ui.table.setItem(i, COLUMNS['symbol'], w)
                w = self.create_charge_combobox(atom['charge'], atom['symbol'])
                self.ui.table.setCellWidget(i, COLUMNS['charge'], w)
                w = self.create_occupancy_spinbox(atom['occupancy'])
                self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)
                v = self.thermal_factor(atom)
                w = self.create_thermal_factor_spinbox(v)
                self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)
            self.update_occupancy_validity()
            if prev_selected is not None:
                select_row = (prev_selected if prev_selected < self.num_rows
                              else self.num_rows - 1)
                self.select_row(select_row)
        # Just in case the selection actually changed...
        self.selection_changed()
    def thermal_factor_type_changed(self):
        self.update_thermal_factor_header()
        self.update_table()
        # Update the text for the tensor toggle as well
        text = f'Convert {self.thermal_factor_type} to tensors'
        self.ui.convert_u_to_tensors.setText(text)
    def update_thermal_factor_header(self):
        w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
        w.setText(self.thermal_factor_type)
    def update_config(self):
        """Pull widget values back into the site dict and notify listeners."""
        for i, w in enumerate(self.fractional_coords_widgets):
            self.fractional_coords[i] = w.value()
        for atom, combobox in zip(self.atoms, self.charge_comboboxes):
            atom['charge'] = combobox.currentText()
        for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
            atom['occupancy'] = spinbox.value()
        for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
            # Stored internally in the U convention regardless of display type.
            atom['U'] = self.U(spinbox.value())
        self.update_total_occupancy()
        self.update_occupancy_validity()
        self.emit_site_modified_if_valid()
    def update_total_occupancy(self):
        self.ui.total_occupancy.setValue(self.total_occupancy)
    def reset_occupancies(self):
        # Distribute a full occupancy of 1.0 evenly over the atom types.
        total = 1.0
        atoms = self.atoms
        num_atoms = len(atoms)
        for atom in atoms:
            atom['occupancy'] = total / num_atoms
        self.update_total_occupancy()
        self.update_occupancy_validity()
    @property
    def site_valid(self):
        return self.occupancies_valid
    @property
    def occupancies_valid(self):
        return self.total_occupancy <= 1.0
    def update_occupancy_validity(self):
        valid = self.occupancies_valid
        color = 'white' if valid else 'red'
        msg = '' if valid else 'Sum of occupancies must be <= 1'
        self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
        self.ui.total_occupancy.setToolTip(msg)
    def emit_site_modified_if_valid(self):
        if not self.site_valid:
            return
        self.site_modified.emit()
    @property
    def fractional_coords_widgets(self):
        return [
            self.ui.coords_x,
            self.ui.coords_y,
            self.ui.coords_z
        ]
    @property
    def site_settings_widgets(self):
        return self.fractional_coords_widgets
    def convert_u_to_tensors(self, b):
        """Convert all thermal-factor spin boxes between scalar and tensor
        representations (b is True for scalar -> tensor)."""
        def scalar_to_tensor(spinbox):
            if isinstance(spinbox.value(), np.ndarray):
                # Already a tensor
                return
            # Diagonal tensor with the scalar on the first three components.
            tensor = np.zeros(6, dtype=np.float64)
            tensor[:3] = spinbox.value()
            spinbox.setValue(tensor)
        def tensor_to_scalar(spinbox):
            value = spinbox.value()
            if not isinstance(value, np.ndarray):
                # Already a scalar
                return
            # Use the previous spinbox value if available
            scalar = spinbox.editor.ui.scalar_value.value()
            if (np.isclose(scalar, 0) and np.allclose(value[:3], value[0]) and
                    np.allclose(value[3:], 0)):
                # If the previous value is zero, and the tensor is diagonal,
                # use the diagonal value
                scalar = value[0]
            spinbox.setValue(scalar)
        f = scalar_to_tensor if b else tensor_to_scalar
        for spinbox in self.thermal_factor_spinboxes:
            f(spinbox)
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
    """Spin box whose value may be a scalar or a 6-component ndarray tensor.

    Tensor values are edited through a ThermalFactorEditor dialog; while a
    tensor is stored, the line edit is read-only and displays 'Tensor'.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.editor = ThermalFactorEditor(0, parent)
        self.setLineEdit(ThermalFactorLineEdit(self, self))
        self.valueChanged.connect(self.update_editor_value)
    def value(self):
        # The editor, not the base spin box, owns the authoritative value.
        return self.editor.value
    def setValue(self, v):
        self.editor.value = v
        if self.editor.is_tensor:
            # Force an update
            super().setValue(super().value())
            self.valueChanged.emit(super().value())
            # Read-only signals tensor mode to the line edit.
            self.setReadOnly(True)
        else:
            super().setValue(v)
            self.valueChanged.emit(v)
            self.setReadOnly(False)
    def update_editor_value(self):
        if not self.editor.is_tensor:
            self.editor.value = super().value()
    def textFromValue(self, value):
        # Show the literal text 'Tensor' instead of a number in tensor mode.
        if not hasattr(self, 'editor') or not self.editor.is_tensor:
            return super().textFromValue(value)
        return 'Tensor'
    def open_editor(self):
        original = copy.deepcopy(self.editor.value)
        if not self.editor.exec_():
            # Dialog was cancelled: restore the pre-edit value.
            self.editor.value = original
            return
        self.setValue(self.editor.value)
class ThermalFactorLineEdit(QLineEdit):
    """Line edit for ThermalFactorSpinBox that opens the tensor editor on
    double-click, or on a single click while read-only (tensor mode)."""
    def __init__(self, spinbox, parent=None):
        super().__init__(parent)
        self.spinbox = spinbox
    def mousePressEvent(self, event):
        if self.isReadOnly():
            # Read-only means a tensor is stored; a single click opens the editor.
            self.open_editor()
            return
        super().mousePressEvent(event)
    def mouseDoubleClickEvent(self, event):
        self.open_editor()
    def open_editor(self):
        self.spinbox.open_editor()
|
2,661 | 254f34c923d49374e09b579c5bc1b17b8c69c0e4 | from dataclasses import dataclass
from models.user import User
class Customer(User):
    """Customer account type; all behaviour and state come from User."""

    def __init__(self, first_name: str, last_name: str, user_name: str, email: str, password: str):
        # Zero-argument super() resolves identically to super(Customer, self).
        super().__init__(first_name, last_name, user_name, email, password)
|
2,662 | ec9de8d54113806ab327f05e077edefa74258adb | #!/usr/bin/env python
import re
class Solution:
    """Aggregates several string transformations into one '::'-joined string.

    NOTE: __new__ returns a plain str, so ``Solution(text)`` yields a string
    rather than a Solution instance (and __init__ is never invoked).
    """
    def __new__(self, p):
        vowel_count, consonant_count, masked = self.count_vowels_consonants(self, p)
        case_swapped = ''.join(ch.lower() if ch.isupper() else ch.upper() for ch in p)
        hyphenated = p.replace(' ', '-')
        summary = '%s %s' % (vowel_count, consonant_count)
        summary += '::' + str(self.reverse_words(case_swapped))
        summary += '::' + hyphenated + '::' + masked
        return summary
    def count_vowels_consonants(self, text):
        """Return (vowels, consonants, masked) where each vowel becomes 'pv'.

        Non-alphabetic characters are counted in neither bucket and are
        dropped from the masked string.
        """
        vowel_letters = ('A', 'E', 'I', 'O', 'U')
        vowels = 0
        consonants = 0
        pieces = []
        for ch in text:
            if not ch.isalpha():
                continue
            if ch.upper() in vowel_letters:
                vowels += 1
                pieces.append('pv')
            else:
                consonants += 1
                pieces.append(ch)
        return (vowels, consonants, ''.join(pieces))
    def reverse_words(word):
        """Return *word* with its space-delimited words in reverse order."""
        return ' '.join(reversed(word.split(' ')))
if __name__ == '__main__':
    # Demo run; Solution.__new__ returns the combined summary string directly.
    sample = Solution('The iterator is just clutter')
    print(sample)
2,663 | 4ff7e83c6e85a041578a8b3471cbbb7e0c2543e6 | # 1-[2-3-4-5]-1
# 순열로 돌리고, 백트래킹으로 걷어내기
def DFS(idx, cost, cur_loc):
    # Branch-and-bound depth-first search over TSP tours that start and end
    # at city 0.  idx = number of cities visited so far, cost = accumulated
    # edge cost, cur_loc = current city.  Uses module globals N (city count),
    # arr (cost matrix, 0 meaning "no edge"), way (visited flags), min_cost.
    global min_cost
    if min_cost < cost: return  # prune: already worse than the best tour found
    if idx == N and arr[cur_loc][0]:
        # All cities visited and an edge back to the start exists.
        if min_cost > cost + arr[cur_loc][0]:
            min_cost = cost + arr[cur_loc][0]
        return
    for i in range(1, N):
        if way[i] or not arr[cur_loc][i] : continue  # visited, or no edge
        way[i] =1
        DFS(idx+1, cost+arr[cur_loc][i], i)
        way[i] = 0  # backtrack
N = int(input())  # number of cities
arr = [list(map(int, input().split())) for _ in range(N)]  # cost matrix; 0 = no edge
way = [0] * N  # visited flags; city 0 is the fixed start and is never marked
# Initial upper bound — presumably assumes each edge cost is <= 100 (TODO confirm).
min_cost = 100 * N
DFS(1, 0, 0)
print(min_cost)
2,664 | 7e7a50cb8e66a71c1df2d61241f8a55c042b7d59 | import os
import sys
import pytest
def run_test(file_name, capture_stdout=True, allure_dir=None):
    """Run a single pytest file with verbose output and Allure reporting.

    Args:
        file_name: path of the test file to execute.
        capture_stdout: when True, pass ``-s`` so test stdout reaches the
            console instead of being captured by pytest.
        allure_dir: base directory for Allure results; defaults to
            "allure-results". Results land in ``<allure_dir>/<file stem>/``.

    Exits the process with pytest's return code (does not return).
    """
    cmd = [
        file_name, "-vvv",
    ]
    if capture_stdout:
        cmd.append("-s")
    test_name = os.path.splitext(os.path.basename(file_name))[0]
    # os.path.join instead of hand-built "%s/%s/" keeps separators portable;
    # normpath collapses any redundant separators, as before.
    alluredir = os.path.normpath(os.path.join(allure_dir or "allure-results", test_name))
    cmd.extend(["--alluredir", alluredir])
    print(cmd)  # echo the final pytest invocation for debugging
    sys.exit(pytest.main(cmd))
|
2,665 | 2b796fb99e4607d310a533e8d9897100c4df087d | class ListNode:
def __init__(self,listt,node,g,h):
self.node_list = []
for element in listt:
self.node_list.append(element)
self.node_list.append(node)
self.g=g
self.f = int(g)+int(h);
self.ID = node
def is_Goal(self,complete_nodes):
if complete_nodes in self.node_list:
return True
return False
|
2,666 | b324c520400f04719b17121b0b4c2d23915e8841 | 5 1
6 1x
1112#Desember@@@@@ |
2,667 | eb6a4170e5427f10eda4d650996c2cbd8a34ca21 | Relevance
Thus, designing an automatic MWP (math word problem) solver with semantic
understanding and inference capability has been considered a crucial step
towards general AI. Solving a math problem manually involves many steps, so an
automatic MWP solver would reduce the effort required.
Attachment final.pdf added.Conversation opened. 1 read message.
Skip to content
Using Gmail with screen readers
jithin
5 of about 62
Code
Inbox
x
jithin p <jithinappu.p6@gmail.com>
Attachments
Tue, 6 Mar, 23:44
to me
Attachments area
import wx
import MySQLdb
import nltk
import string
from string import punctuation
from nltk.corpus import stopwords
from nltk.corpus import wordnet as w
from wx import grid as gr
from itertools import chain
from nltk.corpus.reader import NOUN
from scipy import spatial
import os
import sys
dbc=''  # module-global MySQL connection handle, set by connect_database()
database_name=''  # database chosen in the selection dialog (database_return)
#natural_query=''
query=''  # presumably the last generated SQL string — set elsewhere; verify
path=os.path.dirname(os.path.realpath(sys.argv[0]))  # directory of this script
class MainWindow(wx.Frame) :
    """Application entry frame: title text plus a single 'Translator'
    button that opens the translation window."""
    def __init__(self,parent,id) :
        wx.Frame.__init__(self,parent,id,'Natural Query To SQL Translator',size=(500,400))
        panel = wx.Panel(self)
        panel.SetBackgroundColour(wx.Colour(200,200,225))
        font1 = wx.Font(30, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)
        #name_top = wx.StaticText(panel, -1, "Natural Query To", (42,50), (360,-1),wx.ALIGN_CENTER)
        #name_top.SetFont(font)
        name = wx.StaticText(panel, -1, "Query Translator", (59,50), (360,-1),wx.ALIGN_CENTER)
        name.SetFont(font1)
        font2 = wx.Font(12, wx.DEFAULT, wx.MODERN, wx.FONTWEIGHT_BOLD)
        name.SetForegroundColour('blue')
        translator_button = wx.Button(panel,label="Translator",pos=(160,200),size=(175,60))
        translator_button.SetBackgroundColour(wx.Colour(220,220,230))
        translator_button.SetFont(font2)
        # The one live button: opens the translator window.
        self.Bind(wx.EVT_BUTTON, self.translating_window, translator_button)
        #self.Bind(wx.EVT_BUTTON, self.database_window, translator_button)
        statusbar = self.CreateStatusBar()
        menubar = wx.MenuBar()
        first = wx.Menu()
        second = wx.Menu()
        first.Append(wx.NewId(),"New Window","This is new window")
        second.Append(wx.NewId(),"Open...","Open new window")
        menubar.Append(first,"File")
        menubar.Append(second,"Edit")
        self.SetMenuBar(menubar)
    '''
    def databse_window(self,event):
        dtbase_window = create_databse_window(parent=None,id=-1)
        dtbase_window.Show()
    '''
    def translating_window(self,event):
        # Button handler: open the translator window as a separate frame.
        translate_window = create_translate_window(parent=None,id=-1)
        translate_window.Show()
'''
class create_databse_window(wx.Frame) :
def __init__(self,parent,id) :
wx.Frame.__init__(self,parent,id,'Query Translator',size=(500,300))
self.panel = wx.Panel(self)
self.panel.SetBackgroundColour(wx.Colour(200,200,225))
font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
self.database_name = wx.StaticText(self.panel, -3, "Database Name", (42,85), (360,-1))
self.database_name.SetFont(font)
self.database_name_text = wx.TextCtrl(self.panel, -1, "", pos=(200,75), size=(400,42))
self.natural_query_text.SetInsertionPoint(0)
'''
class create_translate_window(wx.Frame) :
    """Main translator window: connect to MySQL, pick a database, type an
    English query, generate SQL via the ``feature`` pipeline, show results."""
    global dbc
    global database_name
    global natural_query
    global query
    def __init__(self,parent,id) :
        wx.Frame.__init__(self,parent,id,'Query Translator',size=(650,600))
        self.panel = wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(200,200,225))
        font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
        database_connect_button = wx.Button(self.panel, label="Connect", pos=(262,50), size=(120,40))
        database_connect_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.connect_database, database_connect_button)
        database_button = wx.Button(self.panel, label="Select Database", pos=(250,130), size=(150,40))
        database_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.select_database, database_button)
        self.selected_dtname = wx.StaticText(self.panel, -3, "Database", (42,215), (360,-1))
        self.selected_dtname.SetFont(font)
        self.sel_dtname = wx.TextCtrl(self.panel, -1, pos=(207,210), size=(250,-1))
        self.sel_dtname.SetInsertionPoint(0)
        self.natural_query = wx.StaticText(self.panel, -3, "English query", (42,290), (360,-1))
        self.natural_query.SetFont(font)
        self.natural_query_text = wx.TextCtrl(self.panel, -1, pos=(185,280), size=(300,42), style=wx.TE_MULTILINE)
        self.natural_query_text.SetInsertionPoint(0)
        generate_button = wx.Button(self.panel, label="Generate", pos=(265,360), size=(120,40))
        generate_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.generate_query, generate_button)
        self.sql_query = wx.StaticText(self.panel, -3, "SQL query", (42,450), (360,-1))
        self.sql_query.SetFont(font)
        self.sql_query_text = wx.TextCtrl(self.panel, -1, pos=(185,440), size=(300,42), style=wx.TE_MULTILINE)
        self.sql_query_text.SetInsertionPoint(0)
        result_button = wx.Button(self.panel, label="Result", pos=(265,519), size=(120,40))
        result_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.show_result, result_button)
    def connect_database(self,event):
        """Open a MySQL connection (localhost, root, empty password), store it
        in the module-global ``dbc``, and report success/failure in a dialog."""
        global dbc
        try:
            self.dbc=MySQLdb.connect("localhost","root","")
            dbc=self.dbc
            #print dbc
            box=wx.MessageDialog(None,"Connection Established",'Alert',wx.OK)
            ans=box.ShowModal()
            box.Destroy()
        except:
            box=wx.MessageDialog(None,"Error occured while establishing connection",'Alert',wx.OK)
            ans=box.ShowModal()
            box.Destroy()
    #def generate_query(self,event):
    #    t=self.natural_query_text.GetValue()
    #    print t
    def select_database(self,event):
        # Open the database-selection dialog unless one already exists —
        # GetSize() raising (AttributeError) means it was never created.
        #global dbc
        try:
            temp=self.dtbase_window.GetSize()
        except:
            # NOTE(review): create_databse_window is a module-level class, yet
            # it is looked up via ``self.`` here — confirm this resolves.
            self.dtbase_window = self.create_databse_window(parent=None,id=1)
            self.dtbase_window.Show()
            self.dtbase_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)
        #print dbc
    def addDatabase(self,event):
        """Close handler for the selection dialog: copy the chosen database
        name (module-global ``database_name``) into the text field, then
        destroy the dialog."""
        try:
            global database_name
            #print database_name
            self.dt_name=database_name
            self.sel_dtname.SetValue(self.dt_name)
            self.dtbase_window.Destroy()
        except:
            self.dtbase_window.Destroy()
    def generate_query(self,event):
        """Run the ``feature`` pipeline on the typed English query, then show
        the SQL left in the module-global ``query``."""
        global query
        self.n_query_feature_file=[]
        #global natural_query
        t=self.natural_query_text.GetValue()
        self.natural_queryy=t
        #print self.natural_queryy
        self.n_query_feature_file.append(feature(self.natural_queryy))
        #print self.n_query_feature_file
        for f in self.n_query_feature_file:
            f.extract_feature()
            # NOTE(review): feature.csv_file is not defined in the visible
            # part of this file — confirm it exists.
            f.csv_file()
            f.mapping()
        print "query"
        print query
        self.queryy=query
        if len(self.queryy) != 0:
            self.sql_query_text.SetValue(self.queryy)
    def show_result(self,event):
        # Open the result grid window unless it is already open (same
        # GetSize() probe as select_database).
        #global query
        try:
            temp=self.reslt_window.GetSize()
        except:
            # NOTE(review): create_result_window is module-level but accessed
            # via ``self.`` — confirm this resolves.
            self.reslt_window = self.create_result_window(parent=None,id=1)
            self.reslt_window.Show()
        #self.reslt_window.Bind(wx.EVT_CLOSE,self.addDatabase,self.dtbase_window)
class create_databse_window(wx.Frame):
    """Dialog listing the server's databases: 'Refresh' reloads the list via
    SHOW DATABASES; 'Select' issues USE <db> and records the choice in the
    module-global ``database_name``."""
    global dbc
    global database_name
    def __init__(self,parent,id) :
        wx.Frame.__init__(self,parent,id,'Select Database',size=(590,350))
        self.panel = wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(200,200,225))
        font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
        self.sel_dtbase = wx.StaticText(self.panel, -3, "Select Database", (42,100), (360,-1))
        self.sel_dtbase.SetFont(font)
        self.dt_choice=wx.Choice(self.panel,-1,pos=(190,95),size=(250,30))
        self.dt_choice.SetSelection(0)
        refresh_button = wx.Button(self.panel, label="Refresh", pos=(450,95), size=(90,30))
        refresh_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.list_dt_base, refresh_button)
        select_button = wx.Button(self.panel, label="Select", pos=(250,200), size=(95,30))
        select_button.SetFont(font)
        self.Bind(wx.EVT_BUTTON, self.database_return, select_button)
        #t = self.dt_choice.GetSelection()
        #print t
        #print dbc
    def list_dt_base(self,event):
        """Refresh handler: populate the choice control from SHOW DATABASES."""
        global dbc
        global database_name
        #try:
        self.list_dtnames=[]
        self.dbc=dbc
        #print dbc
        cursor=self.dbc.cursor()
        cursor.execute("SHOW DATABASES")
        self.dt_names=cursor.fetchall()
        #print self.dt_names
        for i in self.dt_names:
            #cursor.execute("DESC "+i[0])
            name_t=i[0]
            #t=(i[0],det)
            self.list_dtnames.append(name_t)
            #self.dt_choice.SetItems(name_t)
        #database_name=self.list_dtnames
        self.dt_choice.SetItems(self.list_dtnames)
        #print self.list_dtnames
        #except:
        #    box=wx.MessageDialog(None,"Error occured. Connect database",'Alert',wx.OK)
        #    ans=box.ShowModal()
        #    box.Destroy()
    def database_return(self,event):
        """Select handler: USE the chosen database, publish its name through
        the module-global ``database_name``, and close the dialog."""
        try:
            global dbc
            global database_name
            self.dbc=dbc
            t = self.dt_choice.GetSelection()
            #print self.list_dtnames[t]
            cursor=self.dbc.cursor()
            cursor.execute("USE "+self.list_dtnames[t])
            dt_choose=cursor.fetchall()
            print dt_choose
            database_name=self.list_dtnames[t]
            #self.sel_dtname.SetValue(database_name)
            self.Close()
        except:
            box=wx.MessageDialog(None,"Database no longer exist. Hit the refresh button",'Alert',wx.OK)
            ans=box.ShowModal()
            box.Destroy()
class create_result_window(wx.Frame):
    """Window that executes the generated SQL (module-global ``query``) and
    displays the result set in a wx grid."""
    global dbc
    global database_name
    global query
    def __init__(self,parent,id) :
        wx.Frame.__init__(self,parent,id,'Result',size=(500,600))
        self.panel = wx.Panel(self)
        self.panel.SetBackgroundColour(wx.Colour(200,200,225))
        font = wx.Font(12, wx.DEFAULT, wx.DEFAULT, wx.FONTWEIGHT_NORMAL)
        self.queryy=query
        self.dbc=dbc
        attribute_name=[]
        # Tokenise the SQL to recover the selected column names for the grid
        # header: split on spaces, then split each token on commas.
        t=self.queryy.split(' ')
        tt=[]
        for i in t:
            tt.append(i.split(','))
        print tt
        # NOTE(review): ``s`` stays unbound if the query has no FROM token.
        for i in range(len(tt)):
            if 'FROM' in tt[i]:
                s=i
        #s=tt.index('FROM')
        #if len(tt) > 0:
        #for i in range(len(tt)):
        #    attribute_name.append(tt[i])
        #else:
        # Everything between SELECT and FROM is a column name.
        for i in tt[1:s]:
            for j in i:
                attribute_name.append(j)
        # SELECT *: expand to the table's real column list via DESC.
        if '*' in attribute_name:
            cursor=self.dbc.cursor()
            cursor.execute("DESC "+tt[s+1][0])
            det=cursor.fetchall()
            attribute_name=[]
            for i in range(len(det)):
                attribute_name.append(det[i][0])
        #try:
        cursor=self.dbc.cursor()
        cursor.execute(self.queryy)
        result=cursor.fetchall()
        print result
        n_rows=len(result)
        # NOTE(review): an empty result set raises IndexError on result[0].
        n_cols=len(result[0])
        table=gr.Grid(self.panel, -1, size=(500,600))
        #print attribute_name
        table.CreateGrid(n_rows,n_cols)
        for i in range(len(attribute_name)):
            table.SetColLabelValue(i,attribute_name[i])
        for i in range(len(result)):
            for j in range(len(result[i])):
                table.SetCellValue(i,j,str(result[i][j]))
        #except:
        #print "grid error"
class feature():
global dbc
global database_name
global query
def __init__(self,query):
self.natural_query=query
self.token=nltk.tokenize.word_tokenize(self.natural_query)
print self.token
def extract_feature(self):
global query
self.natural_query_features=[]
self.list1=self.token
#Removing punctuations
remov_p=[]
for i in self.list1:
if i in punctuation:
remov_p.append(self.list1.index(i))
remov_p.reverse()
for j in remov_p[:]:
#print j
del(self.list1[j])
#print self.list2
self.featuress=self.list1
#print self.featuress
#word co-occurrence matrix
self.occurr=[]
self.words=[]
self.list2=self.featuress
for i in self.list2:
if i not in self.words:
self.words.append(i)
w=5
#self.list4 = self.list3
self.occurr_val=[]
for i in range(len(self.list2)):
self.occurr=[0 for x in range(len(self.words)+1)]
self.occurr[0]=self.list2[i]
j=i
#while (j+w) <= (len(self.list1)-1):
if (j+w+1) <= (len(self.list2)-1):
j=j+w+1
else:
j=len(self.list2)
#print "j"
#print j
for k in range(i+1,j):
#print "i"
#print i
#self.occurr_val.append(self.occurr)
self.word=self.list2[k]
try:
for p in range(len(self.words)):
if self.words[p] == self.list2[i]:
ind_row_word=p
if self.list2[k] == self.list2[i]:
occ=w-(k-i-1)
ind=self.words.index(self.word)
#self.occurr[ind]+=occ
self.occurr_val[ind_row_word][ind+1]+=occ
else:
occ=w-(k-i-1)
#print k
ind=self.words.index(self.word)
#self.occurr[ind]+=occ
self.occurr_val[ind_row_word][ind+1]+=occ
except:
if self.list2[k] == self.list2[i]:
occ=w-(k-i-1)
ind=self.words.index(self.word)
self.occurr[ind+1]+=occ
#if k+1 > j-1:
# self.occurr[k]+=0
else:
occ=w-(k-i-1)
#print k
ind=self.words.index(self.word)
self.occurr[ind+1]+=occ
#self.occurr_val.append(self.occurr)
#print self.words[i]
if len(self.occurr_val) != len(self.words):
self.occurr_val.append(self.occurr)
print self.occurr_val
#Postagging
self.list3=self.featuress
tagged_string=nltk.pos_tag(self.list3)
self.featuress=tagged_string
print self.featuress
#Noun clause extracting
self.noun_clause_list=[]
self.list4=self.featuress
for i in range(len(self.list4)):
if self.list4[i][1] == 'NN' or self.list4[i][1] == 'NNS':
self.noun_clause_list.append(self.list4[i][0])
print self.noun_clause_list
'''
#Removing stopwords
self.list5=self.featuress
remov_s=[]
stop_words = set(stopwords.words('english'))
for i in range(len(self.list5)):
if self.list5[i][0] in stop_words:
remov_s.append(i)
#print remov_s
remov_s.reverse()
#print remov_s
for j in range(len(remov_s)):
#print self.list4
t=remov_s[j]
#print self.list5[t]
del(self.list5[t])
#print self.list5
self.featuress=self.list5
print self.featuress
'''
#Finding Cosine-similarity of noun-pro noun
self.list6=self.featuress
self.list7=self.occurr_val
self.list_pro_noun=[]
#self.temp_occ_val=[]
self.occ_values_n_p=[]
for i in range(len(self.list6)):
self.list_noun=[]
if self.list6[i][1] == 'NN' or self.list6[i][1] == 'NNS':
ind=self.words.index(self.list6[i][0])
for j in self.list7[ind][1:]:
self.list_noun.append(j)
for k in self.list7:
self.list_noun.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list6)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list6[j][1] == 'NNP':
ind1=self.words.index(self.list6[j][0])
for l in self.list7[ind1][1:]:
self.list_pro_noun.append(l)
for m in self.list7:
self.list_pro_noun.append(m[ind1+1])
#print self.list_pro_noun
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_noun,self.list_pro_noun)
self.temp_occ_val.append(self.list6[i][0])
self.temp_occ_val.append(self.list6[j][0])
self.temp_occ_val.append(occ_value)
#print occ_value
self.list_pro_noun=[]
self.occ_values_n_p.append(self.temp_occ_val)
self.occ_values_n_p.sort()
#Remove empty lists
del_list=[]
#self.occ_values_n_p
for i in range(len(self.occ_values_n_p)):
if len(self.occ_values_n_p[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list[:]:
print del_list
del(self.occ_values_n_p[j])
#self.occ_values_n_p.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_p
self.occ_values_n_p=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_p.append(j)
print self.occ_values_n_p
#Finding cosine similarity of verb-noun
self.list8=self.featuress
self.list9=self.occurr_val
#self.list_noun=[]
self.list_noun1=[]
#self.temp_occ_val=[]
self.occ_values_v_n=[]
for i in range(len(self.list8)):
self.list_verb=[]
if self.list8[i][1] == 'VB' or self.list8[i][1] == 'VBP':
ind=self.words.index(self.list8[i][0])
for j in self.list9[ind][1:]:
self.list_verb.append(j)
for k in self.list9:
self.list_verb.append(k[ind+1])
#print self.list_verb
for j in range(i+1,len(self.list8)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list8[j][1] == 'NN' or self.list8[j][1]=='NNS' or self.list8[j][1]=='NNP':
ind1=self.words.index(self.list8[j][0])
for l in self.list9[ind1][1:]:
self.list_noun1.append(l)
for m in self.list9:
self.list_noun1.append(m[ind1+1])
#print self.list_noun1
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_verb,self.list_noun1)
self.temp_occ_val.append(self.list8[i][0])
self.temp_occ_val.append(self.list8[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_noun1=[]
self.occ_values_v_n.append(self.temp_occ_val)
self.occ_values_v_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_v_n)):
if len(self.occ_values_v_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_v_n[j])
#self.occ_values_v_n.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_v_n
self.occ_values_v_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_v_n.append(j)
print self.occ_values_v_n
#Finding cosine-similarity of noun-number
self.list10=self.featuress
self.list11=self.occurr_val
#self.list_noun=[]
self.list_number=[]
#self.temp_occ_val=[]
self.occ_values_n_num=[]
for i in range(len(self.list10)):
self.list_noun2=[]
if self.list10[i][1] == 'NN' or self.list10[i][1] == 'NNS':
ind=self.words.index(self.list10[i][0])
for j in self.list11[ind][1:]:
self.list_noun2.append(j)
for k in self.list11:
self.list_noun2.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list10)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list10[j][1] == 'CD':
ind1=self.words.index(self.list10[j][0])
for l in self.list11[ind1][1:]:
self.list_number.append(l)
for m in self.list11:
self.list_number.append(m[ind1+1])
#print self.list_pro_noun
#self.list_pro_noun=[]
occ_value=1-spatial.distance.cosine(self.list_noun2,self.list_number)
self.temp_occ_val.append(self.list10[i][0])
self.temp_occ_val.append(self.list10[j][0])
self.temp_occ_val.append(occ_value)
#print occ_value
self.list_number=[]
self.occ_values_n_num.append(self.temp_occ_val)
self.occ_values_n_num.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_n_num)):
if len(self.occ_values_n_num[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_n_num[j])
#self.occ_values_n_num.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_num
self.occ_values_n_num=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_num.append(j)
print self.occ_values_n_num
#Find cosine-similarity of noun-noun
self.list12=self.featuress
self.list13=self.occurr_val
#self.list_nounN=[]
self.list_nounn=[]
#self.temp_occ_val=[]
self.occ_values_n_n=[]
for i in range(len(self.list12)):
self.list_noun3=[]
if self.list12[i][1] == 'NN' or self.list12[i][1] == 'NNS':
ind=self.words.index(self.list12[i][0])
for j in self.list13[ind][1:]:
self.list_noun3.append(j)
for k in self.list13:
self.list_noun3.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list12)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list12[j][1] == 'NN' or self.list12[j][1] == 'NNS':
ind1=self.words.index(self.list12[j][0])
for l in self.list13[ind1][1:]:
self.list_nounn.append(l)
for m in self.list13:
self.list_nounn.append(m[ind1+1])
occ_value=1-spatial.distance.cosine(self.list_noun3,self.list_nounn)
self.temp_occ_val.append(self.list12[i][0])
self.temp_occ_val.append(self.list12[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_nounn=[]
self.occ_values_n_n.append(self.temp_occ_val)
self.occ_values_n_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_n_n)):
if len(self.occ_values_n_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_n_n[j])
#self.occ_values_n_n.sort(reverse=True)
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_n_n
self.occ_values_n_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_n_n.append(j)
print self.occ_values_n_n
#Find cosine values of wh-noun
self.list15=self.featuress
self.list16=self.occurr_val
self.list_Noun=[]
#self.temp_occ_val=[]
self.occ_values_w_n=[]
for i in range(len(self.list15)):
self.list_wh=[]
if self.list15[i][1] == 'WDT' or self.list15[i][1] == 'WP' or self.list15[i][1] == 'WP$' or self.list15[i][1] == 'WRB':
ind=self.words.index(self.list15[i][0])
for j in self.list16[ind][1:]:
self.list_wh.append(j)
for k in self.list16:
self.list_wh.append(k[ind+1])
#print self.list_noun
for j in range(i+1,len(self.list15)):
self.temp_occ_val=[]
#self.list_pro_noun=[]
if self.list15[j][1] == 'NN' or self.list15[j][1] == 'NNS' or self.list15[j][1] == 'NNP':
ind1=self.words.index(self.list15[j][0])
for l in self.list16[ind1][1:]:
self.list_Noun.append(l)
for m in self.list16:
self.list_Noun.append(m[ind1+1])
occ_value=1-spatial.distance.cosine(self.list_wh,self.list_Noun)
self.temp_occ_val.append(self.list15[i][0])
self.temp_occ_val.append(self.list15[j][0])
self.temp_occ_val.append(occ_value)
#print self.temp_occ_val
self.list_Noun=[]
self.occ_values_w_n.append(self.temp_occ_val)
self.occ_values_w_n.sort()
#Remove empty lists
del_list=[]
for i in range(len(self.occ_values_w_n)):
if len(self.occ_values_w_n[i]) == 0:
del_list.append(i)
del_list.reverse()
for j in del_list:
del(self.occ_values_w_n[j])
#self.occ_values_w_n.sort(reverse=True)
#print self.occ_values_w_n
#Sorting the list
sort_t=[]
sort_tt=self.occ_values_w_n
self.occ_values_w_n=[]
for i in sort_tt:
sort_t.append(i[2])
sort_t.sort(reverse=True)
for i in sort_t:
for j in sort_tt:
if i == j[2]:
self.occ_values_w_n.append(j)
print self.occ_values_w_n
def mapping(self):
global dbc
global database_name
global query
self.dbc=dbc
self.table_names=[]
name_synonyms=[]
syn_set=[]
syn_set_noun_t=[]
self.extract_table_name=[]
self.table_names_t=[]
syn_set_table_t=[]
self.lower_noun=[]
syn_set_table=[]
self.maped_table_names=[]
self.query=[]
self.select_clause='SELECT'
self.from_clause='FROM'
self.where_clause=''
self.nouns=self.noun_clause_list
cursor=self.dbc.cursor()
cursor.execute("SHOW TABLES")
table_name=cursor.fetchall()
#print table_name
#Finding table names
for i in range(len(table_name)):
self.table_names.append(table_name[i][0])
print self.table_names
table_det=[]
cursor=self.dbc.cursor()
for i in range(len(self.table_names)):
cursor.execute("DESC "+self.table_names[i])
det=cursor.fetchall()
t=(self.table_names[i],det)
table_det.append(t)
print table_det
'''
#Finding synonyms and tables
for i in range(len(self.nouns)):
if self.nouns[i] not in self.table_names:
syns=w.synsets(self.nouns[i])
#print syns
#print syns[0].name()
for j in syns:
syn_set=list(chain.from_iterable([j.lemma_names()]))
#print syn_set
for k in range(len(syn_set)):
if syn_set[k] in self.table_names:
self.extract_table_name.append(syn_set[k])
#print "found"
'''
#Converting to lower case
for i in range(len(self.table_names)):
l_name=self.table_names[i].lower()
self.table_names_t.append(l_name)
for j in range(len(self.nouns)):
l_noun=self.nouns[j].lower()
self.lower_noun.append(l_noun)
for i in range(len(self.table_names_t)):
syns_table=w.synsets(self.table_names_t[i],NOUN)
syn_set_table_t=[]
for j in syns_table:
syn_set_table_t.append(list(chain.from_iterable([j.lemma_names()])))
syn_set_table.append(syn_set_table_t)
#print syn_set_table
#print self.table_names_t
#Finding synonyms and tables
for i in range(len(self.lower_noun)):
#lower_case_name=self.noun[i].lower()
if self.lower_noun[i] not in self.table_names_t:
syns_noun=w.synsets(self.nouns[i],NOUN)
#print syns
#print syns[0].name()
for j in syns_noun:
syn_set_noun=list(chain.from_iterable([j.lemma_names()]))
print syn_set_noun
for k in range(len(syn_set_noun)):
for l in range(len(syn_set_table)):
for m in range(len(syn_set_table[l])):
if syn_set_noun[k] in syn_set_table[l][m]:
try:
self.noun_table=self.lower_noun[i]
self.extract_table_name.append(self.table_names[l])
#print self.table_names[l]
#print self.extract_table_name
#print "found"
except:
pass
else:
self.noun_table=self.lower_noun[i]
ind=self.table_names_t.index(self.lower_noun[i])
self.extract_table_name.append(self.table_names[ind])
#print self.extract_table_name
for i in self.extract_table_name:
if i not in self.maped_table_names:
self.maped_table_names.append(i)
#print self.maped_table_names
#print self.noun_table
#Attribute mapping
syn_set_attribute=[]
table_attr=[]
self.extract_table_attr=[]
self.mapped_attr=[]
self.list14=[]
self.from_clause+=' '
self.from_clause+=self.maped_table_names[0]
if len(self.maped_table_names) == 1:
'''
self.list14=self.featuress
for i in range(len(self.list14)):
if self.list14[i][1] == 'WDT' or self.list14[i][1] == 'WP' or self.list14[i][1] == 'WP$' or self.list14[i][1] == 'WRB':
attribute_name=self.occ_values_w_n[0][1]
for i in table_det:
if i[0] == self.maped_table_names[0]:
for j in i[1]:
table_attr.append(j[0])
#print table_attr
syns_attribute=w.synsets(j[0],NOUN)
syn_set_attribute_t=[]
for k in syns_attribute:
syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
syn_set_attribute.append(syn_set_attribute_t)
#print syn_set_attribute
attr_l=attribute_name.lower()
if attr_l not in table_attr:
syns_attr=w.synsets(attr_l,NOUN)
for k in syns_attr:
syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
#print syn_set_attr
for l in range(len(syn_set_attr)):
for m in range(len(syn_set_attribute)):
for n in range(len(syn_set_attribute[m])):
#print syn_set_attr[l]
#print syn_set_attribute[m][n]
if syn_set_attr[l] in syn_set_attribute[m][n]:
#print syn_set_attribute[m][n]
#print m
try:
self.extract_table_attr.append(table_attr[m])
except:
pass
for i in self.extract_table_attr:
if i not in self.mapped_attr:
self.mapped_attr.append(i)
print self.mapped_attr
self.where_clause+=' '
self.where_clause+=self.mapped_attr[0]
self.where_clause+='='
self.where_clause=self.where_clause+"'"+self.list14[i+1][1]+"'"
'''
#attribute_name=self.occ_values_v_n[0][1]
#self.select_clause+=self.self.occ_values_v_n[0]
#print attribute_name
#self.from_clause+=' '
#self.from_clause+=self.maped_table_names[0]
#Converting to lower case
try:
self.list14=self.featuress
for wh in range(len(self.list14)):
if self.list14[wh][1] == 'WDT' or self.list14[wh][1] == 'WP' or self.list14[wh][1] == 'WP$' or self.list14[wh][1] == 'WRB':
self.where_clause+='WHERE'
attribute_name=self.occ_values_w_n[0][1]
print "attribute_name"
print attribute_name
for i in table_det:
if i[0] == self.maped_table_names[0]:
for j in i[1]:
table_attr.append(j[0])
#print table_attr
syns_attribute=w.synsets(j[0],NOUN)
syn_set_attribute_t=[]
for k in syns_attribute:
syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
syn_set_attribute.append(syn_set_attribute_t)
print syn_set_attribute
attr_l=attribute_name.lower()
if attr_l not in table_attr:
syns_attr=w.synsets(attr_l,NOUN)
for k in syns_attr:
syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
print syn_set_attr
for l in range(len(syn_set_attr)):
for m in range(len(syn_set_attribute)):
for n in range(len(syn_set_attribute[m])):
#print syn_set_attr[l]
#print syn_set_attribute[m][n]
if syn_set_attr[l] in syn_set_attribute[m][n]:
#print syn_set_attribute[m][n]
#print m
try:
self.extract_table_attr.append(table_attr[m])
except:
pass
for i in self.extract_table_attr:
#print i
#print self.mapped_attr
if i not in self.mapped_attr:
self.mapped_attr.append(i)
#print "i"
#print i
#print "self.mapped_attr"
#print self.mapped_attr
print "list"
print self.list14
occ_val_temp=0
for val in self.occ_values_n_n:
#print "333333"
if val[0] == self.occ_values_w_n[0][1]:
if val[2] >= occ_val_temp:
occ_val_temp=val[2]
val_temp=val[1]
#print val_temp
#print "val_temp"
for val in self.occ_values_n_num:
#print "333333"
if val[0] == self.occ_values_w_n[0][1]:
if val[2] >= occ_val_temp:
occ_val_temp=val[2]
val_temp=val[1]
#print val_temp
#print "val_temp"
for val in self.occ_values_n_p:
#print "333333"
if val[0] == self.occ_values_w_n[0][1]:
if val[2] >= occ_val_temp:
occ_val_temp=val[2]
val_temp=val[1]
print val_temp
print "val_temp"
print self.mapped_attr[0]
if not self.mapped_attr:
box=wx.MessageDialog(None,"Invalid Attribute name",'Alert',wx.OK)
ans=box.ShowModal()
box.Destroy()
else:
self.where_clause+=' '
self.where_clause+=self.mapped_attr[0]
#print "mapped_attr"
#print self.mapped_attr
#print self.where_clause
#self.where_clause+='='
print self.list14[wh+3][0]
#Finding where clause condition
try:
syn_set_con_t=[]
syn_set_con_q=[]
syn_set_con_q_g=[]
syns_con=w.synsets(self.list14[wh+3][0])
for c in syns_con:
syn_set_con_t=list(chain.from_iterable([c.lemma_names()]))
print syn_set_con_t
syns_q_con=w.synsets('lesser')
for c in syns_q_con:
syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
syns_q_con=w.synsets('below')
for c in syns_q_con:
syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
syns_q_con=w.synsets('lower')
for c in syns_q_con:
syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
syns_q_con=w.synsets('fewer')
for c in syns_q_con:
syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
syns_q_con=w.synsets('smaller')
for c in syns_q_con:
syn_set_con_q.append(list(chain.from_iterable([c.lemma_names()])))
#print "error"
print syn_set_con_q
syns_q_con=w.synsets('greater')
for c in syns_q_con:
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
#print syn_set_con_q_g
syns_q_con=w.synsets('larger')
for c in syns_q_con:
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
syns_q_con=w.synsets('above')
for c in syns_q_con:
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
#print syn_set_con_q_g
syns_q_con=w.synsets('higher')
for c in syns_q_con:
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
#print syn_set_con_q_g
syns_q_con=w.synsets('more')
for c in syns_q_con:
syn_set_con_q_g.append(list(chain.from_iterable([c.lemma_names()])))
#print syn_set_con_q_g
#print "condition entered"
#print self.list14
#print syn_set_con_q
for c in range(len(syn_set_con_t)):
#print syn_set_con_t[c]
for x in range(len(syn_set_con_q)):
#print syn_set_con_q[x]
for y in range(len(syn_set_con_q[x])):
#print "dgfjhdjfhjdhfjhfjdfhjhqqqqqqqq"
#print syn_set_con_t[c]
#print syn_set_con_q[x][y]
if syn_set_con_t[c] in syn_set_con_q[x][y]:
#print syn_set_con_t[c]
try:
print "try"
print self.list14[wh+6][0]
if self.list14[wh+6][0] == 'equal':
#self.where_clause+='<='
condition='<='
print condition
print "condition"
#else:
#self.where_clause+='<'
# condition='<'
except:
condition='<'
#print condition
#print "condition"
#else:
# condition='='
for c in range(len(syn_set_con_t)):
for x in range(len(syn_set_con_q_g)):
for y in range(len(syn_set_con_q_g[x])):
if syn_set_con_t[c] in syn_set_con_q_g[x][y]:
print syn_set_con_q_g[x][y]
print syn_set_con_t[c]
#print self.list14[wh+6][0]
try:
if self.list14[wh+6][0] == 'equal':
#self.where_clause+='<='
condition='>='
#else:
#self.where_clause+='<'
# condition='>'
except:
condition='>'
#print condition
#print "condition"
#else:
# condition='='
if len(condition) < 1:
condition='='
except:
condition='='
#print "condition"
#print condition
self.where_clause+=condition
self.where_clause=self.where_clause+"'"+str(val_temp)+"'"
#print self.list14
#print "where clause"
print self.where_clause
syn_set_attribute=[]
table_attr=[]
self.extract_table_attr=[]
self.mapped_attr=[]
self.list14=[]
attribute_name_t=[]
attribute_name=[]
attr_l=[]
#self.from_clause+=' '
#attribute_name=self.occ_values_v_n[0][1]
for i in self.occ_values_v_n[:]:
attribute_name_t.append(i[1])
print attribute_name_t
#print "attribute_name_t"
print self.noun_table[0]
#print len(attribute_name_t)
if len(attribute_name_t) > 1:
#print "entered"
for i in attribute_name_t:
#print i
if i != self.noun_table:
#print i
attribute_name.append(i)
print attribute_name
#print "ghfggfhgefhgehfehfghefgehfg"
#Removing nouns after wh from attributes list
try:
del_ind=[]
for d in range(len(attribute_name)):
if attribute_name[d] == self.occ_values_w_n[0][1]:
del_ind.append(d)
#del(attribute_name[del_ind])
print attribute_name[d]
del_ind.reverse()
print del_ind
for d in del_ind:
del(attribute_name[d])
except:
print "pass"
pass
#Removing table names if other attributes present
#self.select_clause+=self.self.occ_values_v_n[0]
#print "attribute_name 111"
#print attribute_name
for i in table_det:
if i[0] == self.maped_table_names[0]:
for j in i[1]:
table_attr.append(j[0])
#print table_attr
syns_attribute=w.synsets(j[0],NOUN)
syn_set_attribute_t=[]
for k in syns_attribute:
syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
syn_set_attribute.append(syn_set_attribute_t)
#print syn_set_attribute
for atn in attribute_name:
attr_l.append(atn.lower())
for atn in attr_l:
if atn not in table_attr:
syns_attr=w.synsets(atn,NOUN)
for k in syns_attr:
syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
#print syn_set_attr
for l in range(len(syn_set_attr)):
for m in range(len(syn_set_attribute)):
for n in range(len(syn_set_attribute[m])):
#print syn_set_attr[l]
#print syn_set_attribute[m][n]
if syn_set_attr[l] in syn_set_attribute[m][n]:
#print syn_set_attribute[m][n]
#print m
try:
self.extract_table_attr.append(table_attr[m])
except:
pass
#print "self.extract_table_attr"
#print self.extract_table_attr
if len(self.extract_table_attr) < 1:
#print "fgvfhhfghfjghfjghfu"
select_attr=self.occ_values_v_n[0][1]
#print select_attr
if select_attr == 'details' or select_attr == 'contents' or select_attr == 'detail' or select_attr == 'content':
self.select_clause+=' '
self.select_clause+='*'
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
else:
syns_tb=w.synsets(select_attr,NOUN)
for i in syns_tb:
syns_tbb=list(chain.from_iterable([i.lemma_names()]))
syns_tb_q=w.synsets(self.maped_table_names[0],NOUN)
#print self.maped_table_names[0]
#print syns_tb_q
for i in syns_tb_q:
syns_tbb_q=list(chain.from_iterable([i.lemma_names()]))
#print syns_tbb
#print syns_tbb_q
for i in range(len(syns_tbb)):
#for j in range(len(sysns_tbb_q)):
if syns_tbb[i] in syns_tbb_q:
#print "hgfhg"
self.select_clause+=' '
self.select_clause+='*'
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
break
else:
for i in self.extract_table_attr:
if i not in self.mapped_attr:
self.mapped_attr.append(i)
print self.mapped_attr
self.select_clause+=' '
for i in range(len(self.mapped_attr)):
self.select_clause+=self.mapped_attr[i]
if i < (len(self.mapped_attr)-1):
self.select_clause+=','
#print self.select_clause
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
except:
syn_set_attribute=[]
table_attr=[]
self.extract_table_attr=[]
self.mapped_attr=[]
self.list14=[]
attribute_name=[]
attr_l=[]
#print self.where_clause
#pass
#print "bgjgjshfcjhj"
#for i in self.occ_values_n_n:
#if self.maped_table_names[0] in i:
#print i
#attribute_name=i[1]
#attribute_name=attribute_name.lower()
try:
print "self.noun_table"
print self.noun_table
#attribute_name=self.occ_values_n_n[0][1]
for i in self.occ_values_n_n:
if self.noun_table in i:
for j in i:
if j != self.noun_table and isinstance(j,float) == False:
attribute_name.append(j)
try:
del_ind=attribute_name.index(self.occ_values_w_n[0][1])
del(attribute_name[del_ind])
except:
pass
if attribute_name[0] == 'details' or attribute_name[0] == 'detail' or attribute_name[0] == 'contents' or attribute_name[0] == 'content':
self.select_clause+=' '
self.select_clause+='*'
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
else:
for i in table_det:
if i[0] == self.maped_table_names[0]:
for j in i[1]:
table_attr.append(j[0])
#print table_attr
syns_attribute=w.synsets(j[0],NOUN)
syn_set_attribute_t=[]
for k in syns_attribute:
syn_set_attribute_t.append(list(chain.from_iterable([k.lemma_names()])))
syn_set_attribute.append(syn_set_attribute_t)
#print syn_set_attribute
for atn in attribute_name:
attr_l.append(atn.lower())
for atn in attr_l:
if atn not in table_attr:
syns_attr=w.synsets(atn,NOUN)
for k in syns_attr:
syn_set_attr=list(chain.from_iterable([k.lemma_names()]))
print syn_set_attr
for l in range(len(syn_set_attr)):
for m in range(len(syn_set_attribute)):
for n in range(len(syn_set_attribute[m])):
#print syn_set_attr[l]
#print syn_set_attribute[m][n]
if syn_set_attr[l] in syn_set_attribute[m][n]:
#print syn_set_attribute[m][n]
#print m
try:
self.extract_table_attr.append(table_attr[m])
except:
pass
for i in self.extract_table_attr:
if i not in self.mapped_attr:
self.mapped_attr.append(i)
print self.mapped_attr
self.select_clause+=' '
if not self.mapped_attr:
box=wx.MessageDialog(None,"Invalid Attribute name",'Alert',wx.OK)
ans=box.ShowModal()
box.Destroy()
else :
#self.select_clause+=' '
#self.select_clause+=self.mapped_attr[0]
for i in range(len(self.mapped_attr)):
self.select_clause+=self.mapped_attr[i]
if i < (len(self.mapped_attr)-1):
self.select_clause+=','
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
except:
self.select_clause+=' '
self.select_clause+='*'
self.query=self.select_clause+' '+self.from_clause+' '+self.where_clause
print self.query
query=self.query
def csv_file(self):
#global path
if not os.path.exists(os.path.dirname("matrix/matrix.csv")):
os.makedirs(os.path.dirname("matrix/matrix.csv"))
try:
os.remove("./matrix/matrix.csv")
file1 = open("./matrix/matrix.csv","a+")
except:
file1 = open("./matrix/matrix.csv","a+")
t = ","
for i in self.words:
t += i
t +=","
t+="\n"
file1.write(t)
for l in range(len(self.occurr_val)):
tt=''
for m in range(len(self.occurr_val[l])):
tt+=str(self.occurr_val[l][m])
tt+=','
tt+='\n'
file1.write(tt)
file1.close()
if __name__ == '__main__':
    # Launch the GUI: build the wx application object, show the main
    # frame, then hand control to the wx event loop.
    application = wx.PySimpleApp()
    frame = MainWindow(parent=None, id=-1)
    frame.Show()
    application.MainLoop()
1.txt
Displaying 1.txt.e the manual work and time. As for children and
adults, people are most challenged by word problem solving
not because of their mathematical skills but because of text
comprehension. Often, incorrect answers to word
problems result from correct calculations applied to an incorrect
problem representation.
|
def get_bios_boot_order(self):
    """Return the BIOS/UEFI boot sequence for this system.

    Follows the system resource to its Bios attributes to decide whether
    'UefiBootSeq' or 'BootSeq' applies, then reads the BootSources
    resource and returns {'ret': True, 'entries': [...]} with one dict
    per boot device (Index, Name, Enabled).  On any failed request the
    failed response dict is returned unchanged.
    """
    result = {}
    devices = []

    # Locate the Bios resource URI from the system resource.
    system_resp = self.get_request(self.root_uri + self.systems_uri)
    if system_resp['ret'] is False:
        return system_resp
    result['ret'] = True
    bios_uri = system_resp['data']['Bios']['@odata.id']

    # The boot mode decides which attribute carries the ordered sequence.
    bios_resp = self.get_request(self.root_uri + bios_uri)
    if bios_resp['ret'] is False:
        return bios_resp
    if bios_resp['data']['Attributes']['BootMode'] == 'Uefi':
        seq_attr = 'UefiBootSeq'
    else:
        seq_attr = 'BootSeq'

    sources_resp = self.get_request(self.root_uri + self.systems_uri + '/' + 'BootSources')
    if sources_resp['ret'] is False:
        return sources_resp
    result['ret'] = True
    for entry in sources_resp['data']['Attributes'][seq_attr]:
        devices.append({
            'Index': entry['Index'],
            'Name': entry['Name'],
            'Enabled': entry['Enabled'],
        })
    result['entries'] = devices
    return result
2,669 | 22c0b8c8d598bb91bb2333343aad285bbcb4ee5b | import logging
import search_yelp
import uuid
from apiclient import errors
from google.appengine.api import taskqueue
def insert_worker(mirror_service, food_type=None):
    """Insert a bundle of up to four Yelp restaurant cards into the user's
    Glass timeline via the Mirror API.

    mirror_service -- authenticated Mirror API client.
    food_type -- optional search term forwarded to Yelp.

    NOTE: Python 2 code ('except ..., e' syntax, xrange).
    """
    logging.info('zip1 food_type %s' % food_type)
    # Best effort: use the device's latest location for the Yelp search;
    # fall back to an unlocated search if the Mirror API call fails.
    try:
        location = mirror_service.locations().get(id='latest').execute()
        latlong = '%s,%s' % (location.get('latitude'), location.get('longitude'))
    except errors.HttpError, e:
        latlong = None
    logging.info('location %s' % latlong)
    response = search_yelp.make_request(latlong, term=food_type)
    # Base card body reused (and mutated) for every item in the bundle.
    body = {
        'menuItems': [
            {'action':'DELETE'}
        ]
    }
    is_first = True
    for i in xrange(4):
        #body['bundleId'] = str(uuid.uuid1()).replace('-','')
        # All four cards share one bundle id so Glass groups them.
        body['bundleId'] = 'bundle6'
        #body['bundleId'] = 'bundleId3'
        body['isBundleCover'] = is_first
        if is_first:
            # The first card is the bundle cover with a static header image.
            body['html'] = '<article class=\"photo\">\n<img src=\"https://glasseats.appspot.com/static/images/GlassHomeRestaurantResults.png\" width=\"100%\" height=\"100%\">\n <div class=\"photo-overlay\"/>\n <section>\n</section>\n</article>\n'
        else:
            # Non-cover cards get call/navigate menu actions.
            body['menuItems'] = [
                {'action':'VOICE_CALL'},
                {'action':'NAVIGATE'}
            ]
        # NOTE(review): relies on the ordering of dict.values() of the Yelp
        # response to pick the businesses list -- fragile; verify against
        # search_yelp.make_request's return shape.
        resto = response.values()[2][i]
        # Each field is optional in the Yelp payload, hence the KeyError guards.
        try:
            body['creator'] = {}
            body['creator']['phoneNumber'] = resto['phone']
        except KeyError:
            logging.info('no phone_number')
        try:
            body['location'] = {}
            body['location']['address'] = resto['location']['postal_code']
        except KeyError:
            logging.info('no location')
        try:
            # Swap Yelp's small thumbnail suffix for the large image variant.
            image_url = resto['image_url'].replace('ms.jpg', 'l.jpg')
        except KeyError:
            image_url = None
        try:
            address = resto['location']['display_address'][0] +','+resto['location']['city']
        except KeyError:
            address = ''
        try:
            category = resto['categories'][0][0]
        except KeyError:
            category = ''
        try:
            phone_number = resto['phone']
        except KeyError:
            phone_number = ''
        try:
            rating_url = resto['rating_img_url']
        except KeyError:
            rating_url = ''
        # Build the card HTML; the food_type branches currently emit
        # identical markup in both arms.
        if image_url:
            if food_type:
                body['html'] = '<article class=\"photo\">\n<img src=\"' + image_url + '\" width=\"100%\" height=\"100%\">\n <div class=\"photo-overlay\"/>\n <section>\n <p class=\"align-center text-auto-size\">' + resto['name'] + '<br /><img src=\"'+rating_url+'\" /></p>\n </section>\n</article>\n'
            else:
                body['html'] = '<article class=\"photo\">\n<img src=\"' + image_url + '\" width=\"100%\" height=\"100%\">\n <div class=\"photo-overlay\"/>\n <section>\n <p class=\"align-center text-auto-size\">' + resto['name'] + '<br /><img src=\"'+rating_url+'\" /></p>\n </section>\n</article>\n'
        else:
            if food_type:
                body['html'] = '<article class=\"photo\">\n <div class=\"photo-overlay\"/>\n <section>\n <p class=\"align-center text-auto-size\">' + resto['name'] + '<br /><img src=\"'+rating_url+'\" /></p>\n </section>\n</article>\n'
            else:
                body['html'] = '<article class=\"photo\">\n <div class=\"photo-overlay\"/>\n <section>\n <p class=\"align-center text-auto-size\">' + resto['name'] + '<br /><img src=\"'+rating_url+'\" /></p>\n </section>\n</article>\n'
        is_first = False
        mirror_service.timeline().insert(body=body).execute()
        # Reset per-card content before reusing `body` on the next iteration.
        try:
            del body['html']
        except KeyError:
            pass
        try:
            del body['text']
        except KeyError:
            pass
    logging.info('zip3')
def insert_handler(food_type, user_id):
    """Queue a background task that inserts the yelp bundle into the timeline."""
    task_params = {'user_id': user_id, 'food_type': food_type}
    taskqueue.add(url='/yelp_item', params=task_params)
    return 'The bundle item has been inserted'
|
2,670 | 5e2a8e95af88a582b6e760a53dfd41f880d66963 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Admin.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from qtpandas.views.DataTableView import DataTableWidget
from qtpandas.models.DataFrameModel import DataFrameModel
import pandas as pd
class Ui_Admin(object):
    """Auto-generated (pyuic5) UI definition for the admin back-office
    window: a row of navigation buttons, a qtpandas data table, and a row
    of save/reset/refresh buttons.  Do not hand-edit layout logic -- the
    file header warns regenerated code will overwrite it."""

    def setupUi(self, Admin):
        """Build all widgets/layouts and attach them to the Admin window."""
        Admin.setObjectName("Admin")
        Admin.resize(679, 490)
        self.centralwidget = QtWidgets.QWidget(Admin)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: navigation buttons (menu / user / order / back).
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.menu_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.menu_btn.setFont(font)
        self.menu_btn.setObjectName("menu_btn")
        self.horizontalLayout.addWidget(self.menu_btn)
        self.user_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.user_btn.setFont(font)
        self.user_btn.setObjectName("user_btn")
        self.horizontalLayout.addWidget(self.user_btn)
        self.order_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.order_btn.setFont(font)
        self.order_btn.setObjectName("order_btn")
        self.horizontalLayout.addWidget(self.order_btn)
        self.back = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.back.setFont(font)
        self.back.setObjectName("back")
        self.horizontalLayout.addWidget(self.back)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Central pandas-backed data table.
        self.infoTable = DataTableWidget(self.centralwidget)
        self.infoTable.setObjectName("infoTable")
        self.verticalLayout.addWidget(self.infoTable)
        # Bottom row: save / reset / refresh buttons.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.save = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.save.setFont(font)
        self.save.setObjectName("save")
        self.horizontalLayout_2.addWidget(self.save)
        self.original = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.original.setFont(font)
        self.original.setObjectName("original")
        self.horizontalLayout_2.addWidget(self.original)
        self.fresh = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.fresh.setFont(font)
        self.fresh.setObjectName("fresh")
        self.horizontalLayout_2.addWidget(self.fresh)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        Admin.setCentralWidget(self.centralwidget)
        # Menu bar with a single "help" menu, plus status bar.
        self.menubar = QtWidgets.QMenuBar(Admin)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))
        self.menubar.setObjectName("menubar")
        self.menu_4 = QtWidgets.QMenu(self.menubar)
        self.menu_4.setObjectName("menu_4")
        Admin.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Admin)
        self.statusbar.setObjectName("statusbar")
        Admin.setStatusBar(self.statusbar)
        # Actions; only `help` is attached to a menu below -- the rest are
        # created but unused here (presumably wired elsewhere or leftovers
        # from the Designer file).
        self.update1 = QtWidgets.QAction(Admin)
        self.update1.setObjectName("update1")
        self.add = QtWidgets.QAction(Admin)
        self.add.setObjectName("add")
        self.update2 = QtWidgets.QAction(Admin)
        self.update2.setObjectName("update2")
        self.delete_2 = QtWidgets.QAction(Admin)
        self.delete_2.setObjectName("delete_2")
        self.delete_3 = QtWidgets.QAction(Admin)
        self.delete_3.setObjectName("delete_3")
        self.add_2 = QtWidgets.QAction(Admin)
        self.add_2.setObjectName("add_2")
        self.help = QtWidgets.QAction(Admin)
        self.help.setObjectName("help")
        self.actionAllEmpty = QtWidgets.QAction(Admin)
        self.actionAllEmpty.setObjectName("actionAllEmpty")
        self.menu_4.addAction(self.help)
        self.menubar.addAction(self.menu_4.menuAction())
        self.retranslateUi(Admin)
        QtCore.QMetaObject.connectSlotsByName(Admin)
        # Back the table widget with an (initially empty) DataFrame model.
        self.model=DataFrameModel()
        self.infoTable.setModel(self.model)

    def retranslateUi(self, Admin):
        """Set all user-visible strings (window title, buttons, actions)."""
        _translate = QtCore.QCoreApplication.translate
        Admin.setWindowTitle(_translate("Admin", "后台管理界面"))
        self.menu_btn.setText(_translate("Admin", "菜单管理"))
        self.user_btn.setText(_translate("Admin", "用户管理"))
        self.order_btn.setText(_translate("Admin", "订单信息"))
        self.back.setText(_translate("Admin", "返回登录"))
        self.save.setText(_translate("Admin", "保存数据"))
        self.original.setText(_translate("Admin", "初始化"))
        self.fresh.setText(_translate("Admin", "刷新"))
        self.menu_4.setTitle(_translate("Admin", "帮助"))
        self.update1.setText(_translate("Admin", "update"))
        self.add.setText(_translate("Admin", "add"))
        self.update2.setText(_translate("Admin", "update"))
        self.delete_2.setText(_translate("Admin", "delete"))
        self.delete_3.setText(_translate("Admin", "delete"))
        self.add_2.setText(_translate("Admin", "add"))
        self.help.setText(_translate("Admin", "help"))
        self.actionAllEmpty.setText(_translate("Admin", "AllEmpty"))
|
2,671 | 0846f73482ad86158c3f4e37713d6d965e21d796 | """This file parses vbulletin forums"""
import re
import logging
from BeautifulSoup import BeautifulSoup as bs
import imaget
import pdb
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# HTML comment markers that vBulletin emits around each section of a post's
# markup; each pair is (start, end) and is consumed by extract() below.
date_marker = ["<!-- status icon and date -->", "<!-- / status icon and date -->"]
message_marker = ["<!-- message -->", "<!-- / message -->"]
sig_marker = ["<!-- sig -->", "<!-- / sig -->"]
edit_marker = ["<!-- edit note -->", "<!-- / edit note -->"]
def get_subforums(main_soup):
    """Collect {'name', 'link'} dicts for every subforum link found in the
    active-forum cells of the main forum page soup."""
    results = []
    for cell in main_soup.findAll('td', attrs={'class': 'alt1Active'}):
        for anchor in cell.findAll('a'):
            # A non-absolute href means we've run past the subforum links
            # in this cell, so stop scanning it.
            if not "http" in anchor['href']:
                break
            results.append({'name': anchor.getText(), 'link': anchor['href']})
    return results
def get_threads(subforum_soup):
    """This function gets information on the threads from the subforum page. It also returns the total number of pages"""
    threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links
    #page _ of _
    # Defaults when no pagination control exists or it doesn't parse.
    page = 1
    page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})
    if page_count:
        # The control's text reads "Page <current> of <total>".
        page_count = page_count.getText()
        page_match = re.search(r'(\d+) .+? (\d+)', page_count)
        if page_match:
            page_count = int(page_match.group(2))
            page = int(page_match.group(1))
            logger.debug("get_threads: page_count = %d, page = %d" % (page_count, page))
    else:
        page_count = 1
        page = 1
    # Cells whose title attribute carries "Replies: N" / "Views: N" counts;
    # expected to line up one-to-one with the thread title links above.
    thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \d+?', x)})
    if len(threads) != len(thread_counts):
        logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))
        logger.debug('get_threads: threads = %s' % str(threads))
        logger.debug('get_threads: thread_counts = %s' % str(thread_counts))
    threadlinks = []
    # Pair each thread link with its count cell; min() guards the mismatch
    # case logged above.
    for i in range(min(len(threads), len(thread_counts))):
        t = threads[i]
        c = thread_counts[i]
        # Drop thousands separators before parsing the reply count.
        sanatized = c['title'].replace(',', '')
        # +1 turns the reply count into a post count (replies + opener).
        count = int(re.search(r'.+?: (\d+?) .+?: (\d+?)',sanatized).group(1)) + 1
        text = t.getText()
        link = t['href']
        threadlinks.append({'name':text, 'link':link, 'count':count})
    return threadlinks, (page, page_count)
def get_page(thread_url, pagenum):
    """Return the URL for page *pagenum* of the given thread."""
    return "{0}&page={1}".format(thread_url, pagenum)
def get_posts(page_soup):
page_soup = bs(page_soup)
#page _ of _
page_count = page_soup.find('td', attrs={'class':'vbmenu_control'})
if page_count:
page_count = page_count.getText()
page_match = re.search(r'(\d+) .+? (\d+)', page_count)
if page_match:
page_count = int(page_match.group(2))
page = int(page_match.group(1))
else:
page_count = 1
page = 1
posts = page_soup.findAll('table', attrs={'id':lambda x: x and re.match(r'post', x)})
logging.info('get_post: got %d posts' % len(posts))
post_list = []
for p in posts:
post_link = p.find('a', attrs={'name': lambda x: x and re.match(r'\d+', x)})['href']
post_string = str(p)
raw_message = extract(post_string, message_marker[0], message_marker[1])
date = extract(post_string, date_marker[0], date_marker[1])
date = strip_tags(date).strip()
message = get_message(raw_message)
sig = extract(post_string, sig_marker[0], sig_marker[1])
edit = extract(post_string, edit_marker[0], edit_marker[1])
msg_image_srcs = imaget.get_image_src(raw_message)
if msg_image_srcs: msg_image_srcs = msg_image_srcs[0]
print "message source: "
print msg_image_srcs
print "\n\n\n"
user = get_user(post_string, sig)
post_list.append({'date': date, 'message': message, 'edit': edit, 'message images': msg_image_srcs, 'user': user, 'link': post_link})
return post_list, (page, page_count)
def get_user(post_string, sig = ""):
    """Extract the posting user's details from a post's raw HTML.

    Returns a dict with 'tag', 'name', 'link', 'title', 'join', 'sig' and
    'image'; anonymous posts get a 'guest' placeholder dict instead.
    """
    # The user info column is the first class="alt2" cell of the post table.
    user_tag = bs(post_string).find('td', attrs={'class':'alt2'})
    user_link = user_tag.find('a', attrs={'class':'bigusername'})
    if not user_link: return {'tag': user_tag, 'name': 'guest', 'link': None, 'join': None, 'sig': None, 'image': None, 'title': 'guest'}
    user_name = user_link.getText()
    user_link = user_link['href']
    # Second div in the cell holds the user's custom title.
    user_title = user_tag.findAll('div')[1].getText()
    user_div = user_tag.findAll('div')
    # Walk forward until we hit the stats div (the one holding >= 3 inner
    # divs: join date, posts, ...).  NOTE(review): an unexpected layout
    # would run off the end of user_div and raise IndexError -- verify
    # against the target forum's markup.
    inner_ind = 2
    while len(user_div[inner_ind].findAll('div'))<3:
        inner_ind+=1
    inner_name_soup = user_div[inner_ind].findAll('div')
    # First stats div reads "Join Date: <date>"; strip the label prefix.
    join_date = inner_name_soup[0].getText()[len("Join Date: "):]
    user_image_src = imaget.get_image_src(user_tag, 1)
    return {'tag': user_tag, 'name':user_name, 'link': user_link, 'title': user_title, 'join': join_date, 'sig': sig, 'image': user_image_src}
def get_message(message_str):
    """Return the post message HTML with all <img> and <script> elements removed."""
    soup = bs(message_str)
    # extract() detaches each node from the tree in place.
    for node in soup.findAll('img') + soup.findAll('script'):
        node.extract()
    return str(soup)
def extract(string, start_marker, end_marker):
    """Return the text between start_marker and end_marker, or "" if
    either marker is missing.

    Fix: the end marker is now searched for *after* the start marker.
    Previously both markers were located from the beginning of the
    string, so a stray earlier occurrence of end_marker produced a
    reversed slice and silently returned "".
    """
    start_loc = string.find(start_marker)
    if start_loc == -1:
        return ""
    content_start = start_loc + len(start_marker)
    end_loc = string.find(end_marker, content_start)
    if end_loc == -1:
        return ""
    return string[content_start:end_loc]
def strip_tags(source):
    """Remove HTML/XML tags from *source*, returning the remaining text.

    Fix: uses <[^>]*> instead of the non-greedy <.+?>.  Without re.DOTALL,
    '.' does not match newlines, so tags whose attributes span multiple
    lines were left in the output; '[^>]*' also strips an empty '<>'.
    """
    return re.sub(r'<[^>]*>', '', source)
|
2,672 | c7d9bbdff9148c5d928de66f4406ee8b4e1bcdac | """
Card rarity parameters
"""
from typing import List, Optional
from django.db.models.query import Q
from cards.models.rarity import Rarity
from cardsearch.parameters.base_parameters import (
OPERATOR_MAPPING,
OPERATOR_TO_WORDY_MAPPING,
CardTextParameter,
CardSearchContext,
ParameterArgs,
QueryContext,
QueryValidationError,
)
class CardRarityParam(CardTextParameter):
    """
    The parameter for searching by a card's rarity
    """

    @classmethod
    def get_parameter_name(cls) -> str:
        return "rarity"

    @classmethod
    def get_search_operators(cls) -> List[str]:
        return [":", "=", "<=", "<", ">", ">="]

    @classmethod
    def get_search_keywords(cls) -> List[str]:
        return ["rarity", "r"]

    def get_default_search_context(self) -> CardSearchContext:
        return CardSearchContext.PRINTING

    def __init__(self, negated: bool, param_args: ParameterArgs):
        super().__init__(negated, param_args)
        # Resolved by validate(); None until then.
        self.rarity: Optional[Rarity] = None
        # ":" is shorthand for equality in rarity searches.
        if self.operator == ":":
            self.operator = "="

    def validate(self, query_context: QueryContext) -> None:
        """Resolve self.value to a Rarity matched by symbol or name.

        Raises QueryValidationError when no rarity matches.
        """
        try:
            self.rarity = Rarity.objects.get(
                Q(symbol__iexact=self.value) | Q(name__iexact=self.value)
            )
        except Rarity.DoesNotExist as ex:
            # Chain the original exception ("raise ... from ex") so the
            # cause is preserved instead of the misleading implicit
            # "during handling of the above exception" context (B904).
            raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"') from ex

    def query(self, query_context: QueryContext) -> Q:
        """Build the Django Q filter for this rarity comparison."""
        if self.operator == "=":
            query = Q(rarity=self.rarity)
        else:
            # Ordered comparisons go through the rarity's display_order.
            filter_ = f"rarity__display_order{OPERATOR_MAPPING[self.operator]}"
            query = Q(**{filter_: self.rarity.display_order})
        return ~query if self.negated else query

    def get_pretty_str(self, query_context: QueryContext) -> str:
        """Human-readable description of this parameter, e.g.
        "the rarity is at most rare"."""
        return (
            "the rarity "
            + ("isn't" if self.negated else "is")
            + (
                " " + OPERATOR_TO_WORDY_MAPPING[self.operator]
                if self.operator not in (":", "=")
                else ""
            )
            + f" {self.rarity.name.lower()}"
        )
|
2,673 | a35004e2b306ba1a8649ce66a1612f63a2b6bf39 | import hashlib
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
def hexdigest_sha256(*args):
    """SHA-256 hex digest of the UTF-8 encoded str() of each argument, fed in order."""
    digest = hashlib.sha256()
    for item in args:
        digest.update(str(item).encode('utf-8'))
    return digest.hexdigest()
def get_reply_addr(message_id, dest):
    """Build the per-recipient reply-to address list, or [] when reply
    routing is not configured (no settings.REPLY_EMAIL)."""
    if not hasattr(settings, 'REPLY_EMAIL'):
        return []
    reply_addr = settings.REPLY_EMAIL
    at = reply_addr.find('@')
    local_part = reply_addr[:at]
    domain_part = reply_addr[at:]  # keeps the leading '@'
    # Short HMAC-like key ties the address to this message and recipient.
    key = hexdigest_sha256(settings.SECRET_KEY, message_id, dest.pk)[0:12]
    return ['%s+%s%s%s%s' % (local_part, dest.profile.email_token, message_id, key, domain_part)]
def generate_message_token():
    """Random 60-character token drawn from lowercase letters and digits."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
    return get_random_string(length=60, allowed_chars=alphabet)
def notify_by_email(template, data, subject, sender, dests, message_id, ref=None):
    """Render the given template pair (.txt/.html) and email it to every
    destination user in one connection.

    Each message carries a stable Message-ID header, a References header
    when *ref* names a previous message, and a per-recipient reply-to
    address when reply routing is configured.
    """
    if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):
        data.update({'answering': True})

    text_body = render_to_string('conversations/emails/%s.txt' % template, data)
    html_body = render_to_string('conversations/emails/%s.html' % template, data)
    from_email = '{name} <{email}>'.format(
        name=sender.get_full_name() or sender.username,
        email=settings.DEFAULT_FROM_EMAIL)

    # Generating headers
    headers = {'Message-ID': "<%s.%s>" % (message_id, settings.DEFAULT_FROM_EMAIL)}
    if ref:
        # This email references a previous one
        headers.update({
            'References': '<%s.%s>' % (ref, settings.DEFAULT_FROM_EMAIL),
        })

    messages = []
    for dest in dests:
        if not dest.email:
            continue
        msg = EmailMultiAlternatives(
            subject, text_body, from_email, [dest.email],
            reply_to=get_reply_addr(message_id, dest), headers=headers)
        msg.attach_alternative(html_body, 'text/html')
        messages += [msg]

    # One connection for the whole batch.
    with mail.get_connection() as connection:
        connection.send_messages(messages)
|
2,674 | 7994d9605c8654053c9a85f8d37983da04f8003a | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
# AWS_KEY= os.environ.get('AWS_KEY')
# AWS_SECRET = os.environ.get('AWS_SECRET')
# Default arguments
default_args = {
    'owner': 'shreyak',
    'start_date': datetime(2020, 12, 1),
    'end_date': datetime(2020, 12, 1),
    'depends_on_past': False,
    'retries': 3,
    'retry_delay': timedelta(minutes=5),
    'email_on_retry': False,
}
# Defining DAG
# NOTE: 'catchup' is a DAG-level parameter, not a task default_arg; when it
# sat inside default_args it was silently ignored and the scheduler would
# still backfill.  It is now passed to DAG() directly.
dag = DAG('udac_sparkify_dag',
          default_args=default_args,
          description='Load and transform data in Redshift with Airflow',
          schedule_interval='0 * * * *',
          catchup=False
          )
# Starting operator
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
# Operators to create Staging tables on Redshift (events from JSON logs,
# songs from the song-data tree in S3).
stage_events_to_redshift = StageToRedshiftOperator(
    redshift_id = "redshift",
    aws_id = "aws_credentials",
    schema = "staging_events",
    s3_path="udacity-dend",
    s3_key = "log_data/",
    query_end = "format as json 's3://udacity-dend/log_json_path.json'",
    task_id='Stage_events',
    dag=dag,
)
stage_songs_to_redshift = StageToRedshiftOperator(
    task_id='Stage_songs',
    redshift_id = "redshift",
    aws_id = "aws_credentials",
    schema = "staging_songs",
    s3_path="udacity-dend",
    s3_key = "song_data",
    query_end = "json 'auto' compupdate off region 'us-west-2'",
    dag=dag
)
# Operator to load fact table (append_only=False: table is rebuilt each run)
load_songplays_table = LoadFactOperator(
    task_id='Load_songplays_fact_table',
    redshift_id = "redshift",
    schema = "songplays",
    query = "songplay_table_insert",
    dag=dag,
    append_only = False
)
# Operators to load dimension tables
load_user_dimension_table = LoadDimensionOperator(
    task_id='Load_user_dim_table',
    redshift_id = "redshift",
    schema = "users",
    query = "user_table_insert",
    dag=dag,
    append_only = False
)
load_song_dimension_table = LoadDimensionOperator(
    task_id='Load_song_dim_table',
    redshift_id = "redshift",
    schema = "song",
    query = "song_table_insert",
    dag=dag,
    append_only = False
)
load_artist_dimension_table = LoadDimensionOperator(
    task_id='Load_artist_dim_table',
    redshift_id = "redshift",
    schema = "artist",
    query = "artist_table_insert",
    dag=dag,
    append_only = False
)
load_time_dimension_table = LoadDimensionOperator(
    task_id='Load_time_dim_table',
    redshift_id = "redshift",
    schema = "time",
    query = "time_table_insert",
    dag=dag,
    append_only = False
)
# Operator for quality checks
# NOTE(review): this list says "songplay" while the fact table schema above
# is "songplays" -- confirm which name the check should target.
run_quality_checks = DataQualityOperator(
    task_id='Run_data_quality_checks',
    redshift_id = "redshift",
    tables = ["songplay", "users", "song", "artist", "time"],
    dag=dag
)
# Ending operator
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
# Defining dependencies: stage both sources -> load fact -> load the four
# dimensions in parallel -> quality checks -> stop.
start_operator >> stage_events_to_redshift >> load_songplays_table
start_operator >> stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table >> run_quality_checks
load_songplays_table >> load_user_dimension_table >> run_quality_checks
load_songplays_table >> load_artist_dimension_table >> run_quality_checks
load_songplays_table >> load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
2,675 | d86fd2e6ef5dab4444772192471538842112b3fd | from skimage import data, filters, measure, exposure
from skimage.filters import threshold_mean
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import time
import numpy as np
import healpy as hp
from healpy.projector import CartesianProj
from healpy.projector import MollweideProj
# benjamin.racine@astro.uio.no
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)
# MAX SLIDES ON S: 6 (ID:7)
# Data: 360 degrees longitude, 50-90 latitude
# Dimensions: (480,4320)
# Full map: (2160,4320) need to add 1680
NN = 338 # Identified clouds in N. hemisphere
NS = 438 # Identified clouds in S. hemisphere
nside = 512
npix = 12*nside**2
z = np.zeros((1680, 4320)) # Extra array for full sky array
"""
full
"""
# Velocity-slice window summed along the line of sight below.
c1 = 420
c2 = 475
hdulist = pf.open(path+'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path+'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
# Collapse the chosen velocity slices into one LOS-integrated map per
# hemisphere.  NOTE(review): assumes cube axis 0 is the velocity/slide
# axis -- confirm against the FITS headers.
fullLOSN = Nfull[c1:c2].sum(axis=(0))
fullLOSS = Sfull[c1:c2].sum(axis=(0))
# Add empty array for converting to full sky
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z,fullLOSN), axis=0)
full = fullLOSN + fullLOSS
"""
Add full first
"""
hdulist = pf.open(path+'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path+'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
# LOS of all clouds
LOScloudsN = LOScloudsN.sum(axis=(0))
LOScloudsS = LOScloudsS.sum(axis=(0))
# Add empty array for converting to full sky
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z,LOScloudsN), axis=0)
# Add N and S hemisphere
image_array = LOScloudsN+LOScloudsS
"""
GENERAL
"""
# Find theta and phi coordinates of image
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
# Get pixel positions of full picture
pix = hp.ang2pix(nside, theta, phi)
"""
GENERAL END
"""
# Make healpix map array
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
# put image in healpy map array
healpix_map[pix] = image_array # Magic
#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
For full
"""
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full # Magic
#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
Full end
"""
# Difference map = full emission minus the segmented (above-threshold) part.
le = full_map - healpix_map
# [::-1] flips the pixel ordering for display; zeros marked UNSEEN so
# mollview leaves empty pixels grey.
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
"""
hp.write_map(path+'data/fullHI50.fits',fu, partial=True)
hp.write_map(path+'data/segmentedHI50.fits',se, partial=True)
hp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)
"""
#min = 4.
#max = 350.
# Three stacked Mollweide panels: full, segmented, difference.
hp.mollview(fu,title="Full map +50 GLAT",sub=311)
hp.mollview(se,title="Above threshold (4.0) +50 GLAT", sub = 312)
hp.mollview(ma,title="Diff +50 GLAT",sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)
plt.show()
"""
NX = 4320
NY = 2160
#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image
"""
|
2,676 | f3e654a589cc1c16b36203dd358671d0426556e6 | import os
import requests
from pprint import pprint as pp
from lxml import html
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import datetime
load_dotenv()
class PrometeoAPI:
    """Scraping client for the Prometeo dashboard.

    Logs in with a username/password on construction and exposes the
    account's API key, usage table, and a key-reset action.  All access
    goes through one requests.Session so the login cookies persist.
    """

    def __init__(self, user, pwd):
        self.base_url = 'https://prometeoapi.com'
        self.session = requests.Session()
        # Name-mangled so credentials aren't trivially reachable from outside.
        self.__user = user
        self.__pwd = pwd
        self._login()

    def _generate_csrf_token(self, url):
        '''
        Fetch *url* and return the value of its csrfmiddlewaretoken input,
        which Django requires on every POST to the site.
        '''
        response = self.session.get(url)
        content = response.content
        tree = html.fromstring(content)
        csrf_element = tree.xpath("//input[@name='csrfmiddlewaretoken']")[0]
        csrf = csrf_element.get('value')
        return csrf

    def _login(self):
        '''
        POST the stored credentials to the dashboard login page and, from
        the resulting dashboard HTML, set self.username and self.api_key.
        Raises Exception (with the page's alert text) when the response is
        still the login page, i.e. authentication failed.
        '''
        url = f'{self.base_url}/dashboard/login/'
        csrf = self._generate_csrf_token(url)
        payload = {
            'csrfmiddlewaretoken': csrf,
            'username': self.__user,
            'password': self.__pwd
        }
        response = self.session.request('POST', url, data=payload)
        tree = html.fromstring(response.content)
        page_title_element = tree.xpath("//title")[0]
        page_title = str(page_title_element.text_content()).strip()
        # Still on the login page -> credentials rejected; surface the alert.
        if 'Login - Prometeo' in page_title:
            error = tree.xpath("//div[contains(@class,'alert')]")[0]
            error_msj = self._strip_text(error)
            raise Exception(f'Failed to log into the site, response text: {error_msj}')
        username_element = tree.xpath("//nav//*[contains(@class,'login-info__data')]/p[contains(@class,'text-white')]")[
            0]
        self.username = self._strip_text(username_element)
        api_key_element = tree.xpath("//p[contains(@class,'api-key-field')]")[0]
        self.api_key = self._strip_text(api_key_element)
        # requests_mes_element = tree.xpath("//p[contains(.,'Requests este mes:')]/b")[0]
        # self.requests_mes = str(requests_mes_element.text_content()).strip()

    def get_requests_current_month(self):
        """Return the dashboard's usage table for the current month, or
        None when the endpoint does not answer 200."""
        current_date = datetime.datetime.now()
        request_url = f'{self.base_url}/dashboard/filter_requests/?format=json&month={current_date.month}&user_id=&year={current_date.year}'
        response = self.session.get(request_url)
        if response.status_code == 200:
            json_table = response.json()
            return json_table.get('usage_table')

    def refresh_api_key(self):
        """Ask the dashboard to rotate the API key; stores and returns the
        new key."""
        csrf = self._generate_csrf_token(f'{self.base_url}/dashboard/')
        headers = {'X-CSRFToken': csrf}
        request_url = f'{self.base_url}/dashboard/reset-key/'
        response = self.session.post(request_url, headers=headers)
        self.api_key = response.json().get('api_key')
        return self.api_key

    def _strip_text(self, element):
        # Normalised text content of an lxml element.
        return str(element.text_content()).strip()
if __name__ == '__main__':
    # Demo: credentials come from the environment (populated from .env by
    # load_dotenv() at import time).
    api = PrometeoAPI(user=os.environ.get('PROMETEO_USERNAME'), pwd=os.environ.get('PROMETEO_PASSWORD'))
    print(api.api_key)
    print(api.username)
    # NOTE: refresh_api_key() invalidates the key printed above.
    print(api.refresh_api_key())
    pp(api.get_requests_current_month())
|
2,677 | e89ca4907373318bd55d0833730a30d981414992 | # =============================================================================
# ######################## Creator: Rhys Aeron Williams #######################
# ######################## Last update: 14th March 2019 #######################
# =============================================================================
# =============================================================================
# ####################### IMPORT THE LIBRARIES NECESSARY ######################
# =============================================================================
import cv2
from keras.preprocessing import image
import keras
import numpy as np
# =============================================================================
# ####################### LOAD THE MODEL TO RUN ###############################
# =============================================================================
model_file = "MODELS/5-2-19_v2.h5"
classifier = keras.models.load_model(model_file)
# NOTE(review): load_model raises on failure rather than returning None,
# so this check is effectively always true — confirm intended.
if classifier is not None:
    print('model is loaded from ', model_file)
# =============================================================================
# ######################## USE THE WEBCAM ON THE LAPTOP #######################
# =============================================================================
cap = cv2.VideoCapture(0)
while(1):
# =============================================================================
# ######################## GET THE FRAME FROM WEBCAM ##########################
# =============================================================================
    hasFrame, frame = cap.read()
    if not hasFrame:
        break
# =============================================================================
# ######################## RESIZE THE FRAME AND SAVE ##########################
# =============================================================================
    # Downscale by 4x in each dimension, then round-trip through a JPEG on
    # disk so keras' image loader can resize it to the model's input size.
    h,w,bpp = np.shape(frame)
    dim = (int(w/4), int(h/4))
    frame_2 = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
    cv2.imwrite("frame.jpg", frame_2)
# =============================================================================
# ################## READ THE FRAME AND PUT INTO CLASSIFIER ###################
# =============================================================================
    test_frame = image.load_img('frame.jpg', target_size = (60,40))
    test_frame = image.img_to_array(test_frame)
    # Add a batch dimension: model expects (1, 60, 40, 3).
    test_frame = np.expand_dims(test_frame, axis = 0)
    result = classifier.predict(test_frame)
# =============================================================================
# ####################### GET PREDICITION OF THE FRAME ########################
# =============================================================================
    # Binary classification: class 1 -> 'good' (green), else 'bad' (red).
    if result[0][0] == 1:
        prediction = 'good'
        text_colour = (0,255,0)
    else:
        prediction = 'bad'
        text_colour = (0,0,255)
    cv2.putText(frame, prediction, (5,80),cv2.FONT_HERSHEY_SIMPLEX,
            fontScale = 3, color = text_colour,thickness = 3)
# =============================================================================
# ############################# SHOW FRAMES (VIDEO) ###########################
# =============================================================================
    cv2.imshow('Video', frame)
    # ESC (key code 27) quits the preview loop.
    k = cv2.waitKey(10)
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
2,678 | d025b0719c6eecdfccb2e39a58af7842f4229c72 | from abc import ABC, abstractmethod
class Book:
    """Simple value object holding a book's title and content."""

    def __init__(self, title: str, content: str):
        self.title = title
        self.content = content


class Formatter(ABC):
    """Strategy interface: turn a Book into a displayable string."""

    @abstractmethod
    def format(self, book: Book):
        """Return the string representation of *book* for this strategy."""


class TitleFormatter(Formatter):
    """Formatter that renders only the book's title."""

    def format(self, book: Book) -> str:
        return book.title


class ContentFormatter(Formatter):
    """Formatter that renders only the book's content."""

    def format(self, book: Book) -> str:
        return book.content


class Printer:
    """Client that delegates rendering to an injected Formatter strategy."""

    def get_info(self, book: Book, formatter: Formatter):
        return formatter.format(book)
|
2,679 | c65755d7a58c1cda7d6eea83876e0522a7ca9c74 | # 拆包
t1 = (4,7,3)
# a,b=t1 # ValueError:too many values to unpack(拆包) (expected 3, got 2)
a,b,c = t1
print(a,b,c)
a = t1
print(a)
# x,y,z = (6,) # ValueError: not enough values to unpack (expected 3, got 1)
# s1 = 'hello'
# s2 = s1
# 变量个数与元祖个数不一致
t1 = (12,23,42,12,43)
a,*_,c = t1
print(a,c,_)
a,c,*_ = t1
print(a,c,_)
a,*b,c = t1
print(a,c,b)
t1 = (9,4,9)
a,*b = t1
print(a,b) # *b表示未知个数0~n 0--[] 多个元素的话 ~ [1,2,3,4,5,.....]
print(*b)
'''
字符串
列表
通用
'''
t2 = (9,)
x,*y=t2
print(x,y) # 9,[]
# 添加元素
y.append('a')
y.append('b')
print(y) # ['a','b']
print(*y) # print() print(4,8,6) 4 8 6
'''
元祖:
1.符号(1,2,3) tuple
2.关键字 tuple
3.元祖的元素只能获取,不能增删改
符号:
+
*
is not
in / not in
系统函数:
max()
min()
sum()
len()
sorted() -----> 排序,返回的结果就是列表
tuple() -----> 元祖类型的强制转换
元祖自带函数:
index()
count()
拆装包:
x,*y = (1,2,3,4,5)
print(y)
print(*y)
'''
t2 = (4,5)+(1,2)
print(t2)
t3 = (3,4)*2
print(t3)
print(t2 is t3)
print(3 not in t3)
print(len(t2))
print(sorted(t2))
print(tuple(sorted(t2))) |
2,680 | b10badc172be119be5b2ab8ccc32cc95a0ed1e7a | import cv2
import pdb
import skvideo
import numpy as np
import pandas as pd
from tqdm import tqdm
from harp import fdops
from word2number import w2n
from harp.vid import VidReader
class RegPropData:
    """
    Processes region-proposal data stored in a CSV file.

    The CSV is expected to contain one row per video segment with columns
    including ``W``, ``H``, ``FPS``, ``dur``, ``f0`` (first frame), ``f``
    (number of frames), ``nprops`` and ``props`` (colon-separated list of
    ``w0-h0-w-h`` bounding boxes).
    """
    # DataFrame holding the raw proposal rows loaded from the CSV.
    _df = None
    # Dictionary containing region proposal data properties.
    props = None
    def __init__(self, csv_path):
        """
        Initialize a region proposal data instance.
        Parameters
        ----------
        csv_path : str
            Path to csv file containing proposal information.
        Note
        ----
        It is assumed that the directory containing the proposals
        csv file has `properties_session.cv` file. This file should
        contain information about current session.
        """
        # Checking files
        fdops.check_if_file_exists(csv_path)
        # loading proposal data as a data frame
        self._df = pd.read_csv(csv_path)
        # Dictionary containing proposal properties
        self.props = self._get_properties(csv_path)
    def _get_properties(self, csv_path):
        """
        Creates a dictionary containing properties of proposal data.
        Parameters
        ----------
        csv_path : str
            Path to csv file containing proposal information
        """
        props = {}
        # File properties
        loc, fname, ext = fdops.get_loc_name_ext(csv_path)
        props['loc'] = loc
        props['name'] = fname
        props['ext'] = ext
        # Video properties (each column is expected to hold one unique value;
        # .item() raises if that assumption is violated).
        props['W'] = self._df['W'].unique().item()
        props['H'] = self._df['H'].unique().item()
        props['FPS'] = self._df['FPS'].unique().item()
        props['dur'] = self._df['dur'].unique().item()
        props['vname'] = self._get_video_name(fname)
        # Proposal properties
        props['num_props'] = self._get_num_proposals()
        return props
    def write_proposals_to_video(self, vdir, frms_per_sec=1.0):
        """ Writes proposals to video.
        Parameters
        ----------
        vdir : str
            Directory where we can find video.
        frms_per_sec : float, default 1
            A value of 0.5 means that we will skip
            `FPS x 1/(frms_per_sec) = 60` frames
        """
        # Input video
        vid_name = self.props['vname']
        vfpath = fdops.get_files_with_kws(vdir, [vid_name, ".mp4"])
        if len(vfpath) > 1:
            raise Exception(f"More than one video found\n\t{vfpath}")
        vin = VidReader(vfpath[0])
        # Output video
        ovid_path = f"{self.props['loc']}/{self.props['name']}.mp4"
        vw = skvideo.io.FFmpegWriter(
            ovid_path,
            outputdict={'-vcodec': 'libx264','-r':'30'}
        )
        # Calculate frame numbers (POC) that we will use.
        f0_start = 0                            # starting frame poc
        f0_end = vin.props['num_frames'] - 1    # ending frame poc
        f0_skip = vin.props['frame_rate']*(1/frms_per_sec)
        f0s = list(range(f0_start, f0_end, int(f0_skip)))
        # Loop over each frame number and draw proposal regions over them
        for f0 in tqdm(f0s):
            frm = vin.get_frame(f0, c='bgr')
            # Get proposals for frame f0
            props = self._get_proposals_for_frame(f0)
            # Proposals loop.
            # BUG FIX: draw onto `frm` and write `frm`. The old code assigned
            # the rectangle result to a separate `frame` variable, which was
            # undefined (NameError) when the very first frame had no
            # proposals and otherwise wrote a stale frame for proposal-less
            # frames.
            for p in props:
                if len(p) > 0:
                    w0, h0, w, h = p
                    frm = cv2.rectangle(
                        frm, (w0, h0), (w0+w, h0+h), (0, 256, 0), 1
                    )
            # Write frame to output (FFmpegWriter expects RGB)
            vw.writeFrame(cv2.cvtColor(frm, cv2.COLOR_BGR2RGB))
        vw.close()
        vin.release()
        # BUG FIX: removed leftover debug statement `import sys; sys.exit()`
        # which terminated the whole interpreter after writing the video.
    def _get_proposals_for_frame(self, fn):
        """
        Returns a list of proposal regions ([w0, h0, w, h] int lists)
        covering frame number *fn*; empty list when no row covers it.
        Parameters
        ----------
        fn : int
            Frame number
        """
        # Get dataframe row that contains fn. It should have only one row.
        tdf = self._df.copy()                # lower bound
        tdf['f1'] = (tdf['f0']               # creating column
                     + tdf['f'] - 1)         # with last frame
        df = tdf[fn >= tdf['f0']]
        df = df[fn <= df['f1']]
        if len(df) == 0:
            return []
        if len(df) > 1:
            # BUG FIX: removed leftover `pdb.set_trace()` debugger break.
            raise Exception("USER_ERROR: proposals csv is fishy\n"
                            f"{df}")
        # Proposal string to list of int coordinates.
        prop_list = df['props'].item().split(":")
        # BUG FIX: always return the accumulated list; the old guard
        # `if len(prop_list) > 0` could fall through and return None.
        props = []
        for p in prop_list:
            coords = p.split("-")
            if len(coords) == 4:
                props += [[int(x) for x in coords]]
        return props
    def _get_video_name(self, fname):
        """ Returns video name by parsing csv file name
        Parameters
        ----------
        fname : str
            Name of csv file having proposals
        """
        # Video name is everything up to and including the "30fps" token.
        csv_name_split = fname.split("_")
        thirty_fps_loc = csv_name_split.index("30fps")
        video_name = "_".join(csv_name_split[0:thirty_fps_loc+1])
        return video_name
    def _get_num_proposals(self):
        """ Returns total number of proposals across all rows.
        """
        total_props = self._df['nprops'].sum()
        return total_props
|
2,681 | dd8f4b08b88d487b68e916e9f92c08c9c0bc39da | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3
# Open the SQLite database file (created if it does not exist).
connection = sqlite3.connect('app.db')
# Obtain a cursor for running the inspection queries below.
cur = connection.cursor()
# List every table defined in the database.
cur.execute("select name from sqlite_master where type = 'table' order by name")
print("Tables name:", cur.fetchall())
# Describe the columns of the `user` table.
cur.execute('PRAGMA table_info(user)')
print("Table structure:", cur.fetchall())
# Dump every record stored in the `user` table.
cur.execute('select * from user')
print("Table record:", cur.fetchall())
# Release the cursor and the connection.
cur.close()
connection.close()
|
2,682 | 0f54853901a26b66fe35106593ded6c92785b8db | import asyncio
import logging
from datetime import datetime
from discord.ext import commands
from discord.ext.commands import Bot, Context
from humanize import precisedelta
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy_utils import ScalarListException
from config import CONFIG
from models import Reminder, db_session
from utils import (
DateTimeConverter,
get_database_user,
get_database_user_from_id,
get_name_string,
user_is_irc_bot,
)
LONG_HELP_TEXT = """
Add reminders for yourself or remove the last one you added.
"""
SHORT_HELP_TEXT = """Add or remove reminders."""
async def reminder_check(bot):
    """Background task: periodically deliver reminders that have come due.

    Every CONFIG.REMINDER_SEARCH_INTERVAL seconds, queries the database for
    reminders whose trigger time has passed and that have not fired yet,
    sends each one back to its original channel, and marks it triggered.
    Runs until the bot is closed.
    """
    await bot.wait_until_ready()
    while not bot.is_closed():
        now = datetime.now()
        reminders = (
            db_session.query(Reminder)
            .filter(Reminder.trigger_at <= now, Reminder.triggered == False)  # noqa 712
            .all()
        )
        for r in reminders:
            # IRC-bridged users are addressed by stored name; Discord users
            # by a mention built from their user id.
            if r.irc_name:
                display_name = r.irc_name
            else:
                author_uid = r.user.user_uid
                display_name = f"<@{author_uid}>"
            channel = bot.get_channel(r.playback_channel_id)
            message = f"Reminding {display_name}: " + r.reminder_content
            await channel.send(message)
            # Mark as delivered so it is not sent again on the next pass.
            r.triggered = True
            db_session.commit()
        await asyncio.sleep(CONFIG.REMINDER_SEARCH_INTERVAL)
class Reminders(commands.Cog):
    """Cog that lets users schedule reminders and plays them back when due."""
    def __init__(self, bot: Bot):
        self.bot = bot
        # Start the background poller that delivers due reminders.
        self.bot.loop.create_task(reminder_check(self.bot))
    @commands.group(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT)
    async def reminder(self, ctx: Context):
        # Group entry point: only reached when no subcommand matched.
        if not ctx.invoked_subcommand:
            await ctx.send("Subcommand not found.")
    @reminder.command(
        help='Add a reminder, format "yyyy-mm-dd hh:mm" or "mm-dd hh:mm" or hh:mm:ss or hh:mm or xdxhxmxs or any ordered combination of the last format, then finally your reminder (rest of discord message).'
    )
    async def add(
        self, ctx: Context, trigger_time: DateTimeConverter, *, reminder_content: str
    ):
        """Validate the requested trigger time and persist a new reminder."""
        now = datetime.now()
        if not trigger_time:
            await ctx.send("Incorrect time format, please see help text.")
        elif trigger_time < now:
            await ctx.send("That time is in the past.")
        else:
            # HURRAY the time is valid and not in the past, add the reminder
            display_name = get_name_string(ctx.message)
            # set the id to a random value if the author was the bridge bot, since we wont be using it anyways
            # if ctx.message.clean_content.startswith("**<"): <---- FOR TESTING
            if user_is_irc_bot(ctx):
                author_id = 1
                irc_n = display_name
            else:
                author_id = get_database_user(ctx.author).id
                irc_n = None
            trig_at = trigger_time
            trig = False
            playback_ch_id = ctx.message.channel.id
            new_reminder = Reminder(
                user_id=author_id,
                reminder_content=reminder_content,
                trigger_at=trig_at,
                triggered=trig,
                playback_channel_id=playback_ch_id,
                irc_name=irc_n,
            )
            db_session.add(new_reminder)
            try:
                db_session.commit()
                await ctx.send(
                    f"Thanks {display_name}, I have saved your reminder (but please note that my granularity is set at {precisedelta(CONFIG.REMINDER_SEARCH_INTERVAL, minimum_unit='seconds')})."
                )
            except (ScalarListException, SQLAlchemyError) as e:
                # Roll back the failed transaction and report the failure.
                db_session.rollback()
                logging.exception(e)
                # Fixed: dropped a redundant f-prefix (no placeholders).
                await ctx.send("Something went wrong")
def setup(bot: Bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(Reminders(bot))
|
2,683 | 23b2cc5b561a11ae7757a281a141491d5b7e23ca | from discord.ext import commands
def is_owner(owner_id: int = 98208218022428672):
    """Command check that passes only for the configured bot owner.

    Parameters
    ----------
    owner_id : int, optional
        Discord user id allowed to run the decorated command. Defaults to
        the bot owner's id (previously hard-coded inside the predicate),
        so existing ``@is_owner()`` call sites keep working unchanged.
    """
    async def predicate(ctx):
        return ctx.author.id == owner_id
    return commands.check(predicate)
class Staff(commands.Cog):
    """Cog with commands restricted to bot staff (owner only)."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(
        name='stop',
        aliases=['shutdown'],
        description='This is a command for staff only to stop the bot'
    )
    @is_owner()
    async def stop_bot(self, ctx):
        """Shutdown the bot"""
        # Acknowledge in-channel before closing the gateway connection.
        await ctx.send('Oh, alright... I\'ll just shutup I guess.. :wave:')
        await self.bot.close()
|
2,684 | 05e468c2f64e33d6b390f681314ed7961bd4def7 | import time
import datetime
import math
import os
import random
import logzero
import logging
from logzero import logger
from sense_hat import SenseHat
import ephem
# Global flag set True by any exception handler; logged at the end of the run.
anyException = False
# Total program runtime in minutes (2 h 55 min), kept here for easy access.
programTime = 175
# ____________________________
# DEFINE FUNCTIONS
# ____________________________
def setLoggingFile():
    '''
    Set up logzero logging to a data0N.csv file next to this script.

    Picks the first data0N.csv (N = 1..4) that does not exist yet; once
    data01..data04 all exist it wraps around and overwrites data01.csv.
    On any error it falls back to data01.csv.
    '''
    # BUG FIX: compute dirPath before the try block so it is always bound
    # when building the logfile path below (previously an exception inside
    # the try left `dirPath` undefined, crashing at logzero.logfile()).
    dirPath = os.path.dirname(os.path.realpath(__file__))
    # BUG FIX: default the filename up front so an empty directory listing
    # (loop body never runs) no longer leaves `nameOfFile` unbound.
    nameOfFile = 'data01.csv'
    try:
        # set dir filenames
        dirFiles = os.listdir(dirPath)
        for itemNr in range(len(dirFiles)):
            nameOfFile = 'data0' + str(itemNr + 1) + ".csv"
            if nameOfFile == 'data05.csv':
                # All four slots used: wrap around and overwrite the first.
                nameOfFile = 'data01.csv'
                break
            if nameOfFile in dirFiles:
                print('this file exsist' + str(nameOfFile))
            else:
                # Found a free slot.
                break
    # Handle the Exception
    except Exception as dummy:
        # change global variable anyException to True, it will be logged at the end of run
        global anyException
        anyException = True
        # set namefile to default one
        nameOfFile = 'data01.csv'
    # set logfile and custom formatter
    logzero.logfile(dirPath + "/" + nameOfFile)
    print(dirPath + "/" + nameOfFile)
    formatter = logging.Formatter('_%(levelname)s_,line: %(lineno)d, %(message)s')
    logzero.formatter(formatter)
def isItOversized():
    '''
    Return True when the files in this script's directory together exceed
    3 GB, False otherwise (and False on any error, best-effort).
    '''
    try:
        # set dirPath
        dirPath = os.path.dirname(os.path.realpath(__file__))
        # set dir filenames
        dirFiles = os.listdir(dirPath)
        # add all files size to variable
        filesSize = 0
        for file in dirFiles:
            # BUG FIX: stat the entry inside dirPath. The old bare
            # `os.stat(file)` resolved the name against the current working
            # directory and raised (silently returning False) whenever the
            # process was started from a different directory.
            filesSize += os.stat(os.path.join(dirPath, file)).st_size
        # 3221225472 bytes == 3 GB
        return filesSize >= 3221225472
    # Handle the exception as default => not oversized
    except Exception as e_oversizedFun_ecxeption:
        global anyException
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_oversizedFun_ecxeption))
        pictures('error')
        return False
def measure(whatToMeasure):
    '''
    Measure temperature ('temp'), humidity ('hum') or pressure ('press')
    with the SenseHat.

    Takes 10 readings 0.2 s apart, retrying each reading while it comes
    back as exactly 0 (treated as corrupted). After 5 corrupted readings
    in total, the string 'ERROR' is returned instead of a number;
    otherwise the rounded average of the readings is returned.
    '''
    # reset accumulators and the corrupted-reading counter
    temp = 0
    hum = 0
    press = 0
    failed = 0
    try:
        # TEMPERATURE
        # Our code does 10 measurements
        # ignore the 0 - corrupted
        # and return the average result of measurements
        # if 5 measurements will be corrupted it will return an error
        if whatToMeasure == 'temp':
            print('TEMP MEASURE:')
            for dummy in range(10):
                # sleep between measurements
                time.sleep(0.2)
                while failed<5:
                    # get Temperature from SenseHat
                    tempNow=sh.get_temperature()
                    print(tempNow)
                    # if measured temp is okay break (while failed<5) loop
                    # always measured temp give us floats with decimals
                    # so if temp will be close to 0 measured temp will be for example 0.2312
                    # so if measured temp is equal to 0 without decimals measured temp is corrupted
                    if(tempNow != 0):
                        break
                    else:
                        # if cant get temperature (temp=0) add 1 to failed variable
                        failed+=1
                # NOTE(review): once failed reaches 5 the while body no longer
                # runs, so the last tempNow value is re-added on remaining
                # iterations; harmless because 'ERROR' is returned below.
                temp+=tempNow
            if(failed<5):
                # count the average data and round
                temp/=10
                temp=round(temp,2)
                print('MEASURED TEMP IS: '+str(temp))
            else:
                # error is returned as string because it will be displayed on screen (showInfo function)
                temp = 'ERROR'
            # return measured temp
            return temp
        # HUMIDITY
        # same as temp but we measure humidity
        if whatToMeasure == 'hum':
            print('HUM MEASURE:')
            for dummy in range(10):
                time.sleep(0.2)
                while failed<5:
                    humNow=sh.get_humidity()
                    print(humNow)
                    if(humNow != 0):
                        break
                    else:
                        failed+=1
                hum+=humNow
            if(failed<5):
                hum/=10
                hum=round(hum,2)
                print('MEASURED HUM IS: '+str(hum))
            else:
                hum = 'ERROR'
            return hum
        # PRESSURE
        # same as temp but we measure pressure
        if whatToMeasure == 'press':
            print('PRESS MEASURE:')
            for dummy in range(10):
                time.sleep(0.2)
                while failed<5:
                    pressNow=sh.get_pressure()
                    print(pressNow)
                    if(pressNow != 0):
                        break
                    else:
                        failed+=1
                press+=pressNow
            if(failed<5):
                press/=10
                press=round(press,2)
                print('MEASURED PRESS IS: '+str(press))
            else:
                press = 'ERROR'
            return press
    # handle exception and log it, display error image on screen
    except Exception as e_measure_eception:
        global anyException
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_measure_eception))
        pictures('error')
        return "ERROR"
def pictures(idImg):
    '''
    Display an image or short animation on the SenseHat 8x8 LED matrix.

    idImg selects what is shown: 'welcome', 'temp', 'hum', 'press',
    'wait', 'end', 'error', 'reset'. Any other value (after an animation)
    leaves a solid green "working" screen. Returns "ERROR" on failure.
    '''
    try:
        # Set display rotation on 0 deg
        rot = 0
        sh.set_rotation(rot)
        # Define some colors - keep brightness low
        r = [50,0,0]
        g = [0,50,0]
        b = [0,0,50]
        p = [50,0,50]
        o = [0,0,0]
        w = [50,50,50]
        orientation = [0,90,180,270]
        # Define the 8x8 images (one color triple per LED, row by row)
        welcome_img = [
        o,o,w,w,w,w,o,o,
        o,w,w,w,w,w,w,o,
        w,w,w,w,w,w,w,w,
        w,w,w,w,w,w,w,w,
        r,r,r,r,r,r,r,r,
        r,o,r,r,r,r,o,r,
        o,r,r,r,r,r,r,o,
        o,o,r,r,r,r,o,o,
        ]
        wait_img = [
        g,g,g,g,g,g,g,g,
        o,g,o,o,o,o,g,o,
        o,o,g,o,o,g,o,o,
        o,o,o,g,g,o,o,o,
        o,o,o,g,g,o,o,o,
        o,o,g,g,g,g,o,o,
        o,g,g,g,g,g,g,o,
        g,g,g,g,g,g,g,g,
        ]
        # Thermometer frames: the blue "mercury" level differs per frame.
        temp_img1 = [
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img2 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img3 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img4 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img5 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,b,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        temp_img6 = [
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,o,r,r,r,o,o,
        o,o,r,b,b,b,r,o,
        o,o,r,b,b,b,r,o,
        o,o,o,r,r,r,o,o,
        ]
        # Falling raindrop frames for the humidity animation.
        hum_img1 = [
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        o,b,b,b,b,b,b,o,
        o,o,b,b,b,b,o,o,
        o,o,o,b,b,o,o,o,
        ]
        hum_img2 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        o,b,b,b,b,b,b,o,
        o,o,b,b,b,b,o,o,
        ]
        hum_img3 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        o,o,b,b,b,b,o,o,
        o,b,b,b,b,b,b,o,
        b,b,b,b,b,b,b,b,
        ]
        hum_img4 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,b,o,o,o,
        o,o,o,b,b,o,o,o,
        o,o,b,b,b,b,o,o,
        b,b,b,b,b,b,b,b,
        ]
        hum_img5 = [
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        ]
        # Arrow-toward-wall frames for the pressure animation.
        press_img1 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img2 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        p,p,o,o,o,o,o,p,
        p,p,o,o,o,o,o,p,
        p,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img3 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,p,o,o,o,o,o,p,
        p,p,p,o,o,o,o,p,
        p,p,p,o,o,o,o,p,
        o,p,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img4 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,p,o,o,o,o,p,
        p,p,p,p,o,o,o,p,
        p,p,p,p,o,o,o,p,
        o,o,p,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img5 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,p,o,o,o,p,
        p,p,p,p,p,o,o,p,
        p,p,p,p,p,o,o,p,
        o,o,o,p,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img6 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,p,o,o,p,
        p,p,p,p,p,p,o,p,
        p,p,p,p,p,p,o,p,
        o,o,o,o,p,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img7 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,p,o,p,
        p,p,p,p,p,p,p,p,
        p,p,p,p,p,p,p,p,
        o,o,o,o,o,p,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img8 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,p,p,
        o,p,p,p,p,p,p,p,
        o,p,p,p,p,p,p,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img9 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,p,p,p,p,p,p,
        o,o,p,p,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img10 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,p,p,p,p,p,
        o,o,o,p,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img11 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,p,p,p,p,
        o,o,o,o,p,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img12 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,p,p,p,
        o,o,o,o,o,p,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img13 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,p,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        press_img14 = [
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        o,o,o,o,o,o,o,p,
        ]
        working_array= [
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        g,g,g,g,g,g,g,g,
        ]
        error_array=[
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        r,r,r,r,r,r,r,r,
        ]
        end_array=[
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        b,b,b,b,b,b,b,b,
        ]
        reset_array=[
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        o,o,o,o,o,o,o,o,
        ]
        # routes to display diffrent images
        if idImg == 'welcome':
            sh.show_message('Welcome to PAPi', text_colour = w, scroll_speed=0.05)
            #This is a quote from one of a 'Country balls comics'
            sh.show_message('Poland can ', text_colour = w, scroll_speed=0.05)
            sh.show_message('into space!', text_colour = r, scroll_speed=0.05)
            #Polish flag is upside down becouse this picture represent one of the 'Country balls'
            #specificaly a 'Poland ball', which have colors of Monako hahahaha!
            sh.set_rotation(180)
            sh.set_pixels(welcome_img)
            time.sleep(2)
            sh.set_rotation(0)
        # store multiple arrays (images) in one array for do an animation
        temp_array = [temp_img1, temp_img2,temp_img3, temp_img4, temp_img5, temp_img6, temp_img5, temp_img4, temp_img3, temp_img2, temp_img1]
        if idImg == 'temp':
            for temp_anim in temp_array:
                sh.set_pixels(temp_anim)
                time.sleep(0.1)
        hum_array = [hum_img1, hum_img2, hum_img3, hum_img4, hum_img5]
        if idImg == 'hum':
            for hum_anim in hum_array:
                sh.set_pixels(hum_anim)
                time.sleep(0.5)
        press_array = [press_img1, press_img2, press_img3, press_img4, press_img5,press_img6, press_img7, press_img8, press_img9, press_img10, press_img11, press_img12, press_img13, press_img14]
        if idImg == 'press':
            for press_anim in press_array:
                sh.set_pixels(press_anim)
                time.sleep(0.1)
        if idImg == 'end':
            sh.set_pixels(end_array)
            time.sleep(0.3)
        if idImg == 'error':
            sh.set_pixels(error_array)
            time.sleep(0.5)
        if idImg == 'wait':
            sh.set_pixels(wait_img)
            # here we have animation by rotate the screen
            for rot in orientation:
                sh.set_rotation(rot)
                time.sleep(0.2)
        if idImg == 'reset':
            sh.set_pixels(reset_array)
        # if given parameter was not equal to wait and or error show us a greeen image until next function call
        # green image means that everything is okay
        if (idImg != 'wait' and idImg != 'end' and idImg != 'error' and idImg != 'reset'):
            sh.set_pixels(working_array)
    # Handle the exception
    except Exception as e_display_eception:
        global anyException
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_display_eception))
        return "ERROR"
def showInfo(measure):
    '''
    Scroll *measure* (converted to str) across the SenseHat LED matrix
    in a randomly chosen colour, then pause for one second.
    On failure the error is logged and the error screen is shown.
    '''
    try:
        # define RGB colors (kept dim to limit LED brightness)
        r = [50,0,0]
        g = [0,50,0]
        b = [0,0,50]
        p = [50,0,50]
        c = [0,50,50]
        u = [50,50,0]
        # store all colors in one array for easier draw
        textColours = [r,g,b,p,c,u]
        # Idiom fix: random.choice instead of indexing with randint.
        color = random.choice(textColours)
        # set rotation to 0 degrees
        sh.set_rotation(0)
        # show message
        sh.show_message(str(measure), text_colour =color, scroll_speed=0.05)
        # sleep one second
        time.sleep(1)
    # handle the exception
    except Exception as e_displayText_exception:
        logger.error('cannot show message on senseHat: %s',e_displayText_exception)
        pictures('error')
def ephemISS():
    '''
    Compute the ISS position from a hard-coded TLE and report which
    hemisphere it is over; returns "ERROR" on failure.

    NOTE(review): the TLE is a snapshot from 26.01.2019 (celestrak.com),
    so the computed position drifts the further the run date is from then.
    '''
    try:
        # ephem computation for the logs
        # SOURCE = CELESTREAK.COM
        # DAY = 26.01.2019
        nameOfStation = 'ISS (ZARYA)'
        firstLine = '1 25544U 98067A 19027.58387731 .00001656 00000-0 33287-4 0 9996'
        secondLine = '2 25544 51.6426 340.5081 0004927 322.6857 20.8029 15.53199695153409'
        stationISS = ephem.readtle(nameOfStation,firstLine,secondLine)
        stationISS.compute()
        # negative sub-latitude means the ISS is south of the equator
        if(stationISS.sublat)<0:
            return 'ISS is in Southern hemisphere'
        else:
            return 'ISS is in Northern hemisphere'
    # handle the exception
    except Exception as e_ISS_eception:
        global anyException
        anyException = True
        logger.error('Cannot get EPHEM resulit eroor:%s',str(e_ISS_eception))
        pictures('error')
        return "ERROR"
class timer:
    '''
    Small datetime helper for run-time bookkeeping and log timestamps.
    '''
    def __init__(self):
        # Record when the run started and compute when it must end
        # (programTime is the module-level duration in minutes).
        self.startTime = datetime.datetime.now()
        self.endTime = self.startTime + datetime.timedelta(minutes=programTime)
    def minsOfRun(self):
        # Elapsed time since start, trimmed to 'H:MM:SS' for compact logs
        # (example: '0:00:27').
        elapsed = datetime.datetime.now() - self.startTime
        return str(elapsed)[:7]
    def now(self):
        # Return (and remember) the current datetime object.
        self.time = datetime.datetime.now()
        return self.time
    def nowForLog(self):
        # Current wall-clock time as 'HH:MM:SS' for compact logs
        # (example: '14:52:32').
        stamp = str(datetime.datetime.now())
        return stamp[11:19]
# ____________________________
# INICIALIZE PROGRAM
# ____________________________
try:
    # create the run timer (records start time and computes the end time)
    timer1 = timer()
    # set up logzero logging to a data0N.csv file
    setLoggingFile()
    # first log line of the run
    logger.debug('starting program,time is: %s, program will be running for: %smin',timer1.now(),programTime)
    # log which hemisphere the ISS is over (ephem, hard-coded TLE)
    logger.info('EPHEM: %s',ephemISS())
    # connect to the SenseHat board
    sh = SenseHat()
    # give the hardware a moment to settle
    time.sleep(2)
    # show welcome screen
    pictures('welcome')
# handle any initialisation failure (logged; run continues regardless)
except Exception as e_init_exception:
    anyException = True
    logger.error('INIT ERROR: %s',str(e_init_exception))
# ____________________________
# SET VARIABLES
# ____________________________
# Running sums of valid measurements; divided by `rounds` at the end.
averageTemp=0
averageHum=0
averagePress=0
# Number of completed rounds with no measurement error.
rounds = 0
# Min/max trackers, seeded so the first valid reading always replaces them.
lowestTemp = 9999999999
lowestHum = 9999999999
lowestPress = 9999999999
higestTemp = 0
higestHum = 0
higestPress = 0
# ____________________________
# MAIN LOOP OF PROGRAM
# ____________________________
# while timer1 obiect (time now) is smaller than (endTime)
# Run measurement rounds until the allotted programTime has elapsed.
while(timer1.now()<timer1.endTime):
    try:
        # stop early if the stored files are approaching the 3 GB limit
        if(isItOversized()):
            logger.debug('OVERSIZED EXITING')
            break
        # count this round (average measurements will be calculated from this variable)
        rounds+=1
        # log round number and elapsed time
        logger.debug('Start round: %s,Time from start: %s',rounds,timer1.minsOfRun())
        # show wait image and the active round number on screen
        pictures('wait')
        showInfo('round: '+str(rounds))
        # show temperature animation, measure temperature and display the result
        pictures('temp')
        tempNowIs = measure('temp')
        showInfo(str(tempNowIs)+" 'C")
        # same for humidity
        pictures('hum')
        humNowIs = measure('hum')
        showInfo(str(humNowIs)+' %')
        # same for pressure
        pictures('press')
        pressNowIs = measure('press')
        showInfo(str(pressNowIs)+' mbar')
        # log all results of this round
        logger.info('Time is: %s,Time from start: %s,Temp: %s,Hum: %s,Press: %s',timer1.nowForLog(),timer1.minsOfRun(),tempNowIs,humNowIs,pressNowIs)
        # only fold valid rounds into the running min/max/average stats
        if(tempNowIs != 'ERROR' and humNowIs != 'ERROR' and pressNowIs != 'ERROR'):
            averageTemp+=tempNowIs
            averageHum+=humNowIs
            averagePress+=pressNowIs
            if tempNowIs<lowestTemp:
                lowestTemp = tempNowIs
            if tempNowIs>higestTemp:
                higestTemp = tempNowIs
            if humNowIs < lowestHum:
                lowestHum = humNowIs
            if humNowIs>higestHum:
                higestHum = humNowIs
            if pressNowIs<lowestPress:
                lowestPress = pressNowIs
            if pressNowIs>higestPress:
                higestPress = pressNowIs
        else:
            # a measurement failed: do not count this round in the stats
            rounds-=1
    # handle main exception
    except Exception as e_main_exception:
        anyException = True
        logger.error('Time from start: %s,Time is: %s,ERROR: %s',timer1.minsOfRun(),timer1.nowForLog(),str(e_main_exception))
        pictures('error')
# ____________________________
# AFTER MAIN LOOP
# ____________________________
try:
    pictures('end')
    # calculate and round average measurements
    # NOTE(review): if no round succeeded, rounds is 0 and the division
    # raises ZeroDivisionError, which is caught by the except below.
    averageTemp/=rounds
    averageHum/=rounds
    averagePress/=rounds
    averageTemp = round(averageTemp,2)
    averageHum = round(averageHum,2)
    averagePress = round(averagePress,2)
    lowestTemp = round(lowestTemp,2)
    lowestHum = round(lowestHum,2)
    lowestPress = round(lowestPress,2)
    higestTemp = round(higestTemp,2)
    higestHum = round(higestHum,2)
    higestPress = round(higestPress,2)
    # log the aggregate statistics for the whole run
    logger.info('average Temp: %s,average hum: %s,average press: %s',averageTemp,averageHum,averagePress)
    logger.info('Temp: highest: %s lowest: %s ,Hum: highest: %s lowest: %s ,Press: highest: %s lowest: %s',higestTemp,lowestTemp,higestHum,lowestHum,higestPress,lowestPress)
    logger.debug('code succesfully exited after: %s,expected time: %smin, time of end is: %s, problems: %s',timer1.minsOfRun(),programTime,timer1.now(),anyException)
    logger.debug('program ended with %s rounds of collecting data',rounds)
    # say goodbye: leave the screen blank
    time.sleep(3)
    pictures('reset')
# exit on that Exception
except Exception as e_sumUp_exception:
    print('CANNOT SUMUP DATA, EXITING')
    logger.error('SUMUP ERROR: %s',str(e_sumUp_exception))
exit() |
2,685 | 0412369f89842e2f55aa115e63f46a1b71a0f322 | s=int(input())
print(s+2-(s%2)) |
2,686 | 268a8252f74a2bdafdadae488f98997c91f5607c | import os
from unittest import TestCase
class TestMixin(TestCase):
    """Shared test fixtures: resolves fixture file paths relative to this file."""

    @classmethod
    def setUpClass(cls):
        # Anchor every fixture path at <this file's directory>/fixtures.
        here = os.path.dirname(os.path.abspath(__file__))
        cls.base_dir = here
        cls.fixtures_dir = os.path.join(here, 'fixtures')
        cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')
        cls.fastq_10xv2_paths = [
            os.path.join(cls.fixtures_dir, name)
            for name in ('10xv2_1.fastq.gz', '10xv2_2.fastq.gz')
        ]
|
2,687 | 44f18d7e7713073c27fec38f0b847803eceefbc9 | import random
# Demo: random.choice works on any non-empty sequence -- a list of strings
# and a tuple of mixed numbers.  Output is nondeterministic by design.
print(random.choice(['python','c++','java']))
print(random.choice((1.1,-5,6,4,7)))
|
2,688 | b9678b447bc6e7c4e928ffa6b8cd58639e41a801 | """
Author: Alan Danque
Date: 20210323
Purpose:Final Data Wrangling, strips html and punctuation.
"""
from sklearn.tree import export_graphviz
import pydot
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import time
# Flat analysis script: load a lead-analytics CSV, load a pickled
# RandomForestRegressor, and export one of its decision trees as dot/png.
# Paths are hard-coded to the author's Windows machine.
start_time = time.time()
pd.options.mode.chained_assignment = None  # silence SettingWithCopyWarning (default='warn')
results_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')
results_dir.mkdir(parents=True, exist_ok=True)
filepath = "C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv"
data = pd.read_csv(filepath)
print(data.shape)
shape = data.shape
print('\nDataFrame Shape :', shape)
print('\nNumber of rows :', shape[0])
print('\nNumber of columns :', shape[1])
# NOTE(review): amsmodel1 is an alias, not a copy -- the del/fillna below also
# mutate `data`.  Confirm that is intended.
amsmodel1 = data
del amsmodel1['street_address']
amsmodel1.fillna(0, inplace=True)
#amsmodel1.replace(np.nan,0)
print("Dataframe Loaded: --- %s seconds ---" % (time.time() - start_time))
# load model and predict
model_file = results_dir.joinpath('My3rdModel.pkl')
with open(model_file, 'rb') as f:
    rf = pickle.load(f)
#rf.predict(X[0:1])
print("Model Loaded: --- %s seconds ---" % (time.time() - start_time))
# Split into target (price) and feature matrix; keep column names for the
# tree export below.
target = np.array(amsmodel1['price'])
features = amsmodel1.drop('price', axis = 1)
feature_list = list(features.columns)
features = np.array(features)
print(feature_list)
print(features)
print("Features Loaded: --- %s seconds ---" % (time.time() - start_time))
"""
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
print(random_grid)
print("Estimators Loaded: --- %s seconds ---" % (time.time() - start_time))
"""
# Decision Tree
## SAVING THE DECISION TREE
# Export estimator #5 of the forest to Graphviz dot format, then render png.
tree = rf.estimators_[5]
tree_dot_file = results_dir.joinpath('tree.dot')
tree_png_file = results_dir.joinpath('tree.png')
# NOTE(review): dotfile is never closed/flushed before pydot reads the .dot
# file -- wrap in `with` or call dotfile.close() to avoid a truncated export.
dotfile = open(tree_dot_file, 'w')
export_graphviz(tree, out_file = dotfile, feature_names = feature_list, rounded = True, precision = 1)
# Install https://graphviz.org/download/#windows
#(graph, ) = pydot.graph_from_dot_file(tree_dot_file)
#graph.write_png(tree_png_file)
# C:\Program Files\Graphviz\bin
# having issues with pydot.graph_from_dot_file. Since my dot file is getting created using subprocess.
## from subprocess import check_call
## check_call(['dot','-Tpng',dotfile,'-o',tree_png_file])
(graph,) = pydot.graph_from_dot_file(tree_dot_file)
graph.write_png(tree_png_file)
print("DecisionTree: --- %s seconds ---" % (time.time() - start_time))
"""
PyDot Conversion Complete: --- 3804.3111951351166 seconds ---
"""
|
2,689 | 2b73c4e07bba7ed5c89a31ebd45655eaa85dcdcc |
__all__ = '''
calc_common_prefix_length
'''.split()
import operator
import itertools
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
    """Return the number of leading elements the two iterables share.

    Elements are compared pairwise with *__eq__* (defaults to ``==``); the
    count stops at the first mismatch or when the shorter input is exhausted.
    """
    are_equal = operator.__eq__ if __eq__ is None else __eq__
    matched = 0
    for lhs_item, rhs_item in zip(lhs_iterable, rhs_iterable):
        if not are_equal(lhs_item, rhs_item):
            break
        matched += 1
    return matched
# Module self-tests, executed at import time (stripped under python -O).
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1,3], [1,2]) == 1
|
2,690 | 7383ae97d6a1368896d05d0cafc9846c24004701 | """Testing data storage functionality in gludb.simple (see simple_tests.py for
testing of the rest of gludb.simple functionality)"""
import unittest
import datetime
import time
import gludb.config
from gludb.versioning import VersioningTypes
from gludb.data import orig_version
from gludb.simple import DBObject, Field
from gludb.utils import parse_now_field
from utils import compare_data_objects
# Model persisted by every storage test below.  Field(...) declares a gludb
# field with the given default; extra_data's default is the dict callable
# (presumably invoked per instance to avoid shared mutable state -- confirm
# against gludb.simple.Field).
@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)
class SimpleStorage(object):
    name = Field('default name')  # default string
    descrip = Field()  # no explicit default
    age = Field(42)  # default int
    extra_data = Field(dict)  # callable default
# Same tests as DefaultStorageTesting but with differnt setUp/tearDown
class MissingMapTesting(unittest.TestCase):
    """Verify gludb.config behavior when no database mapping is configured."""
    def setUp(self):
        gludb.config.default_database(None)  # no default database
    def tearDown(self):
        # Undo any database setup
        gludb.config.clear_database_config()
    def test_failedops(self):
        # With no mapping at all, requesting one must raise ValueError.
        def try_op():
            return gludb.config.get_mapping(SimpleStorage)
        self.assertRaises(ValueError, try_op)
    def test_justnomap(self):
        # With no_mapping_ok=True the lookup degrades to returning None.
        mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
        self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
    """CRUD round-trip tests for SimpleStorage against the default database
    (an in-memory sqlite instance).  Subclasses rerun the same tests with a
    different database configuration by overriding setUp/tearDown."""
    def setUp(self):
        gludb.config.default_database(gludb.config.Database(
            'sqlite',
            filename=':memory:'
        ))
        SimpleStorage.ensure_table()
    def tearDown(self):
        # Undo any database setup
        gludb.config.clear_database_config()
    def assertObjEq(self, obj1, obj2):
        # Field-by-field comparison helper (see tests' utils module).
        self.assertTrue(compare_data_objects(obj1, obj2))
    def assertReadable(self, obj):
        # The object must round-trip through find_one, and the original
        # version captured at read time must match too.
        read_back = obj.__class__.find_one(obj.id)
        self.assertObjEq(obj, read_back)
        orig_ver = obj.__class__.from_data(orig_version(read_back))
        self.assertObjEq(obj, orig_ver)
    def assertCloseTimes(self, d1, d2, eps=0.15):
        # Timestamps within eps seconds count as "the same time".
        self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
    def assertNotCloseTimes(self, d1, d2, eps=0.15):
        self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
    def test_missing(self):
        self.assertIsNone(SimpleStorage.find_one('not there'))
    def test_table_has_prefix(self):
        # No application prefix configured here, so the raw table name is used.
        self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.__table_name__)
    def test_extra_fields(self):
        # _create_date must be set once; _last_update must move on re-save.
        s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
        s.save()
        create1 = parse_now_field(s._create_date)
        update1 = parse_now_field(s._last_update)
        self.assertCloseTimes(datetime.datetime.utcnow(), update1)
        self.assertCloseTimes(create1, update1)
        # Sucks, but we need to space out our timestamps
        time.sleep(0.3)
        s.descrip = 'SecondSave'
        s.save()
        create2 = parse_now_field(s._create_date)
        update2 = parse_now_field(s._last_update)
        self.assertCloseTimes(datetime.datetime.utcnow(), update2)
        self.assertCloseTimes(create1, create2)
        self.assertNotCloseTimes(update1, update2)
        s2 = SimpleStorage.find_one(s.id)
        create3 = parse_now_field(s2._create_date)
        update3 = parse_now_field(s2._last_update)
        # Note that we DON'T check for string equality - that's because
        # _last_update is updated every time the instance method to_data is
        # called. See simple.md for extra details on auto fields
        self.assertCloseTimes(create2, create3)
        self.assertCloseTimes(update2, update3)
    def test_readwrite(self):
        # Full create / save / overwrite / read-back cycle, including the
        # "original version" snapshot semantics of gludb versioning.
        s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
        self.assertEquals('', s.id)
        self.assertEquals('Pre', s.name)
        self.assertEquals('Testing', s.descrip)
        self.assertEquals(-1, s.age)
        self.assertEquals({}, s.extra_data)
        s.extra_data['coolness'] = {'a': 123, 'b': 456}
        s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
        s.extra_data['oscar'] = 'grouch'
        s.extra_data['fp'] = 42.42
        self.assertTrue(orig_version(s) is None)
        s.save()
        self.assertTrue(len(s.id) > 0)
        self.assertReadable(s)
        # Saved - so should have a prev version that is identical
        self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
        s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
        s2.save()
        self.assertReadable(s2)
        all_recs = SimpleStorage.find_all()
        self.assertEqual(1, len(all_recs))
        self.assertObjEq(s2, all_recs[0])
        # Change the object we read and then insure that the pervious version
        # saved on load is correct
        read_obj = all_recs[0]
        read_obj.name = 'Pre2'
        read_obj.descrip = 'Testing2'
        read_obj.age = -2
        s0 = SimpleStorage.from_data(orig_version(read_obj))
        self.assertEquals(s.id, s0.id)
        self.assertEquals('Post', s0.name)
        self.assertEquals('AtItAgain', s0.descrip)
        self.assertEquals(256, s0.age)
        self.assertEquals({}, s0.extra_data)
# Same tests as DefaultStorageTesting but with differnt setUp/tearDown
class SpecificStorageTesting(DefaultStorageTesting):
    """Reruns every DefaultStorageTesting test, but with a per-class database
    mapping instead of a default database."""
    def setUp(self):
        gludb.config.default_database(None)  # no default database
        gludb.config.class_database(SimpleStorage, gludb.config.Database(
            'sqlite',
            filename=':memory:'
        ))
        SimpleStorage.ensure_table()
    def tearDown(self):
        # Undo any database setup
        gludb.config.clear_database_config()
# Same tests as DefaultStorageTesting but with differnt setUp/tearDown
class PrefixedStorageTesting(DefaultStorageTesting):
    """Reruns every DefaultStorageTesting test with an application prefix
    configured, which changes the physical table name."""
    PREFIX = "Prefix"
    def setUp(self):
        gludb.config.default_database(None)  # no default database
        gludb.config.class_database(SimpleStorage, gludb.config.Database(
            'sqlite',
            filename=':memory:'
        ))
        gludb.config.set_db_application_prefix(self.PREFIX)
        SimpleStorage.ensure_table()
    def tearDown(self):
        # Undo any database setup
        gludb.config.clear_database_config()
        gludb.config.set_db_application_prefix(None)
    def test_table_has_prefix(self):
        # Overrides the base test: the table name must now carry the prefix.
        expectedName = self.PREFIX + gludb.config._APPLICATION_SEP + SimpleStorage.__table_name__
        self.assertEqual(SimpleStorage.get_table_name(), expectedName)
2,691 | c8bb6ead7e305f466e24b47811d6ed38c8cfec0a |
from oscar.app import Shop
from apps.catalogue.app import application as catalogue_app
class BaseApplication(Shop):
    # Override the stock Oscar catalogue app with the local fork.
    catalogue_app = catalogue_app
# Root application instance (presumably imported by the project's URL
# configuration -- confirm against urls.py).
application = BaseApplication()
|
2,692 | 355d60300cbbed817b4512e9b02cc4dd53d1293e | friends = ["Rolf", "Bob", "Anne"]
# List basics demo: indexing, length, nested lists, append and remove.
print(friends[0])
print(friends[1])
print(len(friends))
new_friends = [
    ["Rolf", 24],
    ["Bob", 30],
    ["Anne", 27],
    ["Charlie", 25],
    ["Jen", 25],
    ["Adam", 29]
]
# Prints the first character of friends[0] (a string); NOTE(review): this
# probably meant new_friends[0][0] -- confirm.
print(friends[0][0])
friends.append("Jen")
print(friends)
# remove() deletes the first element equal to its argument.
new_friends.remove(["Anne", 27])
print(new_friends)
|
2,693 | d96038a715406388b4de4611391dee18fc559d5a | import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import time
from urllib.request import Request
import requests
import json
import re
import sys
def compare(mystring):
    """Scrape Konga and Amazon for *mystring*, convert Amazon's USD prices to
    NGN via a currency API, and print a summary dict of both price lists and
    their averages.  Performs live network requests; output depends on the
    remote sites' current markup and availability.
    """
    def usd_to_ngn():
        # Fetch the current USD->NGN rate from the free currconv API.
        print("Getting USD to NGN Rate")
        req = requests.get("http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63")
        req.raise_for_status()
        # req.content is bytes; str() yields "b'...'" so [2:-1] strips the
        # wrapper.  NOTE(review): req.json() would be the robust way to do this.
        res = str(req.content)[2:-1]
        res = json.loads(res)
        rate = float(res['results']['USD_NGN']['val'])
        return rate
    def amazon(mystring):
        # Scrape Amazon search results and return prices converted to NGN.
        search_term = mystring.replace(" ", "+")
        header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
        html = Request("https://www.amazon.com/s?k={}&ref=nb_sb_noss_1".format(search_term), headers=header)
        time.sleep(10)
        page_html2 = uReq(html).read()
        page_soup = soup(page_html2, 'html.parser')
        price_tags1 = page_soup.select('span.a-offscreen')
        prices = [el.get_text() for el in price_tags1] # get text
        # print(f"1 : {prices}")
        prices = ["".join(re.findall("([\S]?)([0-9\.]+)", i)[0]) for i in prices]
        # ^ remove spaces, and get the price range minimum, with the currency
        rate = usd_to_ngn()
        # i[1:] drops the leading currency symbol before conversion.
        prices = [(float(i[1:]) * rate) for i in prices]
        return prices
    def konga(mystring):
        # Scrape Konga search results; prices are already in NGN.
        #mystring = (input('enter your search term: '))
        search_term = mystring.replace(" ", "+")
        my_url = 'https://www.konga.com/search?search='
        new = my_url+search_term
        header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
        #print(new)
        request = Request(new, headers=header)
        time.sleep(10)
        response = uReq(request).read()
        page_soup = soup(response, 'html.parser')
        #price_containers = page_soup.find_all('span', {'class':'d7c0f_sJAqi'})
        #containers = page_soup.find_all('div', {'class':'af885_1iPzH'})
        # CSS class names are obfuscated build artifacts; they break whenever
        # Konga redeploys.  Only the first 30 hits are kept.
        price_tags = page_soup.select("span.d7c0f_sJAqi")
        prices = [float(str(el.contents[1]).replace(",", "")) for el in price_tags[:30]]
        return prices
    # The locals below shadow the helper functions of the same name; fine
    # here because each helper is called exactly once.
    konga = konga(mystring)
    # print(konga)
    amazon = amazon(mystring)
    # print(alibaba)
    """
    if len(konga) > len(alibaba) > 0:
        konga = konga[:len(alibaba)]
    elif len(konga) > 0:
        alibaba = alibaba[:len(konga)]
    """
    def find_avg(lst):
        # Arithmetic mean; None for an empty list.
        if len(lst) < 1:
            return None
        avg = 0
        for i in lst:
            avg += i
        return avg / len(lst)
    obj = {"avg_konga_price": find_avg(konga), "avg_Amazon_price": find_avg(amazon),
            "currency" : "NGN",
            'konga' : ("Unable To Fetch Prices" if (len(konga) < 1) else konga),
            'amazon' : ("Unable To Fetch Prices" if (len(amazon) < 1) else amazon)}
    # print(f"k = {konga} : a = {alibaba}")
    print(obj)
# CLI entry: compare command-line arguments joined into one query, if given.
if len(sys.argv) > 1:
    compare(" ".join(sys.argv[1:]))
# NOTE(review): the lines below are NOT commented out, so the script always
# prompts interactively as well -- even after an argv-driven run.  Confirm
# whether the prompt should be an else branch.
term = str(input('enter your search term: '))
compare(term)
|
2,694 | 180d28ac77b6ff4488b3fd9c17a9ee4571e33631 | import math
# Current and destination positions as GPS lat/lon (decimal degrees).
currentLat = 41.391240
currentLon = -73.956217
destLat = 41.393035
destLon = -73.953398
# The same two positions expressed as UTM easting/northing (meters).
currentX = 587262
currentY = 4582716
destX = 587499
destY = 4582919
# Westerly magnetic declination (degrees) for this geographic location;
# see https://www.ngdc.noaa.gov/geomag-web/
# needed for the "grid-to-magnetic" angle correction below.
declinationAngle = 13
########### Functions ############################################################################
def haversine(currentLat, currentLon, destLat, destLon, declination=None):
    """Return (distance_m, magnetic_bearing_deg) between two lat/lon points.

    Distance is the great-circle distance (spherical law of cosines, Earth
    radius 6371 km).  Bearing is the initial compass bearing from the current
    point to the destination, corrected for westerly magnetic declination and
    wrapped into [0, 360) with 0 = magnetic north.

    declination: westerly declination in degrees; defaults to the
    module-level declinationAngle.

    Bug fixed: the original fed raw degrees into sin/cos (which expect
    radians) and used a swapped atan2 compensated by ad-hoc offsets.
    """
    if declination is None:
        declination = declinationAngle
    # Convert once to radians; all math trig functions expect radians.
    lat1 = math.radians(currentLat)
    lat2 = math.radians(destLat)
    dLon = math.radians(destLon - currentLon)
    # Spherical law of cosines for the arc length.
    haversineDistance = math.acos(
        math.sin(lat1) * math.sin(lat2)
        + math.cos(lat1) * math.cos(lat2) * math.cos(dLon)
    ) * 6371000
    # Standard initial-bearing formula: 0 deg = true north, clockwise.
    trueBearing = math.degrees(math.atan2(
        math.sin(dLon) * math.cos(lat2),
        math.cos(lat1) * math.sin(lat2)
        - math.sin(lat1) * math.cos(lat2) * math.cos(dLon),
    ))
    # Westerly declination: add the offset, then wrap into [0, 360).
    magBearing = (trueBearing + declination) % 360
    return haversineDistance, magBearing
def distAndBearing_utm(currentX, currentY, destX, destY, declination=None):
    """Return (distance_m, magnetic_bearing_deg) between two UTM points.

    Distance is the planar Euclidean distance.  Bearing is measured clockwise
    from grid north (+Y axis), corrected for westerly magnetic declination and
    wrapped into [0, 360).

    declination: westerly declination in degrees; defaults to the
    module-level declinationAngle.

    Bugs fixed: the original quadrant ladder divided by zero when dx == 0
    (destination due north/south) and returned 90 instead of 270 for
    dx < 0, dy == 0 (due west).  atan2 handles every quadrant and axis case.
    """
    if declination is None:
        declination = declinationAngle
    dx = destX - currentX
    dy = destY - currentY
    # Straight-line distance between the two points.
    utm_dist = math.hypot(dx, dy)
    # atan2(dx, dy) measures the angle clockwise from the +Y (grid north)
    # axis, which is exactly the compass convention.
    gridBearing = math.degrees(math.atan2(dx, dy))
    # Westerly declination: add the offset, then wrap into [0, 360).
    magUtmBearing = (gridBearing + declination) % 360
    return utm_dist, magUtmBearing
####### MAIN ########################################################
# Compute distance/bearing from both coordinate representations and print.
# NOTE(review): Python 2 print statements -- this script does not run under
# Python 3 as written.
dist, bearing = haversine(currentLat,currentLon, destLat, destLon)
print "Distance & Bearning based on Lat/Lon is: ", dist, bearing
utm_dist, utm_angle = distAndBearing_utm(currentX, currentY, destX, destY)
print "Distance & Bearning based on UTM is: ", utm_dist, utm_angle
|
2,695 | 382a3b8bcd07c7098cecf2b770e46dfff50eeb98 | #####################
# Aufgabe 2, 13.7 #
# v1.0 #
# baehll #
# 04.05.2018 #
#####################
class Pinnwand:
    """A pin board of notes; a note's priority is its count of '!' marks."""

    def __init__(self):
        # Each entry is a (priority, text) pair.
        self.__zettel = []

    def hefteAn(self, notiz):
        # Derive the priority from the text: one point per exclamation mark.
        self.__zettel.append((notiz.count("!"), notiz))

    def entferne(self):
        # Find the first note whose priority strictly exceeds all earlier
        # ones (ties keep the earliest; an all-zero board picks index 0),
        # print its text and drop it from the board.
        best_index = 0
        best_prio = 0
        for index, (prio, _text) in enumerate(self.__zettel):
            if prio > best_prio:
                best_prio = prio
                best_index = index
        print(self.__zettel[best_index][1])
        del self.__zettel[best_index]

    def __str__(self):
        # Render the notes sorted by descending priority.
        sorted_notes = sorted(self.__zettel, reverse=True)
        print("Zettelliste: ")
        print(sorted_notes)
        parts = ["Notizen\n"]
        for prio, text in sorted_notes:
            parts.append(text + "\t" + "(Priorität: " + str(prio) + ")" + "\n")
        return "".join(parts)
menue = """
(N)eue Notiz anheften (A)lle Notizen auflisten
(W)ichtigste Notiz entfernen (E)nde
"""
p = Pinnwand()
while True:
print(menue)
eingabe = input("Ihre Wahl: ")
if eingabe in "nN":
notiz = input("Notiz: ")
while notiz != "":
p.hefteAn(notiz)
notiz = input("Notiz: ")
elif eingabe in "aA":
print(p)
elif eingabe in "wW":
p.entferne()
elif eingabe in "eE":
print("Tschüß!")
break |
2,696 | ec70fb9119b430dcd36549f2fac8e5e0a0e1bb00 |
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import escape_leading_slashes
class AppendSlashMiddleware(MiddlewareMixin):
    """Redirect slash-less URLs to their trailing-slash form when only the
    slashed variant resolves (a trimmed variant of Django's
    CommonMiddleware APPEND_SLASH behavior)."""
    # Permanent (301) redirects by default; subclasses may override.
    response_redirect_class = HttpResponsePermanentRedirect
    def process_request(self, request):
        # Returns a redirect response when a slash should be appended,
        # otherwise returns None implicitly so the request proceeds.
        redirect_url = ''
        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()
        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)
    def should_redirect_with_slash(self, request):
        # Only redirect when the slash-less path does NOT resolve but the
        # slashed one does.
        if request.path_info.endswith('/'):
            return False
        urlconf = getattr(request, 'urlconf', None)
        return (
            not is_valid_path(request.path_info, urlconf) and
            is_valid_path('%s/' % request.path_info, urlconf)
        )
    def get_full_path_with_slash(self, request):
        # escape_leading_slashes prevents '//host' style open redirects.
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)
    def process_response(self, request, response):
        # Late chance to redirect: a 404 whose slashed variant resolves.
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))
        # Fill in Content-Length for non-streaming responses lacking it.
        if not response.streaming and \
                not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        return response
|
2,697 | b094693b11fdc4f5fbff30e79a9f82d40104611d | from time import time
class Task:
    """A callable bundled with the timestamp at which it becomes ready.

    Tasks order by readiness (``__lt__``), so they can be stored directly
    in a heap or other priority queue.
    """

    def __init__(self, f, ready: float):
        self._f = f
        self._ready = ready

    def set_ready(self, ready: float) -> None:
        self._ready = ready

    def get_ready(self) -> float:
        return self._ready

    def __call__(self) -> None:
        # Run the wrapped callable; any return value is discarded.
        self._f()

    def __lt__(self, other) -> bool:
        return self.get_ready() < other.get_ready()

    def __str__(self):
        return f"Task({self._ready})"
|
2,698 | 8040b47dc3fd6b03432f64d7fb8a4267cc94ac9a | import caffe
import numpy as np
class PyLayer(caffe.Layer):
    """Caffe Python layer implementing a Euclidean (L2) loss:
    loss = sum((bottom[0] - bottom[1])**2) / (2 * batch_size)."""
    def setup(self, bottom, top):
        # The loss needs exactly two inputs: predictions and targets.
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute distance")
    def reshape(self, bottom, top):
        if bottom[0].count != bottom[1].count:
            raise Exception("Inputs must have the same dimension")
        # Scratch buffer holding the elementwise difference, reused by
        # forward and backward.
        self.diff = np.zeros(bottom[0].data.shape, dtype=np.float32)
        # The loss output is a single scalar.
        top[0].reshape(1)
    def forward(self, bottom, top):
        self.diff[...] = bottom[0].data - bottom[1].data
        top[0].data[...] = np.sum(self.diff ** 2) * (0.5 / bottom[0].num)
    def backward(self, top, propagate_down, bottom):
        # Gradient is +diff/num for the first input and -diff/num for the
        # second.  NOTE(review): under Python 2, 1 / bottom[i].num is integer
        # division if num is an int -- confirm the target interpreter.
        for i in range(2):
            if not propagate_down[i]:
                continue
            if i == 0:
                bottom[i].diff[...] = self.diff * (1 / bottom[i].num)
            else:
                bottom[i].diff[...] = self.diff * (-1 / bottom[i].num)
|
2,699 | 8d6e4d06e390b4a45e576239189745c2e37217c5 | import json
from datetime import datetime, timedelta
from itertools import product
from django.db.models import QuerySet
import pytz
from model_mommy import mommy
from ...views import get_authors, get_featured_challenges, get_term_of_user
class TestNonMiscView:
    """Test for non view functions in ideax.views (for refactor).

    Uses pytest fixtures: rf (RequestFactory) and db (database access).
    """
    def test_get_term_of_user_empty(self, rf, db):
        # With no Use_Term rows the view answers with a fallback message.
        request = rf.get('/')
        response = get_term_of_user(request)
        assert response.status_code == 200
        assert json.loads(response.content) == {'term': 'No Term of Use found'}
    def test_get_term_of_user(self, rf, db):
        # A term whose final_date lies in the future is the active one.
        mommy.make('Use_Term', term='EULA Test', final_date=datetime.now(pytz.UTC) + timedelta(days=1))
        request = rf.get('/')
        response = get_term_of_user(request)
        assert response.status_code == 200
        assert json.loads(response.content) == {'term': 'EULA Test'}
    def test_get_featured_challenges_empty(self, db):
        response = get_featured_challenges()
        assert isinstance(response, QuerySet)
        assert response.count() == 0
    def test_get_featured_challenges(self, db):
        # Build all four active/discarted combinations; only the
        # active-and-not-discarted challenge may be featured.
        challenges = {
            (active, discarted): mommy.make('Challenge', active=active, discarted=discarted)
            for active, discarted in product((False, True), repeat=2)
        }
        response = get_featured_challenges()
        assert isinstance(response, QuerySet)
        assert response.count() == 1
        assert response.first() == challenges[(True, False)]
    def test_get_authors_empty(self, db):
        response = get_authors('test@gmail.com')
        assert isinstance(response, QuerySet)
        assert response.count() == 0
    def test_get_authors(self, db):
        # Authors must be non-staff, have a non-empty e-mail, and not match
        # the excluded address passed to get_authors.
        staff_options = (False, True)
        # User e-mail cannot be null (refactor get_authors)
        email_options = ('', 'exclude@gmail.com', 'valid@gmail.com')
        authors = {
            (staff, email): mommy.make('UserProfile', user__is_staff=staff, user__email=email)
            for staff, email in product(staff_options, email_options)
        }
        response = get_authors('exclude@gmail.com')
        assert isinstance(response, QuerySet)
        assert response.count() == 1
        assert response.first() == authors[(False, 'valid@gmail.com')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.