seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
'''
Created on Oct 2, 2013
@author: lindahlm

Drive an iaf_neuron with a Poisson generator through a depressing
(Tsodyks) synapse and plot the recorded membrane potential.
'''
import nest
import pylab

t = 1000.0  # simulation time in ms
n = nest.Create('iaf_neuron')
mm = nest.Create('multimeter', params={'record_from': ['V_m'], 'start': 0.0})
pg = nest.Create('poisson_generator', params={'rate': 10.0})
nest.Connect(pg, n, model='tsodyks_synapse')
# Bug fix: the multimeter was never connected (the line was commented out
# and the Poisson generator was connected a second time instead), so
# GetStatus(mm) carried no events and the plot came out empty.
nest.Connect(mm, n)
nest.Simulate(t)
smm = nest.GetStatus(mm)[0]
pylab.plot(smm['events']['V_m'])
pylab.show()
| mickelindahl/bgmodel | python/misc_folder/test_poisson_generator_and_dep_syn.py | test_poisson_generator_and_dep_syn.py | py | 417 | python | en | code | 5 | github-code | 13 |
9722986305 | import socket,threading,sys,os,queue,json
MAX_BYTES = 65535
lock = threading.Lock() # 创建锁, 防止多个线程写入数据的顺序打乱
que = queue.Queue() # 用于存放客户端发送的信息的队列
users=[] #[(user,addr)]
def onlines():
    """Return the usernames of all connected users.

    ``users`` is the module-level list of ``(username, address)`` tuples;
    the C-style ``range(len(...))`` index loop was replaced with a
    comprehension over the tuples themselves.
    """
    return [username for username, _addr in users]
class ChatServer(threading.Thread):
    """UDP chat server.

    One thread (udp_connect) receives datagrams and pushes them onto the
    shared queue; a second thread (sendData) drains the queue and fans
    messages out to every connected user.  Protocol: messages are
    ``'<mode>;;<payload>'`` where mode is 'login', 'speak' or 'quit'.
    """
    # NOTE(review): 'global' at class scope is a no-op here; these names are
    # resolved as module globals inside the methods anyway.  IP is only
    # defined in the __main__ block.
    global users, que, lock, IP
    def __init__(self, ip,port): # constructor
        threading.Thread.__init__(self)
        self.ADDR = (ip, port)
        # run relative to the script's own directory
        os.chdir(sys.path[0])
        # UDP socket shared by the receive and send threads
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def recv(self, data, addr):
        """Thread-safely enqueue (addr, data) for the sender thread."""
        lock.acquire()
        try:
            que.put((addr, data))
        finally:
            lock.release()
    # Find which position the departing user occupies in `users`, remove it,
    # then broadcast the refreshed online-user list to the clients.
    def delUsers(self, addr):
        a = 0
        for i in users:  # locate the user to delete by address
            if i[1] == addr:
                users.pop(a)  # a tracks the index of i
                print(' Remaining online users: ', end='')  # print remaining online users
                d = onlines()
                self.recv(d, addr)
                print(d)
                break
            a += 1
    def udp_connect(self,):
        """Receive loop: parse each datagram and dispatch on its mode."""
        while True:
            data, address = self.s.recvfrom(MAX_BYTES)
            # print(data.decode())
            msglist = data.decode().split(';;')  # login
            met = msglist[0]  # mode: login / quit / speak
            mes = msglist[1]
            # datalist = mes.split(':;')
            if met == 'login':  # request to enter the chat room
                users.append((msglist[1],address))
                d = onlines()
                self.recv(d, address)
            elif met == 'speak':
                self.recv(mes,address)
            elif met == 'quit':
                self.delUsers(address)
            # print(data.decode())
    def sendData(self):  # queue entries are (addr, data)
        """Send loop: broadcast each queued message to every user."""
        while True:
            if not que.empty():
                data=''
                message = que.get()  # (addr, data)
                print(message[1])
                if isinstance(message[1],str):
                    for i in range(len(users)):  # notify every user in the list
                        for j in range(len(users)):  # only to find the sender's username (nicer UX)
                            if message[0]==users[j][1]:
                                data = '' + users[j][0] + ':' + message[1]
                        self.s.sendto(data.encode(), users[i][1])
                if isinstance(message[1], list):  # same as above
                    # lists (online-user updates) are JSON-encoded and sent as-is
                    data = json.dumps(message[1])
                    for i in range(len(users)):
                        try:
                            self.s.sendto(data.encode(),users[i][1])  # the conn target
                        except:
                            # NOTE(review): bare except silently drops send
                            # failures; consider logging at least.
                            pass
    def run(self):
        """Bind the socket and start the receive and send worker threads."""
        self.s.bind(self.ADDR)
        print('Chat server listening at',self.s.getsockname())
        t = threading.Thread(target=self.udp_connect)
        t.start()
        q = threading.Thread(target=self.sendData)
        q.start()
if __name__ == '__main__':
    # Bind on all interfaces at the default chat port and start the server.
    IP = ''
    PORT = 50007  # the int() wrapper around the literal was redundant
    cserver = ChatServer(IP, PORT)
    cserver.start()
| WhaleKlng/udp_chat_room | server.py | server.py | py | 3,604 | python | en | code | 1 | github-code | 13 |
7674182772 | from command import Command
def parse_range(range_str):
    """Parse a spec such as ``'0-10, 15, 20-22'`` into a set of ints.

    Each comma-separated token is either a single number or an inclusive
    ``lo-hi`` span.
    """
    result = set()
    for token in range_str.split(','):
        token = token.strip()
        if token.isdigit():
            result.add(int(token))
        else:
            lo, hi = (int(part) for part in token.split('-'))
            result.update(range(lo, hi + 1))
    return result
class Component:
    """One playable instrument component addressed through a slave device.

    Tracks which MIDI note values are currently sounding and decides
    whether a new note may be played.
    """

    def __init__(self, name, slave_name, comp_id, single_note=False,
                 fast_return=False, note_range='0-127'):
        self.name = name
        self.slave_name = slave_name
        self.comp_id = comp_id
        self.single_note = single_note
        self.fast_return = fast_return
        self.note_range = parse_range(note_range)
        self.notes_playing = []

    def set_slave(self, slave):
        """Attach the slave object that delivers commands to the hardware."""
        self.slave = slave

    @staticmethod
    def from_config(section):
        """Build a Component from a configparser section."""
        return Component(
            section.name,
            section['slave'],
            section.getint('comp_id'),
            section.getboolean('single_note'),
            section.getboolean('fast_return'),
            section['note_range']
        )

    def __repr__(self):
        return f'<Component {self.slave}:{self.comp_id} {self.name}>'

    def can_play(self, note_val):
        """True when the note is in range, not already sounding, and the
        single-note policy (at most one note at a time) permits it."""
        if note_val not in self.note_range:
            return False
        if note_val in self.notes_playing:
            return False
        if self.single_note and self.notes_playing:
            return False
        return True

    def note_on(self, note_val):
        """Send a note_on command and remember the sounding note."""
        self.slave.add_command(self.comp_id, Command('note_on', note_val))
        self.notes_playing.append(note_val)

    def note_off(self, note_val):
        """Send a note_off command and forget the note."""
        self.slave.add_command(self.comp_id, Command('note_off', note_val))
        self.notes_playing.remove(note_val)
| pamtdoh/orchestrion | component.py | component.py | py | 1,732 | python | en | code | 0 | github-code | 13 |
17159198092 | import json
import requests
from ratelimit import limits, sleep_and_retry
from .config import BASE_URL, X_AUTH_TOKEN
# Placeholder until reset_sim() obtains a real key from the /start endpoint;
# every other call sends this value in its Authorization header.
auth_key = 'Call reset_sim() to make this value valid'

# Max 10 calls per second.
@sleep_and_retry
@limits(calls=10, period=1)
def check_limit():
    """Rate-limit gate: calling this blocks until another API call is
    allowed (at most 10 per second, enforced by the decorators)."""
    pass
def reset_sim(problem: int):
    """POST /start for *problem* and cache the returned auth key.

    Side effect: rebinds the module-global ``auth_key`` that every other
    API helper sends in its Authorization header.
    """
    check_limit()
    headers = {
        'X-Auth-Token': X_AUTH_TOKEN,
        'Content-Type': 'application/json'
    }
    body = {
        'problem': problem
    }
    global auth_key
    # timeout added: without one, requests waits forever on a stalled server
    response = requests.post(url=BASE_URL + '/start', data=json.dumps(body),
                             headers=headers, timeout=10)
    auth_key = response.json()['auth_key']
def _auth_headers():
    """Headers shared by every authenticated endpoint.

    Built fresh on each call so the current module-global ``auth_key`` is
    picked up (it changes whenever reset_sim() runs).  Deduplicates the
    identical dict that each function below used to rebuild.
    """
    return {
        'Authorization': auth_key,
        'Content-Type': 'application/json'
    }


def get_waiting_line():
    """GET /waiting_line — the people currently queueing."""
    check_limit()
    return requests.get(url=BASE_URL + '/waiting_line',
                        headers=_auth_headers(), timeout=10).json()


def get_game_result():
    """GET /game_result — results of finished games."""
    check_limit()
    return requests.get(url=BASE_URL + '/game_result',
                        headers=_auth_headers(), timeout=10).json()


def get_user_info():
    """GET /user_info — per-user state for the current simulation."""
    check_limit()
    return requests.get(url=BASE_URL + '/user_info',
                        headers=_auth_headers(), timeout=10).json()


def put_match(pairs):
    """PUT /match — submit the proposed player pairs."""
    check_limit()
    body = {
        'pairs': pairs
    }
    return requests.put(url=BASE_URL + '/match', data=json.dumps(body),
                        headers=_auth_headers(), timeout=10).json()


def put_change_grade(commands):
    """PUT /change_grade — submit grade-change commands."""
    check_limit()
    body = {
        'commands': commands
    }
    return requests.put(url=BASE_URL + '/change_grade', data=json.dumps(body),
                        headers=_auth_headers(), timeout=10).json()


def get_score():
    """GET /score — the current total score."""
    check_limit()
    return requests.get(url=BASE_URL + '/score',
                        headers=_auth_headers(), timeout=10).json()
| jiyolla/study-for-coding-test | programmers/kakao2022/kakao2022/api.py | api.py | py | 2,074 | python | en | code | 0 | github-code | 13 |
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of *s* without repeating characters.

        Sliding window over *s* keeping the last index at which each
        character was seen; when the current character reappears inside the
        window, the left edge jumps past its previous occurrence.  This is
        O(n) — the original rescanned ``s[left:right]`` per step, O(n^2).
        """
        last_seen = {}   # char -> most recent index
        longest = 0
        left = 0         # left edge of the current window
        for right, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= left:
                # duplicate inside the window: shrink from the left
                left = last_seen[ch] + 1
            last_seen[ch] = right
            longest = max(longest, right - left + 1)
        return longest
"""Read the birth year of seven people and report how many have reached
legal adulthood (18+) and how many are still minors.

(Exercise statement, translated from the Portuguese original: 'Create a
program that reads the birth year of seven people; at the end, show how
many have not yet reached adulthood and how many are already adults.')
"""
from datetime import date
atual = date.today().year   # current year
count = 0    # adults (18 or older)
count2 = 0   # minors
for c in range(1, 8):
    nasc = int(input(f'Em que ano a {c}ª pessoa nasceu? '))
    idade = atual - nasc    # age in years, ignoring month and day
    if idade >= 18:
        count += 1
    else:
        count2 += 1
print(f'Ao todo são {count} pessoas MAIORES DE IDADE!')
print(f'E {count2} pessoas MENORES DE IDADE!')
| rafaelsantosmg/cev_python3 | cursoemvideo/ex054.py | ex054.py | py | 519 | python | pt | code | 1 | github-code | 13 |
# Refactoring exercise: keep improving the same function.
def add(a, b):
    """Return the sum of *a* and *b*."""
    return a + b


result = add(10, 20)
print(result)
def add2(a, b, c=0):
    """Return ``a + b + c``; the third operand is optional (defaults to 0)."""
    return a + b + c


result = add2(10, 20, 30)
print(result)
def add3(nums):
    """Return the sum of an iterable of numbers.

    The hand-rolled accumulator loop was replaced with the builtin sum().
    """
    return sum(nums)


result = add3([10, 20, 30])
print(result)


def add4(*nums):
    """Return the sum of any number of positional arguments."""
    return sum(nums)


result = add4(10, 20, 30)
print(result)
def add5(nums):
    """Return the product of the numbers in *nums*.

    Despite the 'add' name this function multiplies (accumulator starts
    at 1).  The original trailing comment ('10x10x20x30') was wrong: the
    call below computes 10 * 20 * 30 = 6000.
    """
    result = 1
    for num in nums:
        result *= num
    return result


result = add5([10, 20, 30])  # 10 * 20 * 30 = 6000
print(result)


def add6(nums):
    """Return 20 plus the sum of *nums* (the accumulator starts at 20)."""
    return 20 + sum(nums)


result = add6([10, 20, 30])  # 20 + (10 + 20 + 30) = 80
print(result)
# BMI function
def bmi(b, c):
    """Body-mass index for height *b* in centimetres and weight *c* in kg."""
    height_m = b / 100
    return float(c / height_m ** 2)
# Interactive driver: read height (cm) and weight (kg), then classify BMI.
b=input('키')       # height prompt ('키' = height)
b=int(b)
c=input('몸무게')   # weight prompt ('몸무게' = weight)
c=int(c)
d=bmi(b,c)
print(d)
# BMI bands: 23-25 overweight, 25-30 obese, >= 30 severely obese, else normal
if d>= 23 and d< 25 :
    BMI_result = "과체중"    # overweight
elif d>= 25 and d< 30:
    BMI_result = "비만"      # obese
elif d>= 30:
    BMI_result = "고도비만"  # severely obese
else:
    BMI_result = "정상"      # normal
print(BMI_result)
def get_attendance_records(file_path):
    """Read a CSV attendance file and return its data rows (header dropped).

    Uses a context manager so the file is closed even if reading fails
    (the original leaked the handle on error), and drops the unused
    ``header`` local.
    """
    with open(file_path, 'r') as attendance_file:
        lines = attendance_file.readlines()
    # lines[0] is the header row; everything after it is a record
    return lines[1:]
def convert_attendance_record_to_bools(sessions):
    """Map 'Yes' -> 1 and anything else -> 0 for each session entry.

    The append loop was replaced by an equivalent list comprehension.
    """
    return [1 if session == 'Yes' else 0 for session in sessions]
def session_attendance(file_path):
    """Aggregate an attendance CSV two ways.

    Returns a dict with:
      * ``by_session``:  'Session_0'..'Session_8' -> number of attendees
      * ``by_attendee``: '0_Sessions'..'9_Sessions' -> how many people
        attended exactly that many sessions
    Record layout: two id columns followed by one 'Yes'/'No' per session.
    """
    session_totals = {u'Session_' + str(i): 0 for i in range(9)}
    per_attendee = {str(i) + u'_Sessions': 0 for i in range(10)}
    for record in get_attendance_records(file_path):
        fields = record.strip('\n').split(',')   # CSV string -> list
        sessions = convert_attendance_record_to_bools(fields[2:])
        # tally how many sessions this attendee made
        per_attendee[str(sum(sessions)) + u'_Sessions'] += 1
        # tally this attendee into each session's headcount
        for i, attended in enumerate(sessions):
            session_totals[u'Session_' + str(i)] += attended
    return {
        u"by_attendee": per_attendee,
        u"by_session": session_totals
    }
# print session_attendance('attendance.csv')
import string
import collections
from operator import itemgetter
IGNORE = {
'a', 'also', 'an', 'and', 'are', 'as', 'be', 'by', 'can', 'do', 'for', 'from',
'have', 'in', 'is', 'it', 'just', 'more', 'not', 'of', 'on', 'or', 'our',
'over', 'than', 'that', 'the', 'their', 'these', 'they', 'this', 'those',
'to', 'up', 'we', 'with'
}
def build_word_counter(file_path):
    """Count word frequencies in a text file.

    Punctuation, digits and newlines are stripped before splitting, and
    words from the IGNORE stopword set are excluded.

    Bug fix: words are now lowercased *before* the stopword test, so
    capitalised stopwords ('The', 'And', ...) are filtered too — the
    original checked the raw word but counted its lowercase form, letting
    capitalised stopwords through.  The file is also closed via ``with``.
    """
    with open(file_path, 'r') as f:
        speech = f.read()
    chars_to_remove = list(string.punctuation) + ['\n'] + list(string.digits)
    for char in chars_to_remove:
        speech = speech.replace(char, '')
    words = (w.lower() for w in speech.split())
    return collections.Counter(w for w in words if w not in IGNORE)
def common_words(file_path):
    """Return, alphabetically sorted, every word occurring more than 10 times.

    NOTE(review): ``w.decode('utf-8')`` only exists on bytes — this looks
    like Python 2 code; under Python 3 it would raise AttributeError on the
    str keys produced by build_word_counter.  Confirm the target runtime.
    """
    word_counter = build_word_counter(file_path)
    return sorted(w.decode('utf-8') for w in word_counter if word_counter[w] > 10)
def most_used_words(file_path):
    """Return the 20 most frequent words, ordered by (count, word) ascending.

    NOTE(review): like common_words, the ``.decode('utf-8')`` call implies
    Python 2 — on Python 3 the Counter keys are already str and this would
    raise AttributeError.
    """
    word_counter = build_word_counter(file_path)
    # most_common(20) gives descending counts; re-sort by (count, word)
    word_counter_sorted = sorted(word_counter.most_common(20), key=itemgetter(1,0))
    return [word.decode('utf-8') for word, _ in word_counter_sorted]
3426417222 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 16:50:17 2020
@author: Obed Junias
"""
import os
import sys
import time
import requests
def retrieve_page():
    """Download monthly climate pages (station ws-432950) for 2013-2018
    from tutiempo.net and save each as Data/htmlData/<year>/<month>.html.

    NOTE(review): requests.get has no timeout here, so a stalled server
    would hang the scrape indefinitely.
    """
    for year in range(2013,2019):
        for month in range(1,13):
            # months below 10 are zero-padded in the site's URL scheme
            if month < 10:
                url = "https://en.tutiempo.net/climate/0{}-{}/ws-432950.html".format(month,year)
            else:
                url = "https://en.tutiempo.net/climate/{}-{}/ws-432950.html".format(month,year)
            data = requests.get(url)
            encoded_data = data.text.encode('utf-8')
            # create the per-year output directory on first use
            if not os.path.exists("Data/htmlData/{}".format(year)):
                os.makedirs("Data/htmlData/{}".format(year))
            with open("Data/htmlData/{}/{}.html".format(year,month),"wb") as op:
                op.write(encoded_data)
            sys.stdout.flush()
if __name__ == "__main__":
    # Time the full scrape and report the elapsed wall-clock seconds.
    start_time = time.time()
    retrieve_page()
    stop_time = time.time()
    print("Time Taken: {}".format((stop_time-start_time)))
| obedjunias/AQI-Prediction | Data-Collection.py | Data-Collection.py | py | 1,091 | python | en | code | 0 | github-code | 13 |
3318938175 | #!/usr/bin/env python
import sys
sys.path.append("/Users/gkirk/Dropbox/git/Library/")
sys.path.append(".")
import os
import csv
from Stream_SD import Stats_Stream
# File format is Time,SV,Elev,Az,SNR
def Compute_Stats(Signal):
    """Accumulate per-elevation SNR statistics from a CSV file.

    Row format: Time,SV,Elev,Az,SNR.  Returns a list of 91 Stats_Stream
    accumulators indexed by integer elevation (0-90); rows above 90 are
    skipped.  A missing file yields 91 empty accumulators.

    Fixes: the placeholder ``list(range(91))`` initialisation became a
    comprehension, the file is closed via ``with`` (the original leaked
    the handle if a row raised), and dead commented-out code was removed.
    """
    Elev_Stats = [Stats_Stream() for _ in range(91)]
    if os.path.isfile(Signal):
        with open(Signal, 'r') as SignalFile:
            for row in csv.reader(SignalFile):
                elev = int(row[2])
                if elev <= 90:
                    Elev_Stats[elev].add_item(row[4])
    return Elev_Stats
def Ouput_Stats(FileName, Stats):
    """Write one CSV line per elevation: ``Elev,N,Mean,SD,Min,Max``.

    *Stats* is a 91-element sequence of accumulators exposing N(), Mean(),
    SD(), Min() and Max().  Mean and SD are formatted to one decimal.
    The file is now closed via ``with`` even if a write fails.  (The name
    keeps the original 'Ouput' typo to preserve existing call sites.)
    """
    with open(FileName, 'w') as StatsFile:
        for elev in range(91):
            s = Stats[elev]
            StatsFile.write("{0},{1},{2:0.1f},{3:0.1f},{4},{5}\n".format(
                elev, s.N(), s.Mean(), s.SD(), s.Min(), s.Max()))
# Driver: for every constellation/signal combination, read the raw *.SNR
# observations, accumulate per-elevation statistics, and write a *.MEAN
# file with one "Elev,N,Mean,SD,Min,Max" line per degree of elevation.
L1_Stats = Compute_Stats("GLONASS-L1-CA.SNR")
Ouput_Stats("GLONASS-L1-CA.MEAN",L1_Stats)
L1_P_Stats = Compute_Stats("GLONASS-L1-P.SNR")
Ouput_Stats("GLONASS-L1-P.MEAN",L1_P_Stats)
L2_CA_Stats = Compute_Stats("GLONASS-L2-CA.SNR")
Ouput_Stats("GLONASS-L2-CA.MEAN",L2_CA_Stats)
L2_P_Stats = Compute_Stats("GLONASS-L2-P.SNR")
Ouput_Stats("GLONASS-L2-P.MEAN",L2_P_Stats)
L1_Stats = Compute_Stats("GPS-L1-CA.SNR")
Ouput_Stats("GPS-L1-CA.MEAN",L1_Stats)
L2_E_Stats = Compute_Stats("GPS-L2-E.SNR")
Ouput_Stats("GPS-L2-E.MEAN",L2_E_Stats)
L2_CS_Stats = Compute_Stats("GPS-L2-CS.SNR")
Ouput_Stats("GPS-L2-CS.MEAN",L2_CS_Stats)
L5_IQ_Stats = Compute_Stats("GPS-L5-IQ.SNR")
Ouput_Stats("GPS-L5-IQ.MEAN",L5_IQ_Stats)
L1_Stats = Compute_Stats("SBAS-L1-CA.SNR")
Ouput_Stats("SBAS-L1-CA.MEAN",L1_Stats)
L5_I_Stats = Compute_Stats("SBAS-L5-I.SNR")
Ouput_Stats("SBAS-L5-I.MEAN",L5_I_Stats)
| jcmb/TrackingPlot | cgi-bin/SNR_STATS.py | SNR_STATS.py | py | 2,018 | python | en | code | 0 | github-code | 13 |
71812250258 |
from common import *
class Quiz:
    """Manage quiz-settings spreadsheets and their mirror in Firestore.

    Each quiz lives in a Google spreadsheet (created from a template) and
    is synchronised to the ``quiz``, ``quizList``, ``interviewerQuiz`` and
    ``questionList`` Firestore collections.
    """
    def __init__(self, gsheets, db):
        # gsheets: pygsheets client; db: Firestore client wrapper
        self.gsheets = gsheets
        self.db = db
def create(self, email):
try:
# S_1-1 連接模板
template_id = '1kFso7_L21vzRpeeHDgpl9HLAlP8SSVZ_vgpH_qQvS3I'
template_spreadsheet = self.gsheets.open_by_key(template_id)
# S_1-2 創立新的 spreadsheet
spreadsheet = self.gsheets.create('新建立之測驗設定檔(可自訂名稱)')
gsid = spreadsheet.id
# S_1-3 從模板複製到新創立的 spreadsheet
for i in range(5):
worksheet = template_spreadsheet.worksheet('index', i).copy_to(gsid)
worksheet.title = re.search(r'(?<=\s)\S+$', worksheet.title).group(0)
# S_1-4 刪除初始 worksheet
sheet1 = spreadsheet.worksheet_by_title('Sheet1')
spreadsheet.del_worksheet(sheet1)
# S_1-5 '更新此測驗設定' 連結
worksheet = spreadsheet.worksheet_by_title('說明')
update_url = f'{main_url}?action=update&on=quiz&gsid={gsid}'
worksheet.update_value('A3', f'=HYPERLINK("{update_url}", "更新此測驗設定")')
# S_1-6 '更新此測驗紀錄' 連結
update_result_url = f'{main_url}?action=update&on=quiz_result&gsid={gsid}'
worksheet.update_value('A4', f'=HYPERLINK("{update_result_url}", "更新此測驗紀錄")')
# S_1-7 '刪除此測驗設定' 連結
delete_url = f'{main_url}?action=delete&on=quiz&gsid={gsid}'
worksheet.update_value('A5', f'=HYPERLINK("{delete_url}", "刪除此測驗設定")')
# S_1-8 '刪除此測驗紀錄' 連結
delete_result_url = f'{main_url}?action=delete&on=quiz_result&gsid={gsid}'
worksheet.update_value('A6', f'=HYPERLINK("{delete_result_url}", "刪除此測驗紀錄")')
# S_1-9 設定分享權限
email_message = '新建立之測驗設定檔'
spreadsheet.share(email, 'writer', emailMessage=email_message)
# TODO 到時我的權限可拿掉
spreadsheet.share('yuncheng.dev@gmail.com', 'writer', emailMessage=email_message)
# NOTE 轉移所有權
# spreadsheet.share('yuncheng.dev@gmail.com', 'owner', transferOwnership=True)
except:
return '建立測驗失敗!'
return f'新建立之測驗設定檔連結已寄至信箱(可能會在垃圾郵件中....),或複製此連結進入:<br/><br/> {spreadsheet.url}'
def update(self, gsid):
try:
# S_1-1 連接 spreadsheet
spreadsheet = self.gsheets.open_by_key(gsid)
# S_1-2 提取資訊
# TODO 處理日期 https://api.dart.dev/stable/2.9.0/dart-core/DateTime/parse.html
quiz_info = spreadsheet.worksheet_by_title('測驗資訊') \
.get_values(start='C2', end='C4', include_all=True)
quiz_info_dict = {
'quizId': gsid,
'quizName': quiz_info[0][0],
'customProjectId': quiz_info[1][0],
'customUnitId': quiz_info[2][0]
}
# S_1-3 檢查輸入的內容是否符合格式
# S_1-3-1 檢查是否為空
for k, v in quiz_info_dict.items():
if not v:
return '測驗資訊不能為空!'
# S_1-3-2 檢查連結的單位 ID、專案 ID 是否存在
unit_query = self.db.collection('unit') \
.where('customUnitId', '==', quiz_info_dict['customUnitId'])
unit_dict = unit_query.query_to_dict(first=True)
if unit_dict:
quiz_info_dict['unitId'] = unit_dict['unitId']
unit_gsid = unit_dict['unitId']
quiz_info_dict.pop('customUnitId')
else:
return '找不到連結的單位 ID!'
project_query = self.db.collection('project') \
.where('customProjectId', '==', quiz_info_dict['customProjectId'])\
.where('unitId', '==', unit_gsid)
project_query_dict = project_query.query_to_dict(first=True)
if project_query_dict:
quiz_info_dict['projectId'] = project_query_dict['projectId']
project_gsid = project_query_dict['projectId']
quiz_info_dict.pop('customProjectId')
else:
return '找不到連結的專案 ID!'
# S_1-3-3 檢查是否為重複的測驗名稱
quiz_query = self.db.collection('quiz') \
.where('projectId', '==', project_gsid) \
.where('unitId', '==', unit_gsid)\
.where('quizName', '==', quiz_info_dict['quizName'])
quiz_query_dict = quiz_query.query_to_dict(first=True)
if quiz_query_dict:
return '同專案下,測驗名稱重複,請輸入其他名稱!'
# S_2 更新 Firestore
batch = self.db.batch()
# S_2-1 更新 Firestore: quiz/{quizId}
# TAG Firestore SET
# EXAMPLE
'''
quiz / {quizId} / {
quizId: '1kFso7_L21vzRpeeHDgpl9HLAlP8SSVZ_vgpH_qQvS3I',
quizName: '範例測驗',
projectId: '1u1NdL7ZND_E3hU1jS2SNhhDIluIuHrcHpG4W9XyUChQ',
unitId: '1VRGeK8m-w_ZCjg1SDQ74TZ7jpHsRiTiI3AcD54I5FC8'
}
'''
quiz_ref = self.db.document('quiz', gsid)
batch.set(quiz_ref, quiz_info_dict)
# S_2-2 更新 Firestore: quizList/{unitId}
# TAG Firestore UPDATE
# EXAMPLE
'''
quizList / {unitId} / {
{projectId}: {
unitId: '1VRGeK8m-w_ZCjg1SDQ74TZ7jpHsRiTiI3AcD54I5FC8',
projectId: '1u1NdL7ZND_E3hU1jS2SNhhDIluIuHrcHpG4W9XyUChQ',
quizList: {
{quizId}: {
quizId: '1kFso7_L21vzRpeeHDgpl9HLAlP8SSVZ_vgpH_qQvS3I',
quizName: '範例測驗'
}
}
}
}
'''
quiz_list_dict = {
'unitId': unit_gsid,
'projectId': project_gsid,
'quizList': {
gsid: {
'quizId': gsid,
'quizName': quiz_info_dict['quizName'],
'isFinished': False
}
}
}
quiz_list_ref = self.db.document('quizList', unit_gsid)
batch.set(quiz_list_ref, {
project_gsid: quiz_list_dict
}, merge=True)
# S_2-3 更新 Firestore: interviewerQuiz/{interviewerId_projectId}
# TAG Firestore UPDATE
# S_2-3-1 取得 interviewerList
interviewer_list_ref = self.db.document('interviewerList', unit_gsid)
interviewer_list_dict = interviewer_list_ref.doc_to_dict()
# S_2-3-2 迴圈 interviewerList
# TODO 刪除 interviewer 時的處理
for k, v in interviewer_list_dict.items():
# S_2-3-2-1 加入 interviewerId
quiz_list_dict['interviewerId'] = k
# S_2-3-2-2 取得舊資料,目的是提取測驗完成狀態
interviewer_quiz_ref = self.db.document('interviewerQuiz', f'{k}_{project_gsid}')
old_quiz_list_dict = interviewer_quiz_ref.doc_to_dict()
if old_quiz_list_dict:
is_finished = old_quiz_list_dict['quizList'][gsid]['isFinished']
quiz_list_dict['quizList'][gsid]['isFinished'] = is_finished
else:
quiz_list_dict['quizList'][gsid]['isFinished'] = False
batch.set(interviewer_quiz_ref, quiz_list_dict, merge=True)
# S_2-4 更新 Firestore: questionList/{quizId}
# TAG Firestore SET
# EXAMPLE
'''
questionList / {quizId} / {
{questionId}: {
questionId: '1',
questionBody: 'Question 1',
answer: 'O'
}
}
'''
question_list_df = get_worksheet_df(spreadsheet, worksheet_title='題庫', end='C')
question_list_dict = df_to_dict(question_list_df,
new_column_names=['questionId', 'questionBody', 'answer'],
index_column='questionId')
question_list_ref = self.db.document('questionList', gsid)
batch.set(question_list_ref, question_list_dict)
batch.commit()
except:
return '更新測驗設定失敗!'
return '更新測驗設定成功!'
def update_result(self, gsid, project_gsid, interviewer_id):
try:
# S_1 更新 Firestore: interviewerQuiz/{interviewerId_projectId}
# TAG Firestore UPDATE
# NOTE interviewerQuiz 該 interviewer isFinished 改 True
if project_gsid and interviewer_id:
interviewer_quiz_ref = self.db.document('interviewerQuiz', f'{interviewer_id}_{project_gsid}')
quiz_list_dict = interviewer_quiz_ref.doc_to_dict()
quiz_list_dict['quizList'][gsid]['isFinished'] = True
interviewer_quiz_ref.set(quiz_list_dict, merge=True)
# S_2 更新 spreadsheet
# S_2-1 連接 spreadsheet
spreadsheet = self.gsheets.open_by_key(gsid)
# S_2-2 query quiz_result 資料
quiz_result_query = self.db.collection('quizResult') \
.where('quizId', '==', gsid) \
.where('isFinished', '==', True)
quiz_result_dict = quiz_result_query.query_to_dict()
# S_2-3
if quiz_result_dict:
# S_2-3-1 資料處理
wide_dict = defaultdict(dict)
tw_tz = pytz.timezone('Asia/Taipei') # NOTE 設定時區
for key, value in quiz_result_dict.items():
wide_dict[key]['reply_id'] = key
wide_dict[key]['interviewer_id'] = value['interviewer']['id']
wide_dict[key]['interviewer_name'] = value['interviewer']['name']
wide_dict[key]['total_right_score'] = value['score']['right']
wide_dict[key]['total_wrong_score'] = value['score']['wrong']
wide_dict[key]['upload_timestamp'] = value['serverTimeStamp'].astimezone(tw_tz).replace(tzinfo=None)
for question_id, score in value['scoreHistory']['scoreHistory'].items():
wide_dict[key][f'question_id_{question_id}'] = score
wide_df = pd.DataFrame.from_dict(wide_dict, orient='index')
id_cols = ['reply_id', 'interviewer_id', 'interviewer_name',
'total_right_score', 'total_wrong_score', 'upload_timestamp']
long_df = wide_df.melt(id_vars=id_cols, var_name='question_id', value_name='score')
long_df = long_df[long_df.score.notnull()]
long_df['score'] = long_df.score.astype(int)
long_df['question_id'] = long_df.question_id.str.replace('question_id_', '')
long_df = long_df.sort_values(by=['upload_timestamp', 'question_id'])
wide_df = long_df.copy()
wide_df['question_id'] = 'question_id_' + long_df.question_id
wide_df = wide_df.pivot_table(index=id_cols, columns='question_id', values='score').reset_index()
# S_2-3-2 寫入 spreadsheet
long_sheet = spreadsheet.worksheet_by_title('測驗紀錄_long')
wide_sheet = spreadsheet.worksheet_by_title('測驗紀錄_wide')
long_sheet.clear()
long_sheet.set_dataframe(long_df, 'A1', nan='')
wide_sheet.clear()
wide_sheet.set_dataframe(wide_df, 'A1', nan='')
except:
return '更新測驗紀錄失敗!'
return '更新測驗紀錄成功!'
def delete(self, gsid):
try:
# S_1 刪除 Firestore
batch = self.db.batch()
# S_1-1 刪除 Firestore: quiz/{quizId}
# TAG Firestore DELETE
quiz_ref = self.db.document('quiz', gsid)
quiz_dict = quiz_ref.doc_to_dict()
unit_gsid = quiz_dict['unitId']
project_gsid = quiz_dict['projectId']
batch.delete(quiz_ref)
# S_1-2 刪除 Firestore: quizList/{unitId}
# TAG Firestore UPDATE
quiz_list_ref = self.db.document('quizList', unit_gsid)
batch.set(quiz_list_ref, {
project_gsid: {
'quizList': {
gsid: firestore.DELETE_FIELD
}
}
}, merge=True)
# S_1-3 刪除 Firestore: interviewerQuiz/{interviewerId_projectId}
# TAG Firestore UPDATE
# NOTE 因為 gsid 有可能是數字開頭,所以必須要加上 ``
interviewer_quiz_docs = self.db.collection('interviewerQuiz') \
.where(f'quizList.`{gsid}`.quizId', '==', gsid) \
.stream()
for doc in interviewer_quiz_docs:
doc_dict = doc.to_dict()
doc_dict['quizList'][gsid] = firestore.DELETE_FIELD
batch.set(doc.reference, doc_dict, merge=True)
# S_1-4 刪除 Firestore: questionList/{quizId}
# TAG Firestore DELETE
question_list_ref = self.db.document('questionList', gsid)
batch.delete(question_list_ref)
batch.commit()
except:
return '刪除測驗設定失敗!'
return '刪除測驗設定成功!'
def delete_result(self, gsid):
try:
# S_1 更新 Firestore: interviewerQuiz/{interviewerId_projectId}
# TAG Firestore UPDATE
batch = self.db.batch()
interviewer_quiz_docs = self.db.collection('interviewerQuiz') \
.where(f'quizList.`{gsid}`.quizId', '==', gsid) \
.where(f'quizList.`{gsid}`.isFinished', '==', True) \
.stream()
for doc in interviewer_quiz_docs:
doc_dict = doc.to_dict()
doc_dict['quizList'][gsid]['isFinished'] = False
batch.set(doc.reference, doc_dict, merge=True)
# S_1-4 刪除 Firestore: quizResult/{replyId}
# TAG Firestore DELETE
quiz_result_docs = self.db.collection('quizResult') \
.where('quizId', '==', gsid) \
.stream()
for doc in quiz_result_docs:
batch.delete(doc.reference)
batch.commit()
# S_2 清空 spreadsheet
spreadsheet = self.gsheets.open_by_key(gsid)
long_sheet = spreadsheet.worksheet_by_title('測驗紀錄_long')
wide_sheet = spreadsheet.worksheet_by_title('測驗紀錄_wide')
long_sheet.clear()
wide_sheet.clear()
except:
return '刪除測驗設定失敗!'
return '刪除測驗設定成功!'
| yun-cheng/interviewer-quiz-backend | quiz.py | quiz.py | py | 15,577 | python | en | code | 0 | github-code | 13 |
72093832019 | import random
from enum import Enum
class ColoursPalete(object):
    """A cycling palette of hex colour strings graded around one RGB anchor.

    The palette is computed once in ``__init__``; ``get_next_colour``
    returns the colours round-robin.  (The class name keeps the original
    'Palete' spelling so existing callers keep working.)
    """
    def __init__(self, amount, rgb_anchor):
        """Build *amount* colours whose dominant channel is *rgb_anchor*."""
        #self.__colours = ['#7D3C98','#70C742','#C74278','#8CEE6D','#01DFD7','#FACC2E','#A9A9F5']
        self.__colours = self.__generate_colours(amount, rgb_anchor)
        self.__index = 0

    class RGBAnchor(Enum):
        """Which channel stays dominant while the others fade."""
        RED = 1
        BLUE = 2
        GREEN = 3

    class Colour(object):
        """An RGB triple ordered by the sum of its channels (brightness)."""
        def __init__(self, red, blue, green):
            self.red = red
            self.blue = blue
            self.green = green

        def composite(self):
            """Channel sum, used as the ordering/equality key."""
            return self.red + self.blue + self.green

        def __gt__(self, other):
            return self.composite() > other.composite()

        def __lt__(self, other):
            return self.composite() < other.composite()

        def __eq__(self, other):
            return self.composite() == other.composite()

        def __hash__(self):
            # Fix: defining __eq__ without __hash__ made Colour unhashable;
            # hash by the same key equality uses.
            return hash(self.composite())

    def get_next_colour(self):
        """Return the next hex colour string, wrapping around at the end."""
        colour = self.__colours[self.__index]
        self.__index += 1
        if self.__index >= len(self.__colours):
            self.__index = 0
        return colour

    def __rgb2hex(self, r, g, b):
        """Format three 0-255 channel values as '#rrggbb'."""
        return "#{:02x}{:02x}{:02x}".format(r, g, b)

    def __generate_colours(self, amount, rgb_anchor):
        """Build the palette: 100 graded colours, trimmed to the 80 least
        washed-out ones, sampled down to *amount* and sorted by brightness.
        """
        colours = self.__generate_graded_colours(100, rgb_anchor)
        colours = colours[20:100]  # drop the 20 brightest/whitest entries
        colours = self.__get_graded_sample(amount, colours)
        colours.sort()
        hex_versions = []
        for colour in colours:
            # NOTE(review): the argument order (red, blue, green) vs the
            # (r, g, b) parameters looks swapped, but it cancels the swap in
            # __convert_to_colour, so the emitted hex values are correct.
            hex_versions.append(str(self.__rgb2hex(colour.red, colour.blue, colour.green)))
        print(hex_versions)
        return hex_versions

    def __get_random_sample(self, amount, colours):
        """Pick *amount* colours at random (with replacement). Unused by the
        default pipeline; kept for experimentation."""
        filtered_colours = []
        for i in range(1, amount + 1):
            index = int(random.random() * len(colours))
            filtered_colours.append(colours[index])
        return filtered_colours

    def __get_graded_sample(self, amount, colours):
        """Pick *amount* colours evenly spaced across the list."""
        filtered_colours = []
        if amount == 0:
            amount = 1  # guard against a zero interval below
        interval = int(len(colours) / amount)
        for i in range(0, len(colours), interval):
            filtered_colours.append(colours[i])
        return filtered_colours

    def __generate_graded_colours(self, amount, rgb_anchor):
        """Fade the two non-anchor channels (then the anchor) from 255
        towards 0 in *amount* steps, yielding a brightness gradient."""
        interval = (255 - 23) / amount
        colours = []
        colour_one = 255    # anchor channel
        colour_two = 255    # first secondary channel
        colour_three = 255  # second secondary channel
        for i in range(1, amount + 1):
            colours.append(self.__convert_to_colour(rgb_anchor, colour_one, colour_two, colour_three))
            colour_two = int(colour_two - (interval * 2))
            if colour_two < 0:
                colour_two = 0
            colour_three = int(colour_three - (interval * 2))
            if colour_three < 0:
                colour_three = 0
            # once the secondaries bottom out, start fading the anchor too
            if colour_two == 0 or colour_three == 0:
                colour_one = int(colour_one - (interval * 2))
                if colour_one < 0:
                    colour_one = 0
        return colours

    def __convert_to_colour(self, rgb_anchor, colour_one, colour_two, colour_three):
        """Assign the anchor value to its channel and build a Colour.

        NOTE(review): ``Colour(r, g, b)`` feeds g into Colour's ``blue``
        slot — this swap is cancelled by the matching swap in
        __generate_colours, so the net channel order is correct.
        """
        if rgb_anchor == ColoursPalete.RGBAnchor.RED:
            r = colour_one
            g = colour_two
            b = colour_three
        elif rgb_anchor == ColoursPalete.RGBAnchor.GREEN:
            g = colour_one
            r = colour_two
            b = colour_three
        elif rgb_anchor == ColoursPalete.RGBAnchor.BLUE:
            b = colour_one
            r = colour_two
            g = colour_three
        return ColoursPalete.Colour(r, g, b)

    def __generate_random_colours(self, n):
        """Unused and incomplete: despite the name it returns only a single
        random Colour (the loop's last step). Kept only to avoid surprising
        any external experimentation; do not rely on it."""
        r = int(random.random() * 256)
        g = int(random.random() * 256)
        b = int(random.random() * 256)
        step = 256 / n
        for i in range(n):
            r += step
            g += step
            b += step
            r = int(r) % 256
            g = int(g) % 256
            b = int(b) % 256
        return ColoursPalete.Colour(r, g, b)
| JamesScanlan/graph_monkey | colours_palete.py | colours_palete.py | py | 4,230 | python | en | code | 0 | github-code | 13 |
"""Example script, copy of the quickstart in the documentation."""
import nawrapper as nw
import numpy as np
import matplotlib.pyplot as plt
from pixell import enmap

# map information: 1024x1024 CAR geometry at 0.5 arcmin resolution
shape, wcs = enmap.geometry(shape=(1024, 1024),
                            res=np.deg2rad(0.5/60.), pos=(0, 0))

# create power spectrum information: a steep power law, monopole/dipole zeroed
ells = np.arange(0, 6000, 1)
ps = np.zeros(len(ells))
ps[2:] = 1/ells[2:]**2.5 # don't want monopole/dipole

# generate a realization of that spectrum
imap = enmap.rand_map(shape, wcs, ps[np.newaxis, np.newaxis])
# plt.imshow(imap)

# punch random point-source holes into an all-ones mask
mask = enmap.ones(imap.shape, imap.wcs)
N_point_sources = 50
for i in range(N_point_sources):
    mask[
        np.random.randint(low=0, high=mask.shape[0]),
        np.random.randint(low=0, high=mask.shape[1])] = 0
# apodize the pixels to make fake sources
point_source_map = 1-nw.apod_C2(mask, 0.1)
imap += point_source_map # add our sources to the map
mask = nw.apod_C2(mask, 0.5) # apodize the mask

# # plot our cool results
# fig, axes = plt.subplots(1, 2, figsize=(8,16))
# axes[0].imshow(imap)
# axes[1].imshow(mask)

# two independent white-noise realizations simulate a split pair
ells = np.arange(0, len(ps), 1)
nl = np.ones(len(ells)) * 1e-8
noise_map_1 = enmap.rand_map(shape, wcs, nl[np.newaxis, np.newaxis])
noise_map_2 = enmap.rand_map(shape, wcs, nl[np.newaxis, np.newaxis])

# plt.plot(ps, label="ps")
# plt.plot(nl, label="noise")
# plt.yscale('log')
# plt.legend()

# wrap each (signal + noise) split with its mask, compute the cross-spectrum
namap_1 = nw.namap_car(maps=(imap + noise_map_1, None, None), masks=mask)
namap_2 = nw.namap_car(maps=(imap + noise_map_2, None, None), masks=mask)
binfile = '../notebooks/data/BIN_ACTPOL_50_4_SC_low_ell'
bins = nw.read_bins(binfile)
mc = nw.mode_coupling(namap_1, namap_2, bins)
Cb = nw.compute_spectra(namap_1, namap_2, mc=mc)

# compare the recovered binned spectrum against the input power law
plt.plot(ps, 'k-', label='input')
plt.plot(Cb['ell'], Cb['TT'], 'r.', label='computed')
plt.legend()
plt.yscale('log')
| xzackli/nawrapper | examples/quickstart.py | quickstart.py | py | 1,832 | python | en | code | 1 | github-code | 13 |
39776174612 |
import stat
import textwrap
import pytest
import troika
from troika.config import Config
from troika.connections.local import LocalConnection
from troika.controllers.base import Controller
from troika.site import get_site
from troika.sites import pbs
@pytest.fixture
def dummy_pbs_conf(tmp_path):
    """Minimal PBS site configuration shared by the tests below."""
    return {
        "type": "pbs",
        "connection": "local",
        "preprocess": ["remove_top_blank_lines", "pbs_add_output", "pbs_bubble"]
    }
def test_get_site(dummy_pbs_conf):
    """get_site should build a PBSSite from a 'pbs'-type configuration."""
    global_config = Config({"sites": {"foo": dummy_pbs_conf}})
    site = get_site(global_config, "foo", "user")
    assert isinstance(site, pbs.PBSSite)
@pytest.fixture
def dummy_pbs_site(dummy_pbs_conf):
    """A PBSSite wired to a local connection (no real PBS scheduler needed)."""
    conn = LocalConnection(dummy_pbs_conf, "user")
    return pbs.PBSSite(dummy_pbs_conf, conn, Config({}))
def test_invalid_script(dummy_pbs_site, tmp_path):
    """Submitting a path to a nonexistent script must raise InvocationError."""
    script = tmp_path / "dummy_script.sh"
    with pytest.raises(troika.InvocationError):
        dummy_pbs_site.submit(script, "user", "output", dryrun=False)
@pytest.fixture
def sample_script(tmp_path):
    """Write a small executable shell script and return its path."""
    script_path = tmp_path / "script.sh"
    script_path.write_text(textwrap.dedent("""\
        #!/usr/bin/env bash
        echo "Script called!"
        """))
    # make the script executable for user, group and others
    script_path.chmod(script_path.stat().st_mode
        | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    return script_path
@pytest.fixture
def dummy_controller(dummy_pbs_site):
    """A bare Controller whose site is the dummy PBS site."""
    controller = Controller(Config({}), None, None)
    controller.site = dummy_pbs_site
    return controller
# Each (sin, sexp) pair is an input script and the text the preprocessing
# pipeline (remove_top_blank_lines, pbs_add_output, pbs_bubble) should
# produce; @OUTPUT@ stands for the resolved output-file path.
@pytest.mark.parametrize("sin, sexp", [
    pytest.param(
        """\
        #!/usr/bin/env bash
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        echo "Hello, World!"
        """,
        id="add_output"),
    pytest.param(
        """\
        #PBS -q test
        set +x
        #PBS -N hello
        echo "Hello, World!"
        """,
        """\
        #PBS -o @OUTPUT@
        #PBS -q test
        #PBS -N hello
        set +x
        echo "Hello, World!"
        """,
        id="bubble"),
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -q test
        set +x
        #PBS -N hello
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -q test
        #PBS -N hello
        set +x
        echo "Hello, World!"
        """,
        id="bubble_shebang"),
    pytest.param(
        """\
        #PBS -q test
        #!/usr/bin/env bash
        set +x
        #PBS -N hello
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -q test
        #PBS -N hello
        set +x
        echo "Hello, World!"
        """,
        id="bubble_shebang_blank"),
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -N hello
        #PBS -e foo
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -N hello
        echo "Hello, World!"
        """,
        id="drop_error"),
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -N hello
        #PBS -j n
        #PBS -e foo
        #PBS -o bar
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -N hello
        echo "Hello, World!"
        """,
        id="drop_join"),
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -N hello
        #PBS -o foo
        echo "Hello, World!"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -N hello
        echo "Hello, World!"
        """,
        id="drop_output"),
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -N hello
        echo "\xfc\xaa"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -N hello
        echo "\xfc\xaa"
        """,
        id="invalid_utf8"),
    ])
def test_preprocess(sin, sexp, dummy_controller, tmp_path):
    """Preprocessing rewrites the script in place to the expected text and
    keeps the unmodified original next to it as script.sh.orig."""
    script = tmp_path / "script.sh"
    orig_script = tmp_path / "script.sh.orig"
    output = tmp_path / "output.log"
    sin = textwrap.dedent(sin)
    script.write_text(sin)
    sexp = textwrap.dedent(sexp).replace("@OUTPUT@", str(output.resolve()))
    dummy_controller.parse_script(script)
    pp_script = dummy_controller.generate_script(script, "user", output)
    assert pp_script == script
    assert pp_script.read_text() == sexp
    assert orig_script.exists()
    assert orig_script.read_text() == sin
# Binary variant: @GARBAGE@ is replaced by raw non-UTF-8 bytes so the
# preprocessor is exercised on content that cannot be decoded as text.
@pytest.mark.parametrize("sin, sexp, garbage", [
    pytest.param(
        """\
        #!/usr/bin/env bash
        #PBS -N hello
        echo "@GARBAGE@"
        """,
        """\
        #!/usr/bin/env bash
        #PBS -o @OUTPUT@
        #PBS -N hello
        echo "@GARBAGE@"
        """,
        b"\xfc\xaa",
        id="invalid_utf8"),
])
def test_preprocess_bin(sin, sexp, garbage, dummy_controller, tmp_path):
    """Preprocessing must work byte-wise on scripts with undecodable bytes."""
    script = tmp_path / "script.sh"
    orig_script = tmp_path / "script.sh.orig"
    output = tmp_path / "output.log"
    sin = textwrap.dedent(sin).encode('utf-8').replace(b"@GARBAGE@", garbage)
    script.write_bytes(sin)
    sexp = textwrap.dedent(sexp).replace("@OUTPUT@", str(output.resolve()))
    sexp = sexp.encode('utf-8').replace(b"@GARBAGE@", garbage)
    dummy_controller.parse_script(script)
    pp_script = dummy_controller.generate_script(script, "user", output)
    assert pp_script == script
    assert pp_script.read_bytes() == sexp
    assert orig_script.exists()
    assert orig_script.read_bytes() == sin
def test_submit_dryrun(dummy_pbs_site, sample_script, tmp_path):
    """A dry-run submission must not spawn a process or create output."""
    output = tmp_path / "output.log"
    proc = dummy_pbs_site.submit(sample_script, "user", output, dryrun=True)
    assert proc is None
    assert not output.exists()
# The output argument is accepted as pathlib.Path, str or bytes.
@pytest.mark.parametrize("path_type", [
    pytest.param((lambda x: x), id="path"),
    pytest.param(str, id="str"),
    pytest.param(bytes, id="bytes"),
])
def test_output_path_type(path_type, dummy_controller, sample_script, tmp_path):
    """generate_script must cope with every supported output-path type."""
    output = path_type(tmp_path / "output.log")
    dummy_controller.parse_script(sample_script)
    pp_script = dummy_controller.generate_script(sample_script, "user", output)
    assert pp_script == sample_script
| ecmwf/troika | tests/unit/sites/test_pbs.py | test_pbs.py | py | 6,372 | python | en | code | 11 | github-code | 13 |
39762138192 | from datetime import datetime
from typing import Dict, List
from .. import logger, user_config
from ..authentication.auth import Auth
from ..custom_exceptions import EventListenerException
from ..engine import engine_factory as ef
from . import event_listener_factory as elf
from .event_listener import EventListener
class ListenerManager:
    """
    Manages the lifecycle (start, stop, removal) of the event listeners.
    """

    def __init__(self):
        # Listeners currently owned by this manager (running or not).
        self._listeners: List[EventListener] = []

    @property
    def listeners(self) -> List[EventListener]:
        """The listeners currently managed."""
        return self._listeners

    def _run_listeners(self) -> bool:
        """
        Execute all the listeners currently managed; listeners that fail to
        start are dropped from the managed list.

        :return: True if all the listeners are in execution, False otherwise
        """
        logger.debug("Calling run all listeners...")
        result = True
        listener_to_remove: List[EventListener] = []
        for listener in self._listeners:
            # Execute the listener
            if not listener.listen():
                result = False
                listener_to_remove.append(listener)
            else:
                keys = ",".join(listener.keys)
                logger.info(f"Listening to {keys} at {listener.engine.host}:{listener.engine.port}...")
        # now remove all of the listeners that were not able to start
        # (done after the loop so the list is not mutated while iterating)
        for listener in listener_to_remove:
            self._listeners.remove(listener)
        return result

    def _add_listener(self, listener: EventListener) -> None:
        """
        Add a listener to the internal listener list of the manager
        :param listener: EventListener object
        """
        self._listeners.append(listener)

    def _add_listeners(self, listeners: List[EventListener]) -> None:
        """
        Add the listener list to the internal listener list of the manager
        :param listeners: EventListener list
        """
        for listener in listeners:
            self._add_listener(listener)

    def _stop_listener(self, listener: EventListener) -> bool:
        """
        Stop the execution of listener passed as argument.
        :param listener: EventListener object
        :return: True if stopped, False otherwise
        """
        try:
            logger.debug(f"Calling stop {listener}...")
            # Stop the listener
            return listener.stop()
        except ValueError as error:
            # A failing stop is logged but never propagated.
            logger.error(f"Error in stopping listener, exception message: {error}")
            logger.debug("", exc_info=True)
            return False

    def _cancel_listener(self, listener: EventListener) -> None:
        """
        Stop and delete the listener passed as argument
        :param listener: EventListener object
        """
        # first stop listener
        self._stop_listener(listener)
        # now remove it from the list
        self._listeners.remove(listener)

    def cancel_listeners(self) -> None:
        """
        Stop the execution of any listener currently in execution and clear
        the managed list.
        """
        # first cancel all the notification listeners
        for listener in self._listeners:
            self._stop_listener(listener)
        # now remove all of them from the internal list
        self._listeners.clear()

    def listen(
        self,
        listeners: List[Dict[str, any]],
        listener_schema: Dict[str, any],
        config: user_config.UserConfig = None,
        from_date: datetime = None,
        to_date: datetime = None,
    ) -> int:
        """
        This method implements the main workflow to instantiate and execute new listeners
        :param listeners: listeners as list of dictionaries
        :param listener_schema: schema to use to validate the listeners
        :param config: UserConfig object; a default UserConfig is built when None
        :param from_date: date from when to request notifications, if None it will be from now
        :param to_date: date until when to request notifications, if None it will be until now
        :return: number of listeners running
        :raises EventListenerException: if a listener dict cannot be parsed or
            no listener could be started at all
        """
        logger.debug("Calling listen in ListenerManager...")
        # first check the config
        if config is None:
            config = user_config.UserConfig()

        # Create the engine and listener factories
        engine_factory: ef.EngineFactory = ef.EngineFactory(config.notification_engine, Auth.get_auth(config))
        listener_factory: elf.EventListenerFactory = elf.EventListenerFactory(engine_factory, listener_schema)

        # read the payload key from the schema
        payload_key = listener_schema.get("payload")

        # Parse notification listeners
        event_listeners: List[EventListener] = []
        for ls in listeners:
            logger.debug(f"Reading listeners {ls}")
            try:
                for ev_listener in listener_factory.create_listeners(ls, from_date, to_date, payload_key):
                    event_listeners.append(ev_listener)
                logger.debug("Listener dictionary correctly parsed")
            except Exception as e:
                raise EventListenerException(f"Not able to load listener dictionary {ls}: {e}")

        # Add the listeners to the manager and run them
        logger.debug("Starting listeners...")
        self._add_listeners(event_listeners)
        if not self._run_listeners():
            # Fail hard only when nothing started; a partial start is logged.
            if len(self.listeners) == 0:
                raise EventListenerException("Listeners could not start, please check logs")
            else:
                logger.error("One or more listeners were not able to start")

        # return the number of listeners running
        return len(self.listeners)
| ecmwf/aviso | pyaviso/event_listeners/listener_manager.py | listener_manager.py | py | 5,714 | python | en | code | 9 | github-code | 13 |
34537660538 | from app import app
from app import q
from app.tasks import imageEmailAndCreate
from flask import render_template, request, redirect, url_for
import os
from werkzeug.utils import secure_filename
import random
import string
from rq import Retry
import pickle
import datetime
from PIL import Image
from app.imageto3dWrapper import imageTo3d
def randomString(stringLength=8):
    """Return a random string of *stringLength* lowercase ASCII letters."""
    return ''.join(random.choices(string.ascii_lowercase, k=stringLength))
def png2jpg(filename):
    """Convert the image at *filename* to JPEG and return the new file path.

    The JPEG is written next to the original, with the same stem and a
    ``.jpg`` extension; the original file is left in place.
    """
    im = Image.open("{}".format(filename))
    # JPEG has no alpha channel, so force an RGB conversion first.
    rgb_im = im.convert("RGB")
    # Despite the name, this is the original path without its extension.
    imgFolderPath, _ = os.path.splitext(filename)
    fullImgPath = "{}.jpg".format(imgFolderPath)
    rgb_im.save(fullImgPath)
    return fullImgPath
# Base directory where each upload gets its own randomly named folder.
app.config["IMAGE_UPLOADS"] = "./model/image"

@app.route("/", methods=["GET", "POST"])
def index():
    """Upload form handler: store the image, pickle the job metadata and
    enqueue the 3D-conversion task, then redirect to the status page."""
    if request.method == "POST":
        if request.files:
            effectType = request.form['radioEffect']
            email = request.form['email']
            image = request.files["images"]
            if image.filename != "":
                # Sanitize the client-supplied filename before using it.
                filename = secure_filename(image.filename)
                _, file_extension = os.path.splitext(filename)
                convertToJpg = False
                if file_extension.lower() == '.png' or file_extension.lower() == '.jpeg':
                    convertToJpg = True
                # The sanitized name is discarded: a random name avoids
                # collisions and leaking the original filename.
                filename = str(randomString(12))
                imgFolderPath = "{}/{}".format(app.config["IMAGE_UPLOADS"], filename)
                imgFullPath = os.path.join(imgFolderPath, filename+file_extension)
                if(os.path.isfile(imgFullPath)):
                    # Extremely unlikely name collision: keep regenerating
                    # until an unused name is found.
                    while(os.path.isfile(imgFullPath)):
                        filename = str(randomString(12))
                        imgFullPath = os.path.join(imgFolderPath, filename+file_extension)
                    os.mkdir(imgFolderPath)
                    image.save(os.path.join(imgFolderPath, filename+file_extension))
                    if convertToJpg:
                        temp = png2jpg(imgFullPath)
                        imgFullPath = temp
                else:
                    # Both branches create the folder, save the upload and
                    # optionally convert it to JPEG.
                    os.mkdir(imgFolderPath)
                    image.save(os.path.join(imgFolderPath, filename+file_extension))
                    if convertToJpg:
                        temp = png2jpg(imgFullPath)
                        imgFullPath = temp
                # Rough ETA: 15 minutes per job already queued, plus this one.
                waitTime = str(datetime.timedelta(seconds=((len(q) + 1) * 900)))
                imageOBJ = imageTo3d(filename, effectType, email,waitTime)
                with open('./model/pickles/{}.obj'.format(filename), 'wb') as handle:
                    pickle.dump(imageOBJ, handle, protocol=pickle.HIGHEST_PROTOCOL)
                # Enqueue the conversion/email task on the RQ worker queue.
                jobs = q.jobs
                url = request.args.get("url")
                task = q.enqueue(imageEmailAndCreate, imageOBJ, job_timeout=4620, retry=Retry(max=2))
                jobs = q.jobs
                q_len = len(q)
                return redirect("/email/{}".format(filename))
            else:
                return redirect("/")
    return render_template("public/upload_image.html")
@app.errorhandler(404)
def page_not_found(e):
    """Redirect any unknown URL back to the upload page."""
    return redirect("/")
@app.route("/email/<filename>", methods=["GET"])
def email(filename):
    """Status page: locate the pickled job for *filename* and show its
    notification email address and estimated wait time."""
    print(os.getcwd())
    notFound = True
    imgFileNameMatch = ""
    # Match against the known pickle files rather than trusting the raw URL
    # segment as a path; an unknown id falls back to the upload page.
    for i in os.listdir('./model/pickles'):
        if filename in os.path.splitext(i)[0]:
            print(i)
            imgFileNameMatch = i
            notFound = False
            continue
    if(notFound):
        return redirect("/")
    else:
        filename = imgFileNameMatch
    # NOTE(review): pickle.load is only safe because these files are written
    # by this app itself; never point this at user-supplied pickles.
    with open('./model/pickles/{}'.format(filename), 'rb') as handle:
        imageOBJ = pickle.load(handle)
    return render_template("public/postSubmit.html", Email=imageOBJ.email,waitTime=imageOBJ.waitTime)
| howard56k/ImageTo3d | app/views.py | views.py | py | 4,142 | python | en | code | 1 | github-code | 13 |
9071890720 | from collections import deque
import sys
# Redirect stdin to the sample input file so input() reads from it.
sys.stdin = open("in_out/chapter6/in5.txt", "rt")

n, m = map(int, input().split())
patients = list(map(int, input().split()))
# Keep each patient's original position so patient m can be recognised.
patients = deque([(i, idx) for idx, i in enumerate(patients)])
res = 0
while patients:
    # Treat the front patient only if no one waiting has a higher priority;
    # max(..., key=x[0]) returns an element of the deque, so equality holds
    # exactly when the front entry is (one of) the maximum-priority entries.
    if patients[0] == max(patients, key=lambda x: x[0]):
        res += 1
        if patients[0][1] == m:
            break
        patients.popleft()
    else:
        # Otherwise the front patient goes to the back of the queue.
        patients.append(patients.popleft())
print(res)
| mins1031/coding-test | section5/Chapter6.py | Chapter6.py | py | 474 | python | en | code | 0 | github-code | 13 |
17059781164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ShopRating(object):
    """Shop rating value object with optional lower/upper bounds."""

    # Field names shared by the (de)serialization helpers below.
    _FIELDS = ('lower_bound', 'upper_bound', 'value')

    def __init__(self):
        self._lower_bound = None
        self._upper_bound = None
        self._value = None

    @property
    def lower_bound(self):
        return self._lower_bound

    @lower_bound.setter
    def lower_bound(self, value):
        self._lower_bound = value

    @property
    def upper_bound(self):
        return self._upper_bound

    @upper_bound.setter
    def upper_bound(self, value):
        self._upper_bound = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict.

        Nested objects exposing ``to_alipay_dict`` are serialized
        recursively; falsy fields (None, 0, "" ...) are omitted.
        """
        params = dict()
        for key in ShopRating._FIELDS:
            field = getattr(self, key)
            if field:
                if hasattr(field, 'to_alipay_dict'):
                    params[key] = field.to_alipay_dict()
                else:
                    params[key] = field
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ShopRating from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = ShopRating()
        for key in ShopRating._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ShopRating.py | ShopRating.py | py | 1,830 | python | en | code | 241 | github-code | 13 |
class user:
    """Vehicle-style profile holding a seat count and a fuel type."""

    def __init__(self, seats, fuel):
        """Announce the creation and record seats and fuel."""
        print("new user being created...")
        self.seat = seats
        self.fuel = fuel

    def race_mode(self):
        """Switch to race configuration: seating is reduced to two."""
        self.seat = 2
# Demo: create a user, print its seat count, then switch to race mode.
user1 = user(3,"petrol")
#user1.user_name = 'loki'
print(user1.seat)
# race_mode() returns None, so this line prints "None" after mutating seat.
print(user1.race_mode())
73968235856 | # -*- coding: utf-8 -*-
"""
@author: robin
"""
import ML_module as ML
# Load data
import pandas as pd
# Load the Sign Language MNIST train/validation CSVs.
train_df = pd.read_csv('asl_data/sign_mnist_train.csv')
valid_df = pd.read_csv('asl_data/sign_mnist_test.csv')

# Split labels from pixel features for both sets.
y_train = train_df['label'].values
y_valid = valid_df['label'].values
del train_df['label']
del valid_df['label']
x_train = train_df.values
x_valid = valid_df.values

MLobj=ML.Classification(x_train,y_train,x_valid,y_valid)

# Explore data
MLobj.check_data()

# Data preparation for training (keep images unflattened, normalise pixels)
MLobj.data_preparation(flaten=False,normalise=True)

# One-hot encode the targets (25 classes; note the 'binarymartix' spelling
# is the key expected by ML_module).
num_categories =25
MLobj.target_encoding(num_categories,encoding='binarymartix')

#################################################
# Build a simple fully connected classifier.
from tensorflow.keras.models import Sequential

x_train=MLobj.x_train
y_train=MLobj.y_train
x_valid=MLobj.x_valid
y_valid=MLobj.y_valid

print("Creating model")
model = Sequential()

# Input layer
from tensorflow.keras.layers import Dense
model.add(Dense(units=512, activation='relu', input_shape=(x_train.shape[1],)))

# Hidden layer
model.add(Dense(units = 512, activation='relu'))

# Output layer (softmax over the 25 categories)
model.add(Dense(units = num_categories, activation='softmax'))

# Model summary
model.summary()

# Model compiling
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])

# Training
nb_epochs=20
history = model.fit(
    x_train, y_train, epochs=nb_epochs, verbose=1, validation_data=(x_valid, y_valid)
)

# Convert accuracies to percentages for plotting.
acc = [element * 100 for element in history.history['accuracy']]
val_acc = [element * 100 for element in history.history['val_accuracy']]
loss = history.history['loss']
val_loss = history.history['val_loss']

#################################################
# Plot accuracy and loss curves.
MLobj.plot_acc_and_loss(acc,val_acc,loss,val_loss)
| rsebastian91/CategoricalClassification | categorical_asl.py | categorical_asl.py | py | 1,834 | python | en | code | 0 | github-code | 13 |
3958090244 | import numpy as np
import grid
from Tkinter import *
from graphics import color_rgb
import setup
# Cell value -> RGB color used to paint each grid square (-1 = mine/black).
colors = { -1:[0,0,0], 0:[215,255,215] , 1:[135, 206, 235], 2:[0, 128, 0] , 3:[255, 0, 0], 4:[128, 0, 128],
           5:[128, 0, 0], 6 :[64, 224, 208], 7:[255, 192, 203] , 8:[128, 128, 128], 9:[255,255,255]}
'''
n =10
dimension = n
number_of_mines = 20
'''
def matrix_gui(n,val):
    """Render the n*n value matrix *val* as a colored grid window.

    Each cell is painted with the color mapped in ``colors``; the window
    stays open until a mouse click, then closes.
    """
    # buildmaze returns the window plus one rectangle per cell.
    w, gui = grid.buildmaze(n)
    gui = np.array(gui)
    gui = gui.reshape((n,n))
    w.setBackground('white')
    val = np.array(val)
    print(val)
    print(val[0][0])
    for i in range(0,n):
        for j in range( 0 , n):
            # Look up the RGB triple for this cell's value.
            c = colors[val[i][j]]
            gridnum = val[i][j]
            gui[i][j].setFill(color_rgb(c[0],c[1],c[2]))
            gui[i][j].draw(w)
    # Block until the user clicks, then close the window.
    w.getMouse()
    w.close()
#matrix_gui(10,setup.setup(10,15))
| adarshgogineni/MineSweeper | matrixgui.py | matrixgui.py | py | 1,214 | python | en | code | 0 | github-code | 13 |
4002314037 | import os
import numpy as np
import tensorflow as tf
def save_model(model, save_dir):
    """Persist *model* to *save_dir* in TensorFlow SavedModel format."""
    # Create the target directory if it does not exist yet.
    os.makedirs(save_dir, exist_ok=True)

    # Export the model in the SavedModel format.
    tf.saved_model.save(model, save_dir)
if __name__ == "__main__":
    # Example training data and labels.
    X_train = np.random.rand(100, 10)
    y_train = np.random.rand(100, 1)

    # Example model creation and training (adapt to your own model).
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation='relu', input_shape=(10,)),
        tf.keras.layers.Dense(1)
    ])
    model.compile(optimizer='adam', loss='mse')
    model.fit(X_train, y_train, epochs=10)

    # Replace 'D:/BISA!/OpenCvUdang/model/' with the path where the model
    # should be saved.
    model_save_path = 'D:/BISA!/OpenCvUdang/model/'
    save_model(model, model_save_path)
| RendyAFS/Project-IOT-Udang | te.py | te.py | py | 889 | python | id | code | 0 | github-code | 13 |
2839467293 | #-*- coding: utf-8 -*-
from .constant import *
from .record import Record
from .fileio import FileIO
from .function import pformat, formatSize2
class PartMet:
    """Parser for eDonkey/eMule ``*.part.met`` download-state files."""

    def __init__(self, path):
        # version: on-disk format revision byte; record: parsed payload.
        self.version = 0
        self.record = Record()
        with FileIO(path, "rb") as file_:
            self.__loadFromFile(file_)
            # The whole file must be consumed; trailing bytes would mean
            # a corrupt or unsupported layout.
            assert(len(file_.read(1)) == 0)

    def __loadFromFile(self, reader):
        """Detect the part.met variant and load hashes, tags and timestamps."""
        start_pos = reader.tell()
        self.version = reader.readUint8()
        if not self.version in (PARTFILE_VERSION,
                                PARTFILE_SPLITTEDVERSION,
                                PARTFILE_VERSION_LARGEFILE):
            raise Exception("Invailed Version 0x%02X"%(self.version,))
        isnewstyle = PARTFILE_SPLITTEDVERSION == self.version
        partmettype = PMT_DEFAULTOLD
        if isnewstyle:
            partmettype = PMT_SPLITTED
        else:
            # A magic marker at offset 24 identifies eDonkey's variant.
            reader.seek(start_pos+24)
            if reader.readUint32() == 0x01020000:
                #edonkeys so called "old part style"
                isnewstyle, partmettype = (True, PMT_NEWOLD)
        reader.seek(start_pos+1)
        if isnewstyle:
            if reader.readUint32() == 0: #0.48 partmets - different again
                self.record.loadHashs(reader)
            else:
                reader.seek(start_pos+2)
                self.record.loadModifTime(reader)
                self.record.loadHashs(reader, loadFileHashOnly=True)
        else:
            self.record.loadModifTime(reader)
            self.record.loadHashs(reader)
        self.record.loadTags(reader, isnewstyle, partmettype)

    def printDetails(self, hashsOnly=False, areaOnly=False, linkOnly=False):
        """Print a report; the three flags each select a reduced output."""
        if hashsOnly:
            for h in self.record.head[Record.PartHashs]:
                print(h.hex().upper())
            return
        if areaOnly:
            # Incomplete byte ranges of the download.
            for area in self.record.arealist:
                print(area.start, area.end)
            return
        if linkOnly:
            print(self.record.getEd2kLink())
            return
        pformat("PartMet Version:", "0x%02X"%(self.version,))
        pformat("Modification Time:", self.record.getFormatModifTime())
        pformat("Last Seen Complete:", self.record.getFormatLastSeenComplete())
        pformat("File Name:", self.record.getFileName())
        pformat("Part Name:", self.record.getPartName())
        pformat("File Size:", formatSize2(self.record.getFileSize()))
        pformat("File Hash:", self.record.getFileHash())
        pformat("AICH Hash:", self.record.getAichHash())
        pformat("Part Hash Count:", self.record.getEd2kPartCount())
        pformat("Progress:", self.record.getFormatProgress())

    @staticmethod
    def main()->int:
        """Command-line entry point: print details for each given file.

        Returns 0 on success, 1 on any error.
        """
        import sys
        import argparse
        p = argparse.ArgumentParser()
        p.add_argument("-p", dest="p", action="store_true", help="show part hashs only")
        p.add_argument("-a", dest="a", action="store_true", help="show incomplete area only")
        p.add_argument("-l", dest="l", action="store_true", help="show ed2k link only")
        p.add_argument(dest="files", nargs="+", help="XXX.part.met")
        args = p.parse_args(sys.argv[1:])
        try:
            for path in args.files:
                PartMet(path).printDetails(args.p, args.a, args.l)
            return 0
        except Exception as err:
            print("Exception:", err, file=sys.stderr)
            return 1
| gefranks/amuletools | partmet.py | partmet.py | py | 3,467 | python | en | code | 0 | github-code | 13 |
25203726383 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from sys import platform, maxsize, version_info
import os, sys
from Cython.Compiler.Main import default_options, CompilationOptions
default_options['emit_linenums'] = True
from subprocess import check_output, CalledProcessError
def get_include():
    """Return PMIx include directories derived from the build environment.

    Reads PMIX_TOP_BUILDDIR and PMIX_TOP_SRCDIR; any directory collected
    before the first missing variable is still returned, preserving the
    original best-effort behaviour.
    """
    dirs = []
    # Catch only the missing-variable case instead of a bare ``except:``,
    # which would also swallow KeyboardInterrupt/SystemExit.
    try:
        dirs.append(os.environ['PMIX_TOP_BUILDDIR'] + "/include")
        dirs.append(os.environ['PMIX_TOP_SRCDIR'] + "/include")
    except KeyError:
        return dirs
    return dirs
def getVersion():
    """Read MAJOR/MINOR/RELEASE from pmix_version.h and return "M.m.r".

    Looks next to this file first, then falls back to the directories
    reported by get_include(); exits the process when the header is missing.
    """
    dir = os.path.dirname(__file__)
    vers_path = os.path.join(dir, '../../include', 'pmix_version.h')
    if not os.path.exists(vers_path):
        include_dirs = get_include()
        vers_path = None
        for dir in include_dirs:
            tmp_path = os.path.join(dir, 'pmix_version.h')
            if os.path.exists(tmp_path):
                vers_path = tmp_path
                break
        if vers_path is None:
            print("Error: pmix_version.h does not exist at path: ",vers_path)
            sys.exit(1)
    with open(vers_path) as verFile:
        lines = verFile.readlines()
        for l in lines:
            # Each matching #define line looks like "#define ... <value>L";
            # [:-1] strips the trailing character after the number.
            if 'MAJOR' in l:
                major = l.split()[2]
                major = major[:-1]
            elif 'MINOR' in l:
                minor = l.split()[2]
                minor = minor[:-1]
            elif 'RELEASE' in l:
                release = l.split()[2]
                release = release[:-1]
    vers = [major, minor, release]
    version = ".".join(vers)
    return version
# Package definition: builds the Cython extension against the PMIx sources
# located via PMIX_TOP_SRCDIR / PMIX_TOP_BUILDDIR.
setup(
    name = 'pypmix',
    version = getVersion(),
    url = 'https://pmix.org',
    license = '3-clause BSD',
    author = 'Ralph H. Castain',
    author_email = 'ralph.h.castain@intel.com',
    description = 'Python bindings for PMIx',
    classifiers = [
        'Development Status :: 1 - Under Construction',
        'Intended Audience :: Developers',
        'Topic :: HPC :: Parallel Programming :: System Management',
        'License :: 3-clause BSD',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'],
    keywords = 'PMI PMIx HPC MPI SHMEM',
    platforms = 'any',
    ext_modules = cythonize([Extension("pmix",
                                       [os.environ['PMIX_TOP_SRCDIR']+"/bindings/python/pmix.pyx"],
                                       libraries=["pmix"]) ],
                            compiler_directives={'language_level': 3}),
    include_dirs = get_include()
)
| deepin-community/pmix | bindings/python/setup.py | setup.py | py | 2,643 | python | en | code | 0 | github-code | 13 |
12221576567 | from operator import itemgetter
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
import time
from functools import partial
from abc import ABC, abstractmethod
from itertools import repeat
class Algorithm(ABC):
    """
    The Algorithm interface declares operations common to all supported versions
    of some algorithm.

    The FairTeamGenerator uses this interface to call the algorithm defined by concrete
    algorithm classes.
    """

    @abstractmethod
    def calculate_teams(self, players_):
        """Split *players_* — (nickname, rating) pairs sorted by rating,
        highest first — into two teams.

        Implementations return (team_one, team_two, avg_one, avg_two).
        """
        pass
class Greedy(Algorithm):
    """
    Greedy team split: after seeding each team with one of the two strongest
    players, the team with the lower rating total receives each subsequent
    player, minimising the difference in team averages.
    """

    def calculate_teams(self, players_):
        """Return (team_one, team_two, avg_one, avg_two) for *players_*,
        a rating-descending list of (nickname, rating) pairs."""
        start_time = time.time()
        start_time_ns = time.time_ns()
        # Seed each side with one of the two strongest players.
        rosters = ([players_[0]], [players_[1]])
        totals = [players_[0][1], players_[1][1]]
        for player in players_[2:]:
            # The currently weaker side picks next (ties go to side 1).
            side = 0 if totals[0] < totals[1] else 1
            rosters[side].append(player)
            totals[side] += player[1]
        print(f"[*] Greedy finished in: {time.time() - start_time}s (or {time.time_ns() - start_time_ns}ns)")
        return rosters[0], rosters[1], totals[0] / len(rosters[0]), totals[1] / len(rosters[1])
class Neighbourhood(Algorithm):
    """
    Alternating team split: the two strongest players seed the teams, then
    the remaining players are dealt alternately, first team first.
    """

    def calculate_teams(self, players_):
        """Return (team_one, team_two, avg_one, avg_two) for *players_*,
        a rating-descending list of (nickname, rating) pairs."""
        start_time = time.time()
        start_time_ns = time.time_ns()
        rosters = ([players_[0]], [players_[1]])
        totals = [players_[0][1], players_[1][1]]
        for idx, player in enumerate(players_[2:]):
            # Even picks go to the first team, odd picks to the second.
            side = idx % 2
            rosters[side].append(player)
            totals[side] += player[1]
        print(f"[*] Neighbour finished in: {time.time() - start_time}s (or {time.time_ns() - start_time_ns}ns)")
        return rosters[0], rosters[1], totals[0] / len(rosters[0]), totals[1] / len(rosters[1])
class NegativeNeighbourhood(Algorithm):
    """
    Reverse alternating split: the two *weakest* players seed the teams,
    then the rest are dealt alternately from weakest to strongest.
    """

    def calculate_teams(self, players_):
        """Return (team_one, team_two, avg_one, avg_two) for *players_*,
        a rating-descending list of (nickname, rating) pairs."""
        start_time = time.time()
        start_time_ns = time.time_ns()
        # Seed the teams with the weakest and second-weakest players.
        rosters = ([players_[-1]], [players_[-2]])
        totals = [players_[-1][1], players_[-2][1]]
        remaining = players_[:-2]
        # Walk the remaining players from weakest to strongest; parity of a
        # player's original index decides which team receives it.
        for idx in range(len(remaining) - 1, -1, -1):
            side = idx % 2
            rosters[side].append(remaining[idx])
            totals[side] += remaining[idx][1]
        # NOTE: the "Neighbour" label below is inherited from the original
        # timing message.
        print(f"[*] Neighbour finished in: {time.time() - start_time}s (or {time.time_ns() - start_time_ns}ns)")
        return rosters[0], rosters[1], totals[0] / len(rosters[0]), totals[1] / len(rosters[1])
class Players:
    """Container for (nickname, rating) pairs with unique nicknames."""

    __slots__ = ["_players"]

    def __init__(self):
        self._players = []

    def add_player(self, nickname_, rating_):
        """Register a player; raises ValueError on a duplicate nickname."""
        if any(nickname_ in entry for entry in self._players):
            raise ValueError
        self._players.append((nickname_, rating_))

    def get_players(self):
        """Return the internal (nickname, rating) list."""
        return self._players

    def sort_players(self):
        """Order players in place by rating, highest first."""
        self._players.sort(key=lambda entry: entry[1], reverse=True)
class FairTeamGenerator:
    """
    The FairTeamGenerator is responsible to generate fair teams based on the
    players rating values, so the total difference between the teams is minimal.

    Workflow: register algorithms with set_algorithm(), supply a rating-sorted
    (nickname, rating) list via add_players(), then calculate_teams() runs
    every registered algorithm in parallel and print_teams() reports the
    results and the fairest split.
    """
    __slots__ = ["_players", "_algorithms", "_pool_functions", "_result"]

    def __init__(self):
        self._players = []
        self._algorithms = []
        self._pool_functions = []
        self._result = []

    @staticmethod
    def parallelize(workers_num, functions, arguments):
        """Run each callable in *functions* with *arguments* in a process pool.

        Results are returned in the order the callables were supplied.
        """
        # if we need this multiple times, instantiate the pool outside and
        # pass it in as dependency to spare recreation all over again
        with Pool(workers_num) as pool:
            tasks = zip(functions, repeat(arguments))
            futures = [pool.apply_async(*t) for t in tasks]
            results = [fut.get() for fut in futures]
            return results

    def add_players(self, players_):
        """Store the (nickname, rating) pairs to be split into teams."""
        self._players = players_

    def set_algorithm(self, *algorithms):
        """Register one or more Algorithm instances to be evaluated."""
        for alg in algorithms:
            self._algorithms.append(alg)

    def calculate_teams(self):
        """Run all registered algorithms in parallel and keep their results."""
        for alg in self._algorithms:
            self._pool_functions.append((partial(alg.calculate_teams, self._players)))

        start_time_algs = time.time()
        start_time_algs_ns = time.time_ns()

        # Call the algorithms in parallel; build the callable tuple from
        # whatever was registered instead of hard-coding exactly three
        # algorithms (the previous version raised IndexError for fewer).
        functions = tuple(alg.calculate_teams for alg in self._algorithms)
        self._result = self.parallelize(NUM_OF_WORKERS, functions, arguments=(self._players,))

        end_time_algs = time.time()
        end_time_algs_ns = time.time_ns()

        print("#-------------------------------#")
        print(f"Overall time taken: {end_time_algs - start_time_algs}s (or {end_time_algs_ns - start_time_algs_ns}ns)")

    def print_teams(self):
        """Print each algorithm's teams and averages, then the fairest result.

        NOTE: the banners and difference labels assume the three algorithms
        registered in __main__ (Greedy, Neighbourhood, NegativeNeighbourhood),
        in that order; extra results are silently ignored.
        """
        headers = ("#---------- GREEDY -------------#",
                   "#------------- NGH -------------#",
                   "#------------ NNGH -------------#")
        for header, (team_one, team_two, avg_one, avg_two) in zip(headers, self._result):
            print("#-------------------------------#")
            print(header)
            print("#-------------------------------#")
            print("First team:")
            for player in team_one:
                print(player[0] + " - " + str(player[1]))
            print("Team average:")
            print(avg_one)
            print("#-------------------------------#")
            print("Second team:")
            for player in team_two:
                print(player[0] + " - " + str(player[1]))
            print("Team average:")
            print(avg_two)

        # store per-algorithm differences in team averages, keyed by name
        algorithm_names = ("Greedy", "Neighbour", "Negative Neighbour")
        total_differences = {
            name: abs(result[2] - result[3])
            for name, result in zip(algorithm_names, self._result)
        }
        total_differences = {k: v for k, v in sorted(total_differences.items(), key=lambda item: item[1])}

        # chose best algorithm (smallest difference in team averages)
        best_choice = list(total_differences.items())[0]
        print("#-------------------------------#")
        print("Best algorithm: {} with team rating difference of {:.2f}".format(best_choice[0], best_choice[1]))
def read_nickname(index):
    """Prompt for and return the nickname of player number *index* + 1."""
    nickname_ = input(f"Give the nickname of player {index + 1}: ")
    return nickname_
def read_rating(nickname_):
    """Prompt for *nickname_*'s rating; raises ValueError for non-integers."""
    rating_ = int(input(f"Give the rating of {nickname_}: "))
    return rating_
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    NUM_OF_PLAYERS_4 = 4
    NUM_OF_PLAYERS_6 = 6
    NUM_OF_PLAYERS_8 = 8
    # limit maximal number of worker processes to CPU limit
    NUM_OF_WORKERS = max(cpu_count() - 1, 1)

    # get number of players (re-prompt until a valid 4/6/8 is entered)
    num_of_players = 0
    print("Currently only 4, 6 or 8 players are allowed to use.\n")
    while True:
        try:
            num_of_players = int(input("Enter the number of players: "))
            if NUM_OF_PLAYERS_4 != num_of_players and \
                    NUM_OF_PLAYERS_6 != num_of_players and \
                    NUM_OF_PLAYERS_8 != num_of_players:
                print("Invalid number of players, it should be 4, 6 or 8. Try again.")
                continue
        except ValueError:
            print("Invalid type of number of players, it should be an integer value of 4, 6 or 8. Try again.")
            continue
        else:
            break

    # register the three supported splitting algorithms
    ftg = FairTeamGenerator()
    algorithm_greedy = Greedy()
    algorithm_nbh = Neighbourhood()
    algorithm_nnbh = NegativeNeighbourhood()
    ftg.set_algorithm(algorithm_greedy, algorithm_nbh, algorithm_nnbh)

    players = Players()
    for i in range(num_of_players):
        while True:
            nickname = ""
            rating = 0
            # read nickname first (minimum length 3)
            while True:
                nickname = read_nickname(i)
                if len(nickname) < 3:
                    print("Too short nickname, it should be at least 3 characters long. Try again.")
                    continue
                break
            # read corresponding rating (must be an int in [100, 5000])
            while True:
                try:
                    rating = read_rating(nickname)
                    if rating < 100 or rating > 5000:
                        print("Invalid range for rating, it should be between 100 and 5000. Try again.")
                        continue
                except ValueError:
                    print("Invalid rating format: only integers are allowed. Try again.")
                    continue
                else:
                    break
            # store player's nickname and rating; duplicates restart the
            # whole nickname/rating entry for this slot
            try:
                players.add_player(nickname, rating)
            except ValueError:
                print(f"The nickname of {nickname} is already entered. Use another.")
                continue
            else:
                break

    print("You entered the following:")
    for player in players.get_players():
        print(player[0] + " - " + str(player[1]))

    # calculate fair teams
    print("#-------------------------------#")
    print("Now calculating fair teams...")
    # sort player - rating pairs by descending order based on rating
    players.sort_players()
    # store players for internal use
    ftg.add_players(players.get_players())
    # calculate fair teams
    ftg.calculate_teams()
    # print teams
    ftg.print_teams()
| GaborWilk/FairTeamGenerator | python/team_generator.py | team_generator.py | py | 12,416 | python | en | code | 0 | github-code | 13 |
def alphafold_predict(session, sequence):
    """Start an AlphaFold prediction for *sequence* in the Colab panel.

    Raises UserError when a prediction is already running (only one run is
    supported at a time); returns silently if AlphaFold is unavailable.
    """
    # _is_alphafold_available / show_alphafold_run are presumably defined
    # elsewhere in this module — TODO confirm.
    if not _is_alphafold_available(session):
        return
    ar = show_alphafold_run(session)
    if ar.running:
        from chimerax.core.errors import UserError
        raise UserError('AlphaFold prediction currently running. Can only run one at a time.')
    ar.start(sequence)
#
from chimerax.core.tools import ToolInstance
class AlphaFoldRun(ToolInstance):
    """Tool window that drives an AlphaFold prediction in a hosted Colab notebook.

    An embedded browser loads the notebook, JavaScript injects the query
    sequence and clicks the run button, and the browser's download hook
    captures results.zip so the predicted structure can be opened into the
    session.
    """

    # Hosted Colab notebook that performs the actual prediction.
    _ipython_notebook_url = 'https://colab.research.google.com/github/RBVI/ChimeraX/blob/develop/src/bundles/alphafold/src/alphafold_predict_colab.ipynb'

    def __init__(self, session, tool_name):
        ToolInstance.__init__(self, session, tool_name)
        self._running = False  # NOTE(review): never set True anywhere visible here -- confirm
        self._sequence = None	# Sequence instance or subclass such as Chain
        self._download_directory = None  # created lazily on first download
        from chimerax.ui import MainToolWindow
        self.tool_window = tw = MainToolWindow(self)
        parent = tw.ui_area
        from Qt.QtWidgets import QVBoxLayout
        layout = QVBoxLayout(parent)
        layout.setContentsMargins(0,0,0,0)
        layout.setSpacing(0)
        # Avoid warning message from Qt when closing colab html panel.
        # "WARNING: Release of profile requested but WebEnginePage still not deleted. Expect troubles !"
        # After the html window is destroyed we remove the profile.
        # Related to ChimeraX bug report #3761.
        profile_parent = None
        from chimerax.ui.widgets.htmlview import ChimeraXHtmlView, create_chimerax_profile
        profile = create_chimerax_profile(profile_parent, download = self._download_requested,
                                          storage_name = 'AlphaFold')
        self._browser = b = ChimeraXHtmlView(session, parent, size_hint = (800,500), profile=profile)
        b.destroyed.connect(lambda *,profile=profile: profile.deleteLater())
        layout.addWidget(b)
        tw.manage(placement=None)

    def start(self, sequence):
        """Begin a prediction for *sequence*; loads the notebook on first use."""
        colab_started = (self._sequence is not None)
        self._sequence = sequence
        if not colab_started:
            b = self._browser
            from Qt.QtCore import QUrl
            b.setUrl(QUrl(self._ipython_notebook_url))
            b.page().loadFinished.connect(self._page_loaded)
        else:
            self._run()

    def _page_loaded(self, okay):
        """loadFinished callback; schedules the run after a short delay."""
        if okay:
            # Need to delay setting sequence and running or those do nothing
            # probably because it is still waiting for some asynchronous setup.
            delay_millisec = 1000
            self._keep_timer_alive = self.session.ui.timer(delay_millisec, self._run)
            # If we don't save the timer in a variable it is deleted and never fires.

    def _run(self):
        """Inject the sequence into the notebook and start the Colab run."""
        self._set_colab_sequence()
        self._run_colab()
        self.session.logger.info('Running AlphaFold prediction')

    def _set_colab_sequence(self):
        # Fill the notebook's paper-input field via JavaScript, then fire a
        # change event so the notebook notices the new value.
        p = self._browser.page()
        set_seq_javascript = ('document.querySelector("paper-input").setAttribute("value", "%s")'
                              % self._sequence.characters + '; ' +
                              'document.querySelector("paper-input").dispatchEvent(new Event("change"))')
        p.runJavaScript(set_seq_javascript)

    def _run_colab(self):
        # Programmatically press the notebook's run button.
        p = self._browser.page()
        p.runJavaScript('document.querySelector("colab-run-button").click()')

    def show(self):
        """Show the tool window."""
        self.tool_window.shown = True

    def hide(self):
        """Hide the tool window."""
        self.tool_window.shown = False

    @classmethod
    def get_singleton(self, session, create=True):
        """Return the per-session AlphaFoldRun instance, creating it if requested."""
        from chimerax.core import tools
        return tools.get_singleton(session, AlphaFoldRun, 'AlphaFold Run', create=create)

    @property
    def running(self):
        """True while a prediction is in progress."""
        return self._running

    def _download_requested(self, item):
        """Browser download hook; routes AlphaFold result files to our directory."""
        # "item" is an instance of QWebEngineDownloadItem
        filename = item.suggestedFileName()
        if filename == 'best_model.pdb':
            item.cancel()  # Historical. Used to just download pdb file.
            return
        dir = self._download_directory
        if dir is None:
            self._download_directory = dir = self._unique_download_directory()
        item.setDownloadDirectory(dir)
        if filename == 'results.zip':
            item.finished.connect(self._unzip_results)
        item.accept()

    def _unique_download_directory(self):
        """Create and return a fresh ~/Downloads/ChimeraX/AlphaFold/prediction_N directory."""
        from os.path import expanduser, join, exists
        ddir = expanduser('~/Downloads')
        adir = join(ddir, 'ChimeraX', 'AlphaFold')
        from os import makedirs
        makedirs(adir, exist_ok = True)
        for i in range(1,1000000):
            path = join(adir, 'prediction_%d' % i)
            if not exists(path):
                break
        makedirs(path, exist_ok = True)
        return path

    def _open_prediction(self):
        """Open the downloaded best_model.pdb; color/align when the query was a Chain."""
        from os.path import join, exists
        path = join(self._download_directory, 'best_model.pdb')
        if not exists(path):
            self.session.logger.warning('Downloaded prediction file not found: %s' % path)
            return
        from chimerax.pdb import open_pdb
        models, msg = open_pdb(self.session, path)
        from .match import _set_alphafold_model_attributes
        _set_alphafold_model_attributes(models)
        from chimerax.atomic import Chain
        if isinstance(self._sequence, Chain):
            chain = self._sequence
            from .fetch import _color_by_confidence, _log_chain_info
            from .match import _align_to_chain, _rename_chains
            for m in models:
                _rename_chains(m, chain)
                _color_by_confidence(m)
                _align_to_chain(m, chain)
            _log_chain_info(models, chain.name)
        self.session.models.add(models)

    def _unzip_results(self, *args, **kw):
        """Download-finished callback: extract results.zip and open the prediction."""
        if self._download_directory is None:
            return  # If user manages to request two downloads before one completes. Bug #5412
        from os.path import join, exists
        path = join(self._download_directory, 'results.zip')
        if exists(path):
            import zipfile
            with zipfile.ZipFile(path, 'r') as z:
                z.extractall(self._download_directory)
            self._open_prediction()
            self.session.logger.info('AlphaFold prediction finished\n' +
                                     'Results in %s' % self._download_directory)
        self._download_directory = None  # Make next run go in a new directory
# ------------------------------------------------------------------------------
#
def _is_alphafold_available(session):
    '''Check if the AlphaFold web service has been discontinued or is down.

    Fetches a status page: an HTTP 200 response means a status notice exists,
    which is shown to the user and treated as "service unavailable".  Any
    network failure is treated optimistically as "available" so an
    unreachable status server does not block predictions.
    '''
    url = 'https://www.rbvi.ucsf.edu/chimerax/data/status/alphafold_v2.html'
    import requests
    try:
        # Timeout keeps the UI from hanging indefinitely if the status server
        # accepts the connection but never responds (original had no timeout).
        r = requests.get(url, timeout = 10)
    except requests.exceptions.RequestException:
        # Covers ConnectionError, Timeout, etc. -- assume the service is up.
        return True
    if r.status_code == 200:
        session.logger.error(r.text, is_html = True)
        return False
    return True
# ------------------------------------------------------------------------------
#
def show_alphafold_run(session):
    """Return the per-session AlphaFoldRun tool, creating and showing it if needed."""
    return AlphaFoldRun.get_singleton(session)
# ------------------------------------------------------------------------------
#
def register_alphafold_predict_command(logger):
    """Register the 'alphafold predict' command with the ChimeraX command framework."""
    from chimerax.core.commands import CmdDesc, register
    from chimerax.atomic import SequenceArg
    description = CmdDesc(required = [('sequence', SequenceArg)],
                          synopsis = 'Predict a structure with AlphaFold')
    register('alphafold predict', description, alphafold_predict, logger=logger)
| HamineOliveira/ChimeraX | src/bundles/alphafold/src/predict.py | predict.py | py | 7,715 | python | en | code | null | github-code | 13 |
24036401076 | from src import models, db
import datetime
def create_contacts(first_name, last_name, email, phone, birthday, address):
    """Persist a new Contact record; *birthday* is an ISO 'YYYY-MM-DD' string."""
    parsed_birthday = datetime.datetime.strptime(birthday, '%Y-%m-%d')
    new_contact = models.Contact(
        first_name=first_name,
        last_name=last_name,
        email=email,
        phone=phone,
        birthday=parsed_birthday.date(),
        address=address,
        created=datetime.datetime.now().date(),
    )
    db.session.add(new_contact)
    db.session.commit()
def update_contacts(cont_id, first_name, last_name, email, phone, birthday, address):
    """Update every field of the contact with primary key *cont_id*.

    *birthday* is an ISO 'YYYY-MM-DD' string.  Raises ValueError when no
    contact with that id exists (the original crashed with AttributeError
    on None in that case).
    """
    birth_format = datetime.datetime.strptime(birthday, '%Y-%m-%d')
    contact = models.Contact.query.filter_by(id=cont_id).first()
    if contact is None:
        raise ValueError('No contact with id %s' % cont_id)
    contact.first_name = first_name
    contact.last_name = last_name
    contact.email = email
    contact.phone = phone
    contact.birthday = birth_format.date()
    contact.address = address
    db.session.commit()
def delete_contacts(cont_id):
    """Remove the contact with primary key *cont_id* from the database."""
    target = models.Contact.query.filter_by(id=cont_id).first()
    db.session.delete(target)
    db.session.commit()
def get_contact_by_id(cont_id):
    """Return the contact with primary key *cont_id*, or None when absent."""
    return models.Contact.query.filter_by(id=cont_id).first()
def get_all_contacts():
    """Return a list of every stored contact."""
    return models.Contact.query.all()
"""
Module containing functions used to return the correct conformal predictor
class given the underlying model type.
"""
from functools import singledispatch
@singledispatch
def get_absolute_error_conformal_predictor(model):
    """Dispatch on the type of *model* to construct the matching
    AbsoluteErrorConformalPredictor subclass.

    This base implementation is only reached for unregistered model types
    and always raises NotImplementedError.
    """
    message = (
        "model type not supported for AbsoluteErrorConformalPredictor "
        f"children; {type(model)}"
    )
    raise NotImplementedError(message)
@singledispatch
def get_leaf_node_scaled_conformal_predictor(model):
    """Dispatch on the type of *model* to construct the matching
    LeafNodeScaledConformalPredictor subclass.

    This base implementation is only reached for unregistered model types
    and always raises NotImplementedError.
    """
    message = (
        "model type not supported for LeafNodeScaledConformalPredictor "
        f"children; {type(model)}"
    )
    raise NotImplementedError(message)
@singledispatch
def get_split_leaf_node_scaled_conformal_predictor(model, n_bins=3):
    """Dispatch on the type of *model* to construct the matching class that
    mixes SplitConformalPredictorMixin into a LeafNodeScaledConformalPredictor
    child, binning nonconformity scores into *n_bins* groups.

    This base implementation is only reached for unregistered model types
    and always raises NotImplementedError.
    """
    message = (
        "model type not supported for SplitConformalPredictorMixin, "
        f"LeafNodeScaledConformalPredictor children; {type(model)}"
    )
    raise NotImplementedError(message)
| richardangell/pitci | pitci/dispatchers.py | dispatchers.py | py | 1,321 | python | en | code | 7 | github-code | 13 |
26763151855 | import numpy as np
import pandas as pandas
import streamlit as st
import pickle as pk
# Load the pickled admission-probability model.
# NOTE(review): pickle loading is only safe for trusted files, and the
# open() handle is never closed -- acceptable for a short-lived script.
model = pk.load(open('model.sav','rb'))

st.title('University Admit Probability Predictor')

# Collect the applicant's profile in a single submit-once form.
with st.form('StudentDetails',clear_on_submit=True):
    gre_score = st.number_input(label='Enter Your GRE Score',min_value=260,max_value=340)
    TOEFL_Score = st.number_input(label='Enter your TOEFL score',min_value=0 ,max_value=120,value=0 ,step=1)
    University = st.text_input(label='Enter the name of the university')
    University_Rating = st.number_input(label='Enter Your Desired University Raking (1-5)',min_value=1,max_value=5,)
    SOP = st.number_input(label='Enter Your SOP Rating (1-5)',min_value=0.0,max_value=5.0,value=0.0,step=0.5)
    LOR = st.number_input(label='Enter Your LOR Rating (1-5)',min_value=0.0,max_value=5.0,step=0.5)
    CGPA = st.number_input(label='Enter Your CGPA on a scale of 1-10',min_value=1,max_value=10,step=1)
    Research = st.radio(label='Have you ever published a research paper?',options=('Yes','No'))
    submit = st.form_submit_button("Submit")

# Encode the Yes/No radio answer as the 1/0 flag the model was trained on.
if Research == 'Yes':
    Research = 1
else:
    Research = 0

# Feature order must match the model's training columns:
# [GRE, TOEFL, university rating, SOP, LOR, CGPA, research flag]
sample = [gre_score,TOEFL_Score,University_Rating,SOP,LOR,CGPA,Research]
prob = model.predict(np.array(sample).reshape(1,-1))

# Clamp the regression output into [0, 1].  The original scalar assignments
# (prob = 1 / prob = 0) replaced the 1-element array with a plain int, which
# made the prob[0] indexing below raise TypeError whenever clamping fired.
prob = np.clip(prob, 0, 1)

st.write(f"You have a {np.round(prob[0]*100,2)}% chance of getting into {University}.")
| mkchaitanya03/University-Admit-Predictor | App.py | App.py | py | 1,410 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Participant import Participant
from alipay.aop.api.domain.TransOrderDetail import TransOrderDetail
class AlipayFundBatchUniTransferModel(object):
    """Request model for the alipay.fund.batch.uni.transfer API.

    Auto-generated SDK style: each request field is held in a private
    attribute exposed through a property; to_alipay_dict/from_alipay_dict
    convert between this object and the wire-format dict.  Nested fields
    (payer_info, trans_order_list) are coerced to their model classes on
    assignment.
    """

    def __init__(self):
        self._biz_scene = None
        self._business_params = None
        self._original_order_id = None
        self._out_batch_no = None
        self._payer_info = None
        self._product_code = None
        self._remark = None
        self._total_count = None
        self._total_trans_amount = None
        self._trans_order_list = None

    @property
    def biz_scene(self):
        return self._biz_scene

    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value

    @property
    def business_params(self):
        return self._business_params

    @business_params.setter
    def business_params(self, value):
        self._business_params = value

    @property
    def original_order_id(self):
        return self._original_order_id

    @original_order_id.setter
    def original_order_id(self, value):
        self._original_order_id = value

    @property
    def out_batch_no(self):
        return self._out_batch_no

    @out_batch_no.setter
    def out_batch_no(self, value):
        self._out_batch_no = value

    @property
    def payer_info(self):
        return self._payer_info

    @payer_info.setter
    def payer_info(self, value):
        # Accept either a Participant instance or its dict representation.
        if isinstance(value, Participant):
            self._payer_info = value
        else:
            self._payer_info = Participant.from_alipay_dict(value)

    @property
    def product_code(self):
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    @property
    def remark(self):
        return self._remark

    @remark.setter
    def remark(self, value):
        self._remark = value

    @property
    def total_count(self):
        return self._total_count

    @total_count.setter
    def total_count(self, value):
        self._total_count = value

    @property
    def total_trans_amount(self):
        return self._total_trans_amount

    @total_trans_amount.setter
    def total_trans_amount(self, value):
        self._total_trans_amount = value

    @property
    def trans_order_list(self):
        return self._trans_order_list

    @trans_order_list.setter
    def trans_order_list(self, value):
        # Accept a list of TransOrderDetail instances or of their dicts.
        if isinstance(value, list):
            self._trans_order_list = list()
            for i in value:
                if isinstance(i, TransOrderDetail):
                    self._trans_order_list.append(i)
                else:
                    self._trans_order_list.append(TransOrderDetail.from_alipay_dict(i))

    def to_alipay_dict(self):
        """Serialize to the wire-format dict; only truthy fields are emitted,
        and nested models are serialized via their own to_alipay_dict."""
        params = dict()
        if self.biz_scene:
            if hasattr(self.biz_scene, 'to_alipay_dict'):
                params['biz_scene'] = self.biz_scene.to_alipay_dict()
            else:
                params['biz_scene'] = self.biz_scene
        if self.business_params:
            if hasattr(self.business_params, 'to_alipay_dict'):
                params['business_params'] = self.business_params.to_alipay_dict()
            else:
                params['business_params'] = self.business_params
        if self.original_order_id:
            if hasattr(self.original_order_id, 'to_alipay_dict'):
                params['original_order_id'] = self.original_order_id.to_alipay_dict()
            else:
                params['original_order_id'] = self.original_order_id
        if self.out_batch_no:
            if hasattr(self.out_batch_no, 'to_alipay_dict'):
                params['out_batch_no'] = self.out_batch_no.to_alipay_dict()
            else:
                params['out_batch_no'] = self.out_batch_no
        if self.payer_info:
            if hasattr(self.payer_info, 'to_alipay_dict'):
                params['payer_info'] = self.payer_info.to_alipay_dict()
            else:
                params['payer_info'] = self.payer_info
        if self.product_code:
            if hasattr(self.product_code, 'to_alipay_dict'):
                params['product_code'] = self.product_code.to_alipay_dict()
            else:
                params['product_code'] = self.product_code
        if self.remark:
            if hasattr(self.remark, 'to_alipay_dict'):
                params['remark'] = self.remark.to_alipay_dict()
            else:
                params['remark'] = self.remark
        if self.total_count:
            if hasattr(self.total_count, 'to_alipay_dict'):
                params['total_count'] = self.total_count.to_alipay_dict()
            else:
                params['total_count'] = self.total_count
        if self.total_trans_amount:
            if hasattr(self.total_trans_amount, 'to_alipay_dict'):
                params['total_trans_amount'] = self.total_trans_amount.to_alipay_dict()
            else:
                params['total_trans_amount'] = self.total_trans_amount
        if self.trans_order_list:
            # NOTE(review): this serializes list elements in place, mutating
            # self.trans_order_list -- a second call sees dicts, not models.
            if isinstance(self.trans_order_list, list):
                for i in range(0, len(self.trans_order_list)):
                    element = self.trans_order_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.trans_order_list[i] = element.to_alipay_dict()
            if hasattr(self.trans_order_list, 'to_alipay_dict'):
                params['trans_order_list'] = self.trans_order_list.to_alipay_dict()
            else:
                params['trans_order_list'] = self.trans_order_list
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire-format dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayFundBatchUniTransferModel()
        if 'biz_scene' in d:
            o.biz_scene = d['biz_scene']
        if 'business_params' in d:
            o.business_params = d['business_params']
        if 'original_order_id' in d:
            o.original_order_id = d['original_order_id']
        if 'out_batch_no' in d:
            o.out_batch_no = d['out_batch_no']
        if 'payer_info' in d:
            o.payer_info = d['payer_info']
        if 'product_code' in d:
            o.product_code = d['product_code']
        if 'remark' in d:
            o.remark = d['remark']
        if 'total_count' in d:
            o.total_count = d['total_count']
        if 'total_trans_amount' in d:
            o.total_trans_amount = d['total_trans_amount']
        if 'trans_order_list' in d:
            o.trans_order_list = d['trans_order_list']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFundBatchUniTransferModel.py | AlipayFundBatchUniTransferModel.py | py | 6,647 | python | en | code | 241 | github-code | 13 |
"""
Adapted by dt10 from the following:
"""
"""
Matplotlib Animation Example
author: Jake Vanderplas
email: vanderplas@astro.washington.edu
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
# Accumulators filled while streaming the CSV, then used for plotting.
xypos=[] # Vector of frame -> numpy particles * 2
colours={} # Map of particle num -> colour index
# Bounding box of all particle positions, for the plot axes.
minx=+1000
maxx=-1000
miny=+1000
maxy=-1000
particles=set()  # set of particle ids seen in frame 0

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import sys
import csv

# Input file; an optional first command-line argument overrides the default.
sourceFile='particles.csv'
if len(sys.argv)>1:
    sourceFile=sys.argv[1]
# Stream the CSV once, building one (particles x 2) position array per frame.
# Rows are assumed sorted by frame, with frame 0 listing particles 0..N-1
# contiguously so the particle count can be learned from the first frame.
with open(sourceFile, 'r') as csvfile:
    reader = csv.reader(csvfile)
    xy=[] # Starts as a Python list; becomes a NumPy array after frame 0
    prevFrame=0
    for (frame,t,particle,colour,x,y,dx,dy) in reader:
        x=float(x)
        y=float(y)
        frame=int(frame)
        particle=int(particle)
        if frame!=prevFrame: # First particle in a frame
            if (frame%10)==0:
                print(" loaded frame {}".format(prevFrame))
            if prevFrame==0: # Just finished the first frame, now we know particle count
                xy=np.array(xy,np.single)
            # Finished frame saved; allocate a fresh buffer for the next one.
            # NOTE(review): indentation reconstructed -- assumes append happens
            # on EVERY frame transition (otherwise only one frame would exist).
            # The final frame in the file is never appended -- confirm intended.
            xypos.append(xy)
            xy=np.empty([len(particles),2],np.single)
            prevFrame=frame
        if frame==0:
            colour=int(colour)
            colours[particle]=colour
            particles.add(particle)
            assert particle==len(xy) # Assume contiguous
            xy.append( (x,y) )
        else:
            xy[particle,:]=(x,y)
        # Track the global bounding box for the plot axes.
        minx=min(minx,x)
        miny=min(miny,y)
        maxx=max(maxx,x)
        maxy=max(maxy,y)

print("x=({},{}), y=({},{})".format(minx,maxx,miny,maxy))

# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(minx, maxx), ylim=(miny, maxy))
# One color per particle, cycling through a small palette by colour index.
palette=['r','g','b','c','y','m','y']
c=[ palette[colours[p] % len(palette)] for p in particles]
splot = ax.scatter(xypos[0][:,0],xypos[0][:,1],color=c,alpha=0.5,edgecolor='')
# initialization function: plot the background of each frame
def init():
    """FuncAnimation init hook: clear the scatter before the first frame."""
    # Use an empty (0, 2) array rather than []: newer Matplotlib versions
    # reject a plain empty list in set_offsets (cannot be shaped to Nx2).
    splot.set_offsets(np.empty((0, 2)))
    return splot,
# animation function. This is called sequentially
def animate(i):
    """FuncAnimation callback: move the scatter to frame *i* of the trajectory."""
    if not i % 10:
        print(" Render frame {} of {}".format(i, len(xypos)))
    splot.set_offsets(xypos[i])
    return (splot,)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=len(xypos), interval=20, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
anim.save('basic_animation.mp4', fps=25, extra_args=['-vcodec', 'libx264'])
#plt.show()
| joshjennings98/fyp | graph_schema-4.2.0/apps/nursery/particle/v3/scripts/plot_particles_v2.py | plot_particles_v2.py | py | 3,034 | python | en | code | 0 | github-code | 13 |
14933556555 | from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the fine-tuned GPT-2 weights and tokenizer from the training output.
model_path = './output' # Path to the fine-tuned model directory
model = GPT2LMHeadModel.from_pretrained(model_path)
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
# Define a function for generating responses
def generate_response(prompt, max_length=50):
    """Generate a single model continuation of *prompt* and return it as text."""
    encoded_prompt = tokenizer.encode(prompt, return_tensors='pt')
    generated = model.generate(encoded_prompt, max_length=max_length, num_return_sequences=1)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Example usage: read one question from stdin and print the model's reply.
# (Removed dataset-metadata junk that had been fused onto the print line.)
user_input = input('Write a question: ')
response = generate_response(user_input)
print(response)
#!/usr/bin/env python
# This file converts a dictionary file (like amwgmaster.py or lmwgmaster.py) to a series of diags.py commands.
import sys, getopt, os, subprocess, logging, pdb
from time import sleep
from argparse import ArgumentParser
from functools import partial
from collections import OrderedDict
from metrics.frontend.options import Options
from metrics.frontend.options import make_ft_dict
from metrics.fileio.filetable import *
from metrics.fileio.findfiles import *
from metrics.frontend.form_filenames import form_filename, form_file_rootname
from metrics.packages.diagnostic_groups import *
from output_viewer.index import OutputIndex, OutputPage, OutputGroup, OutputRow, OutputFile, OutputMenu
import vcs
import tempfile
import glob
logger = logging.getLogger(__name__)
def filenames(collkey, plotset, variable, obs_set='', var_option=None, region="Global",
              season="ANN", combined=False):
    """Return the expected output filenames (images + NetCDF data) for one plot.

    collkey:    collection key; collections '7'/'7s' ignore the region.
    plotset:    plot-set identifier forwarded to form_file_rootname.
    variable:   variable name; NetCDF names get per-dataset suffixes.
    obs_set:    observation-set key folded into the root name.
    var_option: optional variable option (e.g. a level) folded into the name.
    """
    if collkey == '7' or collkey == '7s':
        region = ''
    # (Removed commented-out duplicate of this call that had drifted.)
    root_name = form_file_rootname(plotset, [variable],
                                   aux=[] if var_option is None else [var_option],
                                   postn=obs_set,
                                   season=season, region=region, combined=combined)
    files = []
    # Image files (png + pdf) for the plot itself.
    files.extend(form_filename(root_name, ["png", "pdf"], descr=True,
                               more_id="combined" if combined else ""))
    # NetCDF data files for the observation, model, and difference datasets.
    for dataset in ("obs", "ft1", "diff"):
        files.extend(form_filename(root_name, ["nc"], descr=True,
                                   vname="_".join((variable, dataset))))
    return files
def filename_to_fileobj(name):
    """Wrap an output filename in an output-viewer file entry.

    NetCDF files get a human-readable title derived from the dataset tag
    after the '--' separator (e.g. 'obs' -> 'Obs Data'); any other file is
    returned with just its URL.
    """
    if not name.endswith(".nc"):
        return {"url": name}
    dataset_tag = name[:-3].split("--")[1]
    title = dataset_tag[0].upper() + dataset_tag[1:] + " Data"
    return {"url": name, "title": title}
# Default executable invoked for each generated diagnostic command line;
# per-variable 'executable' entries in a collection override it.
# If not specified on an individual variable, this is the default.
def_executable = 'diags'
# The user specified a package; see what collections are available.
def getCollections(pname):
    """Return the diagnostic collection keys available for package *pname*.

    Combines the package's own diagnostic sets with any 'mixed_plots'
    collections that apply to the package, either via a collection-level
    'package' entry or (for 'mixed_packages') a per-variable one.
    """
    allcolls = diags_collection.keys()
    colls = []
    dm = diagnostics_menu()
    pclass = dm[pname.upper()]()
    slist = pclass.list_diagnostic_sets()
    keys = slist.keys()
    for k in keys:
        # Set names look like "<key> <description...>"; keep just the key.
        fields = k.split()
        colls.append(fields[0])
    # Find all mixed_plots sets that have the user-specified pname
    # Deal with mixed_plots next
    for c in allcolls:
        if diags_collection[c].get('mixed_plots', False) == True:
            # mixed_packages requires mixed_plots
            if diags_collection[c].get('mixed_packages', False) == False:
                # If no package was specified, just assume it is universal
                # Otherwise, see if pname is in the list for this collection
                if diags_collection[c].get('package', False) == False or diags_collection[c]['package'].upper() == pname.upper():
                    colls.append(c)
            else: # mixed packages. need to loop over variables then. if any variable is using this pname then add the package
                vlist = list( set(diags_collection[c].keys()) - set(collection_special_vars))
                for v in vlist:
                    # This variable has a package
                    # NOTE(review): c may be appended once per matching
                    # variable, producing duplicates -- confirm harmless.
                    if diags_collection[c][v].get('package', False) != False and diags_collection[c][v]['package'].upper() == pname.upper():
                        colls.append(c)
    logger.info('The following diagnostic collections appear to be available: %s' , colls)
    return colls
def makeTables(collnum, model_dict, obspath, outpath, pname, outlogdir, dryrun=False):
    """Generate the 'tables' diagnostics for collection *collnum* by building
    and running one diags command line per variable/varopt combination.

    model_dict maps dataset key -> {'raw': ..., 'climos': ..., 'name': ...};
    at most two models are supported.  dryrun=True only logs command lines.
    """
    collnum = collnum.lower()
    seasons = diags_collection[collnum].get('seasons', ['ANN'])
    regions = diags_collection[collnum].get('regions', ['Global'])
    vlist = list(set(diags_collection[collnum].keys()) - set(collection_special_vars))
    aux = ['default']
    # NOTE(review): aux is only overwritten when a variable declares varopts,
    # so an earlier variable's varopts leak into later ones -- confirm intended.
    num_models = len(model_dict.keys())
    if vlist == []:
        logger.warning('varlist was empty. Assuming all variables.')
        vlist = ['ALL']
    if num_models > 2:
        logger.critical('Only <=2 models supported for tables')
        quit()
    raw0 = None
    raw1 = None
    climo0 = None
    climo1 = None
    cf0 = 'yes' #climo flag
    cf1 = 'yes'
    # NOTE(review): model_dict.keys()[0] only works on Python 2; Python 3
    # dict views are not indexable (would need list(model_dict)[0]).
    raw0 = model_dict[model_dict.keys()[0]]['raw']
    if raw0 != None:
        ps0 = "--model path=%s,climos='no'" % raw0.root_dir()
    climo0 = model_dict[model_dict.keys()[0]]['climos']
    if climo0 != None:
        ps0 = "--model path=%s,climos='yes'" % climo0.root_dir()
    name0 = model_dict[model_dict.keys()[0]].get('name', 'ft0')
    if num_models == 2:
        raw1 = model_dict[model_dict.keys()[1]]['raw']
        if raw1 != None:
            ps1 = "--model path=%s,climos='no'" % raw1.root_dir()
        climo1 = model_dict[model_dict.keys()[1]]['climos']
        if climo1 != None:
            ps1 = "--model path=%s,climos='yes'" % climo1.root_dir()
        name1 = model_dict[model_dict.keys()[1]].get('name', 'ft1')
    # This assumes no per-variable regions/seasons. .... See if land set 5 cares
    if 'NA' in seasons:
        seasonstr = ''
    else:
        seasonstr = '--seasons '+' '.join(seasons)
    regionstr = '--regions '+' '.join(regions)
    obsstr = ''
    if obspath != None:
        obsstr = '--obs path=%s' % obspath
    for v in vlist:
        # Prefer climatology datasets; fall back to raw data.
        ft0 = (climo0 if climo0 is not None else raw0)
        ft1 = (climo1 if climo1 is not None else raw1)
        if ft0 == climo0:
            cf0 = 'yes'
        else:
            cf0 = 'no'
        if ft1 == climo1:
            cf1 = 'yes'
        else:
            cf1 = 'no'
        if v == 'ALL':
            vstr = ''
        else:
            ps0 = ''
            ps1 = ''
            if diags_collection[collnum][v].get('options', False) != False:
                optkeys = diags_collection[collnum][v]['options'].keys()
                if 'requiresraw' in optkeys and diags_collection[collnum][v]['options']['requiresraw'] == True:
                    # This variable must be computed from raw (non-climo) data.
                    ft0 = raw0
                    ft1 = raw1
                    cf0 = 'no'
                    cf1 = 'no'
                    if ft0 == None:
                        logger.warning('Variable %s requires raw data. No raw data provided. Passing', v)
                        continue
                    if num_models == 2 and ft1 == None:
                        logger.warning('Variable %s requires raw data. No second raw dataset provided. Passing on differences', v)
                        continue
                    ps0 = '--model path=%s,climos=no' % (ft0.root_dir())
                    if num_models == 2:
                        ps1 = '--model path=%s,climos=no' % (ft1.root_dir())
                    # do we also have climos? if so pass both instead.
                    if climo0 != None:
                        ps0 = '--model path=%s,climos=yes,name=%s --model path=%s,climos=no,name=%s' % (climo0.root_dir(), name0, raw0.root_dir(), name0)
                    if num_models == 2 and climo1 != None:
                        # NOTE(review): 'clmios' below looks like a typo for
                        # 'climos' -- confirm against the diags option parser.
                        ps1 = '--model path=%s,climos=yes,name=%s --model path=%s,clmios=no,name=%s' % (climo1.root_dir(), name1, raw1.root_dir(), name1)
            else:
                # NOTE(review): indentation reconstructed; when a variable has
                # 'options' without requiresraw, ps0/ps1 remain '' -- confirm.
                ps0 = '--model path=%s,climos=%s' % (ft0.root_dir(), cf0)
                if num_models == 2 and ft1 != None:
                    ps1 = '--model path=%s,climos=%s' % (ft1.root_dir(), cf1)
            vstr = '--vars %s' % v
            if diags_collection[collnum][v].get('varopts', False) != False:
                aux = diags_collection[collnum][v]['varopts']
        # Ok, variable(s) and varopts ready to go. Get some path strings.
        # Create path strings.
        if ft0 == None:
            logger.warning('ft0 was none')
            continue
        else:
            path0str = ps0
        path1str = ''
        if num_models == 2 and ft1 != None:
            path1str = ps1
        for a in aux:
            if a == 'default':
                auxstr = ''
            else:
                auxstr = '--varopts '+a
            # NOTE(review): 'package' is not defined in this function; it is
            # either a module global or a bug (should it be pname?) -- confirm.
            cmdline = (def_executable, path0str, path1str, obsstr, "--table", "--set", collnum, "--prefix", "set%s" % collnum, "--package", package, vstr, seasonstr, regionstr, auxstr, "--outputdir", outpath)
            runcmdline(cmdline, outlogdir, dryrun)
def generatePlots(model_dict, obspath, outpath, pname, xmlflag, data_hash, colls=None, dryrun=False):
import os
# Did the user specify a single collection? If not find out what collections we have
if colls == None:
colls = getCollections(pname) #find out which colls are available
# Create the outpath/{package} directory. options processing should take care of
# making sure outpath exists to get this far.
outpath = os.path.join(outpath, pname.lower())
if not os.path.isdir(outpath):
try:
os.makedirs(outpath)
except:
logger.exception('Failed to create directory %s', outpath)
outlogdir = os.path.join(outpath, "DIAGS_OUTPUT", data_hash)
if not os.path.exists(outlogdir):
try:
os.makedirs(outlogdir)
except Exception:
logger.exception("Couldn't create output log directory- %s/DIAGS_OUTPUT/", outpath)
quit()
# Get some paths setup
num_models = len(model_dict.keys())
raw0 = model_dict[model_dict.keys()[0]]['raw']
climo0 = model_dict[model_dict.keys()[0]]['climos']
name0 = None
name0 = model_dict[model_dict.keys()[0]].get('name', 'ft0')
defaultft0 = climo0 if climo0 is not None else raw0
modelpath = defaultft0.root_dir()
if num_models == 2:
raw1 = model_dict[model_dict.keys()[1]]['raw']
climo1 = model_dict[model_dict.keys()[1]]['climos']
name1 = model_dict[model_dict.keys()[1]].get('name', 'ft1')
defaultft1 = climo1 if climo1 is not None else raw1
modelpath1 = defaultft1.root_dir()
else:
modelpath1 = None
defaultft1 = None
raw1 = None
climo1 = None
name1 = None
if climo0 != None:
cf0 = 'yes'
else:
cf0 = 'no'
if climo1 != None:
cf1 = 'yes'
else:
cf1 = 'no'
pages = []
menus = []
for group in diags_groups:
menus.append(OutputMenu(group, []))
# Sort the plotsets so they're appended onto the menu in the correct order
coll_meta = []
for collnum in colls:
menu_index = -1
index_in_menu = -1
for ind, menu in enumerate(menus):
if collnum in diags_groups[menu.title]:
menu_index = ind
index_in_menu = diags_groups[menu.title].index(collnum.lower())
coll_meta.append((menu_index, index_in_menu, collnum))
coll_meta.sort()
colls = [coll[2] for coll in coll_meta]
# Now, loop over collections.
for collnum in colls:
logger.info('Working on collection %s', collnum)
collnum = collnum.lower()
coll_def = diags_collection[collnum]
seasons = coll_def.get("seasons", None)
if seasons is not None:
page_columns = ["Description"] + seasons
else:
page_columns = None
page = OutputPage("Plotset %s" % collnum, short_name="set_%s" % collnum, columns=page_columns, description=coll_def["desc"], icon="amwg_viewer/img/SET%s.png" % collnum)
if pname.lower() == "amwg":
if collnum == "2":
page.columns = [""]
group = OutputGroup("Annual Implied Northward Transports")
page.addGroup(group)
elif collnum == "11":
page.columns = ["Scatter Plot"]
page.addGroup(OutputGroup("Warm Pool Scatter Plot"))
page.addGroup(OutputGroup("Annual Cycle on the Equatorial Pacific"))
elif collnum == "12":
page.columns = ["T", "Q", "H"]
group = OutputGroup("Station Name")
page.addGroup(group)
elif collnum == "13":
group = OutputGroup("Region")
page.addGroup(group)
elif collnum == "14":
group = OutputGroup("", columns=["ANN", "DJF", "MAM", "JJA", "SON"])
page.addGroup(group)
group = OutputGroup("", columns=["Bias (%)", "Variance (ratio)", "Correlation Coefficient Tables"])
page.addGroup(group)
elif collnum == "topten":
group = OutputGroup("Variable", columns=["ANN"])
page.addGroup(group)
for menu in menus:
if collnum in diags_groups[menu.title]:
menu.addPage(page)
pages.append(page)
# Special case the tables since they are a bit special. (at least amwg)
if diags_collection[collnum].get('tables', False) != False:
makeTables(collnum, model_dict, obspath, outpath, pname, outlogdir, dryrun)
group = OutputGroup("Tables")
page.addGroup(group)
for region in coll_def.get("regions", ["Global"]):
columns = []
for season in coll_def.get("seasons", ["ANN"]):
fname = form_filename(form_file_rootname('resstring', [], 'table', season=season, basen="set%s" % collnum, region=region), 'text')
file = OutputFile(fname, title="{region} Table ({season})".format(region=region, season=season))
columns.append(file)
row = OutputRow("{region} Tables".format(region=region), columns)
page.addRow(row, 0)
continue
# deal with collection-specific optional arguments
optionsstr = ''
if diags_collection[collnum].get('options', False) != False:
# we have a few options
logger.debug('Additional command line options to pass to diags.py - %s', diags_collection[collnum]['options'])
for k in diags_collection[collnum]['options'].keys():
optionsstr = optionsstr + '--%s %s ' % (k, diags_collection[collnum]['options'][k])
# Deal with packages
# Do we have a global package?
if diags_collection[collnum].get('package', False) != False and diags_collection[collnum]['package'].upper() == pname.upper():
if diags_collection[collnum].get('mixed_packages', False) == False:
packagestr = '--package '+pname
if diags_collection[collnum].get('mixed_packages', False) == False: #no mixed
# Check global package
if diags_collection[collnum].get('package', False) != False and diags_collection[collnum]['package'].upper() != pname.upper():
message = pname.upper()
logger.debug(str(message))
message = diags_collection[collnum]['package']
logger.debug(str(message))
# skip over this guy
logger.warning('Skipping over collection %s', collnum)
continue
else:
if diags_collection[collnum].get('package', False) != False and diags_collection[collnum]['package'].upper() == pname.upper():
logger.debug('Processing collection %s ', collnum)
packagestr = '--package '+pname
# Given this collection, see what variables we have for it.
vlist = []
special = set(collection_special_vars)
for k in diags_collection[collnum].keys():
if k in special:
continue
else:
vlist.append(k)
# now, see how many plot types we have to deal with and how many obs
plotlist = []
obslist = []
for v in vlist:
plotlist.append(diags_collection[collnum][v]['plottype'])
obslist.extend(diags_collection[collnum][v]['obs'])
plotlist = list(set(plotlist))
# At this point, we have a list of obs for this collection, a list of variables, and a list of plots
# We need to organize them so that we can loop over obs sets with a fixed plottype and list of variables.
# Let's build a dictionary for that.
for p in plotlist:
obsvars = OrderedDict([(key, []) for key in diags_obslist])
for o in diags_obslist:
for v in vlist:
if o in diags_collection[collnum][v]['obs'] and diags_collection[collnum][v]['plottype'] == p:
if v not in obsvars[o]:
obsvars[o].append(v)
for o in diags_obslist:
if len(obsvars[o]) == 0:
del obsvars[o]
else:
group = OutputGroup(diags_obslist[o]["desc"])
page.addGroup(group)
# ok we have a list of observations and the variables that go with them for this plot type.
for obs_index, o in enumerate(obsvars.keys()):
# Each command line will be an obs set, then list of vars/regions/seasons that are consistent. Start constructing a command line now.
cmdline = ''
packagestr = ' --package '+pname
outstr = ' --outputdir '+outpath
if xmlflag == False:
xmlstr = ' --xml no'
else:
xmlstr = ''
if o != 'NA' and obspath != None:
obsfname = diags_obslist[o]['filekey']
obsstr = '--obs path='+obspath+',climos=yes,filter="f_startswith(\''+obsfname+'\')"'
poststr = '--postfix '+obsfname
else:
if o != 'NA':
logger.warning('No observation path provided but this variable/collection combination specifies an obs set.')
logger.warning('Not making a comparison vs observations.')
obsstr = ''
poststr = ' --postfix \'\''
setstr = ' --set '+p
prestr = ' --prefix set'+collnum
# set up season str (and later overwrite it if needed)
g_season = diags_collection[collnum].get('seasons', ['ANN'])
if 'NA' in g_season:
seasonstr = ''
else:
seasonstr = '--seasons '+' '.join(g_season)
# set up region str (and later overwrite it if needed)
g_region = diags_collection[collnum].get('regions', ['Global'])
if g_region == ['Global'] or collnum=='7' or collnum=='7s':
regionstr = ''
else:
regionstr = '--regions '+' '.join(g_region)
# Now, check each variable for a season/region/varopts argument. Any that do NOT have them can be dealt with first.
obs_vlist = obsvars[o]
simple_vars = []
for v in obs_vlist:
keys = ["seasons", "regions", "varopts", "options", "executable"]
# Check if they're false
vals = [diags_collection[collnum][v].get(key, False) is False for key in keys]
if all(vals):
simple_vars.append(v)
# I believe all of the lower level plot sets (e.g. in amwg.py or lmwg.py) will ignore a second dataset, IF one is supplied
# unnecessarily, so pass all available datasets here.
complex_vars = list(set(obs_vlist) - set(simple_vars))
# simple vars first
if len(simple_vars) != 0:
varstr = '--vars '+' '.join(simple_vars)
pstr1 = '--model path=%s,climos=%s,type=model' % (modelpath, cf0)
#append the name if passed from command line
if name0 != None:
pstr1 += ',name=' + name0
if modelpath1 != None:
pstr2 = '--model path=%s,climos=%s,type=model' % (modelpath1, cf1)
#append the name if passed from command line
if name1 != None:
pstr2 += ',name=' + name1
else:
pstr2 = ''
cmdline = (def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr, seasonstr, varstr, outstr, xmlstr, prestr, poststr, regionstr)
if collnum != 'dontrun':
runcmdline(cmdline, outlogdir, dryrun)
else:
message = cmdline
logger.debug('DONTRUN: %s', cmdline)
# let's save what the defaults are for this plotset
g_seasons = g_season
g_regions = g_region
for v in complex_vars:
# run these individually basically.
g_region = diags_collection[collnum][v].get('regions', g_regions)
g_season = diags_collection[collnum][v].get('seasons', g_seasons)
g_exec = diags_collection[collnum][v].get('executable', def_executable)
regionstr = '--regions '+' '.join(g_region)
if 'NA' in g_season:
seasonstr = ''
else:
seasonstr = '--seasons '+' '.join(g_season)
varopts = ''
if diags_collection[collnum][v].get('varopts', False) != False:
varopts = '--varopts '+' '.join(diags_collection[collnum][v]['varopts'])
varstr = '--vars '+v
if g_exec == def_executable:
# check for options.
raw = False
cf0 = 'yes'
cf1 = 'yes'
if diags_collection[collnum][v].get('options', False) != False:
raw = diags_collection[collnum][v]['options'].get('requiresraw', False)
if raw != False:
if raw0 == None:
logger.critical('No raw dataset provided and this set requires raw data')
quit()
else:
modelpath = raw0.root_dir()
cf0 = 'no'
if raw1 == None and num_models == 2:
logger.critical('2 or more datasets provided, but only one raw dataset provided.')
logger.critical('This variable in this collection requires raw datasets for comparisons')
quit()
else:
modelpath1 = raw1.root_dir()
cf1 = 'no'
pstr1 = '--model path=%s,climos=%s,type=model' % (modelpath, cf0)
if name0 != None:
pstr1 += ',name=' + name0
if modelpath1 != None:
pstr2 = '--model path=%s,climos=%s,type=model' % (modelpath1, cf1)
if name1 != None:
pstr2 += ',name=' + name1
else:
pstr2 = ''
cmdline = [def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr, seasonstr, varstr, outstr, xmlstr, prestr, poststr, regionstr]
if varopts:
cmdline += [varopts]
if collnum != 'dontrun':
runcmdline(cmdline, outlogdir, dryrun)
else:
logger.debug('DONTRUN: %s', cmdline)
else: # different executable; just pass all option key:values as command line options.
# Look for a cmdline list in the options for this variable.
execstr = diags_collection[collnum].get('exec', def_executable) # should probably NOT be def_executable....
cmdlineOpts = diags_collection[collnum][v].get('cmdline', False)
fnamebase = 'set'+collnum
if cmdlineOpts != False:
if 'datadir' in cmdlineOpts:
execstr = execstr+' --datadir '+ modelpath
if 'obsfilter' in cmdlineOpts:
logger.debug('obsfname: '+str(obsfname))
execstr = execstr+' --obsfilter '+ obsfname
if 'obspath' in cmdlineOpts:
execstr = execstr+' --obspath '+ obspath
if 'outdir' in cmdlineOpts:
execstr = execstr+' --output '+ outpath
if 'fieldname' in cmdlineOpts:
execstr = execstr+' --fieldname '+ v
if 'diagname' in cmdlineOpts:
if name0 == None:
if dsname == None:
execstr = execstr+' --diagname TEST'
else:
execstr = execstr+' --diagname '+ dsname
else:
execstr = execstr+' --diagname '+ name0
if 'casename' in cmdlineOpts:
if dsname == None:
execstr = execstr+' --casename TESTCASE'
else:
execstr = execstr+' --casename '+ dsname
if 'figurebase' in cmdlineOpts:
execstr = execstr+' --figurebase '+ fnamebase
if execstr != def_executable:
runcmdline([execstr], outlogdir, dryrun)
# VIEWER Code
# Build rows for this group in the index...
if package.lower() == "amwg":
if collnum not in ("2", "11", "12", "13", "14"):
for var in obsvars[o]:
regions = coll_def[var].get("regions", coll_def.get("regions", ["Global"]))
combined = coll_def[var].get("combined", True)
for region in regions:
varopts = coll_def[var].get("varopts", None)
if varopts is not None:
for option in varopts:
columns = []
if region != "Global":
addon_info = "({option}, {region})".format(option=option, region=region)
else:
addon_info = "({option})".format(option=option)
if var in diags_varlist:
columns.append("{desc} {addon}".format(desc=diags_varlist[var]["desc"], addon=addon_info))
else:
columns.append("")
title = "{var} {addon}".format(var=var, addon=addon_info)
for s in coll_def.get("seasons", ["ANN"]):
files = filenames(collnum, p, var, obs_set=diags_obslist[o]["filekey"], combined=combined, season=s, var_option=option, region=region)
f = OutputFile(files[0], title="{season}".format(season=s), other_files=[filename_to_fileobj(f) for f in files[1:]])
columns.append(f)
row = OutputRow(title, columns)
page.addRow(row, obs_index)
else:
if region != "Global":
title = "{var} ({region})".format(var=var, region=region)
else:
title = var
columns = []
if var in diags_varlist:
if region != "Global":
columns.append("{desc} ({region})".format(desc=diags_varlist[var]["desc"], region=region))
else:
columns.append(diags_varlist[var]["desc"])
else:
columns.append("")
for s in coll_def.get("seasons", ["ANN"]):
files = filenames(collnum, p, var, obs_set=diags_obslist[o]["filekey"], combined=combined, season=s, region=region)
f = OutputFile(files[0], title="{season}".format(season=s), other_files=[filename_to_fileobj(f) for f in files[1:]])
columns.append(f)
if collnum == "topten":
page.addRow(OutputRow(title, columns), 0)
else:
page.addRow(OutputRow(title, columns), obs_index)
elif collnum == "2":
for var in obsvars[o]:
files = filenames(collnum, p, var, obs_set=diags_obslist[o]["filekey"], combined=True)
f = OutputFile(files[0], title="Plot", other_files=[filename_to_fileobj(f) for f in files[1:]])
row = OutputRow(var, columns=[f])
page.addRow(row, 0)
elif collnum == "11":
for var in obsvars[o]:
if var == "SWCF_LWCF":
group = 0
else:
group = 1
obs = diags_obslist[o]["filekey"]
files = filenames(collnum, p, var, obs_set=obs)
f = OutputFile(files[0], other_files=[filename_to_fileobj(f) for f in files[1:]])
row = OutputRow("{var} ({obs})".format(var=var, obs=obs), columns=[f])
page.addRow(row, group)
elif collnum == "12":
regions = station_names
for region in regions:
cols = []
for var in ["T", "Q", "H"]:
files = filenames(collnum, p, var, region=region)
f = OutputFile(files[0], other_files=[filename_to_fileobj(f) for f in files[1:]])
cols.append(f)
row = OutputRow(region, cols)
page.addRow(row, 0)
if package.lower() == "amwg":
# These sets don't have any variables, so they don't run through the normal system.
if collnum == "13":
regions = coll_def.get("regions", ["Global"])
seasons = coll_def.get("seasons", ["ANN"])
for region in regions:
cols = []
for season in seasons:
# This one is weird because it doesn't have variables.
root_name = form_file_rootname(collnum, [], region=region, season=season)
fname = form_filename(root_name, ["png"])[0]
cols.append(OutputFile(fname))
page.addRow(OutputRow(region, cols), 0)
elif collnum == "14":
r = OutputRow("Space and Time", columns=[OutputFile("set14_ANN_SPACE_TIME.png")])
page.addRow(r, 0)
r = OutputRow("Space Only", [OutputFile("set14_{}_SPACE.png".format(s)) for s in ["ANN", "DJF", "MAM", "JJA", "SON"]])
page.addRow(r, 0)
var_names = ["BIAS", "VAR", "CC"]
r = OutputRow("Space and Time", columns=[OutputFile("set14.METRICS_{}_SPACE_TIME.png".format(v)) for v in var_names])
page.addRow(r, 1)
r = OutputRow("Space Only", columns=[OutputFile("set14.METRICS_{}_SPACE.png".format(v)) for v in var_names])
page.addRow(r, 1)
r = OutputRow("Time Only", columns=["", "", OutputFile("set14.METRICS_CC_SPACE_TIME.png")])
page.addRow(r, 1)
return menus, pages
import multiprocessing
# Upper bound on how many diagnostic subprocesses may run concurrently.
MAX_PROCS = multiprocessing.cpu_count()
# Global bookkeeping for subprocesses launched by runcmdline():
pid_to_cmd = {}        # pid -> full command-line string that was launched
pid_to_tmpfile = {}    # pid -> temp file capturing the child's stdout/stderr
active_processes = []  # Popen objects that have been started and not yet reaped
DIAG_TOTAL = 0         # running count of diagnostic commands started so far
def cmderr(popened):
    """Log an error for a finished diagnostic subprocess that exited nonzero.

    The log-file path is recovered from the recorded command line, whose
    last whitespace-separated token is the --log_file argument.
    """
    failed_cmd = pid_to_cmd[popened.pid]
    logfile = failed_cmd.split(" ")[-1]
    logger.error("Command \n%s\n failed with code of %d. Log file is at %s.", failed_cmd, popened.returncode, logfile)
def runcmdline(cmdline, outlogdir, dryrun=False):
    """Expand one logical diagnostic request into per-(season, variable[, varopt])
    commands and launch them as subprocesses, at most MAX_PROCS at a time.

    cmdline   -- a 14-tuple (or 15-tuple when varopts are present) of option
                 strings for the default executable; any other length is run
                 as a single command, unmodified.
    outlogdir -- directory that receives one log file per launched command.
    dryrun    -- False to actually execute, or an open file object to which
                 the expanded command lines are written instead of being run.
    """
    global DIAG_TOTAL
    # the following is a total KLUDGE. It's more of a KLUDGE than last time.
    # I'm not proud of this but I feel threatned if I don't do it.
    # there is some sort of memory leak in vcs.
    # to work around this issue, we opted for a single execution of season & variable
    # isolate season and variable
    length = len(cmdline)
    split_cmdline = False
    if length == 14:
        (def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr,
         seasonstr, varstr, outstr, xmlstr, prestr, poststr, regionstr) = cmdline
        split_cmdline = True
    elif length == 15:
        #varopts included
        (def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr,
         seasonstr, varstr, outstr, xmlstr, prestr, poststr, regionstr, varopts) = cmdline
        split_cmdline = True
    # Accumulates one fully-expanded command tuple per subprocess to launch.
    CMDLINES = []
    # NOTE(review): 'files' is assigned here but never used in this function.
    files = []
    if split_cmdline:
        # '--seasons S1 S2 ...' / '--vars V1 V2 ...': keep the option word and
        # re-emit one command per (season, variable) combination.
        seasonstr = seasonstr.split(' ')
        seasonopts = seasonstr[0]
        seasons = seasonstr[1:]
        varstr = varstr.split(' ')
        Varopts = varstr[0]
        vars = varstr[1:]
        plotset = setstr.split(' ')[-1]
        pkg = packagestr.split(' ')[-1]
        region = regionstr.split(' ')[-1]
        for season in seasons:
            for var in vars:
                seasonstr = seasonopts + ' ' + season
                varstr = Varopts + ' ' + var
                # build new cmdline
                obs = poststr.split(" ")[-1]
                if length == 14:
                    # Per-command log file name encodes package/set/obs/var/season[/region].
                    if regionstr:
                        fname = "{pkg}_{set}_{obs}_{var}_{season}_{region}.log".format(pkg=pkg, set=plotset, obs=obs, var=var, season=season, region=region)
                    else:
                        fname = "{pkg}_{set}_{obs}_{var}_{season}.log".format(pkg=pkg, set=plotset, obs=obs, var=var, season=season)
                    log_file = os.path.join(outlogdir, fname)
                    cmdline = (def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr,
                               seasonstr, varstr, outstr, xmlstr, prestr, poststr,
                               regionstr, '--log_level DEBUG ', '--log_file', log_file, '--runby meta' )
                    CMDLINES += [cmdline]
                elif length == 15:
                    #varopts must be non empty
                    # One command per individual varopt value as well.
                    for vo in varopts.split("--varopts")[-1].split():
                        if regionstr:
                            fname = "{pkg}_{set}_{obs}_{var}_{opt}_{season}_{region}.log".format(pkg=pkg, set=plotset, obs=obs, var=var, opt=vo, season=season, region=region)
                        else:
                            fname = "{pkg}_{set}_{obs}_{var}_{opt}_{season}.log".format(pkg=pkg, set=plotset, obs=obs, var=var, opt=vo, season=season)
                        log_file = os.path.join(outlogdir, fname)
                        cmdline = (def_executable, pstr1, pstr2, obsstr, optionsstr, packagestr, setstr,
                                   seasonstr, varstr, outstr, xmlstr, prestr, poststr,
                                   regionstr, "--varopts", vo, '--log_level DEBUG ', '--log_file', log_file, '--runby meta')
                        CMDLINES += [cmdline]
    else:
        CMDLINES = [cmdline]
    # Dry-run mode: record the expanded commands (Python-2 print syntax) and stop.
    if dryrun is not False:
        for cmd in CMDLINES:
            print >>dryrun, " ".join(cmd)+" &"
        return
    for cmdline in CMDLINES:
        # Throttle: while all slots are busy, reap finished children, report
        # their status, and append each child's captured stdout/stderr to its
        # own log file.
        # NOTE(review): active_processes is popped while being iterated with
        # enumerate(), which can skip the element following a removal; the
        # surrounding while loop re-scans, so entries are eventually reaped —
        # confirm this is the intended behavior.
        while len(active_processes) >= MAX_PROCS:
            for i, p in enumerate(active_processes):
                if p.poll() is not None:
                    active_processes.pop(i)
                    if p.returncode != 0:
                        cmderr(p)
                    else:
                        logger.info("%s succeeded. pid= %s", pid_to_cmd[p.pid], p.pid)
                    cmd = pid_to_cmd[p.pid]
                    tmpfile = pid_to_tmpfile[p.pid]
                    f = open(tmpfile.name, 'r')
                    output = f.read()
                    # Last token of the stored command line is the log file path.
                    log_file = cmd.split(" ")[-1]
                    with open(log_file, "a") as log:
                        log.write("\n\n\nSTDOUT and STDERR\n\n")
                        log.write(output)
                    f.close()
                    tmpfile.close()
                    del pid_to_tmpfile[p.pid]
        cmd = " ".join(cmdline)
        # Child output is captured in a named temp file until the process is reaped.
        tmpfile = tempfile.NamedTemporaryFile()
        if True: # For some testing purposes, set to False to turn off all plotting.
            # NOTE(review): the bare 'except: sleep(1)' retries Popen forever on
            # any error (e.g. fork failure) — it will also mask real bugs.
            while True:
                try:
                    active_processes.append(subprocess.Popen(cmd, stdout=tmpfile, stderr=tmpfile, shell=True))
                    break
                except:
                    sleep(1)
            DIAG_TOTAL += 1
            PID = active_processes[-1].pid
            pid_to_tmpfile[PID] = tmpfile
            pid_to_cmd[PID] = cmd
            logger.info("%s begun pid= %s diag_total= %s", cmd, PID, DIAG_TOTAL)
# These 3 functions are used to add the variables to the database for speeding up
# classic view
def setnum( setname ):
    """Extract the plot set number from the full plot set name and return it
    as a string, or None when the name contains no digits.

    The plot set name should contain the set number, e.g.
    setname = ' 2- Line Plots of Annual Implied Northward Transport' -> '2'.
    """
    # A single search for a maximal run of decimal digits is equivalent to the
    # previous two-step approach (find first \d, then slice up to the next \D).
    mo = re.search( r'\d+', setname )
    if mo is None:
        return None
    return mo.group()
def list_vars(ft, package):
    """Return the deduplicated list of variable names which the diagnostics of
    the requested package(s) can compute for filetable *ft*.

    Relies on the module-global 'opts'; ensures opts['packages'] is populated
    (falling back to the single opts['package'] value).
    """
    dm = diagnostics_menu()
    vlist = []
    if 'packages' not in opts._opts:
        opts['packages'] = [ opts['package'] ]
    for pname in opts['packages']:
        # Menu keys may be upper- or lower-case; try both spellings.
        if pname not in dm:
            pname = pname.upper()
        if pname not in dm:
            pname = pname.lower()
        pclass = dm[pname]()
        slist = pclass.list_diagnostic_sets()
        # slist contains "Set 1 - Blah Blah Blah", "Set 2 - Blah Blah Blah", etc
        # (A dead 'snums = [setnum(x) for x in slist.keys()]' computation, whose
        # result was never used, has been removed.)
        for s in slist.keys():
            vlist.extend(pclass.list_variables(ft, ft, s)) # pass ft as "obs" since some of the code is not hardened against no obs fts
    vlist = list(set(vlist))
    return vlist
# This assumes dsname reflects the combination of datasets (somehow) if >2 datasets are provided
# Otherwise, the variable list could be off.
def postDB(fts, dsname, package, host=None):
    """POST the list of variables available in *fts* to the exploratory-analysis
    database for dataset *dsname*, via a shelled-out curl against *host*
    (default 'localhost:8081')."""
    if host == None:
        host = 'localhost:8081'
    vl = list_vars(fts[0], package)
    vlstr = ', '.join(vl)
    # NOTE(review): vlstr accumulates variables from the remaining filetables
    # (and without a separator before each appended group), but it is never
    # used below — the payload is built from str(vl), i.e. the first filetable
    # only. Verify whether the payload was meant to use vlstr instead.
    for i in range(len(fts)-1):
        vl_tmp = list_vars(fts[i+1], package)
        vlstr = vlstr+', '.join(vl_tmp)
    string = '\'{"variables": "'+str(vl)+'"}\''
    logger.info('Variable list: ' + string)
    # NOTE(review): the header reads "Context-Type" (sic); the standard HTTP
    # header is "Content-Type" — confirm before changing, the server may be
    # ignoring it entirely.
    command = "echo "+string+' | curl -d @- \'http://'+host+'/exploratory_analysis/dataset_variables/'+dsname+'/\' -H "Accept:application/json" -H "Context-Type:application/json"'
    logger.info('Adding variable list to database on %s', host)
    subprocess.call(command, shell=True)
# The driver part of the script
if __name__ == '__main__':
    # Driver: parse command-line options, run every requested diagnostic
    # collection, then build the viewer index (and optionally upload it).
    # NOTE(review): this file uses Python-2-only constructs ('print >>',
    # execfile, indexing dict.keys(), hmac.new(str)) — it requires Python 2.
    opts = Options()
    opts.parseCmdLine()
    opts.verifyOptions()
    if opts['package'] == None or opts['package'] == '':
        logger.critical('Please specify a package when running metadiags.')
        quit()
    package = opts['package'].upper()
    # The package master modules define diags_collection, diags_obslist, etc.
    if package == 'AMWG':
        from metrics.frontend.amwgmaster import *
    elif package == 'LMWG':
        from metrics.frontend.lmwgmaster import *
    if opts._opts["custom_specs"] is not None:
        # Custom spec files can override the collection tables just imported.
        execfile(opts._opts["custom_specs"])
        message = diags_collection['5']['CLDMED']
        logger.info(str(message))
        message = diags_obslist
        logger.info(str(message))
    # do a little (post-)processing on the model/obs passed in.
    model_fts = []
    model_paths = []
    for i in range(len(opts['model'])):
        model_fts.append(path2filetable(opts, modelid=i))
        model_paths.append(opts['model'][i]["path"])
    model_dict = make_ft_dict(model_fts)
    # For each model entry, prefer the climatology filetable when present and
    # fall back to the raw-data filetable otherwise.
    raw_fts = []
    climo_fts = []
    fts = []
    for i in range(len(model_dict.keys())):
        raw_fts.append(None)
        climo_fts.append(None)
        fts.append(None)
        item = model_dict[model_dict.keys()[i]]
        if item['raw'] != None:
            raw_fts[i] = item['raw']
        if item['climos'] != None:
            climo_fts[i] = item['climos']
        fts[i] = (climo_fts[i] if climo_fts[i] is not None else raw_fts[i])
    num_models = len(model_dict.keys())
    num_obs = len(opts['obs'])
    if num_obs != 0:
        obspath = opts['obs'][0]['path']
    else:
        obspath = None
    outpath = opts['output']['outputdir']
    colls = opts['sets']
    dsname = opts['dsname']
    if dsname is None:
        # Default dataset name: today's date.
        import datetime
        dsname = datetime.date.today().strftime("%Y-%m-%d")
    hostname = opts["dbhost"]
    # Kludge to make sure colormaps options are passed to diags
    # If user changed them
    for K in diags_collection.keys():
        tmpDict = diags_collection[K].get("options", {})
        cmaps = opts._opts["colormaps"]
        tmpDict["colormaps"] = " ".join(["%s=%s" % (k, cmaps[k]) for k in cmaps])
        diags_collection[K]["options"] = tmpDict
    # In dry-run mode the commands are written to a shell script (optionally
    # with an SBATCH header for slurm submission) instead of being executed.
    if opts["dryrun"]:
        fnm = os.path.join(outpath, "metadiags_commands.sh")
        dryrun = open(fnm, "w")
        logger.info("List of commands is in: %s",fnm)
        if opts["sbatch"] > 0:
            print >> dryrun, "#!/bin/bash"
            print >> dryrun, """#SBATCH -p debug
#SBATCH -N %i
#SBATCH -t 00:30:00
#SBATCH -J metadiag
#SBATCH -o metadiags.o%%j
module use /usr/common/contrib/acme/modulefiles
module load uvcdat/batch
""" % (opts["sbatch"])
    else:
        dryrun = False
    xmlflag = opts["output"]["xml"]
    index = OutputIndex("UVCMetrics %s" % package.upper(), version=dsname)
    # Build data_hash from file paths of all input files (models and then obs)
    import hmac
    data_path_hmac = hmac.new("uvcmetrics")
    for path in sorted(model_paths):
        data_path_hmac.update(path)
    # NOTE(review): obspath can be None here (no obs supplied), which would make
    # hmac.update() raise — confirm whether obs are effectively required.
    data_path_hmac.update(obspath)
    data_hash = data_path_hmac.hexdigest()
    menus, pages = generatePlots(model_dict, obspath, outpath, package, xmlflag, data_hash, colls=colls,dryrun=dryrun)
    for page in pages:
        # Grab file metadata for every image that exists.
        for group in page.rows:
            for row in group:
                for col in row.columns:
                    if isinstance(col, OutputFile):
                        path = os.path.join(outpath, package.lower(), col.path)
                        if os.path.exists(path):
                            if os.path.splitext(col.path)[1] == ".png":
                                col.meta = vcs.png_read_metadata(path)
        index.addPage(page)
    index.menu = menus
    index.toJSON(os.path.join(outpath, package.lower(), "index.json"))
    # Wait for any diagnostic subprocesses still running before finishing up.
    for proc in active_processes:
        result = proc.wait()
        if result != 0:
            cmderr(proc)
        else:
            logger.info("%s succeeded.",pid_to_cmd[proc.pid])
    if opts["dryrun"]:
        if opts["sbatch"] > 0:
            print >>dryrun, "wait"
        dryrun.close()
        if opts["sbatch"] > 0:
            # Submit the generated script to slurm.
            import shlex
            cmd = "sbatch %s" % fnm
            logger.info("Commmand: sbatch %s", fnm)
            subprocess.call(shlex.split(cmd))
    if opts["do_upload"]:
        upload_path = os.path.join(outpath, package.lower())
        subprocess.call(["upload_output", "--server", hostname, upload_path])
| CDAT/uvcmetrics | src/python/frontend/metadiags.py | metadiags.py | py | 45,489 | python | en | code | 3 | github-code | 13 |
32827431359 | import logging
import sys
import numpy as np
import tensorflow as tf
# Silence TF's verbose logging; enable eager execution when running under TF1
# (a no-op / failure silently ignored on TF2).
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
try:
    tf.compat.v1.enable_eager_execution()
except Exception:
    pass
from deeplite.profiler import ComputeEvalMetric, Device
from deeplite.tf_profiler.tf_inference import get_accuracy
from deeplite.tf_profiler.tf_profiler import *
from deeplite.tf_profiler.tf_profiler import TFProfiler
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# Step 1: Define native Tensorflow dataloaders and model (tf.data.Dataset)
# 1a. data_splits = {"train": train_dataloder, "test": test_dataloader}
# CIFAR-100: scale pixels to [0, 1] and one-hot encode the 100 class labels.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = np.eye(100)[y_train.reshape(-1)]
y_test = np.eye(100)[y_test.reshape(-1)]
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
    .shuffle(buffer_size=x_train.shape[0]) \
    .batch(128)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) \
    .batch(128)
data_splits = {'train': train_dataset, 'test': test_dataset}
# 1b. Load the native Tensorflow Keras model: Transfer learning from pretrained model
# NOTE(review): this applies MobileNetV2's preprocess_input to a VGG19 backbone,
# and the pixels were already rescaled to [0, 1] above — confirm the intended
# preprocessing pipeline before reusing this example.
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input  # was: tf.keras.applications.vgg19.preprocess_input
base_model = tf.keras.applications.VGG19(input_shape=(32, 32, 3),
                                         include_top=False,
                                         weights='imagenet')
inputs = tf.keras.Input(shape=(32, 32, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)  # keep the pretrained backbone in inference mode
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(100)(x)  # logits for the 100 CIFAR-100 classes
native_teacher = tf.keras.Model(inputs, outputs)
native_teacher.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.SGD(), metrics=['accuracy'])
# Step 2: Create Profiler class and register the profiling functions
data_loader = TFProfiler.enable_forward_pass_data_splits(data_splits)
profiler = TFProfiler(native_teacher, data_loader, name="Original Model")
profiler.register_profiler_function(ComputeFlops())
profiler.register_profiler_function(ComputeSize())
profiler.register_profiler_function(ComputeParams())
profiler.register_profiler_function(ComputeLayerwiseSummary())
profiler.register_profiler_function(ComputeExecutionTime())
profiler.register_profiler_function(ComputeEvalMetric(get_accuracy, 'accuracy', unit_name='%'))
# Step 3: Compute the registered profiler metrics for the Tensorflow Keras Model
profiler.compute_network_status(batch_size=1, device=Device.GPU, short_print=False,
                                include_weights=True, print_mode='info')
| Deeplite/deeplite-profiler | examples/tf_example.py | tf_example.py | py | 2,828 | python | en | code | 23 | github-code | 13 |
33566142828 | import streamlit as st
import pickle
import numpy as np
model=pickle.load(open('model.pkl','rb'))
def predict_forest(chlorides, alcohol):
    """Run the pickled classifier on one (chlorides, alcohol) sample and
    return its class-0 probability rounded to two decimal places."""
    features = np.array([[chlorides, alcohol]]).astype(np.float64)
    proba = model.predict_proba(features)
    rounded = '{0:.{1}f}'.format(proba[0][0], 2)
    return float(rounded)
def main():
    """Render the Streamlit page: collect inputs, run the model on demand and
    display a quality banner based on the predicted probability."""
    st.title("Streamlit Tutorial")
    html_temp = """
    <div style="background-color:#025246 ;padding:10px">
    <h2 style="color:white;text-align:center;">wine quality Prediction ML App </h2>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    chlorides = st.text_input("chlorides","Type Here")
    alcohol = st.text_input("alcohol","Type Here")
    safe_html="""
    <div style="background-color:#F4D03F;padding:10px >
    <h2 style="color:white;text-align:center;"> Your wine is good</h2>
    </div>
    """
    danger_html="""
    <div style="background-color:#F08080;padding:10px >
    <h2 style="color:black ;text-align:center;"> Your wine is in bad</h2>
    </div>
    """
    if st.button("Predict"):
        # predict_forest returns the probability of the wine being BAD (as the
        # success message below states).
        output=predict_forest(chlorides,alcohol)
        st.success('The probability of wine being bad is {}'.format(output))
        # Bug fix: the branches were inverted — a high probability of being
        # bad used to display the "good" banner. Show the danger banner when
        # the bad-probability exceeds 0.5, and the safe banner otherwise.
        if output > 0.5:
            st.markdown(danger_html,unsafe_allow_html=True)
        else:
            st.markdown(safe_html,unsafe_allow_html=True)
if __name__=='__main__':
main() | data2450/wine-qulity-prediction-ML | app.py | app.py | py | 1,407 | python | en | code | 0 | github-code | 13 |
72829019219 | import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
class ContentLoss(nn.Module):
    """Transparent module that records a weighted MSE content loss.

    Stores ``target.detach() * weight`` at construction time; on each forward
    pass it computes ``MSE(input * weight, stored_target)`` into ``self.loss``
    and passes the input through unchanged.
    """

    def __init__(self, target, weight):
        super(ContentLoss, self).__init__()
        self.weight = weight
        # Detach so the target is a constant w.r.t. the optimized image.
        self.target = target.detach() * weight
        self.criterion = nn.MSELoss()

    def forward(self, input):
        self.output = input
        self.loss = self.criterion(input * self.weight, self.target)
        return self.output

    def backward(self, retain_graph=True):
        # Propagate gradients for this loss; keep the graph so sibling
        # losses can backpropagate through the same forward pass.
        self.loss.backward(retain_graph=retain_graph)
        return self.loss
class GramMatrix(nn.Module):
    """Compute the normalized Gram matrix of a 4-D feature map.

    The (batch, channels, height, width) input is flattened into a
    (batch*channels, height*width) matrix F; the result is F @ F.T divided by
    the total number of elements, so feature-correlation magnitudes are
    comparable across layer sizes.
    """

    def forward(self, input):
        batch, channels, height, width = input.size()
        flat = input.view(batch * channels, height * width)
        gram = torch.mm(flat, flat.t())
        return gram.div(batch * channels * height * width)
class StyleLoss(nn.Module):
    """Transparent module that records a weighted Gram-matrix (style) loss.

    Stores a detached copy of the target Gram matrix scaled by ``weight``; on
    each forward pass it computes the input's Gram matrix, scales it in place
    by the same weight, records ``MSE(G, target)`` into ``self.loss`` and
    passes the input through unchanged.
    """

    def __init__(self, target, weight):
        super(StyleLoss, self).__init__()
        # self.target = target.detach()*weight
        # In the original version, the api .detach() was used to make
        # sure that requires_grad == False, Because it is required by criterion function.
        self.weight = weight
        self.target = Variable(target.data.clone(), requires_grad=False) * weight
        self.gram = GramMatrix()
        self.criterion = nn.MSELoss()

    def forward(self, input):
        self.output = input
        weighted_gram = self.gram(input.clone())
        weighted_gram.mul_(self.weight)
        self.G = weighted_gram
        self.loss = self.criterion(self.G, self.target)
        return self.output

    def backward(self, retain_graph=True):
        # Keep the graph alive so other loss modules can also backpropagate.
        self.loss.backward(retain_graph=retain_graph)
        return self.loss
def get_style_model_and_losses(cnn, style_img, content_img,
                               content_layers, style_layers,
                               style_weight=1000, content_weight=1,
                               ):
    """Copy the layers of *cnn* into a new nn.Sequential, inserting
    ContentLoss / StyleLoss modules right after the layers whose generated
    names appear in *content_layers* / *style_layers*.

    Returns (model, style_losses, content_losses); the two lists hold the
    inserted loss modules so their .loss values can be read after a forward
    pass of the assembled model.
    """
    content_losses = []
    style_losses = []
    # Work on a deep copy so the caller's network is left untouched.
    cnn = copy.deepcopy(cnn)
    model = nn.Sequential()
    gram = GramMatrix()
    i = 1
    for layer in list(cnn):
        if isinstance(layer, nn.Conv2d):
            name = "conv_"+str(i)
            model.add_module(name, layer)
            if name in content_layers:
                # Target activations: the partial model applied to the content image.
                target = model(content_img)#.clone()
                content_loss = ContentLoss(target, content_weight)
                model.add_module("content_loss_"+str(i),content_loss)
                content_losses.append(content_loss)
            if name in style_layers:
                # Target style: Gram matrix of the partial model on the style image.
                target_feature = model(style_img)#.clone()
                target_gram = gram(target_feature)
                style_loss = StyleLoss(target_gram, style_weight)
                model.add_module("style_loss_"+str(i), style_loss)
                style_losses.append(style_loss)
        if isinstance(layer,nn.MaxPool2d):
            name = "pool_"+str(i)
            model.add_module(name,layer)
        if isinstance(layer,nn.ReLU):
            name = "relu_"+str(i)
            model.add_module(name,layer)
            if name in content_layers:
                target = model(content_img)#.clone()
                content_loss = ContentLoss(target, content_weight)
                # NOTE(review): module name here is "content_loss"+i, missing
                # the underscore used in the Conv2d branch — confirm whether
                # this inconsistency is intentional.
                model.add_module("content_loss"+str(i),content_loss)
                content_losses.append(content_loss)
            if name in style_layers:
                target_feature = model(style_img)#.clone()
                target_gram = gram(target_feature)
                style_loss = StyleLoss(target_gram, style_weight)
                model.add_module("style_loss_"+str(i), style_loss)
                style_losses.append(style_loss)
        # i counts every processed layer (conv, pool and relu alike).
        i=i+1
return model, style_losses, content_losses | tianjiu233/cv-models | simple_style_transfer/utils.py | utils.py | py | 3,823 | python | en | code | 12 | github-code | 13 |
7771722179 | """sistemacondominio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from condominio.views import index, listacondominio, listaconblomov, listaconblomorador, listaconta, listaleitura, GerarPDF, geradorPDFgeral, enviaremail, calcularmovimentacao, enviarwhatsApp
from emailer.views import sendemail
from movimentacao.views import lancar_leituras
# from emailer.views import SendFormEmail
# Route table for the condominium system; URL parameters (idb, ma, id_morador,
# email, apto) are passed through to the corresponding view functions.
urlpatterns = [
    # path('create/<int:idb><str:ma>/', include('movimentacao.urls')),
    path('create/', include('movimentacao.urls')),
    path('login/', auth_views.LoginView.as_view(template_name='Login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='Logout.html'), name='logout'),
    path('gerarPDF/<str:ma>/<int:id_morador>/<int:idb>/',
         GerarPDF.as_view(), name='gerarPDF'),
    path('', index, name='index'),
    path('listacondominio/<int:id>/', listacondominio, name='listacondominio'),
    path('listaconblomov/<int:id>/',
         listaconblomov, name='listaconblomov'),
    path('listaconblomorador/<int:idb>/<str:ma>/',
         listaconblomorador, name='listaconblomorador'),
    path('geradorPDFgeral/<int:idb>/<str:ma>/',
         geradorPDFgeral, name='geradorPDFgeral'),
    path('listaconta/<int:idb>/<str:ma>/<int:id_morador>/',
         listaconta, name='listaconta'),
    path('listaleitura/<int:idb>/<str:ma>/<int:id_morador>/',
         listaleitura, name='listaleitura'),
    path('sendemail/<str:ma>/<str:email>/<str:apto>/',
         sendemail, name='sendemail'),
    path('enviaremail/<int:idb>/<str:ma>/',
         enviaremail, name='enviaremail'),
    # path('sendwhatsApp/<str:ma>/<str:email>/<str:apto>/',
    #      sendwhatsApp, name='sendwhatsApp'),
    path('enviarwhatsApp/<int:idb>/<str:ma>/<int:id_morador>/',
         enviarwhatsApp, name='enviarwhatsApp'),
    path('lancar_leituras/<int:idb>/<str:ma>/',
         lancar_leituras, name='lancar_leituras'),
    # Fix: this route was registered twice with an identical pattern, view and
    # name; the redundant duplicate entry has been removed.
    path('calcularmovimentacao/<int:idb>/<str:ma>/',
         calcularmovimentacao, name='calcularmovimentacao'),
    # path('create-form/',
    #      create_contact, name='create-contact'),
    path('admin/', admin.site.urls),
    # path('', TemplateView.as_view(template_name="home.html"), name='home'),
    # path('send-form-email/', SendFormEmail.as_view(), name='send_email'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Branding for the Django admin site.
admin.AdminSite.site_header = 'Sistemas de Condomínios'
admin.AdminSite.site_title = 'Condomínios'
admin.AdminSite.index_title = 'Condomínios'
# Expose django-debug-toolbar routes only when DEBUG is enabled.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| Fabiojoao02/sistemas-condominio-mysql | sistemacondominio/urls.py | urls.py | py | 3,570 | python | en | code | 0 | github-code | 13 |
27284088988 | # 以下代码为提示框架
# Exam answer: read "course-name score" pairs from stdin (an empty line ends
# the input), then report the highest-scoring course, the lowest-scoring
# course, and the average score.
# (The unrelated commented-out turtle-drawing template from the exercise
# scaffold has been removed.)
data = input()  # each line: course name followed by its score
d = {}
while data :
    data = data.split()
    d[data[0]] = data[1]
    data = input()
ls = list(d.items())
# Bug fix: scores are stored as strings, and string comparison ranks '99'
# above '100'; compare numerically instead so the ordering is by score value.
ls.sort(key = lambda x: int(x[1]), reverse = True )
maxn,maxl = ls[0]
minn,minl = ls[len(ls)- 1]
avg = 0
for i in d.values() :
    avg = avg + int(i)
avg = avg/len(ls)
print("最高分课程是{} {},最低分课程是{} {},平均分是{:.2f}".format(maxn,maxl,minn,minl,avg))
| DodgeV/learning-programming | 二级/真题/2018年9月第四套/PY202.py | PY202.py | py | 745 | python | en | code | 3 | github-code | 13 |
12723355812 | #美化圖片
# Image enhancement pipeline: brightness/contrast, sharpening, denoising and
# saturation boost, saving the result to disk and displaying it.
import cv2
import numpy as np

# 1. Read the image (BGR).
image = cv2.imread('108390.jpg')

# 2. Adjust contrast (gain alpha) and brightness (offset beta).
alpha = 1.5  # contrast gain applied to every pixel
beta = 25  # brightness offset added after scaling
result = cv2.addWeighted(image, alpha, np.zeros(image.shape, image.dtype), 0, beta)

# 3. Sharpen the image with a 3x3 high-boost kernel.
kernel = np.array([[-1, -1, -1],
                   [-1, 9, -1],
                   [-1, -1, -1]])
result = cv2.filter2D(result, -1, kernel)

# 4. Remove noise with non-local-means denoising.
result = cv2.fastNlMeansDenoisingColored(result, None, 10, 10, 7, 21)

# 5. Boost color saturation in HSV space.
# NOTE(review): the uint8 multiply can wrap around for saturation values
# above 170 — consider clipping before converting back.
hsv = cv2.cvtColor(result, cv2.COLOR_BGR2HSV)
hsv[:, :, 1] = hsv[:, :, 1] * 1.5
result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

# 6. Save and display the result.
cv2.imwrite('output.jpg', result)
# Bug fix: cv2_imshow is a Google-Colab-only helper that was never imported
# here (NameError in a plain script); use the standard OpenCV display calls.
cv2.imshow('output', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
| ftbb100/opencv_example | bu.py | bu.py | py | 735 | python | en | code | 0 | github-code | 13 |
11068045571 | #!/usr/bin/env python
# coding: utf-8
# # TD4 - Deep Q-Network
# # Tutorial - Deep Q-Learning
#
# Deep Q-Learning uses a neural network to approximate $Q$ functions. Hence, we usually refer to this algorithm as DQN (for *deep Q network*).
#
# The parameters of the neural network are denoted by $\theta$.
# * As input, the network takes a state $s$,
# * As output, the network returns $Q_\theta [a | s] = Q_\theta (s,a) = Q(s, a, \theta)$, the value of each action $a$ in state $s$, according to the parameters $\theta$.
#
#
# The goal of Deep Q-Learning is to learn the parameters $\theta$ so that $Q(s, a, \theta)$ approximates well the optimal $Q$-function $Q^*(s, a) \simeq Q_{\theta^*} (s,a)$.
#
# In addition to the network with parameters $\theta$, the algorithm keeps another network with the same architecture and parameters $\theta^-$, called **target network**.
#
# The algorithm works as follows:
#
# 1. At each time $t$, the agent is in state $s_t$ and has observed the transitions $(s_i, a_i, r_i, s_i')_{i=1}^{t-1}$, which are stored in a **replay buffer**.
#
# 2. Choose action $a_t = \arg\max_a Q_\theta(s_t, a)$ with probability $1-\varepsilon_t$, and $a_t$=random action with probability $\varepsilon_t$.
#
# 3. Take action $a_t$, observe reward $r_t$ and next state $s_t'$.
#
# 4. Add transition $(s_t, a_t, r_t, s_t')$ to the **replay buffer**.
#
# 4. Sample a minibatch $\mathcal{B}$ containing $B$ transitions from the replay buffer. Using this minibatch, we define the loss:
#
# $$
# L(\theta) = \sum_{(s_i, a_i, r_i, s_i') \in \mathcal{B}}
# \left[
# Q(s_i, a_i, \theta) - y_i
# \right]^2
# $$
# where the $y_i$ are the **targets** computed with the **target network** $\theta^-$:
#
# $$
# y_i = r_i + \gamma \max_{a'} Q(s_i', a', \theta^-).
# $$
#
# 5. Update the parameters $\theta$ to minimize the loss, e.g., with gradient descent (**keeping $\theta^-$ fixed**):
# $$
# \theta \gets \theta - \eta \nabla_\theta L(\theta)
# $$
# where $\eta$ is the optimization learning rate.
#
# 6. Every $N$ transitions ($t\mod N$ = 0), update target parameters: $\theta^- \gets \theta$.
#
# 7. $t \gets t+1$. Stop if $t = T$, otherwise go to step 2.
# Imports
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from copy import deepcopy
import gym
from gym.wrappers import Monitor
# from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
from IPython.display import clear_output
from pathlib import Path
import base64
print(f"python --version = {sys.version}")
print(f"torch.__version__ = {torch.__version__}")
print(f"np.__version__ = {np.__version__}")
print(f"gym.__version__ = {gym.__version__}")
# ## Torch 101
#
# >"The torch package contains data structures for multi-dimensional tensors and defines mathematical operations over these tensors. Additionally, it provides many utilities for efficient serializing of Tensors and arbitrary types, and other useful utilities.
# [...] provides classes and functions implementing automatic differentiation of arbitrary scalar valued functions."
# [PyTorch](https://pytorch.org/docs/stable/index.html)
#
# ### Variable types
# Very similar syntax to numpy.
zero_torch = torch.zeros((3, 2))
print('zero_torch is of type {:s}'.format(str(type(zero_torch))))
# Torch -> Numpy: simply call the numpy() method.
zero_np = np.zeros((3, 2))
assert (zero_torch.numpy() == zero_np).all()
# Numpy -> Torch: simply call the corresponding function on the np.array.
zero_torch_float = torch.FloatTensor(zero_np)
print('Float:\n', zero_torch_float)
zero_torch_int = torch.LongTensor(zero_np)
print('Int:\n', zero_torch_int)
zero_torch_bool = torch.BoolTensor(zero_np)
print('Bool:\n', zero_torch_bool)
# Reshape
print('View new shape...', zero_torch.view(1, 6))
# Note that print(zero_torch.reshape(1, 6)) would work too.
# The difference is in how memory is handled (view imposes contiguity).
# Algebra
a = torch.randn((3, 2))
b = torch.randn((3, 2))
print('Algebraic operations are overloaded:\n', a, '\n+\n', b, '\n=\n', a+b )
# More generally, torch shares the syntax of many attributes and functions with Numpy.
# ### Gradient management
# torch.Tensor is a similar yet more complicated data structure than np.array.
# It is basically a static array of number but may also contain an overlay to
# handle automatic differentiation (i.e keeping track of the gradient and which
# tensors depend on which).
# To access the static array embedded in a tensor, simply call the detach() method
print(zero_torch.detach())
# When inside a function performing automatic differentiation (basically when training
# a neural network), never use detach() otherwise meta information regarding gradients
# will be lost, effectively freezing the variable and preventing backprop for it.
# However when returning the result of training, do use detach() to save memory
# (the naked tensor data uses much less memory than the full-blown tensor with gradient
# management, and is much less prone to mistake such as bad copy and memory leak).
# We will solve theta * x = y in theta for x=1 and y=2
x = torch.ones(1)
y = 2 * torch.ones(1)
# Actually by default torch does not add the gradient management overlay
# when declaring tensors like this. To force it, add requires_grad=True.
theta = torch.randn(1, requires_grad=True)
# Optimisation routine
# (Adam is a sophisticated variant of SGD, with adaptive step).
optimizer = optim.Adam(params=[theta], lr=0.1)
# Loss function
print('Initial guess:', theta.detach())
for _ in range(100):
# By default, torch accumulates gradients in memory.
# To obtain the desired gradient descent beahviour,
# just clean the cached gradients using the following line:
optimizer.zero_grad()
# Quadratic loss (* and ** are overloaded so that torch
# knows how to differentiate them)
loss = (y - theta * x) ** 2
# Apply the chain rule to automatically compute gradients
# for all relevant tensors.
loss.backward()
# Run one step of optimisation routine.
optimizer.step()
print('Final estimate:', theta.detach())
# ## Setting the environment
#
# ### 1 - Define the GLOBAL parameters
# Environment: classic CartPole balancing task
env = gym.make("CartPole-v0")
# Discount factor
GAMMA = 0.99
# Minibatch size sampled from the replay buffer at each update
BATCH_SIZE = 256
# Capacity of the replay buffer
BUFFER_CAPACITY = 16384 # 10000
# Update target net every ... episodes
UPDATE_TARGET_EVERY = 32 # 20
# Initial value of epsilon (fully random exploration)
EPSILON_START = 1.0
# Time constant (in episodes) of the exponential epsilon decay
DECREASE_EPSILON = 200
# Minimum value of epsilon
EPSILON_MIN = 0.05
# Number of training episodes
N_EPISODES = 200
# Learning rate of the optimizer
LEARNING_RATE = 0.1
# ### 2 - Replay buffer
class ReplayBuffer:
    """Fixed-capacity circular buffer of transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # underlying storage, grows up to `capacity`
        self.position = 0         # index of the next slot to overwrite

    def push(self, state, action, reward, next_state):
        """Store one transition, overwriting the oldest once the buffer is full."""
        transition = (state, action, reward, next_state)
        if len(self.memory) == self.capacity:
            # Full: recycle the oldest slot in place.
            self.memory[self.position] = transition
        else:
            self.memory.append(transition)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly at random, with replacement."""
        return random.choices(self.memory, k=batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
# create instance of replay buffer
replay_buffer = ReplayBuffer(BUFFER_CAPACITY)
# ### 3 - Neural Network
class Net(nn.Module):
    """Small fully connected network mapping an observation to one Q-value per action."""

    def __init__(self, obs_size, hidden_size, n_actions):
        super().__init__()
        # obs -> hidden -> ReLU -> per-action values
        input_layer = nn.Linear(obs_size, hidden_size)
        output_layer = nn.Linear(hidden_size, n_actions)
        self.net = nn.Sequential(input_layer, nn.ReLU(), output_layer)

    def forward(self, x):
        """Return the Q-value estimates for input batch ``x``."""
        return self.net(x)
# ### 3.5 - Loss function and optimizer
# create network and target network (same architecture; target is synced
# periodically from the online net inside the training loop)
hidden_size = 128
obs_size = env.observation_space.shape[0]
n_actions = env.action_space.n
q_net = Net(obs_size, hidden_size, n_actions)
target_net = Net(obs_size, hidden_size, n_actions)
# objective and optimizer — only the online net's parameters are optimized
# NOTE(review): lr=0.1 is unusually high for Adam (typical DQN values are
# around 1e-3/1e-4) — confirm this is intentional for the exercise.
objective = nn.MSELoss()
optimizer = optim.Adam(params=q_net.parameters(), lr=LEARNING_RATE)
# #### Question 0 (to do at home, not during the live session)
#
# With your own word, explain the intuition behind DQN. Recall the main parts of the aformentionned algorithm.
# ## Implementing the DQN
def get_q(states):
    """Evaluate the Q-network on a batch of states.

    Parameters
    ----------
    states : sequence of observations, length ``len(states)``.

    Returns
    -------
    np.ndarray of shape (len(states), n_actions) holding Q(s, a) estimates.
    """
    # Inference only: no gradient bookkeeping needed.
    with torch.no_grad():
        batch = torch.FloatTensor([states])   # prepend a batch axis: (1, len(states), obs)
        q_values = q_net(batch).numpy()       # (1, len(states), n_actions)
    return q_values[0]
# #### Question 1
#
# Implement the `eval_dqn` function.
def eval_dqn(n_sim=5):
    """
    ** TO BE IMPLEMENTED **

    Monte Carlo evaluation of DQN agent: run ``n_sim`` greedy episodes and
    return the array of per-episode reward sums.

    Repeat n_sim times:
        * Run the DQN policy until the environment reaches a terminal state (= one episode)
        * Compute the sum of rewards in this episode
        * Store the sum of rewards in the episode_rewards array.
    """
    # Evaluate on a copy so the training episode in the global `env` is not disturbed.
    env_copy = deepcopy(env)
    episode_rewards = np.zeros(n_sim)  # one slot per simulated episode
    return episode_rewards  # placeholder: all zeros until the exercise is solved
# #### Question 2
#
# Implement the `choose_action` function.
def choose_action(state, epsilon):
    """
    ** TO BE IMPLEMENTED **

    Return action according to an epsilon-greedy exploration policy:
    with probability ``epsilon`` a uniformly random action, otherwise
    argmax_a Q(state, a) from the Q-network.
    """
    return 0  # placeholder: always the first action until implemented
# #### Question 3
#
# Implement the `update` function
def update(state, action, reward, next_state, done):
    """
    ** TO BE COMPLETED **

    Store one transition in the replay buffer and (once implemented) perform
    one optimization step on the Q-network. Returns the scalar loss, or
    np.inf while the buffer is still smaller than one batch.
    """
    # add data to replay buffer
    if done:
        # Terminal transition: no successor state, so store None and let the
        # target computation zero out the bootstrap term.
        next_state = None
    replay_buffer.push(state, action, reward, next_state)

    if len(replay_buffer) < BATCH_SIZE:
        return np.inf  # not enough data to form a minibatch yet

    # get batch
    transitions = replay_buffer.sample(BATCH_SIZE)

    # Compute loss - TO BE IMPLEMENTED!
    values = torch.zeros(BATCH_SIZE)   # to be computed using batch: Q(s_i, a_i; theta)
    targets = torch.zeros(BATCH_SIZE)  # to be computed using batch: r_i + GAMMA * max_a' Q(s_i', a'; theta^-)
    loss = objective(values, targets)

    # Optimize the model - UNCOMMENT!
    # optimizer.zero_grad()
    # loss.backward()
    # optimizer.step()

    return loss.detach().numpy()
# #### Question 4
# Train a DQN on the `env` environment.
EVAL_EVERY = 5          # evaluate the greedy policy every EVAL_EVERY episodes
REWARD_THRESHOLD = 199  # stop early once mean evaluation reward reaches this
def train():
    """DQN training loop over N_EPISODES episodes.

    Interleaves epsilon-greedy acting, replay/network updates, periodic
    evaluation, target-network synchronization and epsilon decay; stops
    early once the evaluated mean reward reaches REWARD_THRESHOLD.
    """
    state = env.reset()
    epsilon = EPSILON_START
    ep = 0
    total_time = 0  # total environment steps across all episodes
    while ep < N_EPISODES:
        action = choose_action(state, epsilon)
        # take action and update replay buffer and networks
        next_state, reward, done, _ = env.step(action)
        loss = update(state, action, reward, next_state, done)
        # update state
        state = next_state
        # end episode if done
        if done:
            state = env.reset()
            ep += 1
            if ( (ep+1)% EVAL_EVERY == 0):
                rewards = eval_dqn()
                print("episode =", ep+1, ", reward = ", np.mean(rewards))
                if np.mean(rewards) >= REWARD_THRESHOLD:
                    break  # solved: stop before exhausting N_EPISODES
            # update target network (hard copy of the online weights)
            if ep % UPDATE_TARGET_EVERY == 0:
                target_net.load_state_dict(q_net.state_dict())
            # decrease epsilon: exponential decay from EPSILON_START towards EPSILON_MIN
            epsilon = EPSILON_MIN + (EPSILON_START - EPSILON_MIN) * np.exp(-1. * ep / DECREASE_EPSILON )
        total_time += 1
# Run the training loop
train()
# Evaluate the final policy over 20 Monte Carlo episodes
rewards = eval_dqn(20)
print("")
print("mean reward after training = ", np.mean(rewards))
# #### Question 5
#
# Experiment the policy network.
def show_video():
    """Embed every .mp4 found under ./videos as an autoplaying, looping HTML5
    player in the notebook output (used to display recorded episodes)."""
    html = []
    for mp4 in Path("videos").glob("*.mp4"):
        # Inline the video bytes as a base64 data URI so no file server is needed.
        video_b64 = base64.b64encode(mp4.read_bytes())
        html.append('''<video alt="{}" autoplay
                    loop controls style="height: 400px;">
                    <source src="data:video/mp4;base64,{}" type="video/mp4" />
                 </video>'''.format(mp4, video_b64.decode('ascii')))
    ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))
# Wrap the environment so every episode is recorded to ./videos as .mp4.
env = Monitor(env, './videos', force=True, video_callable=lambda episode: True)
for episode in range(1):
    done = False
    state = env.reset()
    while not done:
        # epsilon = 0.0: act greedily with the trained Q-network.
        action = choose_action(state, 0.0)
        state, reward, done, info = env.step(action)
env.close()  # flush the video recorder
# show_video()
# ### Experiments: Do It Yourself
# Remember the set of global parameters:
# ```
# # Environment
# env = gym.make("CartPole-v0")
#
# # Discount factor
# GAMMA = 0.99
#
# # Batch size
# BATCH_SIZE = 256
# # Capacity of the replay buffer
# BUFFER_CAPACITY = 16384 # 10000
# # Update target net every ... episodes
# UPDATE_TARGET_EVERY = 32 # 20
#
# # Initial value of epsilon
# EPSILON_START = 1.0
# # Parameter to decrease epsilon
# DECREASE_EPSILON = 200
# # Minimum value of epislon
# EPSILON_MIN = 0.05
#
# # Number of training episodes
# N_EPISODES = 200
#
# # Learning rate
# LEARNING_RATE = 0.1
# ```
# #### Question 6
#
# Craft an experiment and study the influence of the `BUFFER_CAPACITY` on the learning process (speed of *convergence*, training curves...)
# #### Question 7
#
# Craft an experiment and study the influence of the `UPDATE_TARGET_EVERY` on the learning process (speed of *convergence*, training curves...)
# #### Question 8
#
# If you have the computer power to do so, try to do a grid search on those two hyper-parameters and comment the results. Otherwise, study the influence of another hyper-parameter.
# ## Bonus: SAIL-DQN
#
#
# `choose_action`, `get_q` and `eval_dqn` remain the same.
#
# To be implemented:
# * `update_sail`, compared to `update`, modifies $y_i$ as explained above.
# * `train_sail` adds several steps to `train`.
#
# Tip #1: `replay_buffer` now contains returns as well.
#
# Tip #2: in the computed advantage, use $Q(s_i, a_i, \theta^-)$, not $Q(s_i, a_i)$. It makes the bonus more stable.
#
# Tip #3: `torch.maximum` can be used to compute the element-wise max between two arrays.
# #### Question 9
#
# Implement `update_sail` function.
def update_sail(state, action, reward, next_state, done):
    """
    ** TO BE COMPLETED **

    SAIL variant of `update`: transitions first go to a temporary buffer and
    are moved to the main buffer (with returns attached) at episode end.

    NOTE(review): `replay_buffer_temp` is never defined in this file — it must
    be created as part of the exercise before this function can run.
    """
    # add data to temporary replay buffer
    if done:
        # Terminal transition: mark the missing successor state with None.
        next_state = None
    replay_buffer_temp.push(state, action, reward, next_state)

    if len(replay_buffer) < BATCH_SIZE:
        return np.inf  # not enough data in the *main* buffer yet

    # get batch (sampled from the main buffer, which also stores returns)
    transitions = replay_buffer.sample(BATCH_SIZE)

    # Compute loss - TO BE IMPLEMENTED!
    values = torch.zeros(BATCH_SIZE)   # to be computed using batch
    targets = torch.zeros(BATCH_SIZE)  # to be computed using batch (SAIL-modified y_i)
    loss = objective(values, targets)

    # Optimize the model - UNCOMMENT!
    # optimizer.zero_grad()
    # loss.backward()
    # optimizer.step()

    return loss.detach().numpy()
# #### Question 10
#
# Implement the training loop.
def get_episode_returns(rewards, gamma=None):
    """Compute the discounted returns of one episode.

    For rewards r_0, ..., r_{T-1}, element t of the result is
    G_t = sum_{k >= t} gamma^(k-t) * r_k.

    Parameters
    ----------
    rewards : sequence of float
        Per-step rewards of a single episode.
    gamma : float, optional
        Discount factor; defaults to the module-level GAMMA.

    Returns
    -------
    list of float, same length as ``rewards``.
    """
    # Fix: `accumulate` was used but never imported at module level,
    # which made every call raise NameError.
    from itertools import accumulate

    if gamma is None:
        gamma = GAMMA
    # Backward scan: G_t = r_t + gamma * G_{t+1}.
    returns_reversed = accumulate(rewards[::-1],
                                  lambda acc, r: acc * gamma + r)
    return list(returns_reversed)[::-1]
def train_sail():
    """SAIL-DQN training loop.

    Same skeleton as `train`, but transitions of the current episode live in
    a temporary buffer and are moved to the main buffer — completed with
    their discounted returns — when the episode ends.

    NOTE(review): `replay_buffer_temp` is never defined in this file; it is
    part of the exercise and must exist before this loop can run.
    """
    state = env.reset()
    epsilon = EPSILON_START
    ep = 0
    total_time = 0  # total environment steps across all episodes
    while ep < N_EPISODES:
        action = choose_action(state, epsilon)
        # take action and update replay buffer and networks
        next_state, reward, done, _ = env.step(action)
        loss = update_sail(state, action, reward, next_state, done)
        # update state
        state = next_state
        # end episode if done
        if done:
            state = env.reset()
            ep += 1
            if ( (ep+1)% EVAL_EVERY == 0):
                rewards = eval_dqn()
                print("episode =", ep+1, ", reward = ", np.mean(rewards))
                if np.mean(rewards) >= REWARD_THRESHOLD:
                    break  # solved: stop early
            # fetch transitions from the temporary memory
            transitions = replay_buffer_temp.memory
            # calculate episode returns
            # TO IMPLEMENT
            # transfer transitions completed with returns to main memory
            # TO IMPLEMENT
            # reset the temporary memory
            # TO IMPLEMENT
            # update target network (hard copy of the online weights)
            if ep % UPDATE_TARGET_EVERY == 0:
                target_net.load_state_dict(q_net.state_dict())
            # decrease epsilon: exponential decay towards EPSILON_MIN
            epsilon = EPSILON_MIN + (EPSILON_START - EPSILON_MIN) * np.exp(-1. * ep / DECREASE_EPSILON )
        total_time += 1
# Run the SAIL training loop
train_sail()
# Evaluate the final policy over 20 Monte Carlo episodes
rewards = eval_dqn(20)
print("")
print("mean reward after training = ", np.mean(rewards))
# #### Question 11
#
# Display your policy in action.
from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
from IPython.display import clear_output
from pathlib import Path
import base64
def show_video():
    """Embed every .mp4 found under ./videos as an autoplaying, looping HTML5
    player in the notebook output.

    NOTE(review): identical to the earlier `show_video` definition in this
    file; this redefinition simply shadows it.
    """
    html = []
    for mp4 in Path("videos").glob("*.mp4"):
        # Inline the video bytes as a base64 data URI so no file server is needed.
        video_b64 = base64.b64encode(mp4.read_bytes())
        html.append('''<video alt="{}" autoplay
                    loop controls style="height: 400px;">
                    <source src="data:video/mp4;base64,{}" type="video/mp4" />
                 </video>'''.format(mp4, video_b64.decode('ascii')))
    ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))
# Wrap the environment so every episode is recorded to ./videos as .mp4.
env = Monitor(env, './videos', force=True, video_callable=lambda episode: True)
for episode in range(1):
    done = False
    state = env.reset()
    while not done:
        # epsilon = 0.0: act greedily with the trained policy.
        action = choose_action(state, 0.0)
        state, reward, done, info = env.step(action)
env.close()  # flush the video recorder
# show_video()
| tawlas/master_2_school_projects | reinforcement learning/TD4/TD4.py | TD4.py | py | 17,574 | python | en | code | 0 | github-code | 13 |
3410333806 | import xml.etree.ElementTree as ET
import json
# Parse the TagFinder SKOS thesaurus (RDF/XMP) of OpenStreetMap tags.
root = ET.parse('./tagfinder_thesaurus.rdf.xmp').getroot()
# XML namespace prefixes used by findall/find below.
namespaces = {
    'foaf': "http://xmlns.com/foaf/0.1/",
    'skos': "http://www.w3.org/2004/02/skos/core#",
    'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
    'osm': "http://wiki.openstreetmap.org/wiki/",
    'dcterms': "http://purl.org/dc/terms/",
}
# OSM primary feature keys: tags whose key is in this list are treated as
# "defined values" rather than free-form key=value tags.
defined_features = [
    "aerialway", "aeroway", "amenity", "barrier", "boundary", "building", "craft", "emergency", "geological", "healthcare", "highway", "historic", "landuse", "leisure", "man_made", "military", "natural", "office", "place", "power", "public_transport", "railway", "route", "shop", "sport", "telecom", "tourism", "water", "waterway",
]
# Tags related to defined features; Subfeatures are tags relating to features defined in OSMDefinedFeatures
# Keys relate to arbitrary metadata
data = {
    'definedValues': [], # e.g. <skos:Concept rdf:about="http://wiki.openstreetmap.org/wiki/Tag:shop=computer">
    'value': [], # e.g. <skos:Concept rdf:about="http://wiki.openstreetmap.org/wiki/Tag:shop=computer">
    'key': [] # e.g. <skos:Concept rdf:about="http://wiki.openstreetmap.org/wiki/Key:meadow">
}
i = 0  # NOTE(review): never used below; candidate for removal
for tagRoot in root:
    # Each concept's rdf:about URL encodes whether it is a Tag (key=value) or a Key.
    tagUrl = tagRoot.attrib['{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about']
    if 'Tag:' in tagUrl:
        tagPath = tagUrl.split('Tag:')[1]
        key = tagPath.split('=')[0]
        value = tagPath.split('=')[1]
        if key in defined_features:
            tagType = "definedValues"
        else:
            tagType = "value"
    elif 'Key:' in tagUrl:
        tagType = "key"
        tagPath = tagUrl.split('Key:')[1]
        key = tagPath
        value = ''
    else:
        continue  # neither a Tag nor a Key concept: skip it
    # Prefer the English scope note; entries with no lang attribute also pass.
    # (Later matches overwrite earlier ones, so the last match wins.)
    description = ""
    descriptionTags = tagRoot.findall('skos:scopeNote', namespaces)
    for tag in descriptionTags:
        if "{http://www.w3.org/XML/1998/namespace}lang" in tag.attrib:
            if tag.attrib["{http://www.w3.org/XML/1998/namespace}lang"] != "en":
                continue
        if hasattr(tag, 'text'):
            description = tag.text
    # Usage statistics: each osm:* element holds a JSON payload with a count.
    node_countTag = tagRoot.find('osm:node', namespaces)
    node_count = ''
    if node_countTag is not None:
        node_count = json.loads(node_countTag.text)['count']
    way_countTag = tagRoot.find('osm:way', namespaces)
    way_count = ''
    if way_countTag is not None:
        way_count = json.loads(way_countTag.text)['count']
    relation_countTag = tagRoot.find('osm:relation', namespaces)
    relation_count = ''
    if relation_countTag is not None:
        relation_count = json.loads(relation_countTag.text)['count']
    tag_example = {
        'key': key,
        'value': value,
        'description': description,
        'nodes': node_count,
        'ways': way_count,
        'relations': relation_count,
    }
    data[tagType].append(tag_example)
print("Found this many defined values:", len(data['definedValues']))
print("Found this many tags:", len(data['value']))
print("Found this many key:", len(data['key']))
# Write each category to its own pretty-printed JSON file.
with open('PrimaryValuesData.json', 'w') as fp:
    json.dump(data['definedValues'], fp, indent=4, sort_keys=True)
with open('KeyValueData.json', 'w') as fp:
    json.dump(data['value'], fp, indent=4, sort_keys=True)
with open('KeyData.json', 'w') as fp:
    json.dump(data['key'], fp, indent=4, sort_keys=True)
| philipbelesky/Caribou | OSM Feature Data/tagfinder_parse.py | tagfinder_parse.py | py | 3,362 | python | en | code | 21 | github-code | 13 |
7828543046 | '''
==================================================================
-- Author: Hamid Doostmohammadi, Azadeh Nazemi
-- Create date: 29/10/2020
-- Description: This code obtains keypoints from an RGB image
and extracts descriptors based on keypoints.
==================================================================
'''
import cv2
import sys
import os
import numpy as np
def KAZE(image):
    """Detect KAZE keypoints in ``image`` and build a fixed-length descriptor.

    Keeps the ``vector_size`` strongest keypoints (by detector response),
    concatenates their 64-dimensional KAZE descriptors, and zero-pads the
    result so every image yields a vector of the same length.

    Returns a ``(keypoints, descriptor)`` pair; the descriptor is a 1-D array
    of length vector_size * 64 (all ones when no descriptor could be computed).
    """
    # Number of strongest keypoints to keep; increase it for a longer descriptor.
    vector_size = 8
    needed_size = vector_size * 64  # each KAZE descriptor is 64-dimensional

    detector = cv2.KAZE_create()
    keypoints = detector.detect(image)
    # Strongest responses first, truncated to the requested count.
    keypoints = sorted(keypoints, key=lambda kp: -kp.response)[:vector_size]
    keypoints, descriptor = detector.compute(image, keypoints)

    if descriptor is None:
        # No descriptors at all (e.g. a featureless image): fall back to ones.
        descriptor = np.ones(512)
    else:
        descriptor = descriptor.flatten()
        if descriptor.size < needed_size:
            # Fewer than vector_size keypoints found: zero-pad to fixed length.
            descriptor = np.concatenate(
                [descriptor, np.zeros(needed_size - descriptor.size)]
            )
    return keypoints, descriptor
# Script entry: read the image given on the command line, draw its KAZE
# keypoints and save the annotated copy to the working directory.
imagepath = sys.argv[1]
image = cv2.imread(imagepath)  # NOTE(review): imread returns None on a bad path — no check here
keypoint, descriptor = KAZE(image)
cv2.drawKeypoints(image, keypoint, image, color=(0, 255, 0))  # draw in green, in place
cv2.imwrite("outputimage.jpg", image)
| HamidDoost/basic-image-processing-concepts | keypointAndDescriptor.py | keypointAndDescriptor.py | py | 1,205 | python | en | code | 0 | github-code | 13 |
70333239378 | n = int(input())
# Parse the inclusive index range [a, b] and the token sequence.
a, b = input().split()
a, b = [int(a), int(b)]
array = input().split()
# Count positions i in [a, b-1] whose token equals its right neighbour
# (the original loop over range(a, b+1) broke at i == b, i.e. range(a, b)).
count = sum(1 for i in range(a, b) if array[i] == array[i + 1])
print(count)
14059540776 | import torch
import numpy as np
from scipy import io
import h5py
import torch_geometric as pyg
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.transforms import KNNGraph, RadiusGraph
import os
from tqdm import tqdm
class SHARPData(torch.utils.data.Dataset):
  """Dataset of MHS simulation tensors, one item per SHARP region.

  Each item is loaded lazily from ``<_rawfolder>/sharp<ID>.mat`` and returned
  as a TensorDataset whose rows are the individual simulations of that SHARP.
  """
  def __init__(self, list_IDs):
        'Initialization'
        # SHARP numbers to expose; item i maps to file sharp<list_IDs[i]>.mat.
        self.list_IDs = list_IDs
  def __len__(self):
        'Denotes the total number of samples'
        return len(self.list_IDs)
  def __getitem__(self, index):
        'Generates one sample of data'
        filename = _rawfolder + 'sharp' + str(self.list_IDs[index]) + '.mat'
        try:
            mat = io.loadmat(filename)
        except NotImplementedError:
            # scipy cannot read MATLAB v7.3 files; fall back to HDF5 via h5py.
            mat = {}
            f = h5py.File(filename)
            for k,v in f.items():
                mat[k] = np.array(v)
        n = int(mat['n'])  # number of mesh nodes per simulation
        # Magnetic field: 'Bns'/'Bff' columns are concatenated, then the three
        # n-row blocks (x/y/z components) are stacked and transposed to
        # shape (num_simulations, n, 3).
        Bn = np.concatenate((mat['Bns'],mat['Bff']),1)
        Bn = np.stack((Bn[0:n,:],Bn[n:2*n,:],Bn[2*n:3*n,:]),0)
        Bn = np.transpose(Bn,(2,1,0))
        # Node coordinates, repeated once per simulation.
        nodesn = np.squeeze(mat['nodes'])
        nodesn = np.repeat(np.expand_dims(nodesn,0),Bn.shape[0],axis=0)
        # Indices of nodes lying on the z = 0 boundary plane.
        index_z0 = np.squeeze(mat['index_z0']).astype(int)
        Bn_bd = Bn[:,index_z0,:]
        nodesn_bd = nodesn[:,index_z0,:];  # NOTE(review): unused below
        # Plasma forcing: a zero column is prepended to 'forcevec', so the
        # first simulation has zero forcing; reshaped like the field above.
        plasman = np.concatenate((np.zeros((3*n,1)),mat['forcevec']),1)
        plasman = np.stack((plasman[0:n,:],plasman[n:2*n,:],plasman[2*n:3*n,:]),0)
        plasman = np.transpose(plasman,(2,1,0))
        # Split interior vs boundary nodes; boundary positions keep only (x, y).
        B = torch.Tensor(Bn[:,np.setdiff1d(range(n),index_z0),:])
        nodes = torch.Tensor(nodesn[:,np.setdiff1d(range(n),index_z0),:])
        B_bd = torch.Tensor(Bn_bd)
        nodes_bd = torch.Tensor(nodesn[:,index_z0,0:2])
        plasma = torch.Tensor(plasman[:,np.setdiff1d(range(n),index_z0),:])
        plasma_bd = torch.Tensor(plasman[:,index_z0,:])
        # Tag every simulation row with its SHARP number.
        sharp = torch.full((Bn.shape[0],1), self.list_IDs[index])
        return torch.utils.data.TensorDataset(nodes, B, nodes_bd, B_bd, plasma, plasma_bd, sharp)
class MHSDataset(pyg.data.Dataset):
    """PyTorch-Geometric dataset turning raw SHARP .mat simulations into
    heterogeneous graphs with 'in' (interior) and 'bd' (boundary) node types.

    Processing runs once; each simulation graph is cached to
    ``processed/simulation_<t>.pt`` and loaded lazily by :meth:`get`.
    """
    def __init__(self, root, k=50,transform=None, pre_transform=None, pre_filter=None):
        # SHARP numbers discovered on disk; determines raw/processed file lists.
        self.allSharps = get_allsharps()
        self.k=k  # neighbourhood size for the interior KNN graph
        super().__init__(root, transform, pre_transform, pre_filter)
    @property
    def raw_file_names(self):
        # One .mat file per SHARP in the raw folder.
        return ['sharp' + str(s) + '.mat' for s in self.allSharps]
    @property
    def processed_file_names(self):
        # NOTE(review): assumes every SHARP file holds exactly 6 simulations
        # and that pre_filter rejects nothing — confirm against the raw data,
        # otherwise the processed-file count will not match.
        return ['simulation_' + str(t) + '.pt' for t in range(6 * len(self.allSharps))]
    def process(self):
        """Convert every simulation of every SHARP into a cached HeteroData graph."""
        tensorData = SHARPData(self.allSharps)
        numSharps = len(tensorData)       # NOTE(review): unused below
        numPerSharp = len(tensorData[0])  # NOTE(review): unused below
        counter = 0
        for sharp_set in tqdm(tensorData):
            for sim in sharp_set:
                # sim = (nodes, B, nodes_bd, B_bd, plasma, plasma_bd, sharp)
                x_in = torch.zeros(sim[1].shape)  # interior inputs start as zeros
                x_bd = sim[3]                     # boundary inputs: boundary field
                y_in = sim[1]                     # target: interior field
                y_bd = sim[3]
                pos_in = sim[0]
                # Boundary positions are (x, y); append z = 0 to make them 3-D.
                pos_bd = torch.cat((sim[2],torch.zeros(sim[2].shape[0],1)),1)
                p_in = sim[4]
                p_bd = sim[5]
                data = pyg.data.HeteroData()
                # Interior nodes: features = [zeros, plasma forcing].
                data['in'].x = torch.cat((x_in,p_in),1)
                data['in'].y = y_in
                data['in'].pos = pos_in
                # k-nearest-neighbour connectivity among interior nodes.
                data['in','adj','in'].edge_index = KNNGraph(k=self.k)(data['in']).edge_index
                data['in'].edge_index = None
                # Boundary nodes: features = [boundary field, boundary forcing].
                data['bd'].x = torch.cat((x_bd,p_bd),1)
                data['bd'].y = y_bd
                data['bd'].pos = pos_bd
                # Fully connect boundary -> interior, ...
                data['bd','propagates','in'].edge_index, _ = \
                    pyg.utils.dense_to_sparse(
                        torch.ones(data['bd'].x.shape[0],data['in'].x.shape[0])
                    )
                # ... then randomly keep only ~20% of those edges.
                # NOTE(review): this dropout happens once at preprocessing time,
                # so the sparsification is frozen into the cached graphs.
                data['bd','propagates','in'].edge_index, mask = pyg.utils.dropout_edge(
                    edge_index = data['bd','propagates','in'].edge_index, p = 0.8,
                    training=True
                )
                # data['bd','propagates','in'].edge_attr = data['bd','propagates','in'].edge_attr[mask]
                # data['bd','propagates','in'].edge_index = RadiusGraph()(data)
                data.sharpnum = sim[6]
                if self.pre_filter is not None and not self.pre_filter(data):
                    continue
                if self.pre_transform is not None:
                    # NOTE(review): only the 'in' store is transformed here.
                    self.pre_transform(data=data['in'])
                torch.save(data, os.path.join(self.processed_dir, f'simulation_{counter}.pt'))
                counter += 1
    def len(self):
        """Number of processed simulation graphs."""
        return len(self.processed_file_names)
    def get(self, index):
        """Load one cached simulation graph from disk."""
        data = torch.load(os.path.join(self.processed_dir, f'simulation_{index}.pt'))
        return data
def get_allsharps():
    """Scan the raw-data folder and return the SHARP number of every sharp<N>.mat file."""
    # (A hard-coded fallback list exists in `_allsharps` but is not used.)
    # Filenames look like 'sharp<NUM>.mat'; strip the fixed pieces to get NUM.
    return [
        int(filename.replace('sharp', '').replace('.mat', ''))
        for filename in os.listdir(_rawfolder)
    ]
_rawfolder = 'D:\\MHS_solutions_v4\\'
_allsharps = [7058,7066,7067,7069,7070,7074,7078,7081,7083,7084,7085]
| apt-get-nat/graphPINN | graphPINN/data.py | data.py | py | 5,350 | python | en | code | 0 | github-code | 13 |
12058968757 | import sys
import tensorrt as trt
sys.path.append('../')
import common
'''
Build a TensorRT engine by loading an ONNX model file.
'''
onnx_file_path = "yolox_s.onnx" # the ONNX model to convert
G_LOGGER = trt.Logger(trt.Logger.WARNING)
# 1. Required first step for dynamic input: create the network in explicit-batch mode
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
batch_size = 1 # maximum batch size supported at TensorRT inference time
with trt.Builder(G_LOGGER) as builder, builder.create_network(explicit_batch) as network, \
    trt.OnnxParser(network, G_LOGGER) as parser:
    builder.max_batch_size = batch_size
    config = builder.create_builder_config()
    config.max_workspace_size = 1<<32 # the `common` helper module ships with the official TensorRT samples
    config.set_flag(trt.BuilderFlag.TF32)
    print('Loading ONNX file from path {}...'.format(onnx_file_path))
    with open(onnx_file_path, 'rb') as model:
        print('Beginning ONNX file parsing')
        parser.parse(model.read())
    print('Completed parsing of ONNX file')
    print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
    # Workaround for dynamic input: pin min/opt/max shapes via an optimization profile
    profile = builder.create_optimization_profile()
    profile.set_shape("input_1", (1, 512, 512, 3), (1, 512, 512, 3), (1, 512, 512, 3))
    config.add_optimization_profile(profile)
    engine = builder.build_engine(network, config)
    print("Completed creating Engine")
    # Save the serialized engine under a custom file name
    engine_file_path = 'yolox_fp323.engine'
    with open(engine_file_path, "wb") as f:
        f.write(engine.serialize())
| guojianyang/cv-detect-robot | CDR-docker_main_file/deepstream-yolox/onnx_to_trt.py | onnx_to_trt.py | py | 1,619 | python | en | code | 465 | github-code | 13 |
29110268311 |
import subprocess
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description='Run LA analysis code automatically and determine the possible best model based on relative change in R squared between the last and current model.')
parser.add_argument('executable', metavar='executable_path', type=str, help='Path to LA analysis executable (Search)')
parser.add_argument('data_path', metavar='data_path', type=str, help='Path to the folder containing the locating array, the factor data file, the responses folder, and the output folder. ')
parser.add_argument('LA_name', metavar='LA_name', type=str, help='Name of the locating array file (ends in .tsv)')
parser.add_argument('FD_name', metavar='FD_name', type=str, help='Name of the factor data file (ends in .tsv and must match LA file)')
parser.add_argument('responses_folder', metavar='responses_folder', type=str, help='Name of the folder containing the response files')
parser.add_argument('output_folder', metavar='output_folder', type=str, help='Name of the folder where the models will be saved')
parser.add_argument('responses', metavar='responses', type=str, nargs='+', help='List of response columns in the response folder')
parser.add_argument('--threshold', default='0.01', type=float)
parser.add_argument('--min_num_terms', default='2', type=int)
parser.add_argument('--max_num_terms', default='10', type=int)
parser.add_argument('--num_models', default='50', type=int)
parser.add_argument('--num_new_models', default='50', type=int)
parser.add_argument('--debug', action="store_true", help='Run the LA analysis tool in debug mode')
args = parser.parse_args()
executable_path = args.executable
data_path = args.data_path
LA_path = data_path + args.LA_name
FD_path = data_path + args.FD_name
responses_path = data_path + args.responses_folder + '/'
output_path = data_path + args.output_folder + '/'
responses = args.responses
r_squared_threshold = args.threshold
debug = args.debug
def get_model(response, num_terms):
    """Run the external Search executable for one response/term-count pair and
    parse its stdout into a model record.

    Returns a dict with the requested term count, the text of the top-ranked
    model, its occurrence-count section, and the parsed R-squared value.
    """
    # `global` is declaratory here: these module-level names are only read.
    global executable_path, LA_path, FD_path, responses_path, debug, num_models, num_new_models
    output = subprocess.check_output([executable_path, LA_path, FD_path, 'analysis', responses_path, f'{response}', f'{1 if debug else 0}', f'{num_terms}', f'{num_models}', f'{num_new_models}'])
    # Fragile text scraping of the tool's report: the top model is everything
    # between 'Final Models Ranking: ' and 'Model 2'.
    s = output.decode('utf-8').split('Final Models Ranking: ')[1]
    model = s.split('Model 2')[0]
    occurrence_counts = output.decode('utf-8').split('Occurrence Counts')[1]
    # R-squared is printed in parentheses right after the model formula.
    r_squared = model.split('(')[1].split(')')[0]
    print(r_squared)
    d = {
        'num_terms': num_terms,
        'top_model': model,
        'occurrence_counts': occurrence_counts,
        'r_squared': float(r_squared),
    }
    return d
# For each response, sweep the term count upward and pick the last model whose
# R-squared improved by at least `r_squared_threshold` over its predecessor.
models = defaultdict(list)
min_num_terms = args.min_num_terms
max_num_terms = args.max_num_terms
num_models = args.num_models
num_new_models = args.num_new_models
for response in responses:
    print(f'Response: {response}')
    print('-'*20)
    last_r_squared = 0
    # NOTE(review): if the very first model's R-squared gain is already below
    # the threshold, best_model stays None and the f.write below raises —
    # confirm whether that case can occur in practice.
    best_model = None
    best_model_index = None
    for i in range(min_num_terms, max_num_terms+1):
        print(f'Num_terms: {i}, R squared: ', end='')
        new_model = get_model(response, i)
        models[response].append( new_model )
        if new_model['r_squared'] - last_r_squared < r_squared_threshold:
            # Diminishing returns: the previous accepted model is the best guess.
            print(f'Best model probably: {best_model_index} terms')
        else:
            best_model = models[response][-1]
            best_model_index = i
        last_r_squared = new_model['r_squared']
    print('\n\n')
    # Write a human-readable report: run parameters, the chosen best model,
    # then every model tried, separated by rules.
    with open(f'{output_path}/{response}.txt', 'w') as f:
        f.write(f'Response: {response}\n')
        f.write(f'Num_models: {num_models}, num_new_models: {num_new_models}\n')
        f.write(f'R squared threshold: {r_squared_threshold}, min_num_terms: {min_num_terms}, max_num_terms: {max_num_terms}\n\n')
        f.write(f'Best model: {best_model["num_terms"]} terms\n')
        f.write(f'{best_model["top_model"]}\n')
        f.write('Occurence Counts: \n')
        f.write(f'{best_model["occurrence_counts"]}\n\n')
        f.write('-'*110)
        f.write('\n')
        f.write('-'*110)
        f.write('\n\n')
        f.write('Other models: \n\n')
        for index in range(len(models[response])):
            f.write(f'Num terms: {models[response][index]["num_terms"]}\n')
            f.write(f'{models[response][index]["top_model"]}\n')
            f.write('Occurrence Counts: \n')
            f.write(f'{models[response][index]["occurrence_counts"]}\n\n')
            f.write('-'*110)
            f.write('\n')
| bmhang/wireless-conference-guide | run_analysis.py | run_analysis.py | py | 4,600 | python | en | code | 0 | github-code | 13 |
25084591767 | #!/usr/bin/env python3
# This program converts temperature from/to Fahrenheit or Celsius
def print_options():
    """Show the menu of single-letter commands understood by the main loop."""
    for line in (
        "Options:",
        " 'p' print options",
        " 'c' convert from Celsius",
        " 'f' convert from Fahrenheit",
        " 'q' quit the program",
    ):
        print(line)
def celsius_to_fahrenheit(c_temp):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scale = 9.0 / 5.0  # size of a Celsius degree in Fahrenheit degrees
    return scale * c_temp + 32
def fahrenheit_to_celsius(f_temp):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    offset = f_temp - 32.0  # shift so the freezing point maps to zero
    return offset * 5.0 / 9.0
# Simple command loop: start by showing the menu ('p'), then convert until 'q'.
choice = "p"
while choice != "q":
    if choice == "c":
        c_temp = float(input("Celsius temperature: "))
        print("Fahrenheit:", celsius_to_fahrenheit(c_temp))
        choice = input("option: ")
    elif choice == "f":
        f_temp = float(input("Fahrenheit temperature: "))
        print("Celsius:", fahrenheit_to_celsius(f_temp))
        choice = input("option: ")
    else:
        # Any unrecognized option (including the initial 'p') just reprints
        # the menu and asks again.
        choice = "p"
        # Alternatively choice != "q": so that the menu prints
        # whenever anything unexpected is input
        print_options()
        choice = input("option: ")
3721596390 | import heapq # function for sort
def solution(scoville, K):
    """Minimum number of mix operations until every scoville value reaches K.

    Each mix pops the two mildest foods and pushes a new one with scoville
    mildest + 2 * second_mildest. Returns -1 when only one food remains and
    it is still below K. Note: `scoville` is heapified in place.
    """
    heapq.heapify(scoville)  # min-heap: scoville[0] is always the mildest
    mixes = 0
    while scoville[0] < K:
        if len(scoville) == 1:
            return -1  # only one food left and it is still too mild
        mildest = heapq.heappop(scoville)
        second = heapq.heappop(scoville)
        heapq.heappush(scoville, mildest + second * 2)
        mixes += 1
    return mixes
class Solution:
    """Combination Sum II: unique combinations with bounded duplicates."""

    def combinationSum2(self, candidates, target):
        """Return every unique combination of `candidates` that sums to `target`.

        Each candidate may appear in a combination at most as often as it
        appears in `candidates`; duplicates are avoided by grouping equal
        values and choosing, per distinct value, how many copies to take.
        Combinations are emitted with their values in ascending order.

        Fix: the original signature used the `List[int]` annotation without
        `from typing import List`, which raises NameError when the class is
        defined outside the LeetCode harness; plain parameters are used instead.
        """
        candidates.sort()
        # Distinct candidate values in ascending order, with multiplicities.
        values = []
        counts = {}
        for c in candidates:
            if c in counts:
                counts[c] += 1
            else:
                values.append(c)
                counts[c] = 1

        def dfs(level, cur_sum):
            """Combinations using only values[level:] that raise cur_sum to target."""
            if level == len(values):
                return []
            results = []
            value = values[level]
            # For this value we may take 0, 1, ..., counts[value] copies.
            for copies in range(counts[value] + 1):
                new_sum = cur_sum + value * copies
                if new_sum == target:
                    results.append([value] * copies)
                elif new_sum < target:
                    # Still room: extend with combinations of the larger values.
                    for tail in dfs(level + 1, new_sum):
                        results.append([value] * copies + tail)
                # new_sum > target: overshoot, no combination from this choice.
            return results

        return dfs(0, 0)
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import unidecode

# TED talk transcript pages (pt-br) to scrape into ted<N>.json files.
urls = ["https://www.ted.com/talks/helen_czerski_the_fascinating_physics_of_everyday_life/transcript?language=pt-br#t-81674",
        "https://www.ted.com/talks/kevin_kelly_how_ai_can_bring_on_a_second_industrial_revolution/transcript?language=pt-br",
        "https://www.ted.com/talks/sarah_parcak_help_discover_ancient_ruins_before_it_s_too_late/transcript?language=pt-br",
        "https://www.ted.com/talks/sylvain_duranton_how_humans_and_ai_can_work_together_to_create_better_businesses/transcript?language=pt-br",
        "https://www.ted.com/talks/chieko_asakawa_how_new_technology_helps_blind_people_explore_the_world/transcript?language=pt-br",
        "https://www.ted.com/talks/pierre_barreau_how_ai_could_compose_a_personalized_soundtrack_to_your_life/transcript?language=pt-br",
        "https://www.ted.com/talks/tom_gruber_how_ai_can_enhance_our_memory_work_and_social_lives/transcript?language=pt-br"]

for index, url in enumerate(urls):
    # One fresh browser per page; quit() in a finally block so a scraping
    # error cannot leak a Chrome process (the original never quit on error).
    driver = webdriver.Chrome(executable_path="C://Program Files//chromedriver//chromedriver.exe")
    try:
        driver.get(url)
        time.sleep(1)  # crude wait for the transcript to render

        # First <section> holds the transcript markup.
        section = driver.find_elements_by_tag_name('section')
        soup = BeautifulSoup(section[0].get_attribute("outerHTML"), 'html.parser')

        # Talk title comes from the first <h1>.
        h1 = driver.find_elements_by_tag_name('h1')
        title = BeautifulSoup(h1[0].get_attribute("outerHTML"), 'html.parser').getText()

        # Author name lives in a div with this utility-class soup.
        corpo = driver.find_element_by_tag_name('body')
        corpo_soup = BeautifulSoup(corpo.get_attribute("outerHTML"), 'html.parser')
        autor = corpo_soup.find("div", {'class': 'f:.9 m-b:.4 m-t:.5 d:i-b'}).getText()

        # Transcript body: each Grid row holds anchor fragments of one paragraph.
        text = []
        for div in soup.find_all("div", {"class": "Grid"}):
            div_text = div.find("div", {"class": "flx-s:1"})
            for a in div_text.find_all("a"):
                text.append(a.getText())
            text.append('\n')
        text_final = " ".join(text)

        with open('ted' + str(index) + '.json', 'w') as f:
            json.dump({"author": autor, "body": text_final, "title": title,
                       "type": "video", "url": url}, f)
    finally:
        driver.quit()
import pandas as pd
import streamlit as st
import numpy as np
import plotly.express as px
st.title('Popular Names')
st.text('Popularity of a Name Over Time')
# Long-format baby-name counts; columns used below: name, sex, year, n.
url = 'https://github.com/esnt/Data/raw/main/Names/popular_names.csv'
df = pd.read_csv(url)
selected_name = st.text_input('Enter a name', 'John') # default name is John
# Exact, case-sensitive match against the name column.
name_df = df[df['name'] == selected_name]
if name_df.empty:
    st.write('Name not found')
else:
    # One line per sex; Light24 palette keeps the two series distinguishable.
    fig = px.line(name_df, x='year', y='n', color='sex', color_discrete_sequence=px.colors.qualitative.Light24)
    st.plotly_chart(fig)
st.text('Top 10 Names for Male and Female by Year')
year = st.selectbox('Select a year', df['year'].unique())
year_df = df[df['year'] == year]
# Top 10 per sex by count; reset_index so the two columns align row-by-row.
girl_names = year_df[year_df['sex'] == 'F'].sort_values(by = 'n', ascending = False).head(10)['name'].reset_index(drop=True)
boy_names = year_df[year_df['sex'] == 'M'].sort_values(by = 'n', ascending = False).head(10)['name'].reset_index(drop=True)
top_names = pd.concat([girl_names, boy_names], axis = 1)
top_names.columns = ['Girl Names', 'Boy Names']
st.dataframe(top_names)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# NOTE(review): duplicate pyplot import; harmless but redundant.
import matplotlib.pyplot as plt
from matplotlib import rc
import sys
# NOTE(review): sys.path controls Python module search, not executables —
# this append has no effect on finding the latex binary; confirm intent.
sys.path.append('/usr/bin/latex')
rc('text', usetex = True)
rc('font', family = 'serif', size = 16)
def deriv(x):
    """Second-order finite-difference derivative on a unit-spaced index grid:
    central differences in the interior, one-sided 3-point stencils at the
    two boundaries.  Returns a float array the same length as `x`.
    """
    n = len(x)
    dx = np.zeros(n)
    # One-sided second-order stencils at the edges.
    dx[0] = (-3*x[0] + 4*x[1] - x[2]) / 2
    dx[n - 1] = (3*x[n - 1] - 4*x[n - 2] + x[n - 3]) / 2
    # Central differences everywhere else.
    for k in range(1, n - 1):
        dx[k] = (x[k + 1] - x[k - 1]) / 2
    return dx
flnm = '../harm_data/lum_within_r_a0.0.npz'
# t = np.load(flnm)['t']
r = np.load(flnm)['r']
total_lum = np.load(flnm)['total_lum']
cor_lum = np.load(flnm)['cor_lum']
disk_lum = np.load(flnm)['disk_lum']
# total_lum = np.mean(disk_lum, axis = 0)
# disk_lum = np.mean(disk_lum, axis = 0)
# r * dL/dr via the chain rule on the (non-uniform) r grid:
# dL/dr = (dL/di) / (dr/di), both taken on the index grid.
plt.plot(r, r*(deriv(total_lum)/deriv(r)), 'k-')
plt.xlim([2, 70])
# plt.ylim([0.0, 1.0])
plt.loglog()
# NOTE(review): this show() blocks; the axis styling below is applied only
# after the window is closed and shown again by the second show() — confirm
# whether the first show() should be removed.
plt.show()
ax = plt.gca()
# Explicit decade-free ticks for the log-scaled radius axis.
ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.set_xticks((2, 3, 4, 5, 6, 8, 10, 15, 20, 30, 40, 50, 70))
ax.set_xticklabels(('2', '3', '4', '5', '6', '8', '10', '15', '20', '30', '40', '50', '70'))
plt.xlabel(r'$r/M$')
# plt.ylabel(r'$L(r < R)/L_\mathrm{total}$')
plt.legend(frameon = False, loc = 'upper left')
plt.tight_layout()
# f = plt.gcf()
# f.savefig('lum_within_r.pdf', bbox_inches = 'tight')
plt.show()
class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Greedy: with unlimited transactions, the maximum profit equals the
        sum of every positive day-to-day increase (buy before each rise,
        sell right after).  This replaces the original's stack bookkeeping,
        whose result was identical, and removes the unreachable second
        `return` that followed it.  Empty and single-element inputs yield 0
        naturally (sum over an empty zip).
        """
        return sum(max(nxt - cur, 0) for cur, nxt in zip(prices, prices[1:]))
69897546579 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import textwrap
import weightedstats as ws
pd.set_option('display.max_columns', None)
desktop = "C:/Users/pc/Desktop/"
# Study period (inclusive).
inicio = 2004
fin = 2021
fuente = {'fontname': "Times New Roman"}
# ENAHO education-level answer categories (p45_1 / p45_2), lowest to highest;
# "no sabe" = does not know.
gi = ["sin nivel",
      "primaria incompleta", "primaria completa",
      "secundaria incompleta", "secundaria completa",
      "superior no universitaria incompleta", "superior no universitaria completa",
      "superior universitaria incompleta", "superior universitaria completa",
      "no sabe"]
grupos = ["Sin nivel", "Primaria", "Secundaria", "Superior no universitaria", "Superior universitaria"]
grupos2 = ["Sin nivel", "Educación básica", "Educación secundaria", "Educación superior"]
departamentos = ["Amazonas", "Áncash", "Apurímac", "Arequipa", "Ayacucho", "Cajamarca", "Callao", "Cusco",
                 "Huancavelica", "Huánuco", "Ica", "Junín", "La Libertad", "Lambayeque", "Lima", "Loreto",
                 "Madre de Dios", "Moquegua", "Pasco", "Piura", "Puno", "San Martín", "Tacna", "Tumbes",
                 "Ucayali"]
departamentosISO = ["AMA", "ANC", "APU", "AYA", "ARE", "CAJ", "CAL", "CUS",
                    "HUV", "HUC", "ICA", "JUN", "LAL", "LAM", "LIM", "LOR",
                    "MDD", "MOQ", "PAS", "PIU", "PUN", "SAM", "TAC", "TUM",
                    "UCA"]
periodo_i = [i for i in range(inicio, fin + 1)]
periodo_s = [str(i) for i in periodo_i]
negro = "black"
color_dict = {'capprops': dict(color=negro),
              'medianprops': dict(color=negro, linewidth=2),
              'whiskerprops': dict(color=negro),
              'meanprops': dict(markeredgecolor=negro, markerfacecolor=negro)}
marcadores = ["^", "P", "s", "*", "D", "X", "p", "h", "8", "o"]
colores = [(0.2, 0.2, 0.8, 0.3), (0.2, 0.4, 0.8, 0.3), (0.2, 0.6, 0.8, 0.3), (0.2, 0.8, 0.8, 0.3)]
figsizes = (10, 5.7)
source = "Fuente: Elaboración propia a partir de datos del Instituto Nacional de Estadística e Informática (INEI)"
source_pos = (0.08, 0.01)
enaho = pd.read_csv(desktop + "data_indic.csv",
                    sep=";", encoding="ANSI", low_memory=False)
enaho = enaho[["p45_1", "p45_2", "factor07", "mieperho", "inghog1d", "aÑo", "defes", "dept"]]
# Monthly, spatially deflated household income.
enaho["yfam"] = enaho["inghog1d"]/(enaho["defes"] * 12)
# enaho["factorper"] = enaho["factor07"]*enaho["mieperho"]
# Variables used
# p45_1: education level of the household head's father
# p45_2: education level of the household head's mother
# factor07: household expansion (survey) weight
# mieperho: members per household
# inghog1d: total annual gross income
# defes: spatial deflator
# Numeric codes for mean-income lists by education level
# 0: no schooling
# 1: incomplete primary
# 2: complete primary
# 3: incomplete secondary
# 4: complete secondary
# 5: incomplete non-university higher education
# 6: incomplete non-university higher education (sic — presumably "complete"; TODO confirm)
# 7: incomplete university education
# 8: complete university education
# 9: does not know
# 10: empty
# NOTE(review): zip truncates at range(len(gi) - 1), so "no sabe" gets no
# code and maps to NaN below — confirm this exclusion is intended.
gi_dict = {key: value for (key, value) in zip(gi, [i for i in range(0, len(gi) - 1)])}
enaho["padre"] = enaho["p45_1"].map(gi_dict)
enaho["madre"] = enaho["p45_2"].map(gi_dict)
# Highest education level between the two parents, mapped back to its label.
enaho["gimax"] = enaho[["padre", "madre"]].max(axis=1)
enaho["gimax"] = enaho["gimax"].map({key: value for (key, value) in zip(gi_dict.values(), gi_dict.keys())})
enaho = enaho.drop(["padre", "madre"], axis=1)
# Per-year slices of the survey, keyed by the year as a string.
enahoyears = {key: value for (key, value) in zip([f"{i}" for i in range(inicio, fin + 1)], [enaho[enaho['aÑo'] == i] for i in range(inicio, fin + 1)])}
muestra = pd.DataFrame({"Año": periodo_i,
                        "Muestra": [enaho[enaho['aÑo'] == i].shape[0] for i in periodo_i],
                        "Población": [round(enahoyears[f'{i}']['factor07'].sum()) for i in periodo_i]})
# print(muestra)
# muestra.to_csv(desktop + "poblacion.csv", sep=";", encoding="ANSI")
# Weighted mean and weighted median family income per year.
yfam_medio = [round(np.average(enahoyears[f"{i}"]["yfam"], weights=enahoyears[f"{i}"]["factor07"])) for i in periodo_i]
yfam_mediano = [round(ws.weighted_median(enahoyears[f"{i}"]["yfam"], weights=enahoyears[f"{i}"]["factor07"])) for i in periodo_i]
# pd.DataFrame({"Año": periodo_i,
#               "Ingreso medio": yfam_medio,
#               "Ingreso mediano": yfam_mediano}).to_csv(desktop + "ymediomediano.csv", sep=";")
def reindex_df(dataframe, weight):
    """Repeat each row `weight` times (survey-weight expansion) and return
    the resulting 'yfam' column with a fresh 0..n-1 index."""
    repeated = dataframe.index.repeat(dataframe[weight])
    expanded = dataframe.reindex(repeated).reset_index(drop=True)
    return expanded["yfam"]
plt.figure(figsize=figsizes)
# Weighted boxplot: rows are repeated factor07 times so the survey weights
# are reflected in the quartiles (memory-heavy but simple).
caja = plt.boxplot([reindex_df(enahoyears[f"{i}"], weight="factor07") for i in periodo_i],
                   showmeans=True, showfliers=False, showbox=True, showcaps=True, whis=3, **color_dict)
plt.legend([caja['medians'][0], caja['means'][0]], ['Ingreso mediano', 'Ingreso medio'],
           prop=font_manager.FontProperties(family=fuente["fontname"]))
# Mean income trend (first plot is invisible, alpha=0, to extend the x axis)
plt.plot([f"{inicio - 1}", f"{inicio}"], [yfam_medio[0], yfam_medio[1]], alpha=0)
for i in range(len(periodo_s) - 1):
    plt.plot([f"{periodo_s[i]}", f"{periodo_s[i + 1]}"], [yfam_medio[i], yfam_medio[i + 1]], negro, linestyle="dashed", linewidth=0.9)
# Median income trend
plt.plot([f"{inicio - 1}", f"{inicio}"], [yfam_mediano[0], yfam_mediano[1]], alpha=0)
for i in range(len(periodo_s) - 1):
    plt.plot([f"{periodo_s[i]}", f"{periodo_s[i + 1]}"], [yfam_mediano[i], yfam_mediano[i + 1]], negro, linestyle="dashed", linewidth=0.9)
plt.xticks([i for i in range(1, len(periodo_s) + 1)], periodo_s, **fuente)
plt.yticks(**fuente)
plt.xlim([f"{inicio-1}", f"{fin+1}"])
plt.title(f"Ingreso familiar de la población peruana, {inicio}-{fin}", **fuente)
plt.xlabel("Año", **fuente)
plt.ylabel("Ingreso mensual familiar (en soles)", **fuente)
plt.grid()
plt.figtext(source_pos[0], source_pos[1], source, **fuente)
plt.subplots_adjust(bottom=0.12)
plt.savefig(desktop + f"imagenes/boxplot.png", bbox_inches='tight')
# plt.show()
plt.close()
def barchars(porcentajes, agrupado, labels, width=0.35):
    """Save side-by-side bar charts comparing 2004 vs 2020/2021 education
    shares of the household head's father and mother.

    porcentajes: dict keyed by '<padre|madre><year>' -> list of % per label.
    agrupado:    suffix for the output file name (e.g. 'gi', 'grupos2').
    labels:      category labels for the x axis.
    Uses module-level plotting globals (figsizes, fuente, periodo_s, ...).
    """
    x = np.arange(len(labels))
    for j in ["padre", "madre"]:
        # Blue palette for the father, purple for the mother; `k` is the
        # Spanish article used in the chart title.
        if j == "padre":
            colores1 = (0.1, 0.3, 0.8, 0.5)
            colores2 = (0.1, 0.3, 0.8, 0.8)
            k = "del"
        else:
            colores1 = (0.4, 0.2, 0.6, 0.3)
            colores2 = (0.4, 0.2, 0.6, 0.6)
            k = "de la"
        # One chart per comparison year (2020 and 2021), always vs 2004.
        for i in range(2020, 2021 + 1):
            fig, ax = plt.subplots()
            fig.set_size_inches(figsizes[0], figsizes[1])
            rects1 = ax.bar(x - width / 2, porcentajes[f"{j}2004"], width, label=f'{periodo_s[0]}', color=colores1)
            rects2 = ax.bar(x + width / 2, porcentajes[f"{j}{i}"], width, label=f'{i}', color=colores2)
            plt.title(f"Grado de instrucción {k} {j} del jefe de hogar, {periodo_s[0]} vs. {i}", **fuente)
            plt.xlabel("Grado de instrucción", **fuente)
            plt.ylabel("% de las familias", **fuente)
            plt.xticks(x, [textwrap.fill(m.capitalize(), width=16) for m in labels], **fuente)
            plt.yticks(**fuente)
            ax.legend(prop=font_manager.FontProperties(family=fuente["fontname"]))
            ax.bar_label(rects1, padding=3, **fuente)
            ax.bar_label(rects2, padding=3, **fuente)
            fig.tight_layout()
            plt.figtext(source_pos[0], source_pos[1], source, **fuente)
            plt.subplots_adjust(bottom=0.18)
            plt.savefig(desktop + f"/imagenes/barras{agrupado}{j}{i}.png", bbox_inches='tight')
            # plt.show()
            plt.close()
def weighted_vals(valores, pesos):
    """Return the weighted mean and weighted (population) variance of
    `valores` with weights `pesos`.

    Generalized with np.asarray so plain Python sequences work too; the
    original required `valores` to support elementwise subtraction
    (ndarray/Series) and raised TypeError on a list.
    """
    valores = np.asarray(valores)
    average = np.average(valores, weights=pesos)
    variance = np.average((valores-average)**2, weights=pesos)
    return average, variance
def etas_f(enahobygroups, etas, j, departamentos, dept_i, dept=False):
    """Append to `etas` the opportunity-equalization degree for year `j`:
    eta = 1 - Var_between_types / Var_total of weighted family income,
    where types are the grupos2 parental-education groups.

    If dept is True, figures are restricted to departamentos[dept_i].
    Returns (etas, medias) with medias = weighted mean income per type.
    Relies on module-level globals: grupos2, enahoyears, weighted_vals.
    """
    # weighted mean income of each type
    medias = []
    # relative frequency (share) of each type
    fs = []
    for i in range(len(grupos2)):
        if dept is True:
            data = enahobygroups[f"{j}g{i + 1}"][enahobygroups[f"{j}g{i + 1}"]["dept"] == departamentos[dept_i]]
        else:
            data = enahobygroups[f"{j}g{i + 1}"]
        media = weighted_vals(data["yfam"], pesos=data["factor07"])[0]
        medias.append(media)
        f = data.shape[0]
        fs.append(f)
    # NOTE(review): shares use raw row counts rather than the survey
    # weights — confirm this is intended.
    fs = [i / sum(fs) for i in fs]
    # overall weighted mean and variance of family income
    if dept is True:
        data2 = enahoyears[str(j)][enahoyears[str(j)]["dept"] == departamentos[dept_i]]
        # print(data2.head())
    else:
        data2 = enahoyears[str(j)]
    media_muestral, var_muestral = weighted_vals(data2["yfam"], pesos=data2["factor07"])
    # Between-type variance (explained by circumstances) vs total variance.
    var_phi = [((medias[i] - media_muestral) ** 2) * fs[i] for i in range(len(grupos2))]
    var_phi = sum(var_phi)
    var_H = var_muestral
    eta = 1 - var_phi / var_H
    etas.append(eta)
    return etas, medias
def porgi():
    """Plot mean family income per year for each fine-grained education
    level of the most educated parent (codes 0..8), plus 2004-vs-recent
    bar charts of the parents' education shares.

    Uses module-level globals (enaho, enahoyears, periodo_i/periodo_s, gi,
    yfam_medio, plotting constants) and saves PNGs via barchars.
    """
    # Year x level slices, keyed '<year>p<code>'.
    enahobygi = {key: value for (key, value) in zip([f"{i}p{j}" for i in periodo_i for j in range(len(gi) - 1)], [enaho[(enaho['aÑo'] == i) & (enaho["gimax"] == gi[j])] for i in periodo_i for j in range(len(gi) - 1)])}
    # Weighted mean income per year, one column per education code.
    df_yfam_medio = pd.DataFrame(
        {"Año": periodo_s,
         "0": [round(i) for i in [np.average(enahobygi[f"{i}p0"]["yfam"], weights=enahobygi[f"{i}p0"]["factor07"]) for i in periodo_i]],
         "1": [round(i) for i in [np.average(enahobygi[f"{i}p1"]["yfam"], weights=enahobygi[f"{i}p1"]["factor07"]) for i in periodo_i]],
         "2": [round(i) for i in [np.average(enahobygi[f"{i}p2"]["yfam"], weights=enahobygi[f"{i}p2"]["factor07"]) for i in periodo_i]],
         "3": [round(i) for i in [np.average(enahobygi[f"{i}p3"]["yfam"], weights=enahobygi[f"{i}p3"]["factor07"]) for i in periodo_i]],
         "4": [round(i) for i in [np.average(enahobygi[f"{i}p4"]["yfam"], weights=enahobygi[f"{i}p4"]["factor07"]) for i in periodo_i]],
         "5": [round(i) for i in [np.average(enahobygi[f"{i}p5"]["yfam"], weights=enahobygi[f"{i}p5"]["factor07"]) for i in periodo_i]],
         "6": [round(i) for i in [np.average(enahobygi[f"{i}p6"]["yfam"], weights=enahobygi[f"{i}p6"]["factor07"]) for i in periodo_i]],
         "7": [round(i) for i in [np.average(enahobygi[f"{i}p7"]["yfam"], weights=enahobygi[f"{i}p7"]["factor07"]) for i in periodo_i]],
         "8": [round(i) for i in [np.average(enahobygi[f"{i}p8"]["yfam"], weights=enahobygi[f"{i}p8"]["factor07"]) for i in periodo_i]]})
    # Mean income per education level, plus the overall mean in black.
    plt.figure(figsize=figsizes)
    for i in range(len(df_yfam_medio.columns) - 1):
        plt.plot(df_yfam_medio["Año"], df_yfam_medio[f"{i}"], label=gi[i].capitalize(), marker=marcadores[i], alpha=0.7, linestyle="dashed")
    plt.plot(df_yfam_medio["Año"], yfam_medio, negro, linewidth=3, label="Ingreso medio", marker="o", alpha=0.6)
    plt.title(f"Ingreso medio familiar por grado de instrucción del padre más instruido del jefe de hogar, {inicio}-{fin}", **fuente)
    plt.xlabel("Año", **fuente)
    plt.ylabel("Ingreso mensual familiar (en soles)", **fuente)
    plt.xticks(**fuente)
    plt.yticks(**fuente)
    plt.ylim([-100, 8200])
    plt.legend(prop=font_manager.FontProperties(family=fuente["fontname"], size=8))
    plt.grid()
    plt.figtext(source_pos[0], source_pos[1], source, **fuente)
    plt.subplots_adjust(bottom=0.12)
    plt.savefig(desktop + f"/imagenes/ingresomediogi.png", bbox_inches='tight')
    # plt.show()
    plt.close()
    # Weighted population counts per level, separately for father and mother.
    f1 = {key: value for (key, value) in zip([f"padre{i}" for i in periodo_i], [[enahoyears[f"{i}"][enahoyears[f"{i}"]["p45_1"] == j]["factor07"].sum() for j in gi[:-1]] for i in periodo_i])}
    f2 = {key: value for (key, value) in zip([f"madre{i}" for i in periodo_i], [[enahoyears[f"{i}"][enahoyears[f"{i}"]["p45_2"] == j]["factor07"].sum() for j in gi[:-1]] for i in periodo_i])}
    frecuencias = f1 | f2
    # Convert counts to percentages (1 decimal).
    p1 = {key: value for (key, value) in zip([f"padre{i}" for i in periodo_i], [[round(j*100/sum(frecuencias[f"padre{i}"]), 1) for j in frecuencias[f"padre{i}"]] for i in periodo_i])}
    p2 = {key: value for (key, value) in zip([f"madre{i}" for i in periodo_i], [[round(j*100/sum(frecuencias[f"madre{i}"]), 1) for j in frecuencias[f"madre{i}"]] for i in periodo_i])}
    porcentajes = p1 | p2
    barchars(porcentajes, "gi", gi[:-1])
def porgrupos():
    """Aggregate parental education into four groups (none / primary /
    secondary / higher), then produce every figure of the study: mean
    income per group, 2004-vs-recent bar charts, per-year CDF and
    quantile-function plots, the national eta series and the
    per-department eta bar charts and level-vs-degree regressions.

    Uses module-level globals (enaho, enahoyears, periodo_i/periodo_s,
    grupos2, departamentos, plotting constants) and calls etas_f /
    barchars; all figures are saved under desktop + 'imagenes/'.
    """
    # Group 1: no schooling
    g1 = {key: value for (key, value) in zip([f"{i}g1" for i in periodo_i], [enaho[(enaho["aÑo"] == i) & (enaho["gimax"] == gi[0])] for i in periodo_i])}
    # Group 2: primary education (complete or incomplete)
    g2 = {key: value for (key, value) in zip([f"{i}g2" for i in periodo_i], [enaho[(enaho["aÑo"] == i) & ((enaho["gimax"] == gi[1]) | (enaho["gimax"] == gi[2]))] for i in periodo_i])}
    # Group 3: secondary education
    g3 = {key: value for (key, value) in zip([f"{i}g3" for i in periodo_i], [enaho[(enaho["aÑo"] == i) & ((enaho["gimax"] == gi[3]) | (enaho["gimax"] == gi[4]))] for i in periodo_i])}
    # Group 4: higher education (university or not, complete or not)
    g4 = {key: value for (key, value) in zip([f"{i}g4" for i in periodo_i], [enaho[(enaho["aÑo"] == i) & ((enaho["gimax"] == gi[5]) | (enaho["gimax"] == gi[6]) | (enaho["gimax"] == gi[7]) | (enaho["gimax"] == gi[8]))] for i in periodo_i])}
    enahobygroups = g1 | g2 | g3 | g4
    # Weighted mean income per year, one column per group.
    df_yfam_medio = pd.DataFrame(
        {"Año": periodo_s,
         "0": [round(i) for i in [np.average(enahobygroups[f"{i}g1"]["yfam"], weights=enahobygroups[f"{i}g1"]["factor07"]) for i in periodo_i]],
         "1": [round(i) for i in [np.average(enahobygroups[f"{i}g2"]["yfam"], weights=enahobygroups[f"{i}g2"]["factor07"]) for i in periodo_i]],
         "2": [round(i) for i in [np.average(enahobygroups[f"{i}g3"]["yfam"], weights=enahobygroups[f"{i}g3"]["factor07"]) for i in periodo_i]],
         "3": [round(i) for i in [np.average(enahobygroups[f"{i}g4"]["yfam"], weights=enahobygroups[f"{i}g4"]["factor07"]) for i in periodo_i]]})
    plt.figure(figsize=figsizes)
    for i in range(len(df_yfam_medio.columns) - 1):
        plt.plot(df_yfam_medio["Año"], df_yfam_medio[f"{i}"], label=grupos2[i], marker=marcadores[i], alpha=0.7, linestyle="dashed")
    plt.plot(df_yfam_medio["Año"], yfam_medio, negro, linewidth=3, label="Ingreso medio", marker="o", alpha=0.6)
    plt.title(f"Ingreso medio familiar por grado de instrucción del padre más instruido del jefe de hogar, {inicio}-{fin}", **fuente)
    plt.xlabel("Año", **fuente)
    plt.ylabel("Ingreso mensual familiar (en soles)", **fuente)
    plt.xticks(**fuente)
    plt.yticks(**fuente)
    plt.legend(prop=font_manager.FontProperties(family=fuente["fontname"]))
    plt.ylim([-100, 7100])
    plt.grid()
    plt.figtext(source_pos[0], source_pos[1], source, **fuente)
    plt.subplots_adjust(bottom=0.12)
    plt.savefig(desktop + f"/imagenes/ingresomediogrupos.png", bbox_inches='tight')
    # plt.show()
    plt.close()
    # Weighted population counts per group, for father and mother separately.
    f1 = {key: value for (key, value) in
          zip([f"padre{i}" for i in periodo_i],
              [[enahoyears[f"{i}"][enahoyears[f"{i}"]["p45_1"] == gi[0]]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_1"] == gi[1]) | (enahoyears[f"{i}"]["p45_1"] == gi[2])]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_1"] == gi[3]) | (enahoyears[f"{i}"]["p45_1"] == gi[4])]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_1"] == gi[5]) | (enahoyears[f"{i}"]["p45_1"] == gi[6]) |
                                   (enahoyears[f"{i}"]["p45_1"] == gi[7]) | (enahoyears[f"{i}"]["p45_1"] == gi[8])]["factor07"].sum()]
               for i in periodo_i])}
    f2 = {key: value for (key, value) in
          zip([f"madre{i}" for i in periodo_i],
              [[enahoyears[f"{i}"][enahoyears[f"{i}"]["p45_2"] == gi[0]]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_2"] == gi[1]) | (enahoyears[f"{i}"]["p45_2"] == gi[2])]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_2"] == gi[3]) | (enahoyears[f"{i}"]["p45_2"] == gi[4])]["factor07"].sum(),
                enahoyears[f"{i}"][(enahoyears[f"{i}"]["p45_2"] == gi[5]) | (enahoyears[f"{i}"]["p45_2"] == gi[6]) |
                                   (enahoyears[f"{i}"]["p45_2"] == gi[7]) | (enahoyears[f"{i}"]["p45_2"] == gi[8])]["factor07"].sum()]
               for i in periodo_i])}
    frecuencias = f1 | f2
    # Counts to percentages (1 decimal).
    p1 = {key: value for (key, value) in zip([f"padre{i}" for i in periodo_i], [[round(j*100/sum(frecuencias[f"padre{i}"]), 1) for j in frecuencias[f"padre{i}"]] for i in periodo_i])}
    p2 = {key: value for (key, value) in zip([f"madre{i}" for i in periodo_i], [[round(j*100/sum(frecuencias[f"madre{i}"]), 1) for j in frecuencias[f"madre{i}"]] for i in periodo_i])}
    porcentajes = p1 | p2
    barchars(porcentajes, "grupos2", grupos2)
    etas_dep = []
    medias_dep = []
    etas = []
    for j in periodo_i:
        # Cumulative distribution function (CDF) per group
        plt.figure(figsize=figsizes)
        for i in range(len(grupos2)):
            data = np.array(enahobygroups[f"{j}g{i + 1}"]["yfam"])
            x = np.sort(data)
            y = np.arange(len(x)) / float(len(x))
            plt.plot(x, y, marker='o', label=grupos2[i], color=colores[i])
        plt.xlabel('Ingreso familiar (en soles)', **fuente)
        plt.ylabel('Probabilidad', **fuente)
        plt.title("Perú: Función de distribución acumulada por grado de instrucción del padre más instruido ($G^t_{\phi}$), " + f"{j}", **fuente)
        plt.xticks(**fuente)
        plt.yticks(**fuente)
        plt.legend(prop=font_manager.FontProperties(family=fuente["fontname"]))
        plt.xlim([0, 10000])
        plt.grid()
        plt.figtext(source_pos[0], source_pos[1], source, **fuente)
        plt.subplots_adjust(bottom=0.12)
        plt.savefig(desktop + f"/imagenes/cdf{j}.png", bbox_inches='tight')
        # plt.show()
        plt.close()
        # Inverse (quantile) function: income as a function of effort rank
        plt.figure(figsize=[figsizes[0], figsizes[1]])
        for i in range(len(grupos2)):
            data = np.array(enahobygroups[f"{j}g{i + 1}"]["yfam"])
            x = np.sort(data)
            y = np.arange(len(x)) / float(len(x))
            plt.plot(y, x, marker='o', label=grupos2[i], color=colores[i])
        plt.xlabel('Grado de esfuerzo ($\pi$)', **fuente)
        plt.ylabel('Ingreso familiar en soles ($v^t$) ', **fuente)
        plt.title(f'Perú: Función del objetivo dada la política: $v^t(\pi, \phi)$, {j}', **fuente)
        plt.xticks(**fuente)
        plt.yticks(**fuente)
        plt.legend(prop=font_manager.FontProperties(family=fuente["fontname"]))
        plt.ylim([0, 20000])
        plt.grid()
        plt.figtext(source_pos[0], source_pos[1], source, **fuente)
        plt.subplots_adjust(bottom=0.12)
        plt.savefig(desktop + f"/imagenes/cdf{j}_2.png", bbox_inches='tight')
        # plt.show()
        plt.close()
        # National eta for year j (dept_i=1 is ignored when dept=False).
        etas = etas_f(enahobygroups, etas, j, departamentos, 1)[0]
        medias_final = []
        dep_final = []
        for h in range(len(departamentos)):
            dep_final, medias_fin = etas_f(enahobygroups, dep_final, j, departamentos, h, dept=True)
            # W^EO proxy: income of the worst-off type in the department.
            medias_final.append(min(medias_fin))
        etas_dep.append(dep_final)
        medias_dep.append(medias_final)
    medias_dep_df = pd.DataFrame({"2004": medias_dep[0], "2005": medias_dep[1], "2006": medias_dep[2], "2007": medias_dep[3],
                                  "2008": medias_dep[4], "2009": medias_dep[5], "2010": medias_dep[6], "2011": medias_dep[7],
                                  "2012": medias_dep[8], "2013": medias_dep[9], "2014": medias_dep[10], "2015": medias_dep[11],
                                  "2016": medias_dep[12], "2017": medias_dep[13], "2018": medias_dep[14], "2019": medias_dep[15],
                                  "2020": medias_dep[16], "2021": medias_dep[17]}, index=departamentos)
    etas_dep_df = pd.DataFrame({"2004": etas_dep[0], "2005": etas_dep[1], "2006": etas_dep[2], "2007": etas_dep[3],
                                "2008": etas_dep[4], "2009": etas_dep[5], "2010": etas_dep[6], "2011": etas_dep[7],
                                "2012": etas_dep[8], "2013": etas_dep[9], "2014": etas_dep[10], "2015": etas_dep[11],
                                "2016": etas_dep[12], "2017": etas_dep[13], "2018": etas_dep[14], "2019": etas_dep[15],
                                "2020": etas_dep[16], "2021": etas_dep[17]}, index=departamentos)
    print(etas_dep_df)
    print(medias_dep_df)
    # resultados = pd.DataFrame({"periodos": periodo_i,
    #                            "etas": etas})
    # sns.regplot(data=resultados, x="periodos", y="etas")
    plt.figure(figsize=(figsizes[0]*1.2, figsizes[1]))
    plt.plot(periodo_s, etas, "black")
    plt.title("Perú: Evolución de los grados de equiparación de oportunidades ($\eta$), 2004-2021", **fuente)
    plt.xlabel('Año', **fuente)
    plt.ylabel('$\eta$', **fuente)
    plt.xticks(**fuente)
    plt.yticks(**fuente)
    plt.ylim([0.85, 1.05])
    plt.figtext(source_pos[0], source_pos[1], source, **fuente)
    plt.subplots_adjust(bottom=0.12)
    plt.grid()
    for x, y in zip(periodo_s, etas):
        label = "{:.5f}".format(y)
        plt.annotate(label, (x, y), xytext=(0, 10),
                     textcoords="offset points", ha='center',
                     arrowprops=dict(arrowstyle="->", color='black'), **fuente)
    plt.savefig(desktop + f"/imagenes/eta.png", bbox_inches='tight')
    # plt.show()
    plt.close()
    for j in periodo_s:
        dataf = pd.DataFrame({"dep": departamentos,
                              "dep_df": etas_dep_df[j]})
        dataf = dataf.sort_values("dep_df")
        plt.figure(figsize=(figsizes[0]*1.2, figsizes[1]*1.5))
        plt.barh(dataf["dep"], dataf["dep_df"], height=.8, align="center", color=(0.2, 0.4, 0.6, 0.6))
        plt.title(f"Perú: Grados de equiparación de oportunidades ($\eta$) por departamentos, {j}", **fuente)
        plt.xlabel('Grado de equiparación de oportunidades: $\eta$', **fuente)
        plt.ylabel('Departamento', **fuente)
        plt.xticks(**fuente)
        plt.yticks(**fuente)
        plt.xlim([0.75, 1.0])
        plt.figtext(source_pos[0], source_pos[1], source, **fuente)
        plt.subplots_adjust(bottom=0.12)
        plt.grid(axis="x")
        plt.savefig(desktop + f"/imagenes/eta{j}.png", bbox_inches='tight')
        # plt.show()
        plt.close()
        # Level (W^EO) vs degree (eta) scatter with a linear fit.
        m, b = np.polyfit(medias_dep_df[j], etas_dep_df[j], deg=1)
        plt.figure(figsize=(figsizes[0]*1.2, figsizes[1]*1.4))
        plt.scatter(medias_dep_df[j], etas_dep_df[j], color="black")
        # NOTE(review): integer [i] on a label-indexed Series relies on
        # positional fallback — verify on newer pandas versions.
        for i, txt in enumerate(departamentosISO):
            plt.annotate(txt, (medias_dep_df[j][i], etas_dep_df[j][i]), **fuente)
        plt.plot(medias_dep_df[j], m * medias_dep_df[j] + b, "black", alpha=0.7, linewidth=1.5)
        # Perú: Pares ordenados $d = (\hat{W}_i^{EO}, \eta_i)$ por departamentos y regresión lineal
        plt.title("Perú: Niveles ($\hat{W}_i^{EO}$) y grados ($\eta_i$) de desarrollo por departamentos, " + f"{j}", **fuente)
        plt.xlabel('Nivel de equiparación de oportunidades: $\hat{W}^{EO}$', **fuente)
        plt.ylabel('Grado de equiparación de oportunidades: $\eta$', **fuente)
        plt.xticks(**fuente)
        plt.yticks(**fuente)
        plt.figtext(source_pos[0], source_pos[1], source, **fuente)
        plt.subplots_adjust(bottom=0.12)
        plt.grid()
        plt.savefig(desktop + f"/imagenes/des{j}.png", bbox_inches='tight')
        # plt.show()
        plt.close()
# porgi()
# Entry point: generate all group-based figures.
porgrupos()
| fabazan/indice-desarrollo | indicadores.py | indicadores.py | py | 23,577 | python | es | code | 0 | github-code | 13 |
def demo(data: list, target: int):
    """Return the index of `target` in `data`, or -1 when it is absent."""
    return data.index(target) if target in data else -1
if __name__ == '__main__':
    # Manual smoke test: 0 is absent from the list, so this prints -1.
    result = demo([2, 3, 1, 3, 124], 0)
    print(result)
32859280768 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .. import Unit
from ...lib.patterns import defanged, indicators
class defang(Unit):
    """
    Defangs all domains and ipv4 addresses in the input data by replacing the
    last dot in the expression by `[.]`. For example, `127.0.0.1` will be
    replaced by `127.0.0[.]1`.
    """
    # Host names that match the indicator patterns but must never be
    # defanged (e.g. COM ProgIDs found in script malware).
    WHITELIST = [
        B'wscript.shell',
    ]
    def interface(self, argp):
        # Command-line options for this unit (framework argparse hook).
        argp.add_argument('-q', '--quote', action='store_true', help='Wrap all indicators in backticks for markdown code.')
        argp.add_argument('-u', '--url-only', action='store_true', help='Only defang URLs, do not look for domains or IPs.')
        argp.add_argument('-p', '--protocol', action='store_true', help='Escape the protocol colon in URLs.')
        return super().interface(argp)
    def _quote(self, word):
        # Optionally wrap an indicator in backticks (markdown inline code).
        return word if not self.args.quote else B'`%s`' % word
    def reverse(self, data):
        """Refang: undo the defanging so the data round-trips."""
        def refang(socket_string):
            return socket_string.group(0).replace(B'[.]', B'.')
        data = defanged.socket.sub(refang, data)
        data = data.replace(B'[:]//', B'://')
        return data
    def process(self, data):
        """Defang URLs (always) and bare host/IP indicators (unless
        --url-only) in `data`, returning the transformed bytes."""
        def replace_socket(socket_string, match=True):
            # Accepts either a regex match object (match=True) or raw bytes.
            if match:
                return self._quote(replace_socket(socket_string.group(0), False))
            self.log_info('replace:', socket_string)
            host = socket_string.rsplit(B':')[0].lower()
            if host in self.WHITELIST:
                return socket_string
            # Replace only the LAST dot: 127.0.0.1 -> 127.0.0[.]1
            return B'[.]'.join(socket_string.rsplit(B'.', 1))
        def replace_url(url_string):
            if not url_string:
                return url_string
            sep = B'[:]//' if self.args.protocol else B'://'
            self.log_info('replace:', url_string)
            # Defang only the host part of the URL; path is left intact.
            p, q = url_string.split(B'://')
            q = q.split(B'/', 1)
            q[0] = replace_socket(q[0], False)
            q = B'/'.join(q)
            return self._quote(p + sep + q)
        # indicators.url presumably contains a capture group, so re.split
        # yields URL matches at odd indices and surrounding text at even
        # ones — TODO confirm against lib.patterns.
        analyze = indicators.url.split(data)
        analyze[1::2] = [replace_url(t) for t in analyze[1::2]]
        if not self.args.url_only:
            analyze[0::2] = [
                indicators.socket.sub(replace_socket, t)
                for t in analyze[0::2]
            ]
        return B''.join(analyze)
22845043027 | from django.shortcuts import render
from PIL import Image
from io import BytesIO
import base64
import os
from django.conf import settings
from django.utils.crypto import get_random_string
import datetime
# Create your views here.
class InsertImage():
    """Decodes a base64 image string and stores it under MEDIA_ROOT/<location>/."""

    def insert_image(self, location, image_string):
        """Decode `image_string` (plain base64, no data-URI prefix) and save
        it as a uniquely named JPEG inside MEDIA_ROOT/<location>/.

        Returns the generated file name: <timestamp><5 random digits>.jpg.
        """
        # base64.decodestring() was deprecated and removed in Python 3.9;
        # b64decode is the supported replacement and accepts str directly.
        image_bytes = base64.b64decode(image_string)
        image = Image.open(BytesIO(image_bytes))
        now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        # Timestamp plus 5 random digits keeps upload names unique enough.
        file_name = now + get_random_string(length=5, allowed_chars='123456789') + ".jpg"
        image.save(os.path.join(settings.MEDIA_ROOT, location, file_name))
        return file_name
class DeleteImage():
    """Removes a stored image from MEDIA_ROOT/<location>/."""

    def delete_image(self, location, image_name):
        """Delete MEDIA_ROOT/<location>/<image_name>.

        Returns True on success, False if the file is missing or cannot be
        removed (the failure is logged rather than raised).
        """
        path = os.path.join(settings.MEDIA_ROOT, location, image_name)
        try:
            os.remove(path)
            return True
        except OSError as exc:
            # Narrowed from a bare `except Exception`: os.remove raises only
            # OSError subclasses (FileNotFoundError, PermissionError, ...).
            print(exc)
            print("No image was found")
            return False
| Noorzaiba/final-2-rest-api | crime_management/images_app/views.py | views.py | py | 1,280 | python | en | code | 0 | github-code | 13 |
24146723159 | import numpy as np
import time
"""
Too make things cleaner, there should probably be a separation of the two objects "room" and "problem instanse", where properties such as DT, time length etc are
properties of the problem instance and not the room, but too keep it simple the room class will contain everything
"""
class Room:
def __init__(self,
c,
DX,
DT,
TIME_LENGTH,
room_type="standard",
ROOM_STARTING_TEMP=293.15,
OUTSIDE_STARTING_TEMP=253.15, # -20deg celcius
OUTSIDE_TEMP_FUNCTION=None,
IS_OVEN_OFF=False,
OVEN_WATTAGE=None,
# tuple with the dimensions of the room (length, width, height)
room_dims=None,
oven_type='3d'
):
self.c = c
self.Q_lost = 0
self.oven_temperature = 432.15
self.room_type = room_type
self.DX = DX
self.DT = DT
# the default room is 2x2x2 meters
self.ROOM_LENGTH = room_dims[0] if room_dims else 2
self.ROOM_WIDTH = room_dims[1] if room_dims else 2
self.ROOM_HEIGHT = room_dims[2] if room_dims else 2
self.TIME_LENGTH = TIME_LENGTH
self.LENGTH_STEPS = int(self.ROOM_LENGTH / DX) + 1
self.WIDTH_STEPS = int(self.ROOM_WIDTH / DX) + 1
self.HEIGHT_STEPS = int(self.ROOM_HEIGHT / DX) + 1
self.TIME_STEPS = int(self.TIME_LENGTH / DT) + 1
self.ROOM_STARTING_TEMP = ROOM_STARTING_TEMP
self.OUTSIDE_STARTING_TEMP = OUTSIDE_STARTING_TEMP
self.OUTSIDE_TEMP_FUNCTION = OUTSIDE_TEMP_FUNCTION
# store the properties of the door in the room
self.DOOR_WIDTH = int(0.5 / DX)
self.DOOR_HEIGHT = int(1 / DX)
self.DOOR_PLACEMENT_WIDTH = int(self.WIDTH_STEPS / 2)
self.DOOR_PLACEMENT_HEIGHT = int(self.HEIGHT_STEPS / 2)
# store the properties of the windows in the room
self.WINDOW_LENGTH = int(0.5 / DX)
self.WINDOW_WIDTH = int(0.5 / DX)
self.WINDOW_HEIGHT = int(0.5 / DX)
self.WINDOW_PLACEMENT_LENGTH = int(self.LENGTH_STEPS / 2)
self.WINDOW_PLACEMENT_WIDTH = int(self.WIDTH_STEPS / 2)
self.WINDOW_PLACEMENT_HEIGHT = int(self.HEIGHT_STEPS / 2)
# store the properties of the oven in the room. Oven height=0.6m etc.
self.OVEN_LENGTH = int(0.60 / DX)
self.OVEN_WIDTH = int(0.078 / DX)
self.OVEN_HEIGHT = int(0.37 / DX)
self.OVEN_TYPE = None
self.A = self.OVEN_LENGTH*self.OVEN_HEIGHT # area
self.test_mode = None
self.store_results_as_csv = False
if OVEN_WATTAGE is not None:
if len(OVEN_WATTAGE) != self.TIME_STEPS:
raise ValueError(
'The number of wattage entries differs from the number of time steps!')
else:
self.OVEN_WATTAGE = OVEN_WATTAGE
else:
# this means that the oven is on the entire simulation
self.OVEN_WATTAGE = 500*np.ones(self.TIME_STEPS)
self.IS_OVEN_OFF = IS_OVEN_OFF
# create a temperature matrix and somewhere to store potential doors/windows etc...
# Use sets of tuples, since looping them are way faster.
self.curr_temp = self.initialize_constant_starting_temp(
ROOM_STARTING_TEMP)
self.prev_temp = self.curr_temp
self.door = set()
self.windows = set()
self.ovens = set()
self.walls = set()
# 'standard' room_type has boundary with windows and correct u-values etc.
if room_type in {'standard', 'perfectly_insulated', 'poorly_insulated'}:
self.initialize_windows()
self.initialize_door()
if oven_type == '2d':
self.OVEN_TYPE = '2d'
self.initialize_ovens()
elif oven_type == '3d':
self.OVEN_TYPE = '3d'
self.initialize_3d_ovens()
else:
raise ValueError(
'Ovens must be 2d or 3d. There are no other options.')
self.initialize_walls()
elif room_type == "simple": # 6 walls, no windows, no door, no oven
self.initialize_walls()
def get_oven_wattage(self, timestep):
if self.IS_OVEN_OFF:
return 0
else:
return self.OVEN_WATTAGE[timestep]
def get_outside_temp(self, time_step):
if self.OUTSIDE_TEMP_FUNCTION is None:
raise ValueError(
'No outside-temperature function has been specified!')
else:
return self.OUTSIDE_TEMP_FUNCTION(time_step)
def initialize_constant_starting_temp(self, temp):
return np.ones((self.LENGTH_STEPS, self.WIDTH_STEPS, self.HEIGHT_STEPS))*temp
def initialize_windows(self):
# print(f"wpw: {WINDOW_PLACEMENT_WIDTH}, wph: {WINDOW_PLACEMENT_HEIGHT}, wpl: {WINDOW_PLACEMENT_LENGTH}, wl: {WINDOW_WIDTH}, wh: {WINDOW_HEIGHT}, wl: {WINDOW_LENGTH}")
for j in range(self.WINDOW_PLACEMENT_WIDTH - self.WINDOW_WIDTH, self.WINDOW_PLACEMENT_WIDTH + self.WINDOW_WIDTH):
for k in range(self.WINDOW_PLACEMENT_HEIGHT - self.WINDOW_HEIGHT, self.WINDOW_PLACEMENT_HEIGHT + self.WINDOW_HEIGHT):
self.windows.add((0, j, k))
for i in range(self.WINDOW_PLACEMENT_LENGTH - self.WINDOW_LENGTH, self.WINDOW_PLACEMENT_LENGTH + self.WINDOW_LENGTH):
for k in range(self.WINDOW_PLACEMENT_HEIGHT - self.WINDOW_HEIGHT, self.WINDOW_PLACEMENT_HEIGHT + self.WINDOW_HEIGHT):
self.windows.add((i, 0, k))
self.windows.add((i, self.WIDTH_STEPS-1, k))
def initialize_ovens(self):
for i in range(self.WINDOW_PLACEMENT_LENGTH - self.OVEN_LENGTH, self.WINDOW_PLACEMENT_LENGTH + self.OVEN_LENGTH):
for k in range(0, self.OVEN_HEIGHT):
self.ovens.add((i, 0, k))
self.ovens.add((i, self.WIDTH_STEPS-1, k))
if len(self.ovens) == 0:
print('NOTE: Room initialized with zero oven nodes. ')
time.sleep(3)
    def initialize_3d_ovens(self):
        """Ovens have thickness and lie 2 layers from the boundary. We make
        sure the oven is at least two nodes thick.
        Raises error if the two ovens turn out to overlap due to bad parameters.
        """
        # i spans the oven length (centred on the window placement);
        # k starts two layers above the floor and covers at least 1 node of
        # height; j walks the oven thickness (at least two nodes) inwards
        # from each side wall.
        for i in range(self.WINDOW_PLACEMENT_LENGTH - self.OVEN_LENGTH, self.WINDOW_PLACEMENT_LENGTH + self.OVEN_LENGTH):
            for k in range(2, max(3, self.OVEN_HEIGHT)+2):
                for j in range(0, max(2, self.OVEN_WIDTH)):
                    # Oven offset 2 nodes in from the y = 0 wall; a node seen
                    # twice means the two ovens collided.
                    if (i, j+2, k) in self.ovens:
                        raise ValueError(
                            'Ovens were initialized on top of each other.')
                    else:
                        self.ovens.add((i, j+2, k))
                    # Mirrored oven near the y = WIDTH_STEPS - 1 wall.
                    if (i, self.WIDTH_STEPS-3-j, k) in self.ovens:
                        raise ValueError(
                            'Ovens were initialized on top of each other.')
                    else:
                        self.ovens.add((i, self.WIDTH_STEPS-3-j, k))
def initialize_door(self):
""" We force the door to not intersect with the floor or ceiling by demanding that
2 <= k <= room.HEIGHT_STEPS-2. If we don't do this, then it is harder to test our code.
"""
for j in range(self.DOOR_PLACEMENT_WIDTH - self.DOOR_WIDTH, self.DOOR_PLACEMENT_WIDTH + self.DOOR_WIDTH):
for k in range(max(2, self.DOOR_PLACEMENT_HEIGHT - self.DOOR_HEIGHT), min(self.HEIGHT_STEPS-1, self.DOOR_PLACEMENT_HEIGHT + self.DOOR_HEIGHT)):
self.door.add((self.LENGTH_STEPS-1, j, k))
def initialize_walls(self):
windows_and_stuff = self.windows.union(self.ovens, self.door)
for j in range(0, self.WIDTH_STEPS):
for k in range(0, self.HEIGHT_STEPS):
if (0, j, k) not in windows_and_stuff:
self.walls.add((0, j, k))
if (self.LENGTH_STEPS-1, j, k) not in windows_and_stuff:
self.walls.add((self.LENGTH_STEPS-1, j, k))
for i in range(0, self.LENGTH_STEPS):
for k in range(0, self.HEIGHT_STEPS):
if (i, 0, k) not in windows_and_stuff:
self.walls.add((i, 0, k))
if (i, self.WIDTH_STEPS-1, k) not in windows_and_stuff:
self.walls.add((i, self.WIDTH_STEPS-1, k))
# Floor and ceiling
for i in range(0, self.LENGTH_STEPS):
for j in range(0, self.WIDTH_STEPS):
if (i, j, 0) not in windows_and_stuff:
self.walls.add((i, j, 0))
if (i, j, self.HEIGHT_STEPS-1) not in windows_and_stuff:
self.walls.add((i, j, self.HEIGHT_STEPS-1))
    def __str__(self):
        """Return a multi-line human-readable summary of the room setup."""
        foo = f'>>> Room description:\nDims={(self.ROOM_LENGTH, self.ROOM_WIDTH, self.ROOM_HEIGHT)}\n' \
              f'Room nodes={(self.LENGTH_STEPS, self.WIDTH_STEPS, self.HEIGHT_STEPS)}\n' \
              f'Room type={self.room_type}\n' \
              f'c={self.c}, dx={self.DX}, dt={self.DT}\n' \
              f'Time length={self.TIME_LENGTH}\n' \
              f'Room starting temp={self.ROOM_STARTING_TEMP}\n' \
              f'Oven type={self.OVEN_TYPE}\n' \
              f'Volume ovens={self.DX**3 * len(self.ovens)}\n' \
              f'Num nodes per oven={(self.OVEN_LENGTH, self.OVEN_WIDTH, self.OVEN_HEIGHT)}'
        # A zero product means the oven collapsed to zero thickness in some
        # direction and was bumped up elsewhere; flag that in the summary.
        if self.OVEN_LENGTH*self.OVEN_WIDTH*self.OVEN_HEIGHT == 0:
            foo += '\n[Oven thickness was zero in one direction. This was manually changed.]\n\n'
        return foo
| robinfissum/Heat-Modelling-Using-Finite-Differences | room.py | room.py | py | 9,738 | python | en | code | 0 | github-code | 13 |
17343759081 | T = int(input())
for x in range(1, T+1):
w = input()
n = 1 # number of acceptable words
for i in range(len(w)):
m = 1
if i != 0 and w[i] != w[i-1]:
m += 1
if i != len(w)-1 and w[i] != w[i+1]:
m += 1
n *= m
print("Case #{}: {}".format(x, n % 1000000007)) | mgoks/compete | google-kick-start/2015/Round E/A. Lazy Spelling Bee/a-sol.py | a-sol.py | py | 327 | python | en | code | 0 | github-code | 13 |
16359875252 | #coding=utf8
from ..common import crawlerTool as ct
from HTMLParser import HTMLParser#这个出来是unicode的格式,后面没法弄
import sys
import traceback
reload(sys)
sys.setdefaultencoding('utf-8')
#bing 没编码,xpath text()结果是\xe5\xe2\x80\x98\xb5\xe5\xe2\x80\x98\xb5 是要从字节码编成str xpath结果是unicode,需要先encode('unicode-escape')再处理
#百度是unicode编码 u'\u5206\u9694', u'\u7b26\u201c\xb7\u201d\u662f\u600e
#//text()处理也有问题 唉,目前看来xpath还是只能配合HTMLParser().unescape 使用 不然来回转换坑爹
#相对导入不能超过最高层
def process(keyword,page):
    """Scrape one Bing result page for *keyword*.

    ``page`` is the zero-based result-page index (Bing's ``first`` parameter
    is ``page*10+1``).  Returns ``{"urlinfos": [...]}`` where each entry has
    ``url``, ``title`` and ``info`` keys.
    """
    url='https://www.bing.com/search?q=%s&pc=MOZI&form=MOZSBR&first=%s&FORM=PERE%s'%(keyword,page*10+1,page)
    urlinfos=[]# Bing differs from Baidu: Baidu already yields \uxxxx-escaped output while Bing returns \xe1-style byte strings, so decode to unicode first
    # NOTE(review): the ``page`` parameter is overwritten with the fetched HTML here.
    page = ct.crawlerTool.getPage(url)# print HTMLParser().unescape('·').encode('unicode-escape').decode('string_escape') comes out garbled
    #print page
    segments = ct.crawlerTool.getXpath('//li[@class="b_algo"]',page)# this xpath filters out most of the ad blocks
    #print segments
    for segment in segments:
        try:
            #print segment
            segment=segment.replace('·','')
            urlinfo={}
            urlinfo['url']= ct.crawlerTool.getXpath('//h2/a[1]/@href',segment)[0]
            title = HTMLParser().unescape(ct.crawlerTool.extractorText(ct.crawlerTool.getXpath('//h2/a[1]', segment)[0]))# output seems garbled later unless converted to str first
            #print title,HTMLParser().unescape(title)
            #print ct.crawlerTool.getXpath('//h2/a[1]', segment)# the '·' seems to turn into mojibake after decoding
            urlinfo['title'] = title
            urlinfo['info'] = ct.crawlerTool.getXpath('//div[@class="b_caption"]', segment)[0]
            #print urlinfo['url'], urlinfo['title'], urlinfo['info']
            urlinfos.append(urlinfo)
        except:
            # Bare except is deliberate best-effort scraping: log the broken
            # segment and keep going with the rest of the page.
            traceback.print_exc()
    return {"urlinfos":urlinfos}
def test():
    """Smoke-test: scrape the first Bing result page for a sample keyword.

    The old call passed a full search URL as the only argument and omitted
    the mandatory ``page`` parameter, so it raised a TypeError before any
    request was made.  ``process`` builds the URL itself from the keyword.
    """
    return process('python', 0)
37002790179 | #!/usr/bin/python3
# -*- coding:utf-8
'Fibonacci series'
__author__ = 'tanhc'
def test ():
a, b = 0, 1
while b < 10:
print(b, end=',')
a, b = b, a + b
if __name__ == '__main__':
test()
| tanhuacheng/Documents | python/fib.py | fib.py | py | 217 | python | en | code | 2 | github-code | 13 |
6869111399 | # 백준 문제번호 - 11651
# BOJ 11651: sort points by y-coordinate, breaking ties by x-coordinate.
num = int(input())  # number of points
temp_list = []
for i in range(num):
    [x, y] = map(int, input().split())
    # Store as [y, x] so the default lexicographic sort orders by y first,
    # then x.  (Renamed from `reversed`, which shadowed the builtin.)
    swapped = [y, x]
    temp_list.append(swapped)
# sorted() works on any iterable and returns a new list.
sorted_list = sorted(temp_list)
for i in range(num):
    # Entries are stored [y, x], so print index 1 then 0 to emit "x y".
    print(sorted_list[i][1], sorted_list[i][0])
14694084871 | #!/usr/bin/python3
"""
Class square.
"""
from models.rectangle import Rectangle
class Square(Rectangle):
    """A square shape: a Rectangle whose width and height always match.

    Inherits id/x/y handling and all validation from Rectangle; only a
    single ``size`` dimension is exposed.
    """

    def __init__(self, size, x=0, y=0, id=None):
        """Initialize the square.

        Args:
            size (int): side length (stored as both width and height).
            x (int): x-coordinate of the position (default 0).
            y (int): y-coordinate of the position (default 0).
            id (int): optional identifier, handled by the base class.

        Raises:
            TypeError, ValueError: propagated from Rectangle's validators.
        """
        super().__init__(size, size, x, y, id)

    @property
    def size(self):
        """int: side length of the square (aliases ``width``)."""
        return self.width

    @size.setter
    def size(self, value):
        # Keep the rectangle square: both dimensions must track ``size``.
        self.width = value
        self.height = value

    def update(self, *args, **kwargs):
        """Assign attributes from positional args (id, size, x, y) or keywords.

        Positional arguments take precedence over keyword arguments.
        """
        if args:
            if len(args) >= 1:
                self.id = args[0]
            if len(args) >= 2:
                self.size = args[1]
            if len(args) >= 3:
                self.x = args[2]
            if len(args) >= 4:
                self.y = args[3]
        elif kwargs:
            if 'id' in kwargs:
                self.id = kwargs['id']
            if 'size' in kwargs:
                self.size = kwargs['size']
            if 'x' in kwargs:
                self.x = kwargs['x']
            if 'y' in kwargs:
                self.y = kwargs['y']

    def to_dictionary(self):
        """Return the dictionary representation with keys id, size, x and y."""
        return {
            'id': self.id,
            'size': self.size,
            'x': self.x,
            'y': self.y
        }

    def __str__(self):
        """Return '[Square] (<id>) <x>/<y> - <size>'."""
        # Use the public x/y properties instead of reaching into Rectangle's
        # name-mangled private attributes (the old self._Rectangle__x access
        # was fragile; update() already relies on these properties existing).
        return f"[Square] ({self.id}) {self.x}/{self.y} - {self.width}"
| Ninolincy/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/square.py | square.py | py | 4,054 | python | en | code | 1 | github-code | 13 |
21293905533 | import numpy as np
import pandas as pd
filepath1 = ""
filepath2 = ""
filepath3 =""
d = {'pctile': [1, 2, 3, 4], 'race': ['White', 'White', 'Black', 'White'],
'gender' : ["F", "M", "F", "F"], 's_family' : [0.370000, 0.5555, 0.666, 0.7777],
's_indv' : [0.888, 0.999, 0.111, 0.222]}
df = pd.DataFrame(data=d)
print(df)
df = df.set_index([ 'pctile', 'race', 'gender']).unstack(0)
df = df.fillna(".")
print(df.reset_index(col_level=1).reset_index(col_level=1))
print(df.columns)
output
outcomes = ["indv", "family"]
skinnyoutcome = [""]
keepvars = ['s_' + var for var in outcomes]
data = pd.read_stata(filepath1 + "")
df = df[[keepvars, 'pctile', 'race', 'gender']]
df2 = df.unstack(level = ['s_family', 's_indv'])
#print(keepvars) | stavreva/stata_to_python | python_test.py | python_test.py | py | 748 | python | en | code | 0 | github-code | 13 |
7124623844 | from transformers import pipeline
import xml.dom.minidom
import os
# create initial BT according to parameter
# create initial BT according to parameter
def create_xml(condition_rows=None, path="/home/henry/LLM-BT/BTs_Update/initial.xml"):
    """Write the initial behaviour tree for the extracted conditions as XML.

    Args:
        condition_rows: list of [tag, target, destination] rows; when None
            (the default) the module-level ``conditions`` list is used,
            preserving the original no-argument behaviour.
        path: output file path (previously hard-coded; now overridable).
    """
    rows = conditions if condition_rows is None else condition_rows
    doc = xml.dom.minidom.Document()
    root = doc.createElement('root')
    doc.appendChild(root)
    tree = doc.createElement('BehaviorTree')
    root.appendChild(tree)
    seq = doc.createElement('Sequence')
    seq.setAttribute('text', 'IFTHENELSE')
    tree.appendChild(seq)
    for condition in rows:
        # Each row becomes one condition node: tag + "target,destination".
        node = doc.createElement(condition[0])
        attribute = condition[1] + ',' + condition[2]
        node.setAttribute('text', attribute)
        seq.appendChild(node)
    with open(path, "w", encoding='utf-8') as f:
        doc.writexml(f, indent='\t', addindent='\t', newl='\n', encoding="utf-8")
def matching(word):
    """Append a condition template for a recognised action keyword.

    Only 'move' is recognised: it adds an IsObjectOnDestination placeholder
    row to the module-level ``conditions`` list and advances ``num``.
    """
    global num
    if word != 'move':
        return
    conditions.append(['IsObjectOnDestination', 'T-para', 'D-para'])
    num += 1
# This is an example.
# you can use a interface of ChatGPT and link the output from ChatGPT to the text
classifier = pipeline("ner", model="keywords_extraction")
text = "1. Move object 1 (green block) from the sorting area to position 12 on shelf level 1. \
2. Move object 2 (green block) from the sorting area to position 13 on shelf level 1. \
3. Move object 4 (green block) from the sorting area to position 14 on shelf level 1. \
4. Move object 3 (yellow block) from the sorting area to position 22 on shelf level 2. \
5. Move object 5 (yellow block) from the sorting area to position 23 on shelf level 2. \
6. Move object 6 (red block) from the sorting area to position 32 on shelf level 3."
results = classifier(text)
for result in results:
result.pop('index')
result.pop('start')
result.pop('end')
result.pop('score')
# print(result)
# obtain parameter
num = -1
conditions = []
for result in results:
if result['entity'] == 'B-Action':
matching(result['word'])
elif result['entity'] == 'B-Target':
conditions[num][1] = result['word']
elif result['entity'] == 'I-Target':
conditions[num][1] = conditions[num][1] + '_' + result['word']
elif result['entity'] == 'B-Destination':
conditions[num][2] = result['word']
elif result['entity'] == 'I-Destination':
conditions[num][2] = conditions[num][2] + '_' + result['word']
create_xml()
os.system('cd ../BTs_Update/build/ && ./BT')
5163793969 | from django.shortcuts import render
from django.http import HttpResponse
from google.analytics.data_v1beta import BetaAnalyticsDataClient, RunRealtimeReportRequest
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import MetricType
from google.analytics.data_v1beta.types import RunReportRequest
import os
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scope and service-account key used by the Reporting API calls
# below, plus the GA view to query.
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
# NOTE(review): machine-specific absolute path -- should come from settings.
KEY_FILE_LOCATION = 'C:/Users/slinfo/Documents/GitHub/3Team/Video/active-landing-339302-f8a2d8c6730f.json'
VIEW_ID = '259130646'
def initialize_analyticsreporting():
    """Build and return an authorized Analytics Reporting API V4 service object."""
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        KEY_FILE_LOCATION, SCOPES)
    # Construct the service object from the service-account credentials.
    return build('analyticsreporting', 'v4', credentials=creds)
def get_report(analytics):
    """Run a 30-day pageview report against the configured view.

    Args:
        analytics: An authorized Analytics Reporting API V4 service object.

    Returns:
        The raw Analytics Reporting API V4 response.
    """
    request_body = {
        'reportRequests': [
            {
                'viewId': VIEW_ID,
                'dateRanges': [{'startDate': '30daysAgo', 'endDate': 'today'}],
                'metrics': [{'expression': 'ga:pageviews'}],
                'dimensions': []
            }]
    }
    return analytics.reports().batchGet(body=request_body).execute()
def get_visitors(response):
    """Extract the page-view count from an Analytics V4 report response.

    Walks every report / row / metric in the response; the last metric value
    encountered wins.  Returns the count as a string, '0' when the response
    carries no analytics data yet.
    """
    visitors = 0  # default in case there are no analytics available yet
    for report in response.get('reports', []):
        header = report.get('columnHeader', {})
        metric_headers = header.get('metricHeader', {}).get('metricHeaderEntries', [])
        for row in report.get('data', {}).get('rows', []):
            for values in row.get('metrics', []):
                for _metric_header, value in zip(metric_headers, values.get('values')):
                    visitors = value
    return str(visitors)
def dashboard(request):
    """Render the manager dashboard with the 30-day visitor count."""
    analytics = initialize_analyticsreporting()
    visitors = get_visitors(get_report(analytics))
    print(visitors)
    return render(request, 'manager/dashboard.html', {'visitors': visitors})
def analyze(request):
    """Render the realtime-analytics page with the current active-user count.

    Queries the GA4 Data API (realtime report, active users by country) and
    keeps the last metric value seen as ``activeUsers``.
    """
    # NOTE(review): mutates process-wide state with a machine-specific,
    # hard-coded credential path -- move to settings/deployment config.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "C:/Users/slinfo/Documents/GitHub/3Team/Video/active-landing-339302-f8a2d8c6730f.json"
    activeUsers = ''
    client = BetaAnalyticsDataClient()
    # NOTE(review): the GA4 property id is hard-coded here (the f-string has
    # no placeholder), unlike VIEW_ID above.
    reportRequest = RunRealtimeReportRequest(
        property=f"properties/300659810",
        dimensions=[Dimension(name="country")],
        metrics=[Metric(name="activeUsers")],
    )
    response = client.run_realtime_report(reportRequest)
    print(f"{response.row_count} rows received")
    for dimensionHeader in response.dimension_headers:
        print(f"Dimension header name: {dimensionHeader.name}")
    for metricHeader in response.metric_headers:
        metric_type = MetricType(metricHeader.type_).name
        print(f"Metric header name: {metricHeader.name} ({metric_type})")
    # [END analyticsdata_print_run_report_response_header]
    # [START analyticsdata_print_run_report_response_rows]
    print("Report result:")
    for row in response.rows:
        for dimension_value in row.dimension_values:
            print(dimension_value.value)
        for metric_value in row.metric_values:
            print(metric_value.value)
            # Last value wins: only the final row's metric is rendered.
            activeUsers = metric_value.value
    return render(request,'manager/analyze.html', {'activeUsers' : activeUsers})
33020592106 | import JackTokenizer as tk
KEYWORD_CONST = ['true', 'false', 'null', 'this']
PRIM_VAR_TYPES = ['int', 'char', 'boolean']
OP = ["+", "-", "*", "/", "&", "|", "<", ">", "="]
UNARY_OP = ["-", "~"]
STATMENT_STARTERS = ["let", "if", "while", "do", "return"]
SYMBOL = 'SYMBOL'
KEYWORD = 'KEYWORD'
STRING_CONST = 'STRING_CONST'
INT_CONST = 'INT_CONST'
IDENTIFIER = 'IDENTIFIER'
class CompilationEngine:
    """Recursive-descent parser for the Jack language (nand2tetris project 10).

    Consumes tokens from a JackTokenizer and emits the parse tree as a flat
    list of XML lines: opening tags, terminal tokens and closing tags.
    """
    def __init__(self, jack_lines):
        # Accumulated XML output lines and the token stream being parsed.
        self._xml = []
        self._token = tk.JackTokenizer(jack_lines)
    def compile(self):
        """Parse the whole class and return the XML lines."""
        self.compile_class()
        return self._xml
    def xml_append(self, symbol, type, advance=True):
        """Emit one terminal token and (by default) advance the tokenizer."""
        self._xml.append(self._token.create_xml_label(type, symbol))
        if advance:
            self._token.advance()
    def xml_append_opening(self, label):
        label = '<' + label + '>'
        self._xml.append(label)
    def xml_append_closing(self, label):
        self.xml_append_opening("/" + label)
    def expect(self, e_type, value=None):
        """Consume the current token, asserting its type (and value).

        ``value`` may be a single expected value or a list of alternatives;
        it is ignored for IDENTIFIER / INT_CONST / STRING_CONST, where only
        the token type is checked.  Raises SyntaxError on mismatch.
        """
        if e_type == SYMBOL:
            if isinstance(value, list):
                if self._token.symbol() not in value:
                    raise SyntaxError("Expected" + str(value) + "symbol")
            else:
                if self._token.symbol() != value:
                    raise SyntaxError("Expected" + str(value) + "symbol")
            self.xml_append(self._token.symbol(), self._token.get_type())
            return
        if e_type == KEYWORD:
            if isinstance(value, list):
                if self._token.keyword() not in value:
                    raise SyntaxError("Expected" + str(value) + "keyword")
            else:
                if self._token.keyword() != value:
                    raise SyntaxError("Expected" + str(value) + "keyword")
            self.xml_append(self._token.keyword(), self._token.get_type())
            return
        if e_type == IDENTIFIER:
            if self._token.get_type() != IDENTIFIER:
                raise SyntaxError("Expected an identifier")
            self.xml_append(self._token.identifier(), self._token.get_type())
            return
        if e_type == INT_CONST:
            if self._token.get_type() != INT_CONST:
                raise SyntaxError("Expected an int_const")
            self.xml_append(self._token.int_val(), self._token.get_type())
            return
        if e_type == STRING_CONST:
            if self._token.get_type() != STRING_CONST:
                raise SyntaxError("Expected a string_const")
            self.xml_append(self._token.string_val(), self._token.get_type())
            return
    def compile_class(self):
        """class: 'class' className '{' classVarDec* subroutineDec* '}'"""
        if not self._token.has_more_tokens():
            return
        self.xml_append_opening('class')
        self.expect(KEYWORD, 'class')
        self.expect(IDENTIFIER)
        self.expect(SYMBOL, '{')
        self.compile_class_var_dec()
        self.compile_subroutines()
        self.expect(SYMBOL, '}')
        self.xml_append_closing('class')
    def compile_var_name_sequence(self):
        """Consume one varName (and a following ','); True when ';' is reached."""
        self.expect(IDENTIFIER)
        if self._token.get_type() == SYMBOL:
            if self._token.symbol() == ';':
                return True
            self.expect(SYMBOL, ',')
        return False
    def compile_class_var_dec(self):
        """classVarDec: ('static'|'field') type varName (',' varName)* ';'"""
        still_var_dec = True
        while still_var_dec:
            if self._token.keyword() in ['static', 'field']:
                self.xml_append_opening('classVarDec')
                # get 'static' or 'field'
                self.expect(KEYWORD, ['static', 'field'])
                # get type of variable
                if self._token.get_type() == IDENTIFIER:
                    self.expect(IDENTIFIER)
                else:
                    self.expect(KEYWORD, PRIM_VAR_TYPES)
                done = False
                while not done:
                    done = self.compile_var_name_sequence()
                if done:
                    # Emit the terminating ';' token.
                    self.xml_append(self._token.symbol(),
                                    self._token.get_type())
                self.xml_append_closing('classVarDec')
            else:
                still_var_dec = False
        return
    def compile_subroutines(self):
        """Parse subroutine declarations until none remain."""
        while self.compile_subroutine():
            pass
    def compile_subroutine(self):
        """Parse one subroutineDec; False when '}' (end of class) is reached."""
        if self._token.get_type() == SYMBOL and \
                self._token.symbol() == "}":
            return False
        if self._token.keyword() in ['constructor', 'function', 'method']:
            self.xml_append_opening('subroutineDec')
            self.expect(KEYWORD, ['constructor', 'function', 'method'])
            # Return type: a class name or a primitive type / 'void'.
            if self._token.get_type() == KEYWORD:
                self.expect(KEYWORD, PRIM_VAR_TYPES + ['void'])
            else:
                self.expect(IDENTIFIER)
            self.expect(IDENTIFIER)
            self.expect(SYMBOL, '(')
            self.compile_parameter_list()
            self.expect(SYMBOL, ')')
            self.xml_append_opening('subroutineBody')
            self.expect(SYMBOL, '{')
            self.compile_var_dec()
            self.compile_statements()
            self.expect(SYMBOL, '}')
            self.xml_append_closing('subroutineBody')
            self.xml_append_closing('subroutineDec')
            return True
        return False
    def compile_parameter_list(self):
        """parameterList: ((type varName) (',' type varName)*)?"""
        self.xml_append_opening('parameterList')
        while self._token.get_type() != SYMBOL:
            self.expect(KEYWORD, PRIM_VAR_TYPES)
            self.expect(IDENTIFIER)
            if self._token.symbol() != ')':
                self.expect(SYMBOL, ',')
        self.xml_append_closing('parameterList')
    def compile_var_dec(self):
        """varDec: 'var' type varName (',' varName)* ';'"""
        while self._token.get_type() == KEYWORD \
                and self._token.keyword() == "var":
            self.xml_append_opening('varDec')
            self.expect(KEYWORD, "var")
            if self._token.get_type() == IDENTIFIER:
                self.expect(IDENTIFIER)
            else:
                self.expect(KEYWORD, PRIM_VAR_TYPES)
            self.expect(IDENTIFIER)
            while self._token.get_type() == SYMBOL \
                    and self._token.symbol() == ",":
                self.expect(SYMBOL, ',')
                self.expect(IDENTIFIER)
            self.expect(SYMBOL, ';')
            self.xml_append_closing('varDec')
    def compile_statements(self):
        """statements: statement* (the wrapping tag is dropped when empty)."""
        self.xml_append_opening('statements')
        at_least_one = False
        while self.compile_statement():
            at_least_one = True
        if at_least_one:
            self.xml_append_closing('statements')
        else:
            # No statements parsed: retract the opening tag.
            self._xml.pop()
    def compile_statement(self):
        """Dispatch on the statement keyword; False when none matches."""
        if self._token.get_type() == KEYWORD and \
                self._token.keyword() in STATMENT_STARTERS:
            if self._token.keyword() == 'let':
                self.compile_let()
            elif self._token.keyword() == 'if':
                self.compile_if()
            elif self._token.keyword() == 'while':
                self.compile_while()
            elif self._token.keyword() == 'do':
                self.compile_do()
            elif self._token.keyword() == 'return':
                self.compile_return()
            return True
        return False
    def compile_do(self):
        """doStatement: 'do' subroutineCall ';'"""
        self.xml_append_opening('doStatement')
        self.expect(KEYWORD, 'do')
        self.compile_subroutine_call()
        self.expect(SYMBOL, ';')
        self.xml_append_closing('doStatement')
    def compile_let(self):
        """letStatement: 'let' varName ('[' expression ']')? '=' expression ';'"""
        self.xml_append_opening('letStatement')
        # 'let' keyword
        self.expect(KEYWORD, 'let')
        # varName
        self.expect(IDENTIFIER)
        # ( '[' expression ']' )? - optional
        if self._token.get_type() == SYMBOL and self._token.symbol() == '[':
            self.expect(SYMBOL, '[')
            self.compile_expression()
            self.expect(SYMBOL, ']')
        # '=' symbol
        self.expect(SYMBOL, '=')
        # expression
        self.compile_expression()
        # ';' symbol
        self.expect(SYMBOL, ';')
        self.xml_append_closing('letStatement')
    def compile_while(self):
        """whileStatement: 'while' '(' expression ')' '{' statements '}'"""
        self.xml_append_opening('whileStatement')
        # 'while' keyword
        self.expect(KEYWORD, 'while')
        # '(' symbol
        self.expect(SYMBOL, '(')
        # expression
        self.compile_expression()
        # ')' symbol
        self.expect(SYMBOL, ')')
        # '{' symbol
        self.expect(SYMBOL, '{')
        # statements
        self.compile_statements()
        # '}' symbol
        self.expect(SYMBOL, '}')
        self.xml_append_closing('whileStatement')
    def compile_return(self):
        """returnStatement: 'return' expression? ';'"""
        self.xml_append_opening('returnStatement')
        # 'return' keyword
        self.expect(KEYWORD, 'return')
        # expression? - optional
        if self._token.get_type() != SYMBOL or self._token.symbol() != ';':
            self.compile_expression()
        # ';' symbol
        self.expect(SYMBOL, ';')
        self.xml_append_closing('returnStatement')
    def compile_if(self):
        """ifStatement: 'if' '(' expression ')' '{' statements '}' ('else' '{' statements '}')?"""
        self.xml_append_opening('ifStatement')
        # 'if' keyword
        self.expect(KEYWORD, 'if')
        # '(' symbol
        self.expect(SYMBOL, '(')
        # expression
        self.compile_expression()
        # ')' symbol
        self.expect(SYMBOL, ')')
        # '{' symbol
        self.expect(SYMBOL, '{')
        # statements
        self.compile_statements()
        # '}' symbol
        self.expect(SYMBOL, '}')
        # (else clause) - optional
        if self._token.get_type() == KEYWORD and \
                self._token.keyword() == 'else':
            # 'else' keyword
            self.expect(KEYWORD, 'else')
            # '{' symbol
            self.expect(SYMBOL, '{')
            # statements
            self.compile_statements()
            # '}' symbol
            self.expect(SYMBOL, '}')
        self.xml_append_closing('ifStatement')
    def compile_expression(self, mandatory=True):
        """expression: term (op term)*; when not mandatory, False means no term."""
        self.xml_append_opening('expression')
        # term - mandatory
        if not self.compile_term():
            # No term parsed: retract the opening tag before reporting.
            self._xml.pop()
            if mandatory:
                raise SyntaxError("Expected term")
            else:
                return False
        # (op term)*
        while self._token.get_type() == SYMBOL and self._token.symbol() in OP:
            self.expect(SYMBOL, OP)
            self.compile_term()
        self.xml_append_closing('expression')
        return True
    def compile_term(self):
        """term: constant | keywordConst | '(' expr ')' | unaryOp term | varName ('[' expr ']')? | subroutineCall

        Returns False (retracting the opening tag) when no term is present.
        """
        self.xml_append_opening('term')
        if self._token.get_type() == INT_CONST:
            self.expect(INT_CONST)
        elif self._token.get_type() == STRING_CONST:
            self.expect(STRING_CONST)
        elif self._token.get_type() == KEYWORD \
                and self._token.keyword() in KEYWORD_CONST:
            self.expect(KEYWORD, KEYWORD_CONST)
        elif self._token.get_type() == SYMBOL:
            if self._token.symbol() == '(':
                self.expect(SYMBOL, '(')
                self.compile_expression()
                self.expect(SYMBOL, ')')
            elif self._token.symbol() in UNARY_OP:
                self.expect(SYMBOL, UNARY_OP)
                self.compile_term()
            else:
                self._xml.pop()
                return False
        elif self._token.get_type() == IDENTIFIER:
            # Look one token ahead to distinguish a call, an array access
            # and a plain variable reference.
            next_token, next_type = self._token.peak(1)
            if next_type == SYMBOL and next_token in ['(', '.']:
                self.compile_subroutine_call()
            elif next_type == SYMBOL and next_token == '[':
                self.expect(IDENTIFIER)
                self.expect(SYMBOL, '[')
                self.compile_expression()
                self.expect(SYMBOL, ']')
            else:
                self.expect(IDENTIFIER)
        else:
            self._xml.pop()
            return False
        self.xml_append_closing('term')
        return True
    def compile_expression_list(self):
        """expressionList: (expression (',' expression)*)?"""
        self.xml_append_opening('expressionList')
        if self.compile_expression(mandatory=False):
            while self._token.get_type() == SYMBOL \
                    and self._token.symbol() == ',':
                self.expect(SYMBOL, ',')
                self.compile_expression()
        self.xml_append_closing('expressionList')
    def compile_subroutine_call(self):
        """subroutineCall: (className|varName '.')? subroutineName '(' expressionList ')'"""
        self.expect(IDENTIFIER)
        if self._token.get_type() == SYMBOL and self._token.symbol() == ".":
            self.expect(SYMBOL, '.')
            self.expect(IDENTIFIER)
        self.expect(SYMBOL, '(')
        self.compile_expression_list()
        self.expect(SYMBOL, ')')
| damebrown/NAND_ex10 | NAND-ex10/CompilationEngine.py | CompilationEngine.py | py | 13,019 | python | en | code | 0 | github-code | 13 |
74564190738 | #!/usr/bin/env python
"""
_WMTweak_
Define extraction of a standard set of WM related PSet parameters
Note: This can be used within the CMSSW environment to act on a
process/config but does not depend on any CMSSW libraries. It needs to stay like this.
"""
from __future__ import print_function, division
from builtins import map, range, str, object
from future.utils import viewitems, viewkeys
import logging
import os
import pickle
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytesConditional
from PSetTweaks.PSetTweak import PSetTweak
# params to be extracted from an output module
_TweakOutputModules = [
"fileName",
"logicalFileName",
"compressionLevel",
"basketSize",
"splitLevel",
"overrideInputFileSplitLevels",
"maxSize",
"fastCloning",
"sortBaskets",
"dropMetaData",
# "outputCommands", #this is just a huge pile of stuff which we probably shouldnt be setting anyways
"SelectEvents.SelectEvents",
"dataset.dataTier",
"dataset.filterName",
# TODO: support dataset.* here
]
_TweakParams = [
# options
"process.options.fileMode",
"process.options.wantSummary",
"process.options.allowUnscheduled",
"process.options.makeTriggerResults",
"process.options.Rethrow",
"process.options.SkipEvent",
"process.options.FailPath",
"process.options.FailModule",
"process.options.IgnoreCompletely",
# config metadata
"process.configurationMetadata.name",
"process.configurationMetadata.version",
"process.configurationMetadata.annotation",
# source
"process.source.maxEvents",
"process.source.skipEvents",
"process.source.firstEvent",
"process.source.firstRun",
"process.source.firstLuminosityBlock",
"process.source.numberEventsInRun",
"process.source.fileNames",
"process.source.secondaryFileNames",
"process.source.fileMatchMode",
"process.source.overrideCatalog",
"process.source.numberEventsInLuminosityBlock",
"process.source.firstTime",
"process.source.timeBetweenEvents",
"process.source.eventCreationDelay",
"process.source.needSecondaryFileNames",
"process.source.parametersMustMatch",
"process.source.branchesMustMatch",
"process.source.setRunNumber",
"process.source.skipBadFiles",
"process.source.eventsToSkip",
"process.source.lumisToSkip",
"process.source.eventsToProcess",
"process.source.lumisToProcess",
"process.source.noEventSort",
"process.source.duplicateCheckMode",
"process.source.inputCommands",
"process.source.dropDescendantsOfDroppedBranches",
# maxevents
"process.maxEvents.input",
"process.maxEvents.output",
# TODO: there are more settings stored as a VPSet which are a complete
# ballache to handle, suggest asking framework to change interface here
# job report service
# Everything has shifted to the default cff
# message logger
# Everything is in the default cff
# random seeds
"process.RandomNumberGeneratorService.*.initialSeed",
"process.GlobalTag.globaltag",
]
class WMTweakMaskError(Exception):
    """Raised when a job mask cannot be applied to a process."""

    def __init__(self, mask=None, msg="Cannot set process from job mask"):
        super(WMTweakMaskError, self).__init__()
        self.mask = mask
        self.message = msg

    def __str__(self):
        # Preserve the historical "Error: <msg> \n Mask: <mask>" wording.
        return "Error: {} \n Mask: {}".format(self.message, self.mask)
def lfnGroup(job):
    """
    _lfnGroup_

    Compute the lfn group string from the job's agent number and counter
    (both default to 0 when absent).  The result is the agent number
    followed by the zero-padded thousands bucket of the job counter.
    """
    agentPart = str(job.get("agentNumber", 0))
    bucketPart = str(job.get("counter", 0) // 1000).zfill(4)
    return agentPart + bucketPart
def hasParameter(pset, param, nopop=False):
    """
    _hasParameter_

    check that pset provided has the attribute chain
    specified.

    Eg if param is pset.attr1.attr2.attr3
    check for pset.attr1.attr2.attr3

    returns True if parameter exists, False if not
    """
    params = param.split(".")
    if not nopop:
        params.pop(0)  # first param is the pset we have the reference to
    lastParam = pset
    for param in params:
        lastParam = getattr(lastParam, param, None)
        if lastParam is None:
            return False
    # Simplified from a redundant if/return pair: lastParam is only None
    # here when pset itself was None and the attribute chain was empty;
    # preserve that False result.
    return lastParam is not None
def getParameter(pset, param, nopop=False):
    """
    _getParameter_

    Retrieve the specified parameter from the PSet Provided
    given the attribute chain

    returns None if not found
    """
    attrChain = param.split(".")
    if not nopop:
        attrChain.pop(0)  # leading component names the pset itself
    current = pset
    for attrName in attrChain:
        current = getattr(current, attrName, None)
        if current is None:
            return None
    return current.value()
def setParameter(process, param, value):
    """
    _setParameter_

    Set the value of the parameter to the given value.

    - process is the reference to the process
    - param is the name of the param as process.pset1.pset2...parameter
    - value is the value to set that paramter to
    """
    attrChain = param.split('.')[1:]  # drop the leading process name
    target = process
    for attrName in attrChain:
        target = getattr(target, attrName, None)
        if target is None:
            # Log and bail out silently, matching the historical behaviour.
            msg = "Cannot find attribute named: %s\n" % attrName
            msg += "Cannot set value: %s" % param
            logging.error(msg)
            return
    target.setValue(value)
    return
def expandParameter(process, param):
    """
    _expandParameter_

    If param contains a wildcard * then expand it to the list of
    matching parameters.

    Returns a dict mapping fully expanded parameter paths to the leaf
    objects (or plain values) found at those paths.
    """
    segments = param.split('.')
    segments.pop(0)
    # frontier: paths resolved so far that still have segments left to walk
    frontier = {"process": process}
    resolved = {}
    while segments:
        segment = segments.pop(0)
        nextFrontier = {}
        if segment == "*":
            # expand the wildcard over every parameter of each frontier node
            for path, node in viewitems(frontier):
                for name in listParams(node):
                    childPath = "%s.%s" % (path, name)
                    child = getattr(node, name)
                    if not hasattr(child, "parameters_"):
                        # leaf value: only record it when the pattern ends here
                        if len(segments) == 0:
                            resolved[childPath] = child
                        continue
                    nextFrontier[childPath] = child
        else:
            for path, node in viewitems(frontier):
                childPath = "%s.%s" % (path, segment)
                child = getattr(node, segment, None)
                if not hasattr(child, "parameters_"):
                    # leaf (or missing -> None) value: record and stop walking
                    resolved[childPath] = child
                    continue
                nextFrontier[childPath] = child
        frontier = nextFrontier
    return resolved
listParams = lambda x: [y for y in x.parameters_()]
class TweakMaker(object):
    """
    _TweakMaker_

    Object to generate a Tweak instance from a generic
    configuration by searching for a set of specific parameters
    within the process, all output modules and a set of parameters
    within the output modules
    """
    def __init__(self, processParams=None, outmodParams=None):
        # fall back to the module-level default parameter lists when the
        # caller does not supply explicit ones
        processParams = processParams or _TweakParams
        outmodParams = outmodParams or _TweakOutputModules
        self.processLevel = processParams
        self.outModLevel = outmodParams
    def __call__(self, process):
        """Build and return a PSetTweak for *process*.

        Process-level parameters (with wildcards expanded) are copied when
        present; then every output module's name is recorded and its known
        parameters are copied under ``process.<module>.<param>``.
        """
        tweak = PSetTweak()
        # handle process parameters
        processParams = []
        for param in self.processLevel:
            # expandParameter resolves any '*' wildcards into concrete paths
            processParams.extend(viewkeys(expandParameter(process, param)))
        for param in processParams:
            if hasParameter(process, param):
                tweak.addParameter(param, getParameter(process, param))
        # output modules
        tweak.addParameter('process.outputModules_', [])
        for outMod in process.outputModules_():
            tweak.getParameter('process.outputModules_').append(outMod)
            outModRef = getattr(process, outMod)
            for param in self.outModLevel:
                fullParam = "process.%s.%s" % (outMod, param)
                # nopop=True: param is relative to the output module itself
                if hasParameter(outModRef, param, True):
                    tweak.addParameter(fullParam, getParameter(outModRef, param, True))
        return tweak
def makeTweak(process):
    """
    _makeTweak_

    Create a PSetTweak instance using the list of potential parameters
    defined above. If the process has those parameters, they get added
    to the tweak, if not, they are left out.
    """
    return TweakMaker()(process)
def applyTweak(process, tweak, fixup=None):
    """
    _applyTweak_

    Add the changes contained in the tweak to the process to give a job specific
    process.  The fixup parameters is a dictionary keyed by parameter name. If
    the tweak contains a parameter in the dictionary the value in the dict will
    be calls and passed the process.

    This is useful for preparing the process before the value is applied (ie-
    making sure all the necessary PSets and configuration values exist).
    """
    for param, value in tweak:
        if isinstance(value, type(u'')) and hasattr(value, "encode"):
            # NOTE(review): under Python 3, type(u'') is str, so every str
            # value is re-encoded to bytes before being applied - presumably
            # a Python 2 unicode workaround; confirm downstream expects bytes.
            logging.info("Found unicode parameter type for param: %s, with value: %s", param, value)
            value = value.encode("utf-8")
        if fixup and param in fixup:
            # give the fixup callable a chance to create missing PSets first
            fixup[param](process)
        setParameter(process, param, value)
def childParameters(p, x):
    """Return the settings of ConfigSection *x* that are not child sections.

    *p* (the section path) is unused but kept for caller compatibility.
    Replaces the former lambda assignment (PEP 8 E731).
    """
    return [i for i in x._internal_settings if i not in x._internal_children]


def childSections(s):
    """Return the child ConfigSection objects of section *s*."""
    return [getattr(s, x) for x in s._internal_children]
class ConfigSectionDecomposer(object):
    """
    _ConfigSectionDecomposer_

    Util to collapse a ConfigSection to a dict of . delimited param: values
    where the params contain the section structure.

    May turn out to be generally useful for ConfigSections.
    """

    def __init__(self):
        self.configSects = []
        self.parameters = {}
        self.queue = []

    def __call__(self, configSect):
        """
        _operator(configSect)_

        Recursively traverse all parameters in this and all child PSets,
        recording each one under its dot-delimited section path.
        """
        self.queue.append(configSect._internal_name)
        sectionPath = ".".join(self.queue)
        self.configSects.append(sectionPath)
        for name in childParameters(sectionPath, configSect):
            self.parameters["%s.%s" % (sectionPath, name)] = getattr(configSect, name)
        for child in childSections(configSect):
            self(child)
        self.queue.pop(-1)
def decomposeConfigSection(csect):
    """
    _decomposeConfigSection_

    Util to convert a config section into a . delimited dict of
    parameters mapped to values
    """
    worker = ConfigSectionDecomposer()
    worker(csect)
    return worker.parameters
def makeTaskTweak(stepSection, result):
    """
    _makeTaskTweak_

    Create a tweak for options in the task that apply to all jobs.

    Currently only extracts the GlobalTag (and its transaction id) from the
    step's pickled arguments, adding them to *result* in place.
    """
    # GlobalTag
    if hasattr(stepSection, "application"):
        if hasattr(stepSection.application, "configuration"):
            if hasattr(stepSection.application.configuration, "pickledarguments"):
                # the arguments were pickled by the spec; re-encode to bytes
                # before unpickling when running under Python 3
                pklArgs = encodeUnicodeToBytesConditional(stepSection.application.configuration.pickledarguments,
                                                          condition=PY3)
                args = pickle.loads(pklArgs)
                if 'globalTag' in args:
                    result.addParameter("process.GlobalTag.globaltag", "customTypeCms.string('%s')" % args['globalTag'])
                if 'globalTagTransaction' in args:
                    result.addParameter("process.GlobalTag.DBParameters.transactionId", "customTypeCms.untracked.string('%s')" % args['globalTagTransaction'])
    return
def makeJobTweak(job, result):
    """
    _makeJobTweak_

    Convert information from a WMBS Job object into a PSetTweak
    that can be used to modify a CMSSW process.

    Populates *result* (a PSetTweak) in place with the job's input files,
    first event/lumi/run settings, run+lumi mask and any per-job baggage
    parameters, then returns it.

    :raises WMTweakMaskError: when required first-lumi/first-event
        information is missing from the job mask.
    """
    baggage = job.getBaggage()
    # Check in the baggage if we are processing .lhe files
    lheInput = getattr(baggage, "lheInputFiles", False)
    # Input files and secondary input files.
    primaryFiles = []
    secondaryFiles = []
    for inputFile in job["input_files"]:
        if inputFile["lfn"].startswith("MCFakeFile"):
            # If there is a preset lumi in the mask, use it as the first
            # luminosity setting
            if job['mask'].get('FirstLumi', None) is not None:
                logging.info("Setting 'firstLuminosityBlock' attr to: %s", job['mask']['FirstLumi'])
                result.addParameter("process.source.firstLuminosityBlock",
                                    "customTypeCms.untracked.uint32(%s)" % job['mask']['FirstLumi'])
            else:
                # We don't have lumi information in the mask, raise an exception
                raise WMTweakMaskError(job['mask'],
                                       "No first lumi information provided")
            continue
        primaryFiles.append(inputFile["lfn"])
        for secondaryFile in inputFile["parents"]:
            secondaryFiles.append(secondaryFile["lfn"])
    logging.info("Adding %d files to 'fileNames' attr", len(primaryFiles))
    logging.info("Adding %d files to 'secondaryFileNames' attr", len(secondaryFiles))
    if len(primaryFiles) > 0:
        result.addParameter("process.source.fileNames", "customTypeCms.untracked.vstring(%s)" % primaryFiles)
        if len(secondaryFiles) > 0:
            result.addParameter("process.source.secondaryFileNames", "customTypeCms.untracked.vstring(%s)" % secondaryFiles)
    elif not lheInput:
        # First event parameter should be set from whatever the mask says,
        # That should have the added protection of not going over 2^32 - 1
        # If there is nothing in the mask, then we fallback to the counter method
        if job['mask'].get('FirstEvent', None) is not None:
            logging.info("Setting 'firstEvent' attr to: %s", job['mask']['FirstEvent'])
            result.addParameter("process.source.firstEvent", "customTypeCms.untracked.uint32(%s)" % job['mask']['FirstEvent'])
        else:
            # No first event information in the mask, raise an error
            raise WMTweakMaskError(job['mask'],
                                   "No first event information provided in the mask")
    mask = job['mask']
    # event limits
    maxEvents = mask.getMaxEvents()
    if maxEvents is None:
        maxEvents = -1
    logging.info("Setting 'maxEvents.input' attr to: %s", maxEvents)
    result.addParameter("process.maxEvents", "customTypeCms.untracked.PSet(input=cms.untracked.int32(%s))" % maxEvents)
    # We don't want to set skip events for MonteCarlo jobs which have
    # no input files.
    firstEvent = mask['FirstEvent']
    if firstEvent is not None and firstEvent >= 0 and (len(primaryFiles) > 0 or lheInput):
        if lheInput:
            # LHE event numbering is 1-based, hence the -1 offset
            logging.info("Setting 'skipEvents' attr to: %s", firstEvent - 1)
            result.addParameter("process.source.skipEvents", "customTypeCms.untracked.uint32(%s)" % (firstEvent - 1))
        else:
            logging.info("Setting 'skipEvents' attr to: %s", firstEvent)
            result.addParameter("process.source.skipEvents", "customTypeCms.untracked.uint32(%s)" % firstEvent)
    firstRun = mask['FirstRun']
    if firstRun is not None:
        result.addParameter("process.source.firstRun", "customTypeCms.untracked.uint32(%s)" % firstRun)
    elif not len(primaryFiles):
        # Then we have a MC job, we need to set firstRun to 1
        logging.debug("MCFakeFile initiated without job FirstRun - using one.")
        result.addParameter("process.source.firstRun", "customTypeCms.untracked.uint32(1)")
    runs = mask.getRunAndLumis()
    lumisToProcess = []
    for run in viewkeys(runs):
        lumiPairs = runs[run]
        for lumiPair in lumiPairs:
            if len(lumiPair) != 2:
                # Do nothing
                continue
            lumisToProcess.append("%s:%s-%s:%s" % (run, lumiPair[0], run, lumiPair[1]))
    if len(lumisToProcess) > 0:
        logging.info("Adding %d run/lumis mask to 'lumisToProcess' attr", len(lumisToProcess))
        result.addParameter("process.source.lumisToProcess", "customTypeCms.untracked.VLuminosityBlockRange(%s)" % lumisToProcess)
    # install any settings from the per job baggage
    procSection = getattr(baggage, "process", None)
    if procSection is None:
        return result
    baggageParams = decomposeConfigSection(procSection)
    for k, v in viewitems(baggageParams):
        # NOTE: bool is a subclass of int, so boolean baggage values are
        # wrapped as uint32 here - assumed intentional; confirm before changing.
        if isinstance(v, str):
            v = "customTypeCms.untracked.string(%s)" % v
        elif isinstance(v, int):
            v = "customTypeCms.untracked.uint32(%s)" % v
        elif isinstance(v, list):
            v = "customTypeCms.untracked.vstring(%s)" % v
        result.addParameter(k, v)
    # BUGFIX: this path previously ended with a bare `return` (None) while the
    # procSection-is-None branch above returned the tweak; always return result.
    return result
def makeOutputTweak(outMod, job, result):
    """
    _makeOutputTweak_

    Make a PSetTweak for the output module and job instance provided.

    Sets the physical file name for the module and, when the output module
    carries an lfnBase, a logical file name derived from the job's lfn group.
    """
    # output filenames
    modName = outMod.getInternalName()
    logging.info("modName = %s", modName)
    fileName = "%s.root" % modName
    result.addParameter("process.%s.fileName" % modName, fileName)
    lfnBase = getattr(outMod, "lfnBase", None)
    # BUGFIX: the original wrapped the getattr in str() *before* the None
    # check, turning a missing lfnBase into the literal string "None" and
    # making the check always true; test the raw attribute instead.
    if lfnBase is not None:
        lfn = "%s/%s/%s.root" % (str(lfnBase), lfnGroup(job), modName)
        result.addParameter("process.%s.logicalFileName" % modName, lfn)
    return
def readAdValues(attrs, adname, castInt=False):
    """
    A very simple parser for the HTCondor ads available at runtime.

    :param attrs: list of string keys to look for (matched case-insensitively)
    :param adname: which ad to parse; "job" for $_CONDOR_JOB_AD or
        "machine" for $_CONDOR_MACHINE_AD
    :param castInt: if True, only keep values that parse as integer literals;
        otherwise values are kept as the string representation of the
        ClassAd expression.

    Note this is not a ClassAd parser - it will not handle new-style ads
    or any expressions.

    :returns: dict of the key/value pairs that were present in the ad and
        parseable; an empty dict on any error.
    """
    envvars = {'job': "_CONDOR_JOB_AD", 'machine': "_CONDOR_MACHINE_AD"}
    if adname not in envvars:
        logging.warning("Invalid ad name requested for parsing: %s", adname)
        return {}
    adfile = os.environ.get(envvars[adname])
    if not adfile:
        logging.warning("%s adfile is not set in environment.", adname)
        return {}
    wanted = set(i.lower() for i in attrs)
    found = {}
    try:
        with open(adfile) as fd:
            for line in fd:
                pieces = line.strip().split("=", 1)
                if len(pieces) != 2:
                    continue
                attr = pieces[0].strip().lower()
                if attr not in wanted:
                    continue
                val = pieces[1].strip()
                if not castInt:
                    found[attr] = val
                    continue
                try:
                    found[attr] = int(val)
                except ValueError:
                    # unparseable integer: warn and drop the attribute
                    logging.warning("Error parsing %s's %s value: %s", adname, attr, val)
    except IOError:
        logging.exception("Error opening %s ad:", adname)
        return {}
    return found
def resizeResources(resources):
    """
    _resizeResources_

    If the runtime environment allows it (the HTCondor job ad has
    WMCore_ResizeJob set to "true"), overwrite the 'cores' and 'memory'
    entries of *resources* with the Cpus/Memory values found in
    $_CONDOR_MACHINE_AD.

    The dict is modified in place; when resizing is disabled, or a machine
    value is missing/non-positive, the corresponding entry is left alone.
    Should not throw an exception - on error, no change is made and a
    message is logged.
    """
    resize = readAdValues(['wmcore_resizejob'], 'job').get('wmcore_resizejob', 'false')
    if resize.lower() != "true":
        logging.info("Not resizing job")
        return
    logging.info("Resizing job. Initial resources: %s", resources)
    machineAd = readAdValues(['memory', 'cpus'], 'machine', castInt=True)
    for resKey, adKey in (('cores', 'cpus'), ('memory', 'memory')):
        newValue = machineAd.get(adKey, 0)
        if newValue > 0 and resKey in resources:
            resources[resKey] = newValue
    logging.info("Resizing job. Resulting resources: %s", resources)
| dmwm/WMCore | src/python/PSetTweaks/WMTweak.py | WMTweak.py | py | 21,264 | python | en | code | 44 | github-code | 13 |
9466976214 | from smpp5.lib.constants import command_ids
from smpp5.lib.pdu.session_management import (
BindTransmitter,
BindTransmitterResp,
BindReceiver,
BindReceiverResp,
BindTransceiver,
BindTransceiverResp,
OutBind,
UnBind,
UnBindResp,
EnquireLink,
EnquireLinkResp,
AlertNotification,
GenericNack)
from smpp5.lib.pdu.message_submission import (
SubmitSm,
SubmitSmResp,
DataSm,
DataSmResp,
SubmitMulti,
SubmitMultiResp)
from smpp5.lib.pdu.anciliary_submission import (
QuerySm,
QuerySmResp,
CancelSm,
CancelSmResp,
ReplaceSm,
ReplaceSmResp)
from smpp5.lib.pdu.message_delivery import (
DeliverSm,
DeliverSmResp)
# command_id to PDU Class mappings
command_mappings = {
    command_ids.generic_nack: GenericNack,
    command_ids.bind_receiver: BindReceiver,
    command_ids.bind_receiver_resp: BindReceiverResp,
    command_ids.bind_transmitter: BindTransmitter,
    command_ids.bind_transmitter_resp: BindTransmitterResp,
    command_ids.bind_transceiver: BindTransceiver,
    command_ids.bind_transceiver_resp: BindTransceiverResp,
    command_ids.outbind: OutBind,
    command_ids.unbind: UnBind,
    command_ids.unbind_resp: UnBindResp,
    command_ids.enquire_link: EnquireLink,
    command_ids.enquire_link_resp: EnquireLinkResp,
    command_ids.alert_notification: AlertNotification,
    command_ids.submit_sm: SubmitSm,
    command_ids.submit_sm_resp: SubmitSmResp,
    command_ids.query_sm: QuerySm,
    command_ids.query_sm_resp: QuerySmResp,
    command_ids.cancel_sm: CancelSm,
    command_ids.cancel_sm_resp: CancelSmResp,
    command_ids.replace_sm: ReplaceSm,
    command_ids.replace_sm_resp: ReplaceSmResp,
    command_ids.submit_multi: SubmitMulti,
    command_ids.submit_multi_resp: SubmitMultiResp,
    command_ids.deliver_sm: DeliverSm,
    command_ids.deliver_sm_resp: DeliverSmResp,
}
| kashifpk/smpp5 | smpp5/smpp5/lib/pdu/__init__.py | __init__.py | py | 1,913 | python | en | code | 0 | github-code | 13 |
14857542635 | import pygame
import random
import time
from pygame.locals import *
from setup import *
pygame.init()
vec = pygame.math.Vector2
framesPerSec = pygame.time.Clock()
displaySurface = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Jumper")
class Platform(pygame.sprite.Sprite):
    """A horizontal platform with random width, position and drift speed."""

    def __init__(self):
        super().__init__()
        self.shape = pygame.Surface((random.randint(50, 100), 12))
        self.shape.fill(platform_color)
        center = (random.randint(0, WIDTH - 10), random.randint(0, HEIGHT - 30))
        self.rect = self.shape.get_rect(center=center)
        self.gotTouched = False                   # True once the player lands on it
        self.velocity = random.randint(-1, 1)     # horizontal drift: -1, 0 or 1 px/frame
        self.inMotion = True

    def move(self):
        """Drift horizontally, wrapping around the screen edges; freeze once touched."""
        if self.inMotion:
            self.rect.move_ip(self.velocity, 0)
            if self.velocity > 0 and self.rect.left > WIDTH:
                self.rect.right = 0
            if self.velocity < 0 and self.rect.right < 0:
                self.rect.left = WIDTH
            if self.gotTouched:
                self.velocity = 0
class Player(pygame.sprite.Sprite):
    """The controllable square: handles movement, jumping and platform landing."""

    def __init__(self):
        super().__init__()
        self.shape = pygame.Surface((player_size, player_size))
        self.shape.fill(player_color)
        self.rect = self.shape.get_rect()
        self.pos = vec(30, 385)
        self.velocity = vec(0, 0)
        self.acceleration = vec(0, 0)
        self.in_air = False
        self.score = 0

    def move(self):
        """Apply input, gravity and friction; wrap horizontally at screen edges."""
        self.acceleration = vec(0, 0.5)  # constant downward gravity
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[K_LEFT]:
            self.acceleration.x = -ACCELERATION
        if pressed_keys[K_RIGHT]:
            self.acceleration.x = ACCELERATION
        self.acceleration.x += self.velocity.x * FRICTION
        self.velocity += self.acceleration
        self.pos += self.velocity + 0.5 * self.acceleration
        if self.pos.x > WIDTH:
            self.pos.x = 0
        if self.pos.x < 0:
            self.pos.x = WIDTH
        self.rect.midbottom = self.pos

    def jump(self):
        """Jump only when currently standing on a platform."""
        hits = pygame.sprite.spritecollide(self, platforms, False)
        if hits and not self.in_air:
            self.in_air = True
            self.velocity.y = -15

    def back_down(self):
        """Cut the jump short when the jump key is released early."""
        if self.in_air:
            if self.velocity.y < -3:
                self.velocity.y = -3

    def update(self):
        # self.move()
        # BUGFIX: this method previously collided the module-level `player`
        # object instead of `self`; identical for the single global player,
        # but broken for any other Player instance.
        hits = pygame.sprite.spritecollide(self, platforms, False)
        if self.velocity.y > 0:
            if hits:
                if self.pos.y < hits[0].rect.bottom:
                    if not hits[0].gotTouched:
                        # first landing on this platform scores a point
                        hits[0].gotTouched = True
                        self.score += 1
                    self.pos.y = hits[0].rect.top + 1
                    self.velocity.y = 0
                    self.in_air = False
# main game's entities
player = Player()
# the floor: a full-width static black platform at the bottom of the screen
bottomPlatform = Platform()
bottomPlatform.shape = pygame.Surface((WIDTH, 20))
bottomPlatform.shape.fill((0,0,0))
bottomPlatform.rect = bottomPlatform.shape.get_rect(center = (WIDTH/2, HEIGHT - 10))
bottomPlatform.gotTouched = True   # floor never awards points
bottomPlatform.inMotion = False    # floor never drifts
all_sprites = pygame.sprite.Group()    # everything that gets drawn each frame
all_sprites.add(bottomPlatform)
all_sprites.add(player)
platforms = pygame.sprite.Group()      # collision targets for the player
platforms.add(bottomPlatform)
# level genration
def check_platforms(platform, grouped):
    """Return True when *platform* overlaps, or sits vertically within 50px
    of, any sprite in *grouped*; False otherwise.

    Previously the no-clash path fell off the end and returned None; an
    explicit False is returned now (both are falsy, so callers are
    unaffected).
    """
    if pygame.sprite.spritecollideany(platform, grouped):
        return True
    for entity in grouped:
        if entity is platform:
            continue
        if (abs(platform.rect.top - entity.rect.bottom) < 50) and (abs(platform.rect.bottom - entity.rect.top) < 50):
            return True
    return False
C = False
def platform_generator():
    """Top up the platform pool until HARD platforms exist, placing each new
    one just above the visible screen where it does not clash with the
    existing platforms.

    The original built a throwaway Platform before the retry loop and used a
    flag variable; the do-while is expressed directly instead (one fewer
    discarded Platform/RNG draw per new platform, otherwise identical).
    """
    while len(platforms) < HARD:
        width = random.randrange(50, 100)
        # keep sampling candidate platforms until one fits
        while True:
            p = Platform()
            p.rect.center = (random.randrange(0, WIDTH - width),
                             random.randrange(-50, 0))
            if not check_platforms(p, platforms):
                break
        platforms.add(p)
        all_sprites.add(p)
# initial platforms
for x in range(random.randint(4,5)):
    C = True
    pl = Platform()
    while C:   # resample until the platform doesn't clash with existing ones
        pl = Platform()
        C = check_platforms(pl, platforms)
    platforms.add(pl)
    all_sprites.add(pl)
# main loop
while True:
    f = pygame.font.SysFont("Verdana", 20)
    player.update()
    # camera scroll: once the player reaches the top third, shift the world
    # down and cull platforms that leave the bottom of the screen
    if player.rect.top <= HEIGHT / 3:
        player.pos.y += abs(player.velocity.y)
        for platform in platforms:
            platform.rect.y += abs(player.velocity.y)
            if platform.rect.top >= HEIGHT:
                platform.kill()
    # game over: the player fell below the screen
    if player.rect.top > HEIGHT:
        for entity in all_sprites:
            entity.kill()
        time.sleep(1)
        displaySurface.fill((255,0,0))
        game_over = f.render("GAME OVER!", True, (0,0,0))
        displaySurface.blit(game_over, (WIDTH/2 - 60, HEIGHT/2))
        pygame.display.update()
        time.sleep(2)
        pygame.quit()
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                player.jump()
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_SPACE:
                player.back_down()   # releasing space cuts the jump short
    platform_generator()             # keep the platform pool topped up
    displaySurface.fill((255,255,255))
    g = f.render(str(player.score), True, (255,0,0))
    displaySurface.blit(g, (WIDTH/2, 10))
    for entity in all_sprites:
        displaySurface.blit(entity.shape, entity.rect)
        entity.move()
    pygame.display.update()
    framesPerSec.tick(FPS)
28545997581 |
import strawberry
from strawberry.types import Info
import strawberry_django
from django.contrib.auth import authenticate
from strawberry_django_auth.settings import app_settings
from strawberry_django_auth.types import (
LoginInput,
TokenType
)
from strawberry_django_auth.access_token.methods import (
AccessToken
)
from strawberry_django_auth.helpers import (
get_request,
get_header,
)
from strawberry_django_auth import exceptions
from strawberry_django_auth.refresh_token.models import RefreshToken
class Authenticate:
    @strawberry.mutation
    def method(self, info: Info, credentials: LoginInput) -> TokenType:
        """Authenticate username/password; return tokens on success."""
        # NOTE(review): `response` is bound to the TokenType *class*, so the
        # assignments below mutate shared class attributes rather than a
        # per-request instance; concurrent logins could observe each other's
        # values. Confirm whether an instance should be constructed instead.
        response = TokenType
        request = get_request(info)
        user = authenticate(
            request,
            username=credentials.username,
            password=credentials.password
        )
        if user is None:
            response.success = False
            response.error = exceptions.InvalidCredentials.message
            return response
        response.success = True
        response.access_token = AccessToken.create(user.get_username())
        response.refresh_token = RefreshToken.objects.create(
            user=user,
        )
        return response
class VerifyAccessToken:
    @strawberry.mutation
    def method(self, info: Info) -> bool:
        """Return True iff the request carries a valid access token header."""
        request = get_request(info)
        token = get_header(request, app_settings.AUTH_HEADER_NAME)
        return token is not None and AccessToken.verify(token)
class RefreshAccessToken:
    # TODO: placeholder - the refresh-token mutation is not implemented yet.
    pass
| owendyer/strawberry-django-auth | strawberry_django_auth/mutations.py | mutations.py | py | 1,584 | python | en | code | 0 | github-code | 13 |
21264213416 | #
# @lc app=leetcode id=34 lang=python3
#
# [34] Find First and Last Position of Element in Sorted Array
#
# @lc code=start
from typing import List
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return [first, last] indices of target in sorted nums, or [-1, -1]."""
        if not nums:
            return [-1, -1]
        return [self.findFirstOccurence(nums, target),
                self.findLastOccurence(nums, target)]

    def findFirstOccurence(self, nums: List[int], target: int):
        """Leftmost index of target, or -1.

        Binary search biased left: on a match keep narrowing toward the low
        side, then post-process the two remaining candidates low-first.
        """
        low, high = 0, len(nums) - 1
        while low < high - 1:
            mid = (low + high) // 2
            if nums[mid] < target:
                low = mid
            else:
                high = mid
        for idx in (low, high):
            if nums[idx] == target:
                return idx
        return -1

    def findLastOccurence(self, nums: List[int], target: int):
        """Rightmost index of target, or -1.

        Mirror of findFirstOccurence: bias right on a match, then
        post-process the two remaining candidates high-first.
        """
        low, high = 0, len(nums) - 1
        while low < high - 1:
            mid = (low + high) // 2
            if nums[mid] <= target:
                low = mid
            else:
                high = mid
        for idx in (high, low):
            if nums[idx] == target:
                return idx
        return -1
# @lc code=end
# ad-hoc smoke test: 9 is absent from nums, so this prints [-1, -1]
nums = [5,7,7,8,8,10]
target = 9
rs = Solution().searchRange(nums, target)
print(rs)
## Class declaration ##
class Car:
    """A simple car with a color and a speed (km/h)."""
    color = ""
    speed = 0

    def upSpped(self, value):
        """Increase speed by *value* km/h.

        NOTE: the name keeps the original misspelling for caller
        compatibility; prefer upSpeed() in new code.
        """
        self.speed += value

    # correctly spelled, backward-compatible alias for upSpped
    upSpeed = upSpped

    def downSpeed(self, value):
        """Decrease speed by *value* km/h."""
        self.speed -= value
## Main code ##
# create three cars (red / blue / yellow), each starting at speed 0
myCar1 = Car()
myCar1.color = "빨강"
myCar1.speed = 0
myCar2 = Car()
myCar2.color = "파랑"
myCar2.speed = 0
myCar3 = Car()
myCar3.color = "노랑"
myCar3.speed = 0
# accelerate each car and report its color and current speed
myCar1.upSpped(30)
print(f"자동차1의 색상은 {myCar1.color}, 현재 속도는 {myCar1.speed}km")
myCar2.upSpped(60)
print(f"자동차2의 색상은 {myCar2.color}, 현재 속도는 {myCar2.speed}km")
myCar3.upSpped(0)
print(f"자동차3의 색상은 {myCar3.color}, 현재 속도는 {myCar3.speed}km")
31321841054 | import bs4 as bs
import requests
import yaml
import jabberjaw.utils.mkt_classes as mkt_classes
import mkt_coord_defaults as mkt_coord_defaults
import dpath.util as dp
def load_sp500_tickers() -> list:
    """Scrape Wikipedia's constituents table and return the S&P 500 tickers.

    Dots in tickers are rewritten to dashes (e.g. BRK.B -> BRK-B) to match
    the symbol convention used by most data providers.
    """
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = [row.findAll('td')[0].text.replace('\n', '').replace(".", "-")
               for row in table.findAll('tr')[1:]]
    print("loaded snp500 tickers")
    return tickers
def save_snp_500_tickers(tickers: list) -> None:
    """update the YAML market coordinates config with the SNP500 tickers

    For each ticker, any existing 'points' entry is preserved (deduplicated)
    and merged over the EQUITY/SINGLE STOCK defaults, then the whole config
    is written back to market_coord_cfg.YAML.
    """
    mkt_class = "equity".upper()
    mkt_type = "single stock".upper()
    market_coordinates = mkt_classes.mkt_data_cfg()
    # lets load the defaults and then see if there is tsdb yaml to overwrite base defaults
    defaults = mkt_coord_defaults.defaults.copy()
    mkt_default_cfg_load = mkt_classes.mkt_defaults_cfg()
    dp.merge(defaults, mkt_default_cfg_load)
    # dp.search yields (path, value) pairs; take the value of the single match
    equity_defaults = [i for i in dp.search(defaults, '{0}/{1}'.format(mkt_class, mkt_type), yielded=True)].pop()[1]
    for ticker in tickers:
        mkt_asset = ticker
        # keep any points already configured for this ticker
        points_default = [i for i in
                          dp.search(market_coordinates, f'{mkt_class}/{mkt_type}/{mkt_asset}/points',
                                    yielded=True)]
        points_default = points_default.pop()[1] if len(points_default) else []
        points = list(set(points_default))
        exisiting_value = {'points': points}
        value = equity_defaults.copy()
        value.update(exisiting_value)
        xpath = '{0}/{1}/{2}'.format(mkt_class, mkt_type, mkt_asset)
        dp.new(market_coordinates, xpath, value)
    print("data ready to be saved")
    mkt_data_cfg = {'market_coordinates': market_coordinates, 'defaults': defaults}
    with open(mkt_classes.tsdb_path() + 'market_coord_cfg.YAML', "w+") as f:
        yaml.dump(mkt_data_cfg, f)
    print("added snp500 tickers to the config")
def update_mkt_cfg_equity():
    """Refresh the market data config with the current S&P 500 tickers."""
    save_snp_500_tickers(load_sp500_tickers())
if __name__ == '__main__':
# an example of how to update the cfg with the equity symbols
update_mkt_cfg_equity()
| imry-rosenbuam/jabberjaw | jabberjaw/tsdb_utils/equity_stock_cfg_update.py | equity_stock_cfg_update.py | py | 2,462 | python | en | code | 0 | github-code | 13 |
29278038046 | """
Programmer: Collin Michael Fields
Date: 11/1/2018
Purpose: Calculate the value of E out to a certain decimal place. (Currently only works to the 48th decimal place.
"""
import math
from decimal import *
#Setting the precision to a value that will not cause it to error out.
getcontext().prec = 999
print("Welcome to the E calculator.\nPlease enter a value to calculate e to that decimal place (Our current limit is 48).")
while(1 == 1):
userInput = int(input())
if(userInput > -1 and userInput < 49):
break
else:
print("You have entered an invalid value. Please re-enter a valid number")
valueToBeDisplayed = round(Decimal(math.e), userInput)
print(valueToBeDisplayed)
| CollinFields/ProjectsWIP | NumbersProjects/EToTheNthDigit.py | EToTheNthDigit.py | py | 688 | python | en | code | 0 | github-code | 13 |
31372060472 | from datetime import timedelta
from typing import Optional
from pendulum import Date, DateTime, Time, timezone
from airflow.plugins_manager import AirflowPlugin
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
UTC = timezone("UTC")
class UnevenIntervalsTimetable(Timetable):
    """Timetable splitting each day into two uneven UTC data intervals:
    06:00-16:30 and 16:30-06:00 (the following day)."""

    def infer_manual_data_interval(self, run_after: DateTime) -> DataInterval:
        """Map a manual trigger time onto the most recently completed interval."""
        delta = timedelta(days=1)
        # If time is between 6:00 and 16:30, period ends at 6am and starts at 16:30 previous day
        if run_after >= run_after.set(hour=6, minute=0) and run_after <= run_after.set(hour=16, minute=30):
            start = (run_after-delta).set(hour=16, minute=30, second=0).replace(tzinfo=UTC)
            end = run_after.set(hour=6, minute=0, second=0).replace(tzinfo=UTC)
        # If time is after 16:30 but before midnight, period is between 6:00 and 16:30 the same day
        elif run_after >= run_after.set(hour=16, minute=30) and run_after.hour <= 23:
            start = run_after.set(hour=6, minute=0, second=0).replace(tzinfo=UTC)
            end = run_after.set(hour=16, minute=30, second=0).replace(tzinfo=UTC)
        # If time is after midnight but before 6:00, period is between 6:00 and 16:30 the previous day
        else:
            start = (run_after-delta).set(hour=6, minute=0).replace(tzinfo=UTC)
            end = (run_after-delta).set(hour=16, minute=30).replace(tzinfo=UTC)
        return DataInterval(start=start, end=end)

    def next_dagrun_info(
        self,
        *,
        last_automated_data_interval: Optional[DataInterval],
        restriction: TimeRestriction,
    ) -> Optional[DagRunInfo]:
        """Compute the next scheduled run's data interval, alternating the
        06:00->16:30 and 16:30->06:00 periods."""
        if last_automated_data_interval is not None:  # There was a previous run on the regular schedule.
            last_start = last_automated_data_interval.start
            delta = timedelta(days=1)
            if last_start.hour == 6: # If previous period started at 6:00, next period will start at 16:30 and end at 6:00 following day
                next_start = last_start.set(hour=16, minute=30).replace(tzinfo=UTC)
                next_end = (last_start+delta).replace(tzinfo=UTC)
            else: # If previous period started at 16:30, next period will start at 6:00 next day and end at 16:30
                next_start = (last_start+delta).set(hour=6, minute=0).replace(tzinfo=UTC)
                next_end = (last_start+delta).replace(tzinfo=UTC)
        else:  # This is the first ever run on the regular schedule. First data interval will always start at 6:00 and end at 16:30
            next_start = restriction.earliest
            if next_start is None:  # No start_date. Don't schedule.
                return None
            if not restriction.catchup:  # If the DAG has catchup=False, today is the earliest to consider.
                next_start = max(next_start, DateTime.combine(Date.today(), Time.min).replace(tzinfo=UTC))
            next_start = next_start.set(hour=6, minute=0).replace(tzinfo=UTC)
            next_end = next_start.set(hour=16, minute=30).replace(tzinfo=UTC)
        if restriction.latest is not None and next_start > restriction.latest:
            return None  # Over the DAG's scheduled end; don't schedule.
        return DagRunInfo.interval(start=next_start, end=next_end)
class UnevenIntervalsTimetablePlugin(AirflowPlugin):
    """Airflow plugin that registers the UnevenIntervalsTimetable."""
    name = "uneven_intervals_timetable_plugin"
    timetables = [UnevenIntervalsTimetable]
| astronomer/airflow-scheduling-tutorial | plugins/uneven_intervals.py | uneven_intervals.py | py | 3,441 | python | en | code | 9 | github-code | 13 |
35268224295 | from unity_build_pipeline.Support.logger import color_print, GREEN
from unity_build_pipeline.Support.shell import run
from unity_build_pipeline.Support.fileutils import replace_string_entries
class Fastlane:
    """Installs a fastlane setup into an exported Xcode project and runs
    fastlane lanes against it."""

    def __init__(self, project):
        self.project = project

    def execute(self, args):
        """Refresh the fastlane install, then run `bundle exec fastlane` with *args*."""
        xcode_path = self.project.get_export_path('xcode')
        self.ensure_install(xcode_path)
        color_print("Starting fastlane..", GREEN)
        run(['bundle', 'exec', 'fastlane'] + args, cwd=xcode_path)

    def ensure_install(self, path):
        """Copy the stub fastlane config into *path*, fill in the project
        identifiers, install gems and patch the pbxproj versioning."""
        color_print("Updating fastlane..", GREEN)
        run(['rm', '-rf', path + '/fastlane'], path)
        run(['rm', '-f', path + '/Gemfile'], path)
        run(['cp', '-R', self.project.get_stubs_folder() +
             '/Fastlane/fastlane', path + '/fastlane'], path)
        fastfile = path + '/fastlane/Fastfile'
        for placeholder, replacement in (
                ("options[:username]", "'" + self.project.username + "'"),
                ("options[:teamid]", "'" + self.project.teamID + "'"),
                ("options[:appid]", "'" + self.project.bundleID + "'")):
            replace_string_entries(fastfile, placeholder, replacement)
        appfile = path + '/fastlane/Appfile'
        for placeholder, replacement in (
                ("username", self.project.username),
                ("appid", self.project.bundleID),
                ("teamid", self.project.teamID)):
            replace_string_entries(appfile, placeholder, replacement)
        run(['cp', '-R', self.project.get_stubs_folder() +
             '/Fastlane/Gemfile', path + '/Gemfile'], path)
        run(['bundle', 'update'], cwd=path, silent=True)
        pbx_project_path = path + '/Unity-iPhone.xcodeproj/project.pbxproj'
        content = open(pbx_project_path, 'r').read()
        if 'VERSIONING_SYSTEM = "apple-generic"' not in content:
            color_print("Patching versioning system..", GREEN)
            open(pbx_project_path, 'w').write(self.patch_pbx(content))

    def patch_pbx(self, content):
        """Return *content* with versioning settings inserted: a project
        version after each COPY_PHASE_STRIP line and the apple-generic
        versioning system after each UNITY_SCRIPTING_BACKEND line."""
        patched = []
        for line in content.split("\n"):
            patched.append(line)
            if "COPY_PHASE_STRIP" in line:
                patched.append('                                CURRENT_PROJECT_VERSION = 0.1;')
            if "UNITY_SCRIPTING_BACKEND" in line:
                patched.append('                                VERSIONING_SYSTEM = "apple-generic";')
        return "\n".join(patched)
| MadCoder39/UnityBuildPipelineiOS | unity_build_pipeline/Services/Fastlane.py | Fastlane.py | py | 2,614 | python | en | code | 2 | github-code | 13 |
10747101482 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.ticker import ScalarFormatter
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from forcepho.postprocess import Samples, Residuals
from forcepho.utils.corner import allcorner
def total_corner(samples, bands=["BLUE", "RED"], smooth=0.05, hkwargs=dict(alpha=0.65),
                 dkwargs=dict(color="red", marker="."), axes=None):
    """Corner plot of the total (summed over all sources) flux per band.

    Parameters mirror :func:`color_corner`.  The first sample of each
    chain is over-plotted in red as the reference value.

    Returns the axes array.  (BUG FIX: previously fell off the end and
    returned None, unlike the companion ``color_corner``; ``plot_both``
    assigns the return value.  Also removed the unused ``n_source``.)
    NOTE(review): the mutable default arguments are never mutated here,
    so they are harmless, but confirm before reusing the pattern.
    """
    # Sum the per-source flux chains into one total-flux chain per band.
    total = np.array([samples.chaincat[b].sum(axis=0) for b in bands])
    xx = total
    labels = [f"{b}_total" for b in bands]
    # Column 0 of each chain is used as the reference ("truth") value.
    truth = np.atleast_2d(xx[:, 0])
    axes = allcorner(xx[:, samples.n_tune:], labels, axes,
                     color="royalblue",
                     psamples=truth.T,
                     smooth=smooth,
                     hist_kwargs=hkwargs,
                     samples_kwargs=dkwargs)
    for i, ax in enumerate(np.diag(axes)):
        ax.axvline(truth[0, i], color="red")
    return axes
def color_corner(samples, bands=["BLUE", "RED"], smooth=0.05, hkwargs=dict(alpha=0.65),
                 dkwargs=dict(color="red", marker="."), axes=None):
    """Corner plot of the per-source [bands[0] - bands[1]] color posteriors.

    Colors are computed from the flux chains in *samples*; the first
    sample of each chain is over-plotted in red as the reference value.
    Returns the axes array.
    NOTE(review): the mutable default arguments are never mutated here,
    so they are harmless, but confirm before reusing the pattern.
    """
    n_source = len(samples.active)
    # Color index (magnitude difference) from the flux ratio of the bands.
    color = -2.5 * np.log10(samples.chaincat[bands[0]] / samples.chaincat[bands[1]])
    xx = color
    labels = [f"[{bands[0]} - {bands[1]}]_{i+1}" for i in range(n_source)]
    # Column 0 of each chain is used as the reference ("truth") value.
    truth = np.atleast_2d(xx[:, 0])
    # Discard the tuning/burn-in portion of the chain before plotting.
    axes = allcorner(xx[:, samples.n_tune:], labels, axes,
                     #upper=True,
                     color="royalblue",
                     psamples=truth.T,
                     smooth=smooth,
                     hist_kwargs=hkwargs,
                     samples_kwargs=dkwargs)
    for i, ax in enumerate(np.diag(axes)):
        ax.axvline(truth[0, i], color="red")
    return axes
def plot_residual(patchname, vmin=-3, vmax=10, rfig=None, raxes=None):
    """Show data, residual, and model images (in sigma units) per exposure.

    Parameters
    ----------
    patchname : str
        Path to the ``*_samples.h5`` file; the residuals file is located
        by substituting "residuals" for "samples".
    vmin, vmax : float
        Color-scale limits (flux / sigma).
    rfig, raxes : optional
        Existing figure and (nexp, 3) axes grid; created when *raxes*
        is None.

    Returns
    -------
    (rfig, raxes, cb, val) : the figure, the axes grid, the last image
        handle (for colorbars), and the final-sample catalog.
    """
    s = Samples(patchname)
    r = Residuals(patchname.replace("samples", "residuals"))
    if raxes is None:
        # BUG FIX: this branch referenced an undefined name `nexp`
        # (NameError); size the grid from the actual exposure count.
        rfig, raxes = pl.subplots(len(r.exposures), 3, sharex=True, sharey=True)
    for i, e in enumerate(r.exposures):
        data, _, _ = r.make_exp(i, value="data")
        delta, _, _ = r.make_exp(i, value="residual")
        ierr, _, _ = r.make_exp(i, value="ierr")
        kw = dict(origin="lower", vmin=vmin, vmax=vmax)
        # Everything is shown in units of sigma (multiplied by ierr).
        cb = raxes[i, 0].imshow((data * ierr).T, **kw)
        cb = raxes[i, 1].imshow((delta * ierr).T, **kw)
        cb = raxes[i, 2].imshow(((data - delta) * ierr).T, **kw)
    val = s.get_sample_cat(-1)
    return rfig, raxes, cb, val
def plot_both(patchname, band=["BLUE", "RED"], show_current=True):
    """Build the combined figure: residual mosaic (top) and corner plots
    (bottom) for the patch in *patchname*.

    Returns (fig, raxes, paxes): the figure, the residual axes grid, and
    the corner-plot axes grid.
    NOTE(review): *show_current* is accepted but never used in this body;
    nexp is hard-coded to 2 exposures — confirm against the data.
    """
    fig = pl.figure(figsize=(8, 13.5))
    # Two stacked panels: residuals on top, corner plots below.
    gs0 = GridSpec(2, 1, figure=fig)
    nexp = 2
    if True:
        # Each exposure row is `r` grid rows tall; one extra row holds
        # the horizontal colorbars.
        r = 20
        c = nexp * r
        gs_resid = GridSpecFromSubplotSpec(c+1, 3, subplot_spec=gs0[0], hspace=1.0)
        raxes = []
        for j in range(nexp):
            raxes += [fig.add_subplot(gs_resid[r*j:r*(j+1), 0])]
            raxes += [fig.add_subplot(gs_resid[r*j:r*(j+1), i], sharex=raxes[-1], sharey=raxes[-1])
                      for i in range(1, 3)]
        raxes = np.array(raxes).reshape(nexp, 3)
        titles = ["Data", "Residual", "Model"]
        _, raxes, cb, val = plot_residual(patchname, raxes=raxes)
        for i, rax in enumerate(raxes[0]):
            # Colorbar strip under each column.
            cax = fig.add_subplot(gs_resid[c:c+1, i])
            pl.colorbar(cb, cax=cax, orientation="horizontal", label=r"flux/$\sigma$")
            rax.set_title(titles[i])
        for j, rax in enumerate(raxes[:, 0]):
            rax.text(0.5, 0.9, band[j], color="magenta", transform=rax.transAxes)
    if True:
        samples = Samples(patchname)
        # 4x3 grid; total-flux corner bottom-left, color corner top-right,
        # the remaining four cells are blanked below.
        nx, ny = 4, 3
        gs_corner = GridSpecFromSubplotSpec(ny, nx, subplot_spec=gs0[1])
        paxes = np.array([fig.add_subplot(gs_corner[i, j])
                          for i in range(ny) for j in range(nx)]).reshape(ny, nx)
        taxes = paxes[-2:, :2]
        taxes = total_corner(samples, axes=taxes)
        caxes = paxes[:2, -2:]
        caxes = color_corner(samples, axes=caxes)
        empty = paxes[0, :2].tolist() + paxes[-1, -2:].tolist()
        [ax.set_frame_on(False) for ax in empty]
        [ax.set_xticks([]) for ax in empty]
        [ax.set_yticks([]) for ax in empty]
    return fig, raxes, paxes
if __name__ == "__main__":
    # --- Arguments ---
    parser = argparse.ArgumentParser()
    # input
    parser.add_argument("--patchname", type=str, default="output/together_v1/patches/patch_BLUE+RED_samples.h5")
    args = parser.parse_args()
    fig, raxes, paxes = plot_both(args.patchname)
    # Output image named after the run directory (third path component
    # from the end of the patch filename).
    fig.savefig(args.patchname.split("/")[-3] + ".png")
1873531332 | # Testing of model:
# Test the trained model: load the saved network and classify one recording.
from tensorflow.keras.models import load_model
# BUG FIX: librosa and numpy are used below but were never imported.
import librosa
import numpy as np

model = load_model('audio_classification.hdf5')
# BUG FIX: the path was assigned to `filename` but loaded via the
# undefined name `file_name` (NameError); use one name consistently.
file_name = "D:\\sound_recog\\music\\bhairavi\\Bhairavi01.wav"
audio, sample_rate = librosa.load(file_name)
# 40 MFCC coefficients per frame, averaged over time to one vector.
mfccs_features = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40)
mfccs_scaled_features = np.mean(mfccs_features.T, axis=0)
# Reshape MFCC feature to 2-D array (1 sample x 40 features) for predict().
mfccs_scaled_features = mfccs_scaled_features.reshape(1, -1)
x_predict = model.predict(mfccs_scaled_features)
predicted_label = np.argmax(x_predict, axis=1)
print(predicted_label)
56877499 | #!/usr/bin/env python
# coding=utf-8
"""Test notarization_poller.config
"""
import json
import logging
import os
from copy import deepcopy
import pytest
from immutabledict import immutabledict
import notarization_poller.config as npconfig
from notarization_poller.constants import DEFAULT_CONFIG
from notarization_poller.exceptions import ConfigError
# constants helpers and fixtures {{{1
def close_handlers(log_name=None):
    """Detach and close every handler on the named logger, then park a
    NullHandler on it so later records are swallowed silently.

    With no *log_name*, the top-level package logger is used.
    """
    target = logging.getLogger(log_name or __name__.split(".")[0])
    # Iterate over a snapshot; removeHandler mutates the live list.
    for handler in list(target.handlers):
        handler.close()
        target.removeHandler(handler)
    target.addHandler(logging.NullHandler())
# update_logging_config {{{1
def test_update_logging_config_verbose(config):
    """verbose=True must switch the worker logger to DEBUG with 3 handlers."""
    config["verbose"] = True
    npconfig.update_logging_config(config, log_name=config["log_dir"])
    logger = logging.getLogger(config["log_dir"])
    assert logger.level == logging.DEBUG
    assert len(logger.handlers) == 3
    # Clean up so later tests start from a pristine logger.
    close_handlers(log_name=config["log_dir"])
def test_update_logging_config_verbose_existing_handler(config):
    """Verbose config keeps pre-existing handlers and adds its own."""
    log = logging.getLogger(config["log_dir"])
    log.addHandler(logging.NullHandler())
    log.addHandler(logging.NullHandler())
    config["verbose"] = True
    npconfig.update_logging_config(config, log_name=config["log_dir"])
    assert log.level == logging.DEBUG
    # 2 pre-existing NullHandlers + the handlers the config update adds.
    assert len(log.handlers) == 4
    close_handlers(log_name=config["log_dir"])
def test_update_logging_config_not_verbose(config):
    """verbose=False must leave the worker logger at INFO with 3 handlers."""
    config["verbose"] = False
    npconfig.update_logging_config(config, log_name=config["log_dir"])
    logger = logging.getLogger(config["log_dir"])
    assert logger.level == logging.INFO
    assert len(logger.handlers) == 3
    # Clean up so later tests start from a pristine logger.
    close_handlers(log_name=config["log_dir"])
def test_watched_log_file(config):
    """With watch_log_file=True the logger must follow an externally
    renamed file and recreate worker.log for subsequent records."""
    config["watch_log_file"] = True
    config["log_fmt"] = "%(levelname)s - %(message)s"
    npconfig.update_logging_config(config, log_name=config["log_dir"])
    path = os.path.join(config["log_dir"], "worker.log")
    log = logging.getLogger(config["log_dir"])
    log.info("foo")
    # Simulate external logrotate: move the live log file aside.
    os.rename(path, "{}.1".format(path))
    log.info("bar")
    # Only the post-rotation record should be in the recreated file.
    with open(path, "r") as fh:
        assert fh.read().rstrip() == "INFO - bar"
    close_handlers(log_name=config["log_dir"])
def test_rotating_log_file(config):
    """The rotating handler must cap worker.log and its single backup at
    log_max_bytes each."""
    # 500 should be enough to ~fill 2 files
    MAX_SIZE = 500  # bytes
    config["watch_log_file"] = False
    config["log_max_bytes"] = MAX_SIZE
    config["log_max_backups"] = 1
    config["log_fmt"] = "%(levelname)s - %(message)s"
    npconfig.update_logging_config(config, log_name=config["log_dir"])
    path = os.path.join(config["log_dir"], "worker.log")
    log = logging.getLogger(config["log_dir"])
    # Progressively longer records force at least one rollover.
    for x in range(30):
        log.info(f"{x}" * x)
    assert os.path.getsize(path) < MAX_SIZE
    assert os.path.getsize(path + ".1") < MAX_SIZE
    close_handlers(log_name=config["log_dir"])
# get_config_from_cmdln {{{1
def test_get_config_from_cmdln():
    """A valid JSON config file is merged over DEFAULT_CONFIG and the
    result comes back as an immutabledict."""
    path = os.path.join(os.path.dirname(__file__), "data", "good.json")
    expected = deepcopy(dict(DEFAULT_CONFIG))
    with open(path) as fh:
        expected.update(json.load(fh))
    assert npconfig.get_config_from_cmdln([path]) == immutabledict(expected)
@pytest.mark.parametrize(
    "path,raises",
    ((os.path.join(os.path.dirname(__file__), "data", "good.json"), None), (os.path.join(os.path.dirname(__file__), "data", "bad.json"), ConfigError)),
)
def test_validate_config(path, raises):
    """get_config_from_cmdln raises ConfigError for an invalid config
    file and succeeds for a valid one."""
    if raises:
        with pytest.raises(raises):
            npconfig.get_config_from_cmdln([path])
    else:
        npconfig.get_config_from_cmdln([path])
| mozilla-releng/scriptworker-scripts | notarization_poller/tests/test_config.py | test_config.py | py | 3,679 | python | en | code | 13 | github-code | 13 |
class BuildStatusDetails:
    """Parse one space-separated build-status record into named fields."""

    def __init__(self, line):
        # Strip CR/LF anywhere in the record, then split on single spaces;
        # a record with fewer than 15 fields raises IndexError.
        tokens = line.replace('\r', '').replace('\n', '').split(' ')
        self.server = tokens[0]
        self.platform = tokens[1]
        self.componentGroup = tokens[2]
        self.component = tokens[3]
        self.branch = tokens[4]
        self.shortbranch = tokens[5]
        self.configId = tokens[6]
        self.configPath = tokens[7]
        self.buildType = tokens[8]
        self.configGuid = tokens[9]
        self.buildVersion = tokens[10]
        self.buildEndDateISO = tokens[11]
        self.buildUrl = tokens[12]
        self.buildStatus = tokens[13]
        self.nicebranch = tokens[14]
632064052 | from django.conf import settings
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from haystack.views import FacetedSearchView
from haystack.forms import FacetedSearchForm
from haystack.query import SearchQuerySet
# Uncomment the next two lines to enable the admin:
from cms.sitemaps import CMSSitemap
from django.contrib import admin
admin.autodiscover()
# Faceted search over content type, sector and sub-sector; consumed by
# the haystack search view below.
sqs = SearchQuerySet().facet('model_type').facet('sector').facet('sub_sector')
urlpatterns = patterns(
    '',
    # Examples:
    # url(r'^$', 'admin.views.site.home', name='home'),
    # url(r'^pursuite/', include('pursuite.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^analytics/', include('analytics.urls')),
    url(r'^tinymce/', include('tinymce.urls')),
    # Account / auth (allauth) routes.
    url(r'^account/profile/$', 'account.views.profile', name="profile"),
    url(r'^account/competency/$', 'account.views.check_competency',
        name="check_competency"),
    url(r'^account/', include('allauth.urls')),
    url(r'^search/$', FacetedSearchView(
        form_class=FacetedSearchForm,
        template='search-result.html',
        searchqueryset=sqs,
        results_per_page=10,
    ), name='haystack_search'),
    # Occupational standards / occupations / career maps.
    url(
        r'^occupational-standard/(?P<code>[A-z]{3}/[NO]\d{4})/$',
        'admin.views.occupational_standard.view_occupational_standard',
        name="occupational_standard"
    ),
    url(
        r'^career-map/(?P<slug>.*).svg$',
        'admin.views.occupation.view_career_map',
        name="career_map"
    ),
    url(
        r'^occupation/(?P<slug>.*)/$',
        'admin.views.occupation.render',
        name="render_occupation"
    ),
    url(
        r'^occupational-standard/(?P<code>[A-z]{3}/[NO]\d{4})/'
        '(?P<version>\d+\.\d+)/$',
        'admin.views.occupational_standard.view_occupational_standard',
        name="occupational_standard"
    ),
    # Qualification packs, by numeric id or by code (+ optional version).
    url(
        r'^qualification-pack/(?P<id>\d+)/$',
        'admin.views.qualification_pack.view_qualification_pack_id',
        name="qualification_pack"
    ),
    url(
        r'^qualification-pack/(?P<code>[A-z]{3}/Q\d{4})/$',
        'admin.views.qualification_pack.view_qualification_pack',
        name="qualification_pack"
    ),
    # NOTE(review): inside this *raw* string the trailing backslash and
    # the newline stay part of the regex (raw strings do not treat \ as a
    # line continuation) — this pattern likely never matches; confirm.
    url(
        r'^qualification-pack/(?P<code>[A-z]{3}/Q\d{4})/(?P<version>\d+\.\d+)/\
$', 'admin.views.qualification_pack.view_qualification_pack',
        name="qualification_pack"
    ),
    url(
        r'^wfmis-json/$', 'admin.views.common.wfmis_json', name="wfmis_json"
    ),
    # Job URLs
    url(
        r'^job/(?P<id>\d+)/$', 'admin.views.job.render', name="render_job"
    ),
    url(
        r'^jobs/$', 'admin.views.job.render_list', name="render_jobs"
    ),
    url(
        r'^jobs/-new$', 'admin.views.job.new_job', name="new_job"
    ),
    url(
        r'^job/(?P<id>\d+)/-delete$', 'admin.views.job.delete_job',
        name="delete_job"
    ),
    # Training URLs
    url(
        r'^training/(?P<id>\d+)/$', 'admin.views.training.render',
        name="render_training"
    ),
    url(
        r'^trainings/$', 'admin.views.training.render_list',
        name="render_trainings"
    ),
    url(
        r'^trainings/-new$', 'admin.views.training.new_training',
        name="new_training"
    ),
    url(
        r'^training/(?P<id>\d+)/-delete$',
        'admin.views.training.delete_training',
        name="delete_training"
    ),
    # CMS urls
    url(r'^', include('cms.urls')),
    url(
        r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap',
        {'sitemaps': {'cmspages': CMSSitemap}}
    ),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| arpitprogressive/arpittest | pursuite/urls.py | urls.py | py | 3,865 | python | en | code | 0 | github-code | 13 |
17057179934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OpenApiSceneInstanceInfo import OpenApiSceneInstanceInfo
from alipay.aop.api.domain.OpenApiSkillGroupChannelInfo import OpenApiSkillGroupChannelInfo
from alipay.aop.api.domain.OpenApiTransferSkillGroupInfo import OpenApiTransferSkillGroupInfo
class OpenApiSkillGroupInfo(object):
    """Auto-generated Alipay API model for a customer-service skill group.

    Plain-value fields pass through their setters unchanged; the
    object-valued fields (scene_instance_info, skill_group_channel,
    transfer_skill_groups) coerce incoming dicts to their model classes.
    NOTE(review): to_alipay_dict() skips any field that is falsy (None,
    "", 0, []), which is the generated-code convention here — confirm
    empty values are never meaningful before relying on round-trips.
    """
    def __init__(self):
        self._clv_meta_organization_id = None
        self._clv_skill_group_id = None
        self._clv_skill_group_type = None
        self._scene_instance_info = None
        self._skill_group_channel = None
        self._skill_group_id = None
        self._skill_group_name = None
        self._tnt_inst_id = None
        self._transfer_skill_groups = None
    # --- simple pass-through properties ---
    @property
    def clv_meta_organization_id(self):
        return self._clv_meta_organization_id
    @clv_meta_organization_id.setter
    def clv_meta_organization_id(self, value):
        self._clv_meta_organization_id = value
    @property
    def clv_skill_group_id(self):
        return self._clv_skill_group_id
    @clv_skill_group_id.setter
    def clv_skill_group_id(self, value):
        self._clv_skill_group_id = value
    @property
    def clv_skill_group_type(self):
        return self._clv_skill_group_type
    @clv_skill_group_type.setter
    def clv_skill_group_type(self, value):
        self._clv_skill_group_type = value
    # --- object-valued properties: dicts are coerced to model objects ---
    @property
    def scene_instance_info(self):
        return self._scene_instance_info
    @scene_instance_info.setter
    def scene_instance_info(self, value):
        if isinstance(value, OpenApiSceneInstanceInfo):
            self._scene_instance_info = value
        else:
            self._scene_instance_info = OpenApiSceneInstanceInfo.from_alipay_dict(value)
    @property
    def skill_group_channel(self):
        return self._skill_group_channel
    @skill_group_channel.setter
    def skill_group_channel(self, value):
        if isinstance(value, OpenApiSkillGroupChannelInfo):
            self._skill_group_channel = value
        else:
            self._skill_group_channel = OpenApiSkillGroupChannelInfo.from_alipay_dict(value)
    @property
    def skill_group_id(self):
        return self._skill_group_id
    @skill_group_id.setter
    def skill_group_id(self, value):
        self._skill_group_id = value
    @property
    def skill_group_name(self):
        return self._skill_group_name
    @skill_group_name.setter
    def skill_group_name(self, value):
        self._skill_group_name = value
    @property
    def tnt_inst_id(self):
        return self._tnt_inst_id
    @tnt_inst_id.setter
    def tnt_inst_id(self, value):
        self._tnt_inst_id = value
    @property
    def transfer_skill_groups(self):
        return self._transfer_skill_groups
    @transfer_skill_groups.setter
    def transfer_skill_groups(self, value):
        # List field: each element may be a model object or a raw dict.
        if isinstance(value, list):
            self._transfer_skill_groups = list()
            for i in value:
                if isinstance(i, OpenApiTransferSkillGroupInfo):
                    self._transfer_skill_groups.append(i)
                else:
                    self._transfer_skill_groups.append(OpenApiTransferSkillGroupInfo.from_alipay_dict(i))
    def to_alipay_dict(self):
        """Serialize to a plain dict, recursing into nested model objects.

        Falsy fields are omitted (see class NOTE).
        """
        params = dict()
        if self.clv_meta_organization_id:
            if hasattr(self.clv_meta_organization_id, 'to_alipay_dict'):
                params['clv_meta_organization_id'] = self.clv_meta_organization_id.to_alipay_dict()
            else:
                params['clv_meta_organization_id'] = self.clv_meta_organization_id
        if self.clv_skill_group_id:
            if hasattr(self.clv_skill_group_id, 'to_alipay_dict'):
                params['clv_skill_group_id'] = self.clv_skill_group_id.to_alipay_dict()
            else:
                params['clv_skill_group_id'] = self.clv_skill_group_id
        if self.clv_skill_group_type:
            if hasattr(self.clv_skill_group_type, 'to_alipay_dict'):
                params['clv_skill_group_type'] = self.clv_skill_group_type.to_alipay_dict()
            else:
                params['clv_skill_group_type'] = self.clv_skill_group_type
        if self.scene_instance_info:
            if hasattr(self.scene_instance_info, 'to_alipay_dict'):
                params['scene_instance_info'] = self.scene_instance_info.to_alipay_dict()
            else:
                params['scene_instance_info'] = self.scene_instance_info
        if self.skill_group_channel:
            if hasattr(self.skill_group_channel, 'to_alipay_dict'):
                params['skill_group_channel'] = self.skill_group_channel.to_alipay_dict()
            else:
                params['skill_group_channel'] = self.skill_group_channel
        if self.skill_group_id:
            if hasattr(self.skill_group_id, 'to_alipay_dict'):
                params['skill_group_id'] = self.skill_group_id.to_alipay_dict()
            else:
                params['skill_group_id'] = self.skill_group_id
        if self.skill_group_name:
            if hasattr(self.skill_group_name, 'to_alipay_dict'):
                params['skill_group_name'] = self.skill_group_name.to_alipay_dict()
            else:
                params['skill_group_name'] = self.skill_group_name
        if self.tnt_inst_id:
            if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
                params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
            else:
                params['tnt_inst_id'] = self.tnt_inst_id
        if self.transfer_skill_groups:
            # NOTE(review): this serializes the list elements IN PLACE,
            # replacing model objects with dicts on self — generated-code
            # behavior preserved here; confirm callers tolerate it.
            if isinstance(self.transfer_skill_groups, list):
                for i in range(0, len(self.transfer_skill_groups)):
                    element = self.transfer_skill_groups[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.transfer_skill_groups[i] = element.to_alipay_dict()
            if hasattr(self.transfer_skill_groups, 'to_alipay_dict'):
                params['transfer_skill_groups'] = self.transfer_skill_groups.to_alipay_dict()
            else:
                params['transfer_skill_groups'] = self.transfer_skill_groups
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from dict *d*; returns None for falsy input.

        Setters coerce nested dicts into their model classes.
        """
        if not d:
            return None
        o = OpenApiSkillGroupInfo()
        if 'clv_meta_organization_id' in d:
            o.clv_meta_organization_id = d['clv_meta_organization_id']
        if 'clv_skill_group_id' in d:
            o.clv_skill_group_id = d['clv_skill_group_id']
        if 'clv_skill_group_type' in d:
            o.clv_skill_group_type = d['clv_skill_group_type']
        if 'scene_instance_info' in d:
            o.scene_instance_info = d['scene_instance_info']
        if 'skill_group_channel' in d:
            o.skill_group_channel = d['skill_group_channel']
        if 'skill_group_id' in d:
            o.skill_group_id = d['skill_group_id']
        if 'skill_group_name' in d:
            o.skill_group_name = d['skill_group_name']
        if 'tnt_inst_id' in d:
            o.tnt_inst_id = d['tnt_inst_id']
        if 'transfer_skill_groups' in d:
            o.transfer_skill_groups = d['transfer_skill_groups']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/OpenApiSkillGroupInfo.py | OpenApiSkillGroupInfo.py | py | 7,224 | python | en | code | 241 | github-code | 13 |
39776626532 | from queueos.expressions import functions
FUNCTIONS = {}
class UserFunction(functions.FunctionExpression):
    """Expression wrapping a user-registered callable."""

    def execute(self, context, *args):
        """Invoke the registered callable with *context* and *args*."""
        # _func is a one-element list holding the callable (see
        # FunctionFactory.register_function); indexing keeps Python from
        # treating the stored function as a bound method.
        func = self._func[0]
        return func(context, *args)
class FunctionFactory:
    """This class instantiates objects that are sub-classes of the
    FunctionExpression. Objects are created by name, by capitalising the
    first letter and prefixing "Function". So if the function name is
    'foo', the class queueos.expressions.functions.FunctionFoo is
    instantiated.  (Docstring fixed: it previously claimed the class
    would be called 'Foo'.)
    """
    @classmethod
    def create(cls, name, *args):
        """Return a new expression instance for *name* built with *args*.

        Resolved classes are memoised in the module-level FUNCTIONS map.
        Raises ValueError when no matching class exists.
        (Removed long-dead commented-out dotted-name handling.)
        """
        if name not in FUNCTIONS:
            class_name = "Function" + name[0].upper() + name[1:]
            func_class = getattr(functions, class_name, None)
            if func_class is None:
                raise ValueError(f"Cannot find a function called '{name}'")
            FUNCTIONS[name] = func_class
        return FUNCTIONS[name](name, args)
    @classmethod
    def register_function(cls, name, func):
        """Register the callable *func* as a user function named *name*.

        A dedicated UserFunction subclass is synthesised per name.  The
        callable is wrapped in a one-element list because assigning it
        directly as a class attribute would turn it into a method.
        """
        FUNCTIONS[name] = type(
            f"Function_{name}",
            (UserFunction,),
            dict(_func=[func]),
        )
| ecmwf/queueos | queueos/expressions/FunctionFactory.py | FunctionFactory.py | py | 1,528 | python | en | code | 2 | github-code | 13 |
def solution(data, n):
    """Return *data* with every value occurring more than *n* times removed.

    Order of the surviving elements is preserved.  n < 1 yields [] (any
    present value occurs at least once, so everything would be removed).
    """
    if n < 1:
        return []
    # No value can occur more than n times in a shorter list.
    if len(data) < n:
        return data
    from collections import Counter  # local: this file has no import section
    counts = Counter(data)
    # Single O(len(data)) pass, replacing the original O(n^2)
    # count-dict + repeated list.remove() approach.
    return [value for value in data if counts[value] <= n]
| xuanchuong/google-foobar | minion-task/solution.py | solution.py | py | 470 | python | en | code | 0 | github-code | 13 |
32513933386 | import requests
from bs4 import BeautifulSoup
import json
from soupsieve import select
# Build the full URLs of the four shop collections to scrape.
url2="https://just-scrape-it.com/"
l="collections/hoodie-sweat","collections/tshirt-t-shirt-tee-shirt","collections/maillots-ete","collections/stickers"
up=[]
for i in l:
    links=url2+i
    up.append(links)
print(up)
# remove the weird characters in the json file
# data_links=json.dump(données,f,ensure_ascii=False,indent=4)
# print(link.attrs['href'])
# Fetch each collection page and print the hidden product-card labels.
for i in up:
    response=requests.get(i)
    if response.ok:
        soup=BeautifulSoup(response.text,"html.parser")
        test=soup.select('.product-card')
        # NOTE(review): the loop variable `i` shadows the URL variable of
        # the outer loop — harmless here but confusing; worth renaming.
        for (i,u) in enumerate (test):
            test2=u.find_all('span', class_="visually-hidden")
            for span in test2:
                print(span.text)
# for (i,u) in enumerate(test):
#     df=u.find('.product-card__title')
#     print(df.text)
#     for div in df:
#         print(div.text)
# for div in test2:
#     print(div.text)
# for (i,u) in enumerate (test):
#     test2=u.find('li', class_="grid__item grid__item--collection-template small--one-half medium-up--one-quarter")
#     print(test2)
17086092054 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.OrderDataDistributeInfo import OrderDataDistributeInfo
from alipay.aop.api.domain.OrderDataSyncSuggestion import OrderDataSyncSuggestion
class AlipayMerchantOrderSyncResponse(AlipayResponse):
    """Auto-generated response model for alipay.merchant.order.sync.

    List-valued fields coerce incoming raw dicts into their model
    classes via the property setters.
    """
    def __init__(self):
        super(AlipayMerchantOrderSyncResponse, self).__init__()
        self._distribute_result = None
        self._order_id = None
        self._order_status = None
        self._record_id = None
        self._sync_suggestions = None
    @property
    def distribute_result(self):
        return self._distribute_result
    @distribute_result.setter
    def distribute_result(self, value):
        # List field: each element may be a model object or a raw dict.
        if isinstance(value, list):
            self._distribute_result = list()
            for i in value:
                if isinstance(i, OrderDataDistributeInfo):
                    self._distribute_result.append(i)
                else:
                    self._distribute_result.append(OrderDataDistributeInfo.from_alipay_dict(i))
    # --- simple pass-through properties ---
    @property
    def order_id(self):
        return self._order_id
    @order_id.setter
    def order_id(self, value):
        self._order_id = value
    @property
    def order_status(self):
        return self._order_status
    @order_status.setter
    def order_status(self, value):
        self._order_status = value
    @property
    def record_id(self):
        return self._record_id
    @record_id.setter
    def record_id(self, value):
        self._record_id = value
    @property
    def sync_suggestions(self):
        return self._sync_suggestions
    @sync_suggestions.setter
    def sync_suggestions(self, value):
        # List field: each element may be a model object or a raw dict.
        if isinstance(value, list):
            self._sync_suggestions = list()
            for i in value:
                if isinstance(i, OrderDataSyncSuggestion):
                    self._sync_suggestions.append(i)
                else:
                    self._sync_suggestions.append(OrderDataSyncSuggestion.from_alipay_dict(i))
    def parse_response_content(self, response_content):
        """Populate the fields present in the decoded response payload."""
        response = super(AlipayMerchantOrderSyncResponse, self).parse_response_content(response_content)
        if 'distribute_result' in response:
            self.distribute_result = response['distribute_result']
        if 'order_id' in response:
            self.order_id = response['order_id']
        if 'order_status' in response:
            self.order_status = response['order_status']
        if 'record_id' in response:
            self.record_id = response['record_id']
        if 'sync_suggestions' in response:
            self.sync_suggestions = response['sync_suggestions']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayMerchantOrderSyncResponse.py | AlipayMerchantOrderSyncResponse.py | py | 2,724 | python | en | code | 241 | github-code | 13 |
32294625665 | # encoding=utf8
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
class MySlam:
    """Feature-based 2-D SLAM from a rotating range scanner.

    Each scan (rho, theta-in-degrees pairs) is range-filtered, split at
    discontinuities and corners, fitted to line segments, matched against
    the global line map, and used to update the robot pose estimate.
    """
    # Range jump and bearing jump (deg) that break a scan into clusters —
    # range units presumed mm, TODO confirm against the sensor.
    dmax = 200
    tmax = 5
    # Maximum usable range reading; larger values are discarded.
    rmax = 5000
    # Not referenced by the methods visible in this excerpt.
    rhothreshold = 5000
    # Minimum cluster size and max point-to-chord distance used when
    # splitting polylines at corners.
    pcntthreshold = 50
    pdisthreshold = 100
    # Apparently the scanner's angular resolution (degrees per step);
    # unused in the visible methods.
    angper = 360.0/1024
    # [rho tolerance, theta tolerance (deg)] for line matching.
    errcontrl = [50, 5]
    # NOTE(review): errcontrl and robotpos are mutable CLASS attributes;
    # robotpos is mutated in place by renew_robot(), so the pose is
    # shared by every MySlam instance — confirm this is intended.
    robotpos = [0, 0, 0]
def __init__(self):
self.orgrho = []
self.orgtheta = []
self.rho = []
self.theta = []
self.brkrho = []
self.brktheta = []
self.brkcnt = 0
self.seprho = []
self.septheta = []
self.linecnt = 0
self.glbline = []
self.glbrcdata = []
self.fittedline = []
self.fittedrcdata = []
self.matchedline = []
self.stepit = 0
    def get_feature(self):
        """Run the per-scan feature pipeline: range-filter, break the scan
        at discontinuities and corners, fit line segments, then compute
        the segments' rho/theta descriptors."""
        self.rho_filtration()
        self.break_rho()
        self.break_polyline()
        self.fit_line()
        self.get_fittedrcdata()
        return
    def line_match(self):
        """Pair each global map line with a compatible fitted scan line.

        For global line i, the fitted line j with the smallest distance
        between the stored reference points (rcdata columns 1, 2) is
        chosen, but only candidates whose rho difference (column 0) and
        wrap-corrected theta difference (column 3) lie inside
        self.errcontrl are accepted.  Pairs [i, j] go to self.matchedline.
        """
        self.matchedline = []
        glblinesize = len(self.glbline)
        fittedlinesize = len(self.fittedline)
        matchedit = 0
        for i in range(0, glblinesize):
            dismin = -1
            itmin = -1
            for j in range(0, fittedlinesize):
                # Distance between the two lines' reference points.
                distmp = math.sqrt((self.fittedrcdata[j][1]-self.glbrcdata[i][1])**2 + (self.fittedrcdata[j][2]-self.glbrcdata[i][2])**2)
                # Only candidates beating the best compatible distance so
                # far are threshold-tested; incompatible ones never update
                # dismin, so they cannot block later candidates.
                if dismin < 0 or distmp < dismin:
                    dtrho = abs(self.fittedrcdata[j][0]-self.glbrcdata[i][0])
                    # Theta difference wrapped across the 360-degree seam.
                    dttheta = abs(self.fittedrcdata[j][3]-self.glbrcdata[i][3])
                    dttheta2 = abs(abs(self.fittedrcdata[j][3]-self.glbrcdata[i][3])-360.0)
                    dttheta = min(dttheta, dttheta2)
                    #dttheta = min(abs(self.fittedrcdata[j][3]-self.glbrcdata[i][3]), abs(self.fittedrcdata[j][3]-self.glbrcdata[i][3]))
                    if dtrho < self.errcontrl[0] and dttheta < self.errcontrl[1]:
                        dismin = distmp
                        itmin = j
            if itmin > -1:
                self.matchedline.append([i, itmin])
                matchedit += 1
        # Debug hook: a single match cannot constrain the pose; `checkwhy`
        # exists only as a breakpoint anchor.
        if matchedit == 1:
            checkwhy = 1
        return
    def renew_robot(self):
        """Estimate the robot pose from the matched line pairs.

        Every pair of matches whose global lines are sufficiently
        non-parallel yields one (dx, dy, dtheta) transform estimate via
        self.cal_coortranspara (defined outside this excerpt — TODO
        confirm its contract); the final pose written into self.robotpos
        is the mean of all estimates.
        """
        matchedlinesize = len(self.matchedline)
        dxythetait = 0
        dxythetavec = []
        for i in range(0, matchedlinesize):
            # (A, B) of the implicit line equation A*x + B*y + C = 0
            # stored by fit_line().
            tA1 = self.glbline[self.matchedline[i][0]][0]
            tB1 = self.glbline[self.matchedline[i][0]][1]
            for j in range(i+1, matchedlinesize):
                tA2 = self.glbline[self.matchedline[j][0]][0]
                tB2 = self.glbline[self.matchedline[j][0]][1]
                # Normalised cross product = |sin(angle between lines)|;
                # near-parallel pairs give an ill-conditioned solve.
                touterproduct = abs(tA1*tB2-tA2*tB1)/(math.sqrt(tA1**2+tB1**2)*math.sqrt(tA2**2+tB2**2))
                if touterproduct < 0.5:
                    continue
                i1 = self.matchedline[i][0]
                i2 = self.matchedline[j][0]
                j1 = self.matchedline[i][1]
                j2 = self.matchedline[j][1]
                dx,dy,dtheta = self.cal_coortranspara(i1,i2,j1,j2)
                dxythetavec.append([dx, dy, dtheta])
                dxythetait += 1
                # Disabled shortcut: take the first estimate and return.
                if False:
                    self.robotpos[0] = dx
                    self.robotpos[1] = dy
                    self.robotpos[2] = dtheta
                    return
        # l1/l2 look like leftover debug-inspection variables.
        l1 = [tmp[0] for tmp in dxythetavec]
        l2 = [tmp[1] for tmp in dxythetavec]
        # NOTE(review): with no valid pair, np.mean of an empty array is
        # nan (plus a RuntimeWarning) — confirm callers guard this case.
        self.robotpos[0] = np.mean(np.array([tmp[0] for tmp in dxythetavec]))
        self.robotpos[1] = np.mean(np.array([tmp[1] for tmp in dxythetavec]))
        self.robotpos[2] = np.mean(np.array([tmp[2] for tmp in dxythetavec]))
        return
    def trans_feature(self):
        """Transform the fitted scan lines into the global frame and make
        them the new global map.

        Each fitted segment's endpoints are rotated by robotpos[2]
        (degrees) and translated by (robotpos[0], robotpos[1]); the
        implicit-line coefficients A, B, C are recomputed from the new
        endpoints.  Matched segments are then extended so they cover at
        least the span of their global counterparts.
        NOTE(review): the global map is REPLACED by the transformed scan
        every call rather than merged/accumulated — confirm intended.
        """
        fittedlinesize = len(self.fittedline)
        for i in range(0, fittedlinesize):
            xai = self.fittedline[i][3]
            yai = self.fittedline[i][4]
            xaii = self.fittedline[i][5]
            yaii = self.fittedline[i][6]
            # 2-D rotation by robotpos[2] degrees, then translation.
            xbi = xai*math.cos(self.robotpos[2]*math.pi/180.0) - yai*math.sin(self.robotpos[2]*math.pi/180.0)+self.robotpos[0]
            ybi = xai*math.sin(self.robotpos[2]*math.pi/180.0) + yai*math.cos(self.robotpos[2]*math.pi/180.0)+self.robotpos[1]
            xbii = xaii*math.cos(self.robotpos[2]*math.pi/180.0) - yaii*math.sin(self.robotpos[2]*math.pi/180.0)+self.robotpos[0]
            ybii = xaii*math.sin(self.robotpos[2]*math.pi/180.0) + yaii*math.cos(self.robotpos[2]*math.pi/180.0)+self.robotpos[1]
            # Implicit line A*x + B*y + C = 0 through the new endpoints.
            tA = (ybii-ybi)
            tB = -(xbii-xbi)
            tC = -xbi*(ybii-ybi)+ybi*(xbii-xbi)
            self.fittedline[i][0] = tA
            self.fittedline[i][1] = tB
            self.fittedline[i][2] = tC
            self.fittedline[i][3] = xbi
            self.fittedline[i][4] = ybi
            self.fittedline[i][5] = xbii
            self.fittedline[i][6] = ybii
        matchedlinesize = len(self.matchedline)
        if True:
            for i in range(0, matchedlinesize):
                Mi = self.matchedline[i][0]
                Mj = self.matchedline[i][1]
                # Endpoints of the matched global (i) and fitted (ii) lines.
                xsi = self.glbline[Mi][3]
                ysi = self.glbline[Mi][4]
                xei = self.glbline[Mi][5]
                yei = self.glbline[Mi][6]
                xsii = self.fittedline[Mj][3]
                ysii = self.fittedline[Mj][4]
                xeii = self.fittedline[Mj][5]
                yeii = self.fittedline[Mj][6]
                # If the global segment reaches farther from the fitted
                # start point, adopt the global end point (and vice versa).
                lsi = math.sqrt((xei-xsii)**2+(yei-ysii)**2)
                lsii = math.sqrt((xeii-xsii)**2+(yeii-ysii)**2)
                if lsi > lsii:
                    self.fittedline[Mj][5] = xei
                    self.fittedline[Mj][6] = yei
                lei = math.sqrt((xeii-xsi)**2+(yeii-ysi)**2)
                leii = math.sqrt((xeii-xsii)**2+(yeii-ysii)**2)
                if lei > leii:
                    self.fittedline[Mj][3] = xsi
                    self.fittedline[Mj][4] = ysi
        # Replace the global map with the (extended) transformed scan.
        self.glbline = []
        self.glbrcdata = []
        for i in range(0, fittedlinesize):
            self.glbline.append(self.fittedline[i])
            self.glbrcdata.append(self.fittedrcdata[i])
        self.fittedline = []
        self.fittedrcdata = []
        return
def draw_orgdata(self, ms, ax):
ax.cla()
theta = np.array(ms.orgtheta)*math.pi/180
rho = np.array(ms.orgrho)
ax.plot(theta, rho, 'b+', linewidth=1)
return
    def draw_feature(self, ms, ax):
        """Draw the fitted line segments of *ms* on polar axes *ax*.

        Each segment's two Cartesian endpoints (fittedline columns 3..6)
        are converted to (theta, rho) with a four-quadrant asin-based
        angle in [0, 2*pi), then connected with a red line.  The *self*
        instance is not used.
        """
        fittedlinesize = len(ms.fittedline)
        for i in range(0, fittedlinesize):
            tmplinepara = ms.fittedline[i]
            rhotmp = []
            thetatmp = []
            # First endpoint -> polar, resolving the asin quadrant
            # ambiguity from the signs of x and y.
            tx1 = tmplinepara[3]
            ty1 = tmplinepara[4]
            rhotmp.append(math.sqrt(tx1**2+ty1**2))
            if tx1 >= 0 and ty1 >= 0:
                thetatmp.append(math.asin(ty1/rhotmp[0]))
            elif tx1 < 0 and ty1 >= 0:
                thetatmp.append(math.pi-math.asin(ty1/rhotmp[0]))
            elif tx1 < 0 and ty1 < 0:
                thetatmp.append(math.pi-math.asin(ty1/rhotmp[0]))
            else:
                thetatmp.append(2*math.pi+math.asin(ty1/rhotmp[0]))
            # Second endpoint, same conversion.
            tx2 = tmplinepara[5]
            ty2 = tmplinepara[6]
            rhotmp.append(math.sqrt(tx2**2+ty2**2))
            if tx2 >= 0 and ty2 >= 0:
                thetatmp.append(math.asin(ty2/rhotmp[1]))
            elif tx2 < 0 and ty2 >= 0:
                thetatmp.append(math.pi-math.asin(ty2/rhotmp[1]))
            elif tx2 < 0 and ty2 < 0:
                thetatmp.append(math.pi-math.asin(ty2/rhotmp[1]))
            else:
                thetatmp.append(2*math.pi+math.asin(ty2/rhotmp[1]))
            ax.plot(thetatmp,rhotmp,'r-',linewidth=1)
        return
def draw_map(self, ms, ax):
glblinesize = len(ms.glbline)
for i in range(0, glblinesize):
ax.plot([ms.glbline[i][3],ms.glbline[i][5]],[ms.glbline[i][4],ms.glbline[i][6]],'b-',linewidth=3)
ax.plot(ms.robotpos[0],ms.robotpos[1],'r*')
return
def rho_filtration(self):
self.rho = []
self.theta = []
orgrhosize = len(self.orgrho)
rhoit=0
for i in range(0, orgrhosize):
if self.orgrho[i] < self.rmax:
self.rho.append(self.orgrho[i])
self.theta.append(self.orgtheta[i])
rhoit += 1
return
def break_rho(self):
self.brkrho = []
self.brktheta = []
lastrho = self.rho[0]
lasttheta = self.theta[0]
rhosize=len(self.rho)
self.brkrho.append(lastrho)
self.brktheta.append(lasttheta)
brkit=1
brkcnt=1
for i in range(1, rhosize):
trho = self.rho[i]
ttheta = self.theta[i]
dis = abs(trho - lastrho)
dtheta = abs(ttheta - lasttheta)
if dis>=self.dmax or dtheta>=self.tmax:
self.brkrho.append(-1)
self.brktheta.append(1000.0)
brkit += 1
brkcnt += 1
self.brkrho.append(trho)
self.brktheta.append(ttheta)
brkit += 1
lastrho = trho
lasttheta = ttheta
self.brkrho.append(-1)
self.brktheta.append(1000.0)
brkit += 1
return
    def break_polyline(self):
        """Split each break-delimited cluster at its corner points.

        Clusters in self.brkrho/self.brktheta (delimited by negative-rho
        sentinels) with more than self.pcntthreshold points are searched
        for corners via find_corners(); the points are copied into
        self.seprho/self.septheta with an extra (-1, 1000.0) sentinel
        inserted after every corner, so each sentinel-delimited run is a
        single straight-ish segment.  Smaller clusters are dropped.
        """
        self.seprho = []
        self.septheta = []
        pointcnt = 0
        linecnt = 0
        # X/Y: Cartesian cluster points; rhocopy/thetacopy: polar originals.
        X = []
        Y = []
        rhocopy = []
        thetacopy = []
        brkrhosize = len(self.brkrho)
        sepit=0
        for i in range(0, brkrhosize):
            trho = self.brkrho[i]
            ttheta = self.brktheta[i]
            if trho < 0:
                # Cluster boundary reached: emit it if large enough.
                if pointcnt > self.pcntthreshold:
                    cornercnt = 0
                    cornerindex = []
                    self.find_corners(cornerindex, X, Y, 0, pointcnt, self.pdisthreshold)
                    #sorted(cornerindex)
                    cornercnt = len(cornerindex)
                    # find_corners appends in discovery order; restore
                    # index order before walking the cluster.
                    if cornercnt > 1:
                        cornerindex.sort()
                    if cornercnt == 0:
                        # No corner: copy the cluster as one segment.
                        linecnt += 1
                        for j in range(0, pointcnt):
                            self.seprho.append(rhocopy[j])
                            self.septheta.append(thetacopy[j])
                            sepit += 1
                        self.seprho.append(-1)
                        self.septheta.append(1000.0)
                        sepit += 1
                    else:
                        # Insert a sentinel after every corner point.
                        tmpindex = 0
                        for j in range(0, pointcnt):
                            self.seprho.append(rhocopy[j])
                            self.septheta.append(thetacopy[j])
                            sepit += 1
                            if j == cornerindex[tmpindex]:
                                self.seprho.append(-1)
                                self.septheta.append(1000.0)
                                sepit += 1
                                linecnt += 1
                                if tmpindex < cornercnt-1:
                                    tmpindex += 1
                        self.seprho.append(-1)
                        self.septheta.append(1000.0)
                        sepit += 1
                        linecnt += 1
                # Reset the accumulators for the next cluster.
                X = []
                Y = []
                pointcnt = 0
                rhocopy = []
                thetacopy = []
            else:
                # Accumulate the point in both Cartesian and polar form.
                X.append(trho*math.cos(ttheta*math.pi/180.0))
                Y.append(trho*math.sin(ttheta*math.pi/180.0))
                pointcnt += 1
                rhocopy.append(trho)
                thetacopy.append(ttheta)
    def fit_line(self):
        """Fit a straight line to every sub-segment of the separated scan.

        Reads ``self.seprho``/``self.septheta`` (sentinel-delimited, see
        break_polyline), converts each segment to Cartesian points and
        stores one 7-vector per accepted segment in ``self.fittedline``:
        [A, B, C, x1, y1, x2, y2] where A*x + B*y + C = 0 and (x1, y1),
        (x2, y2) are the segment endpoints.  Segments with fewer than 20
        points are discarded.
        """
        self.fittedline = []
        X=[]
        Y=[]
        pointcnt = 0
        seprhosize = len(self.seprho)
        fittedit = 0
        for i in range(0, seprhosize):
            trho = self.seprho[i]
            ttheta = self.septheta[i]
            if trho < 0:
                # Sentinel: fit the segment collected so far.
                if pointcnt < 20:
                    # Too few points to trust a fit; drop the segment.
                    pointcnt=0
                    X = []
                    Y = []
                    continue
                tmplinepara = [0]*7
                if max(X)-min(X) < 100:
                    # Near-vertical span: use the two-point (chord) form to
                    # avoid an ill-conditioned least-squares slope.
                    tmplinepara[0] = (Y[pointcnt-1]-Y[0])
                    tmplinepara[1] = -(X[pointcnt-1]-X[0])
                    tmplinepara[2] = -X[0]*(Y[pointcnt-1]-Y[0]) + Y[0]*(X[pointcnt-1]-X[0]) #X+C=0
                    tmplinepara[3] = X[0]
                    tmplinepara[4] = Y[0]
                    tmplinepara[5] = X[pointcnt-1]
                    tmplinepara[6] = Y[pointcnt-1]
                else:
                    # Otherwise: degree-1 least-squares fit, y = k*x + b.
                    npX = np.array(X)
                    npY = np.array(Y)
                    p = np.polyfit(npX, npY, 1)
                    tmplinepara[0] = p[0]
                    tmplinepara[1] = -1
                    tmplinepara[2] = p[1] #kX-Y+b=0
                    tmplinepara[3] = X[0]
                    # Endpoints are projected onto the fitted line.
                    tmplinepara[4] = tmplinepara[0]*X[0]+tmplinepara[2]
                    tmplinepara[5] = X[pointcnt-1]
                    tmplinepara[6] = tmplinepara[0]*X[pointcnt-1]+tmplinepara[2]
                self.fittedline.append(tmplinepara)
                fittedit += 1
                pointcnt = 0
                X = []
                Y = []
            else:
                # Polar -> Cartesian (theta is in degrees).
                X.append(trho*math.cos(ttheta*math.pi/180.0))
                Y.append(trho*math.sin(ttheta*math.pi/180.0))
                pointcnt += 1
    def find_corners(self, cornerindex, X, Y, pointsrt, pointcnt, eps):
        """Recursively collect the corner indices of polyline (X, Y).

        Douglas-Peucker style split: find the point farthest from the
        chord joining the endpoints (via poly_contourfit); if it deviates
        by more than ``eps``, record its index (offset by ``pointsrt`` so
        it is absolute within the original segment) and recurse on both
        halves.  Indices are appended to ``cornerindex`` in recursion
        order, i.e. unsorted.
        """
        maxdisind = self.poly_contourfit(X, Y, pointcnt, eps)
        if maxdisind == 0:
            # No point deviates enough: this stretch is already straight.
            return
        else:
            cornerindex.append(pointsrt + maxdisind)
            # NOTE(review): the left half excludes the corner point itself
            # (X[0:maxdisind]); confirm that boundary choice is intended.
            self.find_corners(cornerindex, X[0:maxdisind], Y[0:maxdisind], pointsrt, maxdisind, eps)
            self.find_corners(cornerindex, X[maxdisind:pointcnt], Y[maxdisind:pointcnt], pointsrt+maxdisind, pointcnt-maxdisind, eps)
def poly_contourfit(self, x, y, n, eps):
if n == 1:
return 0
dis = math.sqrt((x[0]-x[n-1])**2+(y[0]-y[n-1])**2)
costheta = (x[n-1]-x[0])/dis
sintheta = -(y[n-1]-y[0])/dis
maxdis = 0
maxdisind = -1
for i in range(0, n):
dbdis = abs((y[i]-y[0])*costheta+(x[i]-x[0])*sintheta)
if dbdis > maxdis:
maxdis = dbdis
maxdisind = i
if maxdis > eps:
return maxdisind
return 0
def get_fittedrcdata(self):
self.fittedrcdata = []
fittedlinesize = len(self.fittedline)
for i in range(0, fittedlinesize):
tA = self.fittedline[i][0]
tB = self.fittedline[i][1]
tC = self.fittedline[i][2]
trcdata = [0]*4
trcdata[0] = abs(tC/math.sqrt(tA**2+tB**2))
tx0 = -(tA*tC)/(tA**2+tB**2)
ty0 = -(tB*tC)/(tA**2+tB**2)
trcdata[1] = tx0
trcdata[2] = ty0
if tx0 >= 0 and ty0 >= 0:
trcdata[3] = math.asin(ty0/math.sqrt(tx0**2+ty0**2))/math.pi*180.0
elif tx0 <0 and ty0 >= 0:
trcdata[3] = 180.0-math.asin(ty0/math.sqrt(tx0**2+ty0**2))/math.pi*180.0
elif tx0 < 0 and ty0 <= 0:
trcdata[3] = 180.0-math.asin(ty0/math.sqrt(tx0**2+ty0**2))/math.pi*180.0
else:
trcdata[3] = 360.0+math.asin(ty0/math.sqrt(tx0**2+ty0**2))/math.pi*180.0
self.fittedrcdata.append(trcdata)
def cal_coortranspara(self, i1, i2, j1, j2):
dtheta1 = self.glbrcdata[i1][3]-self.fittedrcdata[j1][3]
if dtheta1 > self.errcontrl[1]:
dtheta1 = dtheta1 - 360.0
elif dtheta1 < -self.errcontrl[1]:
dtheta1 = dtheta1 + 360.0
dtheta2 = self.glbrcdata[i2][3] - self.fittedrcdata[j2][3]
if dtheta2 > self.errcontrl[1]:
dtheta2 = dtheta2 - 360.0
elif dtheta2 < -self.errcontrl[1]:
dtheta2 = dtheta2 + 360.0
dtheta = self.robotpos[2] + (dtheta1 + dtheta2)/2
tA = [0]*2
tB = [0]*2
tC = [0]*2
tA[0] = self.glbline[i1][0]
tB[0] = self.glbline[i1][1]
tC[0] = self.glbline[i1][2]
tA[1] = self.glbline[i2][0]
tB[1] = self.glbline[i2][1]
tC[1] = self.glbline[i2][2]
Xw = (tC[1]*tB[0]-tC[0]*tB[1])/(tA[0]*tB[1]-tA[1]*tB[0])
Yw = -(tC[1]*tA[0]-tC[0]*tA[1])/(tA[0]*tB[1]-tA[1]*tB[0])
tA[0] = self.fittedline[j1][0]
tB[0] = self.fittedline[j1][1]
tC[0] = self.fittedline[j1][2]
tA[1] = self.fittedline[j2][0]
tB[1] = self.fittedline[j2][1]
tC[1] = self.fittedline[j2][2]
Xr = (tC[1]*tB[0]-tC[0]*tB[1])/(tA[0]*tB[1]-tA[1]*tB[0])
Yr = -(tC[1]*tA[0]-tC[0]*tA[1])/(tA[0]*tB[1]-tA[1]*tB[0])
dx = Xw - math.cos(dtheta*math.pi/180.0)*Xr + math.sin(dtheta*math.pi/180)*Yr
dy = Yw - math.sin(dtheta*math.pi/180.0)*Xr - math.cos(dtheta*math.pi/180)*Yr
return dx, dy, dtheta | rainbell/PySLAM | myslam.py | myslam.py | py | 17,460 | python | en | code | 0 | github-code | 13 |
37785324446 | import math
import random
from numpy import array
import numpy as np
import matplotlib.pyplot as plot
from scipy.interpolate import interp1d
x = array([0, 6, 0, -17, -31, -28, 0, 39, 63])
y = array([0, 6, 16, 17, 0, -28, -47, -39, 0])
time = np.arange(0,10,0.1)
plot.title('Espiral')
plot.xlabel('X')
plot.ylabel('Y')
plot.grid(True, which='both')
plot.axhline(y=0, color='k')
f = interp1d(x, y)
f2 = interp1d(x, y, kind="cubic")
plot.plot(x, f2(x))
minimo = min(x)
maximo = max(x)
xnew = np.linspace(minimo, maximo, num=400, endpoint=True)
plot.plot(x, y, 'o', xnew, f2(xnew), '--')
plot.scatter(x,y)
plot.show()
| pdelfino/numerical-analysis | lista-4/rascunho-5.py | rascunho-5.py | py | 633 | python | en | code | 0 | github-code | 13 |
16129579163 | #!/usr/bin/python
"""
Purpose: creating DOCX files
pip install python-docx
"""
from docx import Document
document = Document()
# Adding a paragraph
paragraph = document.add_paragraph("Lorem ipsum dolor sit amet.")
# It’s also possible to use one paragraph as a “cursor” and insert a new paragraph directly above it:
prior_paragraph = paragraph.insert_paragraph_before("Lorem ipsum")
# Adding a heading
document.add_heading("The REAL meaning of the universe")
# sub-heading from level 1 to 9
document.add_heading("The role of dolphins", level=2)
# Adding a page break
document.add_page_break()
# Adding a table
table = document.add_table(rows=2, cols=2)
cell = table.cell(0, 1)
cell.text = "parrot, possibly dead"
row = table.rows[1]
row.cells[0].text = "Foo bar to you."
row.cells[1].text = "And a hearty foo bar to you too sir!"
for row in table.rows:
for cell in row.cells:
print(cell.text)
document.save("result.docx")
# Ref: https://python-docx.readthedocs.io/en/latest/user/quickstart.html
"""
Packages
---------
word Documentation - python-docx
Powerpoint Presentation - python-pptx
Excel/Spreadsheet - openpyxl
PDF - Reportlab, python-pdfkit
"""
| udhayprakash/PythonMaterial | python3/11_File_Operations/02_structured_files/09_docx/docx_files_ex.py | docx_files_ex.py | py | 1,214 | python | en | code | 7 | github-code | 13 |
34739805969 | # coding: utf-8
from my_linear_algebra import *
from test_statistics import *
from test_gradient_descent import *
from my_multiple_regression import *
from test_adjusted_data import *
from my_cluster import *
import math
import random, re
from collections import defaultdict
# Toy social network: ten users, each a dict with an id and a name.
users = [
    { "id": 0, "name": "Hero" },
    { "id": 1, "name": "Dunn" },
    { "id": 2, "name": "Sue" },
    { "id": 3, "name": "Chi" },
    { "id": 4, "name": "Thor" },
    { "id": 5, "name": "Clive" },
    { "id": 6, "name": "Hicks" },
    { "id": 7, "name": "Devin" },
    { "id": 8, "name": "Kate" },
    { "id": 9, "name": "Klein" }
]
# And the friendship relations between users (pairs of user ids):
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# Give each user's dict a list of its friend users:
for user in users:
    user["friends"] = []
for i, j in friendships:
    # This works because users[i] is the user whose id is i
    users[i]["friends"].append(users[j]) # add j as a friend of i
    users[j]["friends"].append(users[i]) # add i as a friend of j
# As a first step we need the shortest paths between every pair of users.
# We use a breadth-first search — less efficient, but much easier to follow.
# All the steps fit into one (large) function, shown below:
from collections import deque
def shortest_paths_from(from_user):
    """BFS from *from_user*: return {user_id: [all shortest paths to it]}.

    Each path is a list of user ids excluding the start user, so the path
    to *from_user* itself is the single empty path.
    """
    # A dictionary from user_id to ALL shortest paths to that user.
    shortest_paths_to = { from_user["id"] : [[]] }
    # A queue of (previous user, next user) pairs still to check,
    # seeded with every (from_user, friend_of_from_user) pair.
    frontier = deque((from_user, friend)
                     for friend in from_user["friends"])
    # Keep going until the queue is empty.
    while frontier:
        prev_user, user = frontier.popleft() # take the pair that is
        user_id = user["id"]                 # first in the queue
        # Because of how items enter the queue, we necessarily already
        # know some shortest paths to prev_user.
        paths_to_prev_user = shortest_paths_to[prev_user["id"]]
        new_paths_to_user = [path + [user_id] for path in paths_to_prev_user]
        # It is possible we already know a shortest path to this user.
        old_paths_to_user = shortest_paths_to.get(user_id, [])
        # How long are the shortest paths to here that we have seen so far?
        if old_paths_to_user:
            min_path_length = len(old_paths_to_user[0])
        else:
            min_path_length = float('inf')
        # Keep only the newly found paths that are not too long.
        new_paths_to_user = [path
                             for path in new_paths_to_user
                             if len(path) <= min_path_length
                             and path not in old_paths_to_user]
        shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
        # Add never-before-seen neighbours to the frontier.
        frontier.extend((user, friend)
                        for friend in user["friends"]
                        if friend["id"] not in shortest_paths_to)
    return shortest_paths_to
# Now store each user's shortest-path dict on the user itself:
for user in users:
    user["shortest_paths"] = shortest_paths_from(user)
# With that we can finally compute betweenness centrality: for every
# source/target pair, each shortest path contributes 1/num_paths to every
# intermediate node it passes through.
for user in users:
    user["betweenness_centrality"] = 0.0
for source in users:
    source_id = source["id"]
    for target_id, paths in source["shortest_paths"].items():
        if source_id < target_id:   # don't double-count pairs
            num_paths = len(paths)  # how many shortest paths?
            contrib = 1 / num_paths # each contributes 1/n
            # NOTE: `id` shadows the builtin within this loop.
            for path in paths:
                for id in path:
                    if id not in [source_id, target_id]:
                        users[id]["betweenness_centrality"] += contrib
# Every shortest path is already computed, so farness is a plain sum.
def farness(user):
    """Return the sum of shortest-path lengths from *user* to every node."""
    total = 0
    for paths in user["shortest_paths"].values():
        total += len(paths[0])
    return total
# With farness in hand, closeness centrality is cheap: its reciprocal.
for user in users:
    user["closeness_centrality"] = 1 / farness(user)
# Matrix multiplication, entry by entry: one dot product per cell.
def matrix_product_entry(A, B, i, j):
    """Return entry (i, j) of the matrix product A * B."""
    row_i = get_row(A, i)
    col_j = get_column(B, j)
    return dot(row_i, col_j)
# Full matrix multiplication built on matrix_product_entry.
def matrix_multiply(A, B):
    """Return the product A * B, raising if the inner dimensions differ."""
    rows_a, cols_a = shape(A)
    rows_b, cols_b = shape(B)
    if cols_a != rows_b:
        raise ArithmeticError("incompatible shapes!")
    entry = partial(matrix_product_entry, A, B)
    return make_matrix(rows_a, cols_b, entry)
# Helpers to convert between list-vectors and n x 1 matrices.
def vector_as_matrix(v):
    """Return the list-vector *v* as an n x 1 column matrix."""
    column = []
    for component in v:
        column.append([component])
    return column
def vector_from_matrix(v_as_matrix):
    """Return the n x 1 matrix *v_as_matrix* as a flat list of values."""
    values = []
    for row in v_as_matrix:
        values.append(row[0])
    return values
# Apply matrix A to a list-vector v via the matrix-matrix product.
def matrix_operate(A, v):
    """Return A * v, with *v* given and returned as a plain list."""
    return vector_from_matrix(matrix_multiply(A, vector_as_matrix(v)))
# One workable way to find an eigenvector of matrix A is to take a random
# vector v, push it through matrix_operate, rescale the result to unit
# length, and repeat until the process converges (power iteration):
def find_eigenvector(A, tolerance=0.00001):
    """Power iteration: return (unit eigenvector, eigenvalue) of A.

    Starts from a random guess, so the result is nondeterministic and the
    loop runs until successive guesses differ by less than *tolerance*.
    """
    guess = [random.random() for __ in A]
    while True:
        result = matrix_operate(A, guess)
        length = magnitude(result)
        next_guess = scalar_multiply(1/length, result)
        if distance(guess, next_guess) < tolerance:
            return next_guess, length # eigenvector, eigenvalue
        guess = next_guess
# An adjacency_matrix represents the connections in the network.
def entry_fn(i, j):
    """Return 1 if users i and j are friends (in either order), else 0."""
    return 1 if (i, j) in friendships or (j, i) in friendships else 0
n = len(users)
adjacency_matrix = make_matrix(n, n, entry_fn)
# Eigenvector centralities: the dominant eigenvector of the adjacency matrix.
eigenvector_centralities, _ = find_eigenvector(adjacency_matrix)
# Now add the endorsement list (directed source -> target pairs).
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2),
                (2, 1), (1, 3), (2, 3), (3, 4), (5, 4),
                (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
for user in users:
    user["endorses"] = []    # one list tracking outbound endorsements
    user["endorsed_by"] = [] # another tracking endorsements received
for source_id, target_id in endorsements:
    users[source_id]["endorses"].append(users[target_id])
    users[target_id]["endorsed_by"].append(users[source_id])
# Find the most endorsed data scientists (to sell the info to recruiters):
endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
                      for user in users]
# NOTE(review): sorted() returns a NEW list that is discarded here, and the
# key receives the whole (id, count) tuple so it orders by id first —
# probably meant endorsements_by_id.sort(key=lambda pair: pair[1], ...).
sorted(endorsements_by_id,
       key=lambda num_endorsements : num_endorsements,
       reverse=True)
# A vote from someone who has many votes should weigh more than a vote from
# someone with few votes. That is the essence of PageRank, which Google uses
# to rank websites. A simplified version of the idea:
# 1. The total PageRank in the network is 1 (or 100%).
# 2. Initially this PageRank is spread evenly across the nodes.
# 3. At each step, a large fraction of each node's PageRank is distributed
#    evenly among its outbound links.
# 4. At each step, the remainder of each node's PageRank is distributed
#    evenly among all nodes.
def page_rank(users, damping = 0.85, num_iters = 100):
    """Iteratively compute PageRank over the endorsement graph.

    Every node starts with an equal share of rank.  On each of
    *num_iters* rounds, a *damping* fraction of a node's rank is split
    evenly among the nodes it endorses, while the remaining fraction is
    redistributed evenly to everybody.  Returns {user_id: rank}.
    """
    node_count = len(users)
    # Start with the rank spread evenly.
    ranks = {}
    for user in users:
        ranks[user["id"]] = 1 / node_count
    # The share every node receives unconditionally each round.
    teleport_share = (1 - damping) / node_count
    for _ in range(num_iters):
        updated = {user["id"]: teleport_share for user in users}
        for user in users:
            # Distribute the damped portion across the outbound links.
            outgoing = user["endorses"]
            distributable = ranks[user["id"]] * damping
            for endorsee in outgoing:
                updated[endorsee["id"]] += distributable / len(outgoing)
        ranks = updated
    return ranks
print("page_rank : ", page_rank(users))
| lucelujiaming/dataScienceFromSCratch | test_network_analyze.py | test_network_analyze.py | py | 7,765 | python | zh | code | 0 | github-code | 13 |
3249563263 | import random
width = 100 # the width of the board
height = 100 # the height of the board
# create a board with the given width and height
# we'll use a list of list to represent the board
board = [] # start with an empty list
for i in range(height): # loop over the rows
    board.append([]) # append an empty row
    for j in range(width): # loop over the columns
        board[i].append(' ') # append an empty space to the board
# define the player position (start in the middle of the board)
player_i = height // 2
player_j = width // 2
# define player health; the game ends when it reaches 0
health = 5
# add 200 enemies in random locations
for i in range(200):
    enemy_i = random.randint(0, height - 1)
    enemy_j = random.randint(0, width - 1)
    board[enemy_i][enemy_j] = '§'
# add 100 treasures in random locations
for k in range(100):
    treasure_l = random.randint(0, height - 1)
    treasure_k = random.randint(0, width - 1)
    board[treasure_l][treasure_k] = '💩'
# loop until the user says 'done' or dies
while True:
    command = input('Use (awds) to move. To exit, type "done". What is your command? ') # get the command from the user
    if command == 'done':
        break # exit the game
    elif command in ['left', 'a']:
        # move left, wrapping to the far edge (the original teleported the
        # player from column 1 to the last column and let the index hit -1)
        player_j = (player_j - 1) % width
    elif command in ['right', 'd']:
        player_j = (player_j + 1) % width # move right with wrap-around
    elif command in ['up', 'w']:
        player_i = (player_i - 1) % height # move up with wrap-around
    elif command in ['down', 's']:
        player_i = (player_i + 1) % height # move down with wrap-around
    # check if the player is on the same space as an enemy
    if board[player_i][player_j] == '§':
        print('you\'ve encountered an enemy! type "attack" to attack this monster')
        action = input('what will you do? ')
        if action == 'attack':
            print('you\'ve slain the enemy')
            board[player_i][player_j] = ' ' # remove the enemy from the board
        else:
            print('you hestitated and were injured')
            health -= 1
        print(health)
        if health == 0:
            print('You loose')
            break
    # check if the player stepped on a treasure
    if board[player_i][player_j] == '💩':
        print('Yuck!, what will you do? will you (a)"wipe your shoe", or (b)"take it like a man"?')
        action = input('what will you do? ')
        if action == 'a':
            print('cleanliness: +2 points')
        elif action == 'b':
            print('dude...')
            health -= 1
        print(health)
        if health == 0:
            print('You loose')
            break
    # player viewport: a 20x20 window centred on the player; indices are
    # taken modulo the board size so the view wraps instead of crashing
    # near the bottom/right edges (the original raised IndexError there)
    for i in range(player_i - 10, player_i + 10):
        for j in range(player_j - 10, player_j + 10):
            if i == player_i and j == player_j:
                print('☺', end=' ')
            else:
                print(board[i % height][j % width], end='')
        print() # newline per row so the viewport renders as a grid
| PdxCodeGuild/class_sheep | Code/charlie/python/lab26.py | lab26.py | py | 2,989 | python | en | code | 1 | github-code | 13 |
70728824979 | """Day 10 puzzle solutions"""
import sys
import day10_lib
# Read the puzzle input file named on the command line.
with open(sys.argv[1], 'r') as inputFile:
    INPUT = inputFile.readlines()
print("Day10 --- Part One --- result is: ")
# getMessage returns the duration, printed as the Part Two answer below.
# NOTE(review): Part One's actual result appears to be emitted by
# day10_lib.getMessage itself — confirm.
DURATION = day10_lib.getMessage(INPUT)
print("Day10 --- Part Two --- result is: {0}".format(DURATION)) | Elgolfin/adventofcode-2018 | day10.py | day10.py | py | 284 | python | en | code | 0 | github-code | 13 |
37995617882 | import logging
import numpy as np
import asyncio
import time
import math
import cv2
from PIL import Image
from PIL import ImageDraw
from pycoral.adapters import common
from pycoral.adapters import detect
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter
from numpy.linalg import norm
from motion import Tracker
from utils.drawing import draw_objects
from utils.vision import unpack_fingerprint, unpack_scene
from utils.helpers import CALIB, arr_to_bbox, calculate_distance, calculate_focal_length
class ASSETS:
    """Filesystem locations of the detection model and its label map."""
    # Edge TPU-compiled SSDLite MobileDet model for landing-pad detection.
    MODEL = './assets/ssdlite_mobiledet_landingpad_edgetpu.tflite'
    # Text file mapping class indices to human-readable labels.
    LFILE = './assets/labels.txt'
def __setup_stream(channel):
    """Open an OpenCV video capture on *channel* (camera index or URL)."""
    # TODO: add picam bindings
    # NOTE(review): unused — do_landing opens cv2.VideoCapture(0) directly.
    return cv2.VideoCapture(channel)
def __load_interpreter():
    """Create the Edge TPU interpreter for ASSETS.MODEL and allocate tensors."""
    interpreter = make_interpreter(ASSETS.MODEL)
    interpreter.allocate_tensors()
    return interpreter
def __estimate_local_position(source_image, bbox, F):
    """Estimate the landing pad's position relative to the camera.

    Takes the camera frame, a list of tracked bounding boxes (only the
    first is used) and the calibrated focal length *F*.  Returns
    (ratio, (dx, dy, dz)) on success or (None, None) when there is no box
    or the pad fingerprint cannot be unpacked.  *ratio* is the distance of
    the pad centre from the frame centre, normalised by the pad's apparent
    size — small values mean the drone is roughly above the pad.
    """
    # Image centre, assuming a 640x480 frame — TODO confirm.
    root_point = (320, 240)
    if len(bbox) < 1:
        return (None, None)
    source_image, roi, dim0, center_point = unpack_scene(source_image, arr_to_bbox(bbox[0]))
    if roi is not None:
        fingerprint = unpack_fingerprint(roi)
        # Only accept the detection if the full 4-point fingerprint is found.
        if len(fingerprint) == 4:
            ratio = math.hypot(root_point[0] - center_point[0],
                root_point[1] - center_point[1]) / dim0
            # Forward distance from the pinhole model; lateral/vertical
            # offsets are scaled from pixel offsets by the same ratio.
            distance_y = calculate_distance(F, CALIB.REAL_WIDTH, dim0)
            distance_x = (root_point[0] - center_point[0])
            distance_x = distance_y * (distance_x / dim0)
            distance_z = (root_point[1] - center_point[1])
            distance_z = distance_y * (distance_z / dim0)
            return (ratio, (distance_x, distance_y, distance_z))
    return (None, None)
async def __prepare_landing(system, mav, x, z):
    """Reposition the drone by (x, z) metres before landing.

    Converts the local offsets to a latitude/longitude delta using the
    spherical-Earth approximation and commands a goto at 3 m altitude.
    NOTE(review): the caller passes values divided by 10 that are logged
    as centimetres — confirm x and z really arrive in metres.
    """
    r_earth = 6371000.0 # in meters
    current_pos = mav.pos
    # z shifts latitude (north/south); x shifts longitude, corrected by
    # cos(latitude) for the shrinking of longitude circles.
    new_latitude = current_pos[0]  + (z / r_earth) * (180 / math.pi);
    new_longitude = current_pos[1] + (x / r_earth) * (180 / math.pi) / math.cos(current_pos[0] * math.pi/180);
    await system.goto_location(new_latitude, new_longitude, 3, 0)
async def __do_landing(system):
    """Trigger the MAVSDK land action on *system*."""
    await system.action.land()
async def do_landing(**kwagrs):
    """Detect the landing pad in the camera stream and land on it.

    Expects keyword arguments ``mavsdk_system`` (MAVSDK system) and
    ``mav`` (object exposing the current position as ``pos``).  Runs the
    Edge TPU detector on every third camera frame, tracks the pad between
    detections, repositions the drone over the pad and lands once the
    centre-offset ratio drops below 0.16.
    """
    focal_length = calculate_focal_length(CALIB.REAL_DISTANCE,
        CALIB.REAL_WIDTH, CALIB.REFERENCE_WIDTH)
    labels = read_label_file(ASSETS.LFILE) if ASSETS.LFILE else {}
    interpreter = __load_interpreter()
    # Tracker bridges the frames between detector invocations (interval=3).
    tracker = Tracker(shape=(320, 320, 3), min_hits=0, num_classes=len(labels),
        interval=3)
    capture = cv2.VideoCapture(0)
    frameid = 0
    while capture.isOpened():
        return_value, frame = capture.read()
        if not return_value:
            break
        x = Image.fromarray(frame)
        _, scale = common.set_resized_input(interpreter, x.size,
            lambda size: x.resize(size, Image.ANTIALIAS))
        detections0, labels0, active = (None, None, None)
        # Run the detector only on every third frame; in between, the
        # tracker extrapolates from earlier detections.
        if np.mod(frameid, 3) == 0:
            interpreter.invoke()
            outputs = detect.get_objects(interpreter, 0.8, scale)
            # Keep at most the first detection as a 1x4 bbox array.
            detections0 = (
                np.array(
                    [
                        [
                            outputs[0].bbox[0],
                            outputs[0].bbox[1],
                            outputs[0].bbox[2],
                            outputs[0].bbox[3],
                        ]
                    ]
                )
                if len(outputs) > 0
                else np.array([])
            )
            labels0 = np.array(['0']).astype(np.uint8) if len(outputs) > 0 else np.array([])
            active = True
        elif np.mod(frameid, 3) != 0:
            detections0, labels0 = (np.array([]), np.array([]))
            active = False
        tracks0 = tracker.update(detections0, labels0, active)
        x = np.asarray(x)
        ratio, local_position = __estimate_local_position(x, tracks0, focal_length)
        if ratio is not None:
            logging.info("local-position-estimation: SUCCESS")
            logging.info(f"pos := <{local_position[0]}, {local_position[1]}, {local_position[2]}> [METRIC: CM]")
            # Nudge the drone toward the pad before deciding whether to land.
            await __prepare_landing(kwagrs["mavsdk_system"], kwagrs["mav"], local_position[0] / 10, local_position[2] / 10)
            if ratio < 0.16:
                logging.info("drone overlaps with landing pad --> landing")
                await __do_landing(kwagrs["mavsdk_system"])
                logging.info("drone landed")
                break
        frameid += 1
| Dronesome-Archive/companion | landing.py | landing.py | py | 4,714 | python | en | code | 0 | github-code | 13 |
4044485077 | from django.db import models
from django.conf import settings
from candidate.models import Candidate
from company.models import Poc
class Client(models.Model):
    """A client company that posts jobs, owned by a single point of contact."""
    name = models.CharField(
        verbose_name = "Name of the company",
        max_length = 100,
        help_text = "Name of the company",
        blank = False,
    )
    address = models.TextField(
        verbose_name = "Address",
        help_text = "The address of the company, as in bank and official records",
        blank = False,
    )
    # Choice codes for the company's headcount bracket.
    company_size_choices = [
        ('SM' , 'Less than 20'),
        ('MD' , '20 - 50'),
        ('LG' , '50 - 250'),
        ('XL' , '250+'),
    ]
    company_size = models.CharField(
        verbose_name = "No of Employees",
        max_length = 3,
        choices = company_size_choices,
        help_text ="No of companies employees",
        blank = False,
        default = 'MD',
    )
    about = models.TextField(
        verbose_name = "About the company",
        help_text = "Describe your company",
        blank = False,
    )
    # Optional company logo image.
    logo = models.ImageField(
        verbose_name = "Company Logo",
        upload_to = "client_logos/",
        blank = True
    )
    # Point of contact; set programmatically (not editable in forms),
    # nullable so a client can exist before a Poc is attached.
    poc = models.ForeignKey(
        Poc,
        on_delete = models.CASCADE,
        editable = False,
        null = True,
    )
class Employee(models.Model):
    """An employee account belonging to a client company, tied 1:1 to a user."""
    first_name = models.CharField(
        verbose_name = "First Name",
        max_length = 100,
        help_text = "First Name",
        blank = False,
    )
    last_name = models.CharField(
        verbose_name = "Last Name",
        max_length = 100,
        help_text = "Last Name",
        blank = True,
    )
    # NOTE(review): help_text says "candidate" but this is an employee model.
    email = models.EmailField(
        verbose_name = "Email",
        help_text = "Email id of the candidate.",
        unique = True,
    )
    designation = models.CharField(
        verbose_name = "Designation",
        max_length = 30,
        blank = False,
    )
    profile_photo = models.ImageField(
        verbose_name = "Profile Photo",
        upload_to = "profile_photos/client/",
        blank = True
    )
    # Login account; one auth user per employee, set programmatically.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete = models.CASCADE,
        editable = False
    )
    # The client company this employee belongs to.
    company = models.ForeignKey(Client, on_delete = models.CASCADE, editable = False)
class Job(models.Model):
    """A job vacancy posted by a client; candidates apply through Schedule."""
    title = models.CharField(
        verbose_name = "Job Title",
        help_text = "Title of the job",
        max_length = 40,
        blank = False,
    )
    location = models.CharField(
        help_text = "Location of the job",
        max_length = 70,
        blank = False,
    )
    region = models.CharField(
        help_text = "Region of the job",
        max_length = 70,
        blank = False,
    )
    # Allowed employment types for the vacancy.
    job_types = [
        ('Full Time', 'Full Time'),
        ('Internship', 'Internship'),
        ('Part Time', 'Part Time'),
        ('Temporary', 'Temporary'),
    ]
    # NOTE(review): default 'Full' is not one of the job_types choices
    # ('Full Time' is) — forms validating choices will reject the default.
    job_type = models.CharField(
        choices = job_types,
        max_length = 10,
        blank = False,
        help_text = "Type of the vaccancy",
        default = 'Full'
    )
    category = models.CharField(
        max_length = 30,
        blank = False,
        help_text = "Category of the job",
    )
    # Free-text, comma-style tags describing the job.
    tags = models.CharField(
        max_length = 255,
        blank = False,
        help_text = "Tags which best describes the job."
    )
    description = models.TextField(
        help_text = "Description of the job",
        blank = False,
    )
    salary = models.IntegerField(
        help_text = "Salary for the job",
        blank = False,
    )
    # Stamped automatically when the job row is first created.
    added = models.DateField(
        auto_now_add = True,
        help_text = "Date of adding this job",
    )
    # Owning client company.
    client = models.ForeignKey(Client, on_delete = models.CASCADE, editable = False)
    # Candidates who applied; the Schedule through-model carries the
    # screening/interview state for each application.
    applicants = models.ManyToManyField(
        Candidate,
        related_name = 'jobs',
        through = 'Schedule',
        through_fields = ('job', 'candidate'),
        editable = False,
    )
class Schedule(models.Model):
    """Through-model for Job.applicants: one candidate's hiring pipeline state."""
    # Screening outcome choices.
    screening_statuses = [
        ('Not Screened', 'Not Screened'),
        ('Passed', 'Passed'),
        ('Failed', 'Failed'),
    ]
    screening_status = models.CharField(
        max_length = 12,
        choices = screening_statuses,
        verbose_name = "Screening Status",
        default = "Not Screened",
    )
    # Scheduled interview time; unset until an interview is booked.
    interview_date = models.DateTimeField(
        verbose_name = "Time of the interview",
        null = True,
        blank = True
    )
    # Interview outcome choices.
    interview_statuses = [
        ('Not Done', 'Not Done'),
        ('Accepted', 'Accepted'),
        ('Rejected', 'Rejected'),
    ]
    interview_status =models.CharField(
        max_length = 8,
        choices = interview_statuses,
        verbose_name = "Interview Status",
        default = "Not Done",
    )
    # Set once the candidate actually joins.
    date_joined = models.DateField(
        verbose_name = "Date of Joining",
        null = True,
        blank = True,
    )
    # Pipeline progress indicator (integer; semantics defined by the UI).
    progress = models.IntegerField(
        default = 0,
        verbose_name = "Progress of the schedule",
    )
    candidate = models.ForeignKey(
        Candidate,
        on_delete = models.CASCADE,
        editable = False
    )
    job = models.ForeignKey(
        Job,
        on_delete = models.CASCADE,
        editable = False
    )
| innovoguetechnologies/jobified | client/models.py | models.py | py | 4,399 | python | en | code | 0 | github-code | 13 |
21629722545 | import numpy as np
import collections
import itertools
def pf(k):
i = 2
while i * i <= k:
if k % i == 0:
k /= i
yield i
else:
i += 1
if k > 1:
yield k
def product(s):
result = 1
for i in s:
result *= i
return result
def get_divisors(k):
factors = pf(k)
factors = collections.Counter(factors)
powers = [[factor**i for i in range(count + 1)] for factor, count in factors.items()]
for combs in itertools.product(*powers):
yield product(combs)
N = 128
Lambda = 24
N = int(input("How many processors?\n"))
Lambda = int(input("number of lattice points per dimension in cubic lattice?\n"))
divisors = list(get_divisors(Lambda))
divisors = [i-1 for i in divisors]
combinations = itertools.combinations_with_replacement(divisors,4)
result = [[x, (x[0]+1)*(x[1]+1)*(x[2]+1)*(x[3]+1)] for x in combinations if (x[0]+1)*(x[1]+1)*(x[2]+1)*(x[3]+1)<=N]
sorted_list = sorted(result,key = lambda x: x[1])
for i in range(len(sorted_list)):
print(sorted_list[i])
| adrian2208/msc_project | Simulation-Tools/partitioning_check.py | partitioning_check.py | py | 1,070 | python | en | code | 0 | github-code | 13 |
28109225160 | from util.request_util import RequestUtil
from spider.extractor.abc_extractor import AbsExtractor
from util.ip_proxy import IpProxy
class E_Ihuan(AbsExtractor):
    """Extractor for the ihuan (xiaohuan) free-proxy listing site."""
    # Listing page for proxies located in China (base64-encoded path segment).
    _SOURCE_DOMAIN = 'https://ip.ihuan.me/address/5Lit5Zu9.html'
    _SOURCE_NAME = '小幻代理'
    def __init__(self):
        super().__init__()
    def extractor(self):
        """Scrape https://ip.ihuan.me/address/5Lit5Zu9.html .

        The site is captcha-protected, so this is unfinished: currently it
        only walks the pagination links and yields empty records.
        """
        # Hard-coded session cookie and UA to get past the site's checks.
        headers = {
            'Cookie': 'cf_chl_2=39b1b9f301fbbb4; cf_clearance=lbc2UL4D3N1sCI3dGb4a7AU9fMHUXc0fZSP64MV87d8-1699410415-0-1-953adbbb.f0bffe15.c23b845b-250.0.0; Hm_lvt_8ccd0ef22095c2eebfe4cd6187dea829=1699410428; statistics=6bf9f47fa7833780f7fb47814ffc5090; Hm_lpvt_8ccd0ef22095c2eebfe4cd6187dea829=1699410813',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
        }
        try:
            res = RequestUtil().tree("https://ip.ihuan.me/address/5Lit5Zu9.html", headers=headers, timeout=10)
            # NOTE(review): the XPath predicate lacks quotes —
            # [@class=pagination] should likely be [@class="pagination"].
            page_list = res.xpath('//ul[@class=pagination]//li//a/@href')
            for page in page_list:
                # NOTE(review): `page` is unused; every iteration re-fetches
                # the same first page and `page_res` is never parsed.
                page_res = RequestUtil().tree("https://ip.ihuan.me/address/5Lit5Zu9.html", headers=headers, timeout=10)
                yield {}
        except Exception as e:
            print(e)
| bigfat-will/ip_pool_free | spider/extractor/e_ihuan.py | e_ihuan.py | py | 1,404 | python | en | code | 0 | github-code | 13 |
def write_ply_point_normal(name, vertices, colors):
    """Write a colored point cloud to an ASCII PLY file.

    *vertices* is an (N, 3) array of xyz positions; *colors* an (N, 3)
    array of floats in [0, 1].  Channels are swapped on output: despite
    the name, no normals are written and colors[:, 2] becomes red while
    colors[:, 0] becomes blue (i.e. colors is treated as BGR).

    Fix: the file is now opened with a context manager so the handle is
    flushed and closed even on error (the original leaked it), and the
    dataset metadata fused onto the def line is removed.
    """
    with open(name, 'w') as fout:
        fout.write("ply\n")
        fout.write("format ascii 1.0\n")
        fout.write("element vertex "+str(len(vertices))+"\n")
        fout.write("property float x\n")
        fout.write("property float y\n")
        fout.write("property float z\n")
        fout.write("property uchar red\n")
        fout.write("property uchar green\n")
        fout.write("property uchar blue\n")
        fout.write("end_header\n")
        for ii in range(len(vertices)):
            # Color floats are scaled to 0-255 and clamped from above.
            fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(min(255,int(255*colors[ii,2])))+" "+str(min(255,int(255*colors[ii,1])))+" "+str(min(255,int(255*colors[ii,0])))+"\n")
import numpy as np
import random,torch
# Load one preprocessed S3DIS scene tuple; data[0] holds the xyz coords.
data=torch.load('gt_train/Area_1_WC_1_inst_nostuff.pth')
#data=torch.load('../data/train_cuda_s3dis/Area_6_office_1_inst_nostuff.pth')
vertices_coords=data[0]
# First visualization: colour by the labels stored at index 4
# (presumably supervoxel ids — confirm against the data pipeline).
semantic_pred=data[4]
colors2=np.zeros((semantic_pred.shape[0],3))
for i in np.unique(semantic_pred):
    # One random (not-too-dark) colour per label value.
    r0=random.uniform(0.2, 1)
    r1=random.uniform(0.2, 1)
    r2=random.uniform(0.2, 1)
    idxs=np.where(semantic_pred==i)[0]
    colors2[idxs,0]=r0
    colors2[idxs,1]=r1
    colors2[idxs,2]=r2
write_ply_point_normal('vis_sv.ply', vertices_coords, colors2)
#data=torch.load('../data/train_cuda_s3dis/Area_6_office_1_inst_nostuff.pth')
vertices_coords=data[0]
# Second visualization: colour by the semantic labels at index 2,
# looping over the 13 S3DIS-style class ids.
semantic_pred=data[2]
colors2=np.zeros((semantic_pred.shape[0],3))
for i in range(13):
    r0=random.uniform(0.2, 1)
    r1=random.uniform(0.2, 1)
    r2=random.uniform(0.2, 1)
    idxs=np.where(semantic_pred==i)[0]
    colors2[idxs,0]=r0
    colors2[idxs,1]=r1
    colors2[idxs,2]=r2
write_ply_point_normal('vis_seg.ply', vertices_coords, colors2)
| liuzhengzhe/One-Thing-One-Click | s3dis/data/vis.py | vis.py | py | 1,780 | python | en | code | 48 | github-code | 13 |
34278503251 | from django_restapi.resource import Resource
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from utilities.FormatExceptionInfo import formatExceptionInfo
from users.utilities import get_requestor
import simplejson as json
import reversion
from reversion import revision
class NellResource(Resource):
    """REST resource base mapping POST-tunnelled verbs onto model CRUD.

    Wraps a Django model class (``dbobject``) plus a JSON adapter class;
    every mutating operation runs inside a django-reversion revision whose
    comment records who changed what.
    """
    def __init__(self, dbobject, adapter, *args, **kws):
        self.dbobject = dbobject
        # The adapter is instantiated empty and re-load()ed per object.
        self.adapter = adapter(None)
        super(NellResource, self).__init__(*args, **kws)
    def create(self, request, *args, **kws):
        """Dispatch on the ``_method`` POST field (put/delete tunnelling)."""
        method = request.POST.get("_method", None)
        if method == "put":
            return self.update(request, *args, **kws)
        elif method == "delete":
            return self.delete(request, *args, **kws)
        else:
            return self.create_worker(request, *args, **kws)
    def get_rev_comment(self, request, obj, method):
        """Build the reversion comment: requesting user + model + action."""
        where = "%s %s" % (obj.__class__.__name__, method)
        who   = get_requestor(request)
        return "WHO: %s, WHERE: %s" % (who, where)
    @revision.create_on_success
    def create_worker(self, request, *args, **kws):
        """Create a new model instance from POST data; return it as JSON."""
        o = self.dbobject()
        self.adapter.load(o)
        self.adapter.init_from_post(request.POST)
        # Query the database to ensure data is in the correct data type
        o = self.dbobject.objects.get(id = o.id)
        self.adapter.load(o)
        revision.comment = self.get_rev_comment(request, o, "create_worker")
        return HttpResponse(json.dumps(self.adapter.jsondict())
                          , mimetype = "text/plain")
    @revision.create_on_success
    def update(self, request, *args, **kws):
        """Update object args[0] from POST data; report success or error."""
        id = int(args[0])
        o  = get_object_or_404(self.dbobject, pk = id)
        self.adapter.load(o)
        error = None
        try:
            self.adapter.update_from_post(request.POST)
        # NOTE(review): bare except hides unexpected failures (including
        # KeyboardInterrupt); consider narrowing to Exception.
        except:
            e, m, t = formatExceptionInfo()
            error = ": ".join((e, m))
        revision.comment = self.get_rev_comment(request, o, "update")
        # NOTE: this originally returned "", but if we want JSON callbacks
        # to work from GWT, need A response.  This change seems benign
        response = {"success" : "ok"}
        if error:
            response.update({"error" : error})
        return HttpResponse(json.dumps(response)
                          , mimetype = "text/plain")
    @revision.create_on_success
    def delete(self, request, *args):
        """Delete object args[0]; report an error if FK children block it."""
        id = int(args[0])
        o  = self.dbobject.objects.get(id = id)
        revision.comment = self.get_rev_comment(request, o, "delete")
        try:
            o.delete()
        # NOTE(review): bare except; the intent is to catch FK protection.
        except:
            return HttpResponse(json.dumps({"error": "You cannot delete this object since it has children that would be orphened. :("}))
        else:
            return HttpResponse(json.dumps({"success": "ok"}))
| nrao/nell | scheduler/resources/NellResource.py | NellResource.py | py | 2,965 | python | en | code | 0 | github-code | 13 |
24564240749 | import airflow
import configparser
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.dummy_operator import DummyOperator
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.contrib.operators.emr_terminate_job_flow_operator import EmrTerminateJobFlowOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from operators import CreateS3BucketOperator, UploadFilesToS3Operator
# S3 buckets used throughout the pipeline.
# NOTE(review): S3 bucket names may not contain underscores, so creating
# 'covid19_raw_datalake' / 'code_spark_etl' will fail against real S3 unless
# these buckets already exist from a legacy setup — confirm, and switch to
# hyphenated names if they are created by this DAG.
raw_data_bucket = 'covid19_raw_datalake'   # source (raw) input data
code_bucket = 'code_spark_etl'             # Spark ETL scripts shipped to EMR
covid_bucket_name = 'accidents-datalake'   # processed datalake output

# Defaults applied to every task in the DAG.
default_args = {
    'owner': 'kehinde',
    'start_date': datetime(2019, 10, 25),
    'depends_on_past': False,
    'retries': 1,
    # Airflow documents retry_delay as a timedelta; the original bare int 300
    # relied on Airflow's implicit "assume seconds" coercion.
    'retry_delay': timedelta(seconds=300),
    'email_on_retry': False
}
def _command_runner_step(name, on_failure, args):
    """Build one EMR step dict that runs *args* through command-runner.jar."""
    return {
        'Name': name,
        'ActionOnFailure': on_failure,
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': args
        }
    }

# EMR steps, executed in list order.  The EmrStepSensor tasks below pull the
# returned step ids by position ([2] covid ETL, [3] county ETL, [4] quality
# check), so do not reorder this list without updating those indices.
etl_steps = [
    _command_runner_step('Setup Debugging', 'TERMINATE_CLUSTER',
                         ['state-pusher-script']),
    _command_runner_step('Setup - copy files', 'CANCEL_AND_WAIT',
                         ['aws', 's3', 'cp', 's3://' + code_bucket,
                          '/home/hadoop/', '--recursive']),
    _command_runner_step('covidus - ETL', 'CONTINUE',
                         ['spark-submit', '/home/hadoop/util/covidus.py',
                          's3a://' + raw_data_bucket,
                          's3a://' + covid_bucket_name]),
    _command_runner_step('county - ETL', 'CONTINUE',
                         ['spark-submit', '/home/hadoop/util/county.py',
                          's3a://' + raw_data_bucket,
                          's3a://' + covid_bucket_name]),
    _command_runner_step('Check data quality', 'CONTINUE',
                         ['spark-submit', '/home/hadoop/util/check_data_quality.py',
                          's3a://' + covid_bucket_name]),
]
# Overrides applied on top of the 'emr_default' connection's job-flow config
# when EmrCreateJobFlowOperator launches the cluster.
# NOTE(review): only the cluster name is set here, so release label, instance
# types/counts, etc. must come from the emr_default connection — confirm that
# connection is fully configured, or the launch will use bare defaults.
JOB_FLOW_OVERRIDES = {
    'Name': 'Covid-Datalake-ETL'
}
# Monthly pipeline: create the S3 buckets, spin up an EMR cluster, run the
# Spark ETL + quality-check steps, then tear the cluster down.
# FIX: dag_id corrected from the original 'cpvid_datalake_etl_dag' typo;
# scheduler history recorded under the old id will not carry over.
dag = DAG('covid_datalake_etl_dag',
          default_args=default_args,
          description='Extract transform and load data to S3 datalake.',
          schedule_interval='@monthly',
          catchup=False
          )
start_operator = DummyOperator(task_id='start', dag=dag)

# Bucket holding the Spark ETL scripts shipped to the EMR cluster.
create_code_bucket = CreateS3BucketOperator(
    task_id='Create_code_bucket',
    bucket_name=code_bucket,
    dag=dag
)

# Upload everything under the local script directory into the code bucket.
upload_etl_code = UploadFilesToS3Operator(
    task_id='Upload_etl_code',
    bucket_name=code_bucket,
    path='/opt/bitnami/script/',
    dag=dag
)

# Destination bucket for the processed datalake tables.
create_datalake_bucket = CreateS3BucketOperator(
    task_id='Create_datalake_bucket',
    bucket_name=covid_bucket_name,
    dag=dag
)

# Launch the EMR cluster; pushes the job-flow id to XCom for the tasks below.
create_cluster = EmrCreateJobFlowOperator(
    task_id='Create_EMR_cluster',
    job_flow_overrides=JOB_FLOW_OVERRIDES,
    aws_conn_id='aws_credentials',
    emr_conn_id='emr_default',
    dag=dag
)

# Submit all etl_steps to the running cluster; the returned list of step ids
# is pushed to XCom and consumed by the sensors below, indexed by position
# in etl_steps.
add_jobflow_steps = EmrAddStepsOperator(
    task_id='Add_jobflow_steps',
    job_flow_id="{{ task_instance.xcom_pull(task_ids='Create_EMR_cluster', key='return_value') }}",
    aws_conn_id='aws_credentials',
    steps=etl_steps,
    dag=dag
)

# FIX: the two sensor task_ids below previously read
# 'Watch_city_processing_step' / 'Watch_airport_processing_step' — copy-paste
# leftovers from another pipeline; renamed to match the covid/county steps
# they actually watch (steps [2] and [3] of etl_steps).
check_covid_table_processing = EmrStepSensor(
    task_id='Watch_covid_processing_step',
    job_flow_id="{{ task_instance.xcom_pull('Create_EMR_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='Add_jobflow_steps', key='return_value')[2] }}",
    aws_conn_id='aws_credentials',
    dag=dag
)

check_county_table_processing = EmrStepSensor(
    task_id='Watch_county_processing_step',
    job_flow_id="{{ task_instance.xcom_pull('Create_EMR_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='Add_jobflow_steps', key='return_value')[3] }}",
    aws_conn_id='aws_credentials',
    dag=dag
)

# Waits on the data-quality step (etl_steps index [4]).
check_data_quality_check = EmrStepSensor(
    task_id='Watch_data_quality_check_step',
    job_flow_id="{{ task_instance.xcom_pull('Create_EMR_cluster', key='return_value') }}",
    step_id="{{ task_instance.xcom_pull(task_ids='Add_jobflow_steps', key='return_value')[4] }}",
    aws_conn_id='aws_credentials',
    dag=dag
)

# Tear the cluster down once the quality check has completed.
delete_cluster = EmrTerminateJobFlowOperator(
    task_id='Delete_EMR_cluster',
    job_flow_id="{{ task_instance.xcom_pull(task_ids='Create_EMR_cluster', key='return_value') }}",
    aws_conn_id='aws_credentials',
    dag=dag
)

end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
# Task graph: two setup branches (datalake bucket; code bucket + upload) fan
# out from start and converge on cluster creation, then the two ETL sensors
# run in parallel and converge on the quality check.
start_operator.set_downstream(create_datalake_bucket)
create_datalake_bucket.set_downstream(create_cluster)
start_operator.set_downstream(create_code_bucket)
create_code_bucket.set_downstream(upload_etl_code)
upload_etl_code.set_downstream(create_cluster)
create_cluster.set_downstream(add_jobflow_steps)
add_jobflow_steps.set_downstream(check_covid_table_processing)
check_covid_table_processing.set_downstream(check_data_quality_check)
add_jobflow_steps.set_downstream(check_county_table_processing)
check_county_table_processing.set_downstream(check_data_quality_check)
check_data_quality_check >> delete_cluster >> end_operator | kehindetomiwa/covid_data_enginering | src/airflow/dag/etl.py | etl.py | py | 5,152 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.