seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
69975743699
""" 1. casing does not matter - done via .lower() 2. do not count punctuation symbols, digits, whitespaces.. - done via regex 3. if you have two or more letters with the same frequency, then return the letter which comes first in the alphabet """ # done in 3 hours import re from collections import Counter clean = re.compile('[a-z]') def checkio(text: str) -> str: count = dict(Counter(text.lower()).most_common()) count_cleaned = {} for k, v in count.items(): if clean.search(k): count_cleaned[k] = v most_freq_letters = {} for key, value in count_cleaned.items(): if value == max(count_cleaned.values()): most_freq_letters[key] = value most_freq_letters_cleaned = sorted(most_freq_letters.keys()) result = most_freq_letters_cleaned[0] return result if __name__ == '__main__': print("Example:") print(checkio("Hello World!")) # These "asserts" using only for self-checking and not necessary for auto-testing assert checkio("Hello World!") == "l", "Hello test" assert checkio("How do you do?") == "o", "O is most wanted" assert checkio("One") == "e", "All letter only once." assert checkio("Oops!") == "o", "Don't forget about lower case." assert checkio("AAaooo!!!!") == "a", "Only letters." assert checkio("abe") == "a", "The First." print("Start the long test") assert checkio("a" * 9000 + "b" * 1000) == "a", "Long." print("The local tests are done.")
chicocheco/checkio
home/wanted_letter.py
wanted_letter.py
py
1,490
python
en
code
0
github-code
13
42794287361
def arithmetic_arranger(problems, optional=None):
    """Arrange a list of arithmetic problems vertically, side by side.

    Args:
        problems: list of strings like "32 + 698" — two digit-string
            operands (at most four characters each) separated from a
            '+' or '-' operator by single spaces.
        optional: when exactly True, a fourth line with the computed
            answers is appended (name/looseness kept for backward
            compatibility with existing callers).

    Returns:
        The arranged problems as a single string, or an "Error: ..."
        message when the input violates one of the constraints.

    The original version duplicated the validation for '+' and '-' and ran
    four near-identical formatting loops; it also silently accepted
    malformed three-operand problems such as "1 + 2 + 3", arranging only
    the first two operands.  This rewrite parses each problem once and
    formats with str.rjust, preserving the original error precedence
    (too many -> operator -> digits -> four digits) and the exact output
    layout (columns two wider than the longest operand, four spaces apart).
    """
    if len(problems) > 5:
        return "Error: Too many problems."

    # Operator check runs over ALL problems first, mirroring the original
    # validation order (so a later problem's operator error wins over an
    # earlier problem's digit error).
    for problem in problems:
        if '+' not in problem and '-' not in problem:
            return "Error: Operator must be '+' or '-'."

    tops, bottoms, operators = [], [], []
    for problem in problems:
        op = '+' if '+' in problem else '-'
        parts = problem.split(' ' + op + ' ')
        if len(parts) != 2:
            # Malformed problem (e.g. "1 + 2 + 3" or missing spaces).
            return "Error: Numbers must only contain digits."
        for operand in parts:
            if not operand.isdigit():
                return "Error: Numbers must only contain digits."
            if len(operand) > 4:
                return "Error: Numbers cannot be more than four digits."
        tops.append(parts[0])
        bottoms.append(parts[1])
        operators.append(op)

    # Each column is two characters wider than its longest operand.
    widths = [max(len(a), len(b)) + 2 for a, b in zip(tops, bottoms)]
    gap = ' ' * 4
    line1 = gap.join(top.rjust(w) for top, w in zip(tops, widths))
    # Operator flush left, second operand flush right within the column.
    line2 = gap.join(op + bottom.rjust(w - 1)
                     for op, bottom, w in zip(operators, bottoms, widths))
    dashes = gap.join('-' * w for w in widths)
    lines = [line1, line2, dashes]

    if optional == True:
        answers = [str(int(a) + int(b)) if op == '+' else str(int(a) - int(b))
                   for a, b, op in zip(tops, bottoms, operators)]
        lines.append(gap.join(ans.rjust(w) for ans, w in zip(answers, widths)))

    return '\n'.join(lines)
maanuw/Scientific_Computing_With_Python
arithmetic_arranger/arithmatic_arranger.py
arithmatic_arranger.py
py
3,447
python
en
code
0
github-code
13
6101140806
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from posts.models import Posts, Users
from django.contrib.auth.models import User


class Genre(MPTTModel):
    """Threaded comment on a blog post, stored as an MPTT tree.

    NOTE(review): despite the class name 'Genre', the fields (blog, body,
    parent, related_name='comment'/'children') and the module path
    (comment/models.py) indicate this models a nested comment — confirm
    and consider renaming.
    """
    # The post this comment belongs to; reverse accessor is post.comment.
    blog = models.ForeignKey(Posts, on_delete=models.CASCADE, related_name='comment')
    # The comment author (a Users row, not django.contrib.auth User).
    name = models.ForeignKey(Users, on_delete=models.CASCADE)
    # Comment text; null=False is the default but is stated explicitly here.
    body = models.TextField(null=False)
    # Set once when the row is created.
    timestamp = models.DateTimeField(auto_now_add=True)
    # Self-referential tree link: None/blank for top-level comments,
    # otherwise the comment being replied to.
    parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')

    def __str__(self):
        # Admin/display representation is the raw comment text.
        return self.body

    class MPTTMeta:
        # Siblings are inserted ordered by author foreign key.
        order_insertion_by = ['name']
cyberchao/Blog
comment/models.py
models.py
py
678
python
en
code
1
github-code
13
31923319673
from flask import Flask, jsonify, request
import flask_cors
import pandas as pd

from predictors.disease_predictor import disease_predictor, add_data_to_model
from predictors.symptom_predictor import predict_next_symptoms

app = Flask(__name__)
flask_cors.CORS(app)


def _ranked_unique_symptoms(scored_symptoms):
    """Rank (symptom, score) pairs by score, highest first, and return the
    symptom names with duplicates dropped (first/best occurrence wins).

    Extracted because /symptoms/predict and /predict previously duplicated
    this logic — and both shadowed the pandas alias ``pd`` with their loop
    variable.
    """
    ranked = sorted(scored_symptoms, key=lambda pair: pair[1], reverse=True)
    names = []
    for pair in ranked:
        if pair[0] not in names:
            names.append(pair[0])
    return names


@flask_cors.cross_origin()
@app.route('/symptoms', methods=['OPTIONS', 'GET'])
def getSymptoms():
    """Return all known symptom names from the training CSV.

    The last column is excluded — presumably the disease label column;
    TODO confirm against the data file.
    """
    if request.method == 'OPTIONS':
        return jsonify({})
    data = pd.read_csv("../data/trial2/disease_prediction_input.csv", header=0, sep=';')
    symptoms = list(data.columns.values)
    return jsonify(symptoms[0:-1])


@flask_cors.cross_origin()
@app.route('/symptoms/predict', methods=['OPTIONS', 'POST'])
def getNextSymptoms():
    """Suggest follow-up symptoms given confirmed positives/negatives."""
    if request.method == 'OPTIONS':
        return jsonify({})
    data = request.get_json()
    scored = predict_next_symptoms(data['positives'], data['negatives'])
    return jsonify(_ranked_unique_symptoms(scored[0]))


@flask_cors.cross_origin()
@app.route('/diseases/predict', methods=['OPTIONS', 'POST'])
def getNextDiseases():
    """Rank candidate diseases for the given positive symptoms."""
    if request.method == 'OPTIONS':
        return jsonify({})
    data = request.get_json()
    possible_diseases = sorted(
        disease_predictor(data['positives']), key=lambda x: x[1], reverse=True)
    return jsonify(possible_diseases)


@flask_cors.cross_origin()
@app.route('/symptoms/diseases', methods=['OPTIONS', 'POST'])
def saveEntry():
    """Feed a confirmed (symptoms, disease) observation back into the model."""
    if request.method == 'OPTIONS':
        return jsonify({})
    data = request.get_json()
    add_data_to_model(data['symptoms'], data['disease'])
    return "", 201


@app.route('/predict', methods=["POST"])
def predict():
    """Combined endpoint: disease ranking plus next-symptom suggestions."""
    positives = request.json.get("positives")
    negatives = request.json.get("negatives")
    possible_diseases = sorted(
        disease_predictor(positives), key=lambda x: x[1], reverse=True)
    possible_symptoms = _ranked_unique_symptoms(
        predict_next_symptoms(positives, negatives)[0])
    return jsonify({"possibleSymptoms": possible_symptoms,
                    "possibleDiseases": possible_diseases})


if __name__ == '__main__':
    app.run()
anjsudh/ml-hack
server/server.py
server.py
py
2,651
python
en
code
2
github-code
13
32598547336
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Tkinter GUI tool: reads survey-sheet rows from an Excel workbook and
renders one .docx map-packaging sheet per row via a docxtpl template."""
# Import the tkinter package and alias it as tk
import tkinter as tk
import docx
import xlrd
import time
import sys  # NOTE(review): appears unused in this file — confirm before removing
import threading
from tkinter import ttk
from docx.shared import Pt
from tkinter import filedialog
from tkinter import messagebox
from docxtpl import DocxTemplate
from PIL import Image, ImageTk


# The Application class represents the app/window; inherits from Frame.
class Application(tk.Frame):

    # Application constructor; master is the window's parent widget.
    def __init__(self, master=None):
        """Initialise the frame, show it with grid layout, build widgets."""
        # Initialise the Frame part of Application
        tk.Frame.__init__(self, master)
        # Show the window, using the grid layout
        self.grid()
        # Create the widgets ("imput" is a typo for "input", kept for compatibility)
        self.create_imput()

    # Create the widgets
    def create_imput(self):
        """Lay out banner image, Excel picker, survey-area entry, scale
        radio group, run button and progress bar."""
        # Create a button labelled 'Quit' that exits when clicked
        # self.quitButton = tk.Button(self, text='Quit', command=self.quit)
        # Show the button, using the grid layout
        # self.quitButton.grid()
        load = Image.open(r'face.jpg')
        render = ImageTk.PhotoImage(load)
        img = tk.Label(image=render)
        # Keep a reference so the PhotoImage is not garbage-collected.
        img.image = render
        img.grid(row=0, column=0, rowspan=8, columnspan=4)
        self.label1 = tk.Label(text="导入Excel:", fg="black")
        self.label1.grid(row=0, column=4, columnspan=2)
        self.file_import = tk.Button(text="选择文件", command=self.file_picker)
        self.file_import.grid(
            row=0, column=6, columnspan=2, padx=10, sticky='w')
        self.label2 = tk.Label(text="测区名称:", fg="black")
        self.label2.grid(row=2, column=4, columnspan=2)
        self.entry = tk.Entry(width=15)
        self.entry.grid(row=2, column=6, columnspan=2, padx=10, sticky='w')
        self.label3 = tk.Label(text="比例尺:", fg="black")
        self.label3.grid(row=3, column=4, columnspan=2)
        # Create the Radiobutton group
        var = tk.IntVar()
        # Default selection is value 1 (scale 1:500)
        var.set(1)
        self.var = var
        self.ratio500 = tk.Radiobutton(
            text="1:500", variable=var, value=1, command=self.select_ratio)
        self.ratio500.grid(row=4, column=4, columnspan=2, padx=5, sticky='w')
        self.ratio1000 = tk.Radiobutton(
            text="1:1000", variable=var, value=2, command=self.select_ratio)
        self.ratio1000.grid(row=4, column=6, columnspan=2, padx=5, sticky='w')
        self.ratio10000 = tk.Radiobutton(
            text="1:10000", variable=var, value=3, command=self.select_ratio)
        self.ratio10000.grid(row=5, column=4, columnspan=2, padx=5,
                             sticky='w')
        self.ratio50000 = tk.Radiobutton(
            text="1:50000", variable=var, value=4, command=self.select_ratio)
        self.ratio50000.grid(row=5, column=6, columnspan=2, padx=5, sticky='w')
        # Default scale option
        self.scale = "1:500"
        self.btn = tk.Button(text=" 执行 ", command=self.path_picker)
        self.btn.grid(row=6, column=4, columnspan=2)
        self.pbar = ttk.Progressbar(
            orient="horizontal", length=110, mode="determinate", value=0)
        self.pbar.grid(row=6, column=6, columnspan=2)
        self.label4 = tk.Label(
            text="@联系作者:FJSM 汪含秋", fg="black", font=('微软雅黑', 7))
        self.label4.grid(row=7, column=4, columnspan=4)

    # File selection
    def file_picker(self):
        """Ask for the source Excel workbook and remember its path."""
        filename = filedialog.askopenfilename(filetypes=[("Excel file", "*.xls*")])
        if filename != '':
            messagebox.showinfo(title='提示', message="您选择的文件是:" + filename)
            self.excel_path = filename
        else:
            messagebox.showerror(title='错误', message="您未选择任何文件")

    # Output-path selection
    def path_picker(self):
        """Ask for the output directory and start the conversion thread."""
        save_path = filedialog.askdirectory()
        if save_path != '':
            messagebox.showinfo(title='提示', message="您选择的路径是:" + save_path)
            self.save_path = save_path
            # self.analysis_excel(self.excel_path)
            self.pbar.start()
            # Run the Excel parsing off the UI thread so the window stays
            # responsive; daemon so it dies with the app.
            self.thread1 = threading.Thread(
                target=self.analysis_excel, args=(self.excel_path,))
            self.thread1.setDaemon(True)
            self.thread1.start()
        else:
            messagebox.showerror(title='错误', message="您未选择任何路径")

    # Scale selection
    def select_ratio(self):
        """Map the selected radio value (1-4) to its scale string."""
        selection = self.var.get()
        scale = {
            1: '1:500',
            2: '1:1000',
            3: '1:10000',
            4: '1:50000'
        }
        self.scale = scale.get(selection)
        # print(self.scale)

    # Checker + date formatting: pad between head and tail to a fixed width.
    # NOTE(review): len(head)*2 assumes every head character is double-width
    # (CJK) — confirm inputs are all-CJK strings.
    def myfomart(self, head, tail, length):
        blank = ' '
        return head + blank*(length - len(head)*2) + tail

    # CJK text: pad assuming double-width characters
    def myfomart2(self, head, length):
        blank = ' '
        return head + blank*(length - len(head)*2)

    # Letters/digits: pad assuming single-width characters
    def myfomart3(self, head, length):
        blank = ' '
        return head + blank*(length - len(head))

    # Parse the Excel workbook
    def analysis_excel(self, excel_path):
        """Read every data row of sheet 0 (skipping the header row) and
        spawn one docxtpl_test worker thread per row.

        Floats are coerced to int-then-str because xlrd reads all numeric
        cells as floats.
        """
        # excel
        book = xlrd.open_workbook(excel_path)
        sheet1 = book.sheet_by_index(0)
        sheet_row = sheet1.nrows
        sheet_col = sheet1.ncols
        for r in range(sheet_row):
            if r == 0:
                # Skip the header row.
                continue
            data = []
            for c in range(sheet_col):
                value = sheet1.cell_value(r, c)
                if (isinstance(value, float)):
                    value = str(int(value))
                data.append(value)
            # self.docxtpl_test(data)
            # NOTE(review): one unjoined thread per row; they all share
            # self.pbar and each stops it on completion — confirm intended.
            self.thread2 = threading.Thread(
                target=self.docxtpl_test, args=(data,)
            )
            self.thread2.start()
        # print ("结束")
        return
        # messagebox.showinfo(title= '提示', message= '输出完成')

    # Fill one row of table data into a Word document
    def docxtpl_test(self, data):
        """Render blank_pack.docx with the row's values and save it to the
        chosen output directory as <sheet number>.docx.

        Expects data to have at least 47 columns (indices 0-46) in the
        fixed layout produced by analysis_excel.
        """
        # print(data)
        # print(len(data))
        auth2 = self.myfomart(data[24], data[25], 12)
        check2 = self.myfomart(data[26], data[27], 12)
        auth3 = self.myfomart(data[28], data[29], 12)
        check3 = self.myfomart(data[30], data[31], 12)
        auth4 = self.myfomart(data[32], data[33], 12)
        check4 = self.myfomart(data[34], data[35], 12)
        auth5 = self.myfomart(data[36], data[37], 12)
        check5 = self.myfomart(data[38], data[39], 12)
        method1 = self.myfomart2(data[40], 16)
        soft = self.myfomart3(data[41], 19)
        gs1 = self.myfomart3(data[42], 38)
        method2 = self.myfomart2(data[43], 16)
        gs2 = self.myfomart3(data[44], 18)
        method3 = self.myfomart2(data[45], 16)
        gs3 = self.myfomart3(data[46], 18)
        # print (data[41])
        # print (len(data[41]))
        # print (soft)
        # print (data[42])
        # print (len(data[42]))
        # print (gs1)
        # print (data[43])
        # print (method2)
        # print (len(method2))
        # print (len(method3))
        # print (len(gs2))
        # print (len(gs3))
        doc = DocxTemplate(r'blank_pack.docx')
        context = {
            '图号': data[0],
            '图名': data[1],
            '测区名称': self.entry.get(),
            '比例尺': self.scale,
            '年': data[2],
            'auth0': data[3],
            'check0': data[4],
            'No1': data[5],
            'man1': data[6],
            'chk1': data[7],
            'No2': data[8],
            'man2': data[9],
            'chk2': data[10],
            'No3': data[11],
            'man3': data[12],
            'chk3': data[13],
            'No4': data[14],
            'man4': data[15],
            'chk4': data[16],
            'No5': data[17],
            'man5': data[18],
            'chk5': data[19],
            'No6': data[20],
            'man6': data[21],
            'chk6': data[22],
            'auth1': data[23],
            'auth2': auth2,
            'check2': check2,
            'auth3': auth3,
            'check3': check3,
            'auth4': auth4,
            'check4': check4,
            'auth5': auth5,
            'check5': check5,
            'method1': method1,
            'method2': method2,
            'soft': soft,
            'gs1': gs1,
            'gs2': gs2,
            'method3': method3,
            'gs3': gs3,
        }
        doc.render(context)
        test_path = self.save_path + "/" + data[0] + ".docx"
        # print (self.save_path)
        doc.save(test_path)
        self.pbar.stop()
        return


# Legacy experiment: fill a template via python-docx directly instead of
# docxtpl. Not called by the GUI (invocation below is commented out).
def docx_test():
    # excel
    book = xlrd.open_workbook(r'D:\工作\word操作\code_v2\code\data.xlsx')
    sheet1 = book.sheet_by_index(0)
    sheet_row = sheet1.nrows
    sheet_col = sheet1.ncols
    for r in range(sheet_row):
        if r == 0:
            continue
        data = []
        for c in range(sheet_col):
            value = sheet1.cell_value(r, c)
            if (isinstance(value, float)):
                value = str(int(value))
            data.append(value)
    # print(data)
    # word
    document = docx.Document(r'D:/code/py_code/pyGUI/temple.docx')
    tables = document.tables
    homeTable = tables[0]
    # Sheet number
    run = homeTable.cell(0, 0).paragraphs[0].add_run(data[0])
    run.font.name = u'微软雅黑'
    run.font.size = Pt(15)
    # Sheet name
    run = homeTable.cell(1, 0).paragraphs[0].add_run(data[1])
    run.font.name = u'微软雅黑'
    run.font.size = Pt(15)
    # Survey-area name
    run = homeTable.cell(2, 0).paragraphs[0].add_run(data[2])
    run.font.name = u'微软雅黑'
    run.font.size = Pt(15)
    # Scale
    # Year
    run = tables[1].cell(0, 0).paragraphs[0].add_run(data[3])
    run.font.name = u'微软雅黑'
    run.font.size = Pt(15)
    annotateTable = tables[3]
    # Loop fill-in (disabled)
    # for i in range(6):
    # # Drawing number 1
    run = annotateTable.cell(1, 1).paragraphs[0].add_run(data[6])
    run.font.name = u'宋体'
    run.font.size = Pt(12)
    # # Operator 1
    # run = annotateTable.cell(1, 2).paragraphs[0].add_run(data[7 + i*3])
    # run.font.name = u'宋体'
    # run.font.size = Pt(12)
    # # Checker 1
    # run = annotateTable.cell(1, 3).paragraphs[0].add_run(data[8 + i*3])
    # run.font.name = u'宋体'
    # run.font.size = Pt(12)
    paragraphs = document.paragraphs
    i = 0
    t = 0
    blank = ' '
    for p in paragraphs:
        # print(i)
        i = i + 1
        if ('作业方法' in p.text):
            p.text = ''
            run = p.add_run('作业方法:')
            run.font.name = u'宋体'
            run.font.size = Pt(12)
            workway = '随机数测试'
            workway = workway + blank * (16 - len(workway) * 2)
            run = p.add_run(workway)
            run.font.name = u'宋体'
            run.font.size = Pt(12)
            run.font.underline = True
            run = p.add_run(' 质量检查方法(软件):')
            run.font.name = u'宋体'
            run.font.size = Pt(12)
            checkway = '测试随机长度'
            checkway = checkway + blank * (21 - len(checkway) * 2)
            run = p.add_run(checkway)
            run.font.name = u'宋体'
            run.font.size = Pt(12)
            run.font.underline = True
        if ('填表者' in p.text):
            if (t == 0 or t == 2):
                # p.text = ''
                run = p.add_run(data[4])
                run.font.name = u'宋体'
                run.font.size = Pt(12)
            if (t == 1):
                pass
        if ('检查者' in p.text):
            if (t == 0 or t == 2):
                run = p.add_run(data[5])
                run.font.name = u'宋体'
                run.font.size = Pt(12)
    timestamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    test_path = "D:\工作\word操作\code_v2\code" + timestamp + ".docx"
    document.save(test_path)


# docx_test()

# Create an Application object
app = Application()
# Intended to set the window title.
# NOTE(review): this ASSIGNS over the Tk `title` method instead of calling
# it — `app.master.title('First Tkinter')` is presumably what was meant.
app.master.title = 'First Tkinter'
# Start the main loop
app.mainloop()
# messagebox.showinfo("ok")
sulmt/py_analysis_word
wordBig V1.0/wordBig.py
wordBig.py
py
12,209
python
en
code
0
github-code
13
9532276066
# -*- coding:utf-8 -*-
import helpers
import string_utils


def compute_logs_interval(pattern, base_log, compared_log):
    """Return the time difference between two log lines.

    Timestamps are extracted with *pattern* via helpers.get_log_time and
    converted to numbers by string_utils.str_time_long (presumably
    milliseconds — depends on that helper).  The result is
    compared_log's time minus base_log's time, or None when either line
    yields no timestamp.
    """
    base_time = helpers.get_log_time(base_log, pattern)
    compared_time = helpers.get_log_time(compared_log, pattern)
    if base_time and compared_time:
        return (string_utils.str_time_long(compared_time)
                - string_utils.str_time_long(base_time))
    return None
LittleOrchid/AndroidLogTools
anlysize/methods/log_interval.py
log_interval.py
py
407
python
en
code
0
github-code
13
34654113526
import base64
import httplib
import json
import logging
import os

import flask
from google.appengine.api import mail
import jinja2

app = flask.Flask(__name__)

# The build statuses that will trigger a notification email.
_STATUSES_TO_REPORT = ('SUCCESS', 'FAILURE')

# At least one of the build's tags must be in this set for mail to be sent.
_TAGS_TO_REPORT = frozenset(['feedloader'])

# Get environment variables
_EMAIL_TO = os.getenv('EMAIL_TO')


@app.route('/health', methods=['GET'])
def health():
  """Checks the deployed application is running correctly."""
  return 'OK', httplib.OK


@app.route('/pubsub/push', methods=['POST'])
def pubsub_push():
  """Sends a notification email based on the Cloud Build PubSub message.

  The email will not be sent if the build status is not in the list of
  _STATUSES_TO_REPORT. Also, at least one of the build tags must be contained
  within _TAGS_TO_REPORT, or the email will not be sent. Emails can also be
  disabled by leaving the EMAIL_TO environment variable unset.

  Returns:
    A str indicating the result of the call and an http status code.
  """
  if not _EMAIL_TO:
    logging.info('Build email not sent: EMAIL_TO is empty')
    return 'OK!', httplib.OK
  request_body = json.loads(flask.request.data.decode('utf-8'))
  message = request_body.get('message', {})
  attributes = message.get('attributes', {})
  # PubSub delivers the payload base64-encoded as a JSON (text) string.
  # base64.b64decode accepts an ASCII str on both Python 2 and 3, whereas
  # the previously used base64.decodebytes requires bytes on Python 3 and
  # does not exist at all on Python 2 (this module still imports httplib,
  # a Python-2-only module) — so b64decode is the portable, correct call.
  decoded_data = base64.b64decode(message.get('data', ''))
  data_dict = json.loads(decoded_data)
  status = attributes.get('status', '').upper()
  tags = set(data_dict.get('tags', []))
  if status not in _STATUSES_TO_REPORT:
    logging.info('Build email not sent: status not in statuses to report: %s',
                 status)
    return 'OK!', httplib.OK
  if not tags.intersection(_TAGS_TO_REPORT):
    logging.info('Build email not sent: build tag not in TAGS_TO_REPORT')
    return 'OK!', httplib.OK
  build_id = attributes.get('buildId', '')
  build_logs = data_dict.get('logUrl', '')
  project_id = data_dict.get('projectId', '')
  publish_time = message.get('publish_time', '')
  jinja_environment = jinja2.Environment(
      loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
      extensions=['jinja2.ext.autoescape'],
      autoescape=True)
  template_values = {
      'build_id': build_id,
      'build_logs': build_logs,
      'project_id': project_id,
      'publish_time': publish_time,
      'status': status,
  }
  template = jinja_environment.get_template('build_report_template.html')
  html_body = template.render(template_values)
  # Renamed from `message` to avoid clobbering the PubSub message dict above.
  email_message = mail.EmailMessage(
      sender='no-reply@{0}.appspotmail.com'.format(project_id),
      subject='Feedloader Build Result: {}'.format(status),
      to=_EMAIL_TO,
      html=html_body)
  email_message.send()
  return 'OK!', httplib.OK


if __name__ == '__main__':
  app.run(host='127.0.0.1', port=8080, debug=True)
google/feedloader
appengine/build_reporter/main.py
main.py
py
2,800
python
en
code
9
github-code
13
40131569530
# -*- coding: utf-8 -*-
import itertools as it


def solution(numbers, target):
    """Count the ways to attach +/- signs to *numbers* so they sum to *target*.

    Iterates every tuple of signs from the cartesian product {+1, -1}^n;
    each tuple corresponds to exactly one choice of indices to negate, so
    the count matches the original subset-negation approach.  The count is
    printed before being returned, as in the original script.
    """
    answer = 0
    for signs in it.product((1, -1), repeat=len(numbers)):
        if sum(sign * value for sign, value in zip(signs, numbers)) == target:
            answer += 1
    print(answer)
    return answer


solution([1, 1, 1, 1, 1], 3)
dlwlstks96/codingtest
프로그래머스/깊이,너비우선탐색_타겟넘버.py
깊이,너비우선탐색_타겟넘버.py
py
809
python
ko
code
2
github-code
13
21571675123
from flask import Flask
import pandas as pd
from flask_cors import CORS

app = Flask(__name__)
CORS(app)

# Market price data, loaded once at import time from the bundled workbook.
df = pd.read_excel("PrecoMerc.xlsx")


@app.route("/bydate/<date>")
def date(date):
    """Return the Portuguese market prices (€/MWh) for the given date.

    Filters rows where 'Data' equals the raw *date* path segment and the
    session column 'Sessão' is 0 (presumably the day-ahead session — TODO
    confirm with the data source), returning the price column as a list.

    NOTE(review): returning a bare list from a view requires Flask >= 2.2;
    on older versions wrap it in jsonify. The function name shadows its own
    parameter, which is harmless but confusing.
    """
    rows = df.loc[(df['Data'] == date) & (df["Sessão"] == 0), "Preço - PT [€/MWh]"]
    return rows.tolist()
luciusvinicius/personal-sauna
EnergyApi/app.py
app.py
py
303
python
en
code
0
github-code
13
37991160552
# Oauth/OIDC OAUTH_SERVERS = ["google"] # Authlib GOOGLE_SERVER_METADATA_URL = 'https://accounts.google.com/.well-known/openid-configuration' GOOGLE_CLIENT_KWARGS = {'scope': 'openid'} # Globals MAX_NAME_LENGTH = 16 NEW_USER_KEY_LENGTH = 8
Dronesome-Archive/server
config.py
config.py
py
242
python
en
code
0
github-code
13
15153542252
"""Notebook-style script: sentiment analysis of CEH exam reviews with
TextBlob, plus a word cloud of the stop-word-filtered text.

NOTE(review): the bare expressions (``ds``, ``type(dataset)``, ``fdist``,
``stopwords[0:10]``, ``len(...)``) only display output in a REPL/notebook;
they are no-ops when run as a script.
"""
import pandas as pd
from textblob import TextBlob

# importing the data
ds = pd.read_csv("CEH_exam_negative_reviews.csv")
ds

# converting the csv file to string format
dataset = ds.to_string(index=False)
type(dataset)
dataset

# Sentiment of the raw, uncleaned text (for comparison with the final score).
blob = TextBlob(dataset)
print(blob.sentiment)

# data cleaning: collapse every non-alphanumeric run to a single space
import re
dataset = re.sub("[^A-Za-z0-9]+", " ", dataset)

# data tokenizing
import nltk
from nltk.tokenize import word_tokenize
Tokens = word_tokenize(dataset)
print(Tokens)

# Word-frequency distribution of the tokens; plot the 20 most common.
from nltk.probability import FreqDist
fdist = FreqDist()
for word in Tokens:
    fdist[word] += 1
fdist
fdist.plot(20)

# stemming
# NOTE(review): the stemmer result is never stored or applied to Tokens —
# this line is a demo only.
from nltk.stem import PorterStemmer
pst = PorterStemmer()
pst.stem("having")

# removing stopwords
import nltk.corpus
stopwords = nltk.corpus.stopwords.words("english")
stopwords[0:10]

# getting rid of stopwords (case-sensitive: capitalised stop words survive)
filtered_sentence = []
for FinalWord in Tokens:
    if FinalWord not in stopwords:
        filtered_sentence.append(FinalWord)
print(filtered_sentence)
len(filtered_sentence)
len(Tokens)

# calculating final sentimental score on the cleaned, re-joined text
filtered_sentence = ' '.join([str(elem) for elem in filtered_sentence])
print(filtered_sentence)
score = TextBlob(filtered_sentence)
print(score.sentiment)

###--------------found sentiment score-------------------
from wordcloud import WordCloud
word_cloud = WordCloud(width=512, height=512, background_color="white", stopwords=stopwords).generate(filtered_sentence)
import matplotlib.pyplot as plt
plt.imshow(word_cloud)
pbt12/CHE_model
C.H.E_model.py
C.H.E_model.py
py
1,602
python
en
code
0
github-code
13
39282126520
# Created by Qingzhi Ma at 18/11/2019 # All right reserved # Department of Computer Science # the University of Warwick # Q.Ma.2@warwick.ac.uk # from builtins import print import category_encoders as ce import numpy as np import pandas as pd import torch import torch.nn.functional as functional from qregpy import qreg from sklearn.preprocessing import OneHotEncoder # from xgboost.compat import DataFrame # print(functional.one_hot(torch.tensor([[0, 1], [0, 2], [0, 3]]))) # from dbestclient.ml import mdn # x = np.linspace(0, 10, 100) # y = x ** 2 + 1 # x = x[:, np.newaxis] # # print(x) # # print(y) # # plt.plot(x,y) # # plt.show() # reg1 = qreg.QReg(base_models=["linear", "polynomial"], verbose=False).fit(x, y) # preds1 = reg1.predict([[1], [2]]) # print(preds1) # # x=x.reshape(-1,1) # reg2 = mdn.RegMdn(dim_input=1).fit(x, y, num_gaussians=4, num_epoch=400) # # preds2 = reg2.predict([1, 2]) # preds2 = reg2.predict([[1], [2]]) # preds2 = reg2.predict([[1], [2]]) # print("preds2", preds2) # preds2 def test_onhot(): df = pd.DataFrame({"city": ["c1", "c2", "c1"], "phone": [158, 169, 173]}) ohe = OneHotEncoder(categories='auto') feature_arr = ohe.fit_transform(df[['phone', 'city']]).toarray() feature_labels = ohe.categories_ print(feature_labels) feature_labels = np.array(feature_labels).ravel() print(feature_arr) def test_binary_encoding(): print("binray") df = pd.DataFrame({'ID': [1, 2, 3, 4, 5, 6], 'RATING': ['G', 'B', 'G', 'B', 'B', 'G'], 'type': [1, 2, 1, 3, 1, 1, ]}) data = [['G', '1'], ['B', '2'], ['G', '3'], ['G', '4']] print(data) encoder = ce.BinaryEncoder(cols=[0, 1]).fit(data) numeric_dataset = encoder.transform(data) # print(df) print(numeric_dataset) print(encoder.transform([['G', 4]])) def test_dic(): dic = {} dic["[1, 2]"] = 9 print(dic) test_dic() # test_binary_encoding()
qingzma/DBEstClient
dbestclient/executor/test.py
test.py
py
1,953
python
en
code
14
github-code
13
3966970121
from datetime import datetime

from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required

from accounts.models import Profile
from products.models import Product
from shopping_cart.models import OrderItem, Order
from shopping_cart.extras import generate_order_id


def get_user_pending_order(request):
    """Return the user's open (not-yet-ordered) Order, or 0 when none exists.

    NOTE(review): the "no order" sentinel is 0 rather than None; callers
    below only rely on its falsiness in templates.
    """
    user_profile = get_object_or_404(Profile, user=request.user)
    order = Order.objects.filter(owner=user_profile, is_ordered=False)
    if order.exists():
        return order[0]
    return 0


@login_required()
def add_to_cart(request, pk):
    """Add product *pk* to the user's pending order, creating the order on
    first use; already-owned ebooks are rejected."""
    user_profile = get_object_or_404(Profile, user=request.user)
    product = Product.objects.filter(pk=pk).first()
    if product in request.user.profile.ebooks.all():
        messages.info(request, 'You already own this ebook')
        return redirect(reverse('products:product_list'))
    order_item, status = OrderItem.objects.get_or_create(product=product)
    user_order, status = Order.objects.get_or_create(owner=user_profile, is_ordered=False)
    user_order.items.add(order_item)
    if status:
        # A brand-new order was just created: stamp it with a reference code.
        user_order.ref_code = generate_order_id()
        user_order.save()
    messages.info(request, 'item added to cart')
    return redirect(reverse('products:product_list'))


@login_required()
def delete_from_cart(request, pk):
    """Delete OrderItem *pk* and return to the cart summary.

    NOTE(review): there is no ownership check — any logged-in user can
    delete any OrderItem by pk; confirm this is acceptable.
    """
    item_to_delete = OrderItem.objects.filter(pk=pk)
    if item_to_delete.exists():
        item_to_delete[0].delete()
        messages.info(request, 'Item has been deleted')
    return redirect(reverse('shopping_cart:order_summary'))


@login_required()
def order_details(request):
    """Render the cart summary for the user's pending order."""
    existing_order = get_user_pending_order(request)
    context = {
        'order': existing_order
    }
    return render(request, 'shopping_cart/order_summary.html', context)


@login_required()
def checkout(request):
    """Render the checkout page for the user's pending order."""
    existing_order = get_user_pending_order(request)
    context = {
        'order': existing_order
    }
    return render(request, 'shopping_cart/checkout.html', context)


@login_required()
def process_payment(request, pk):
    """Placeholder payment step: go straight to the record update."""
    return redirect(reverse('shopping_cart:update_records', args=[pk]))


@login_required()
def update_transaction_records(request, pk):
    """Mark order *pk* and all of its items as purchased and grant the
    ordered ebooks to the buyer's profile.

    NOTE(review): the order is fetched by pk without verifying it belongs
    to request.user, and naive datetime.now() is used rather than
    timezone-aware timestamps — confirm both are intentional.
    """
    order_to_purchase = Order.objects.filter(pk=pk).first()
    order_to_purchase.is_ordered = True
    order_to_purchase.date_ordered = datetime.now()
    order_to_purchase.save()
    order_items = order_to_purchase.items.all()
    order_items.update(is_ordered=True, date_ordered=datetime.now())
    user_profile = get_object_or_404(Profile, user=request.user)
    order_products = [item.product for item in order_items]
    user_profile.ebooks.add(*order_products)
    user_profile.save()
    messages.info(request, 'Thank you! Your purchase was successful!')
    return redirect(reverse('accounts:my_profile'))


def success(request):
    """Render the post-purchase success page."""
    return render(request, 'shopping_cart/purchase_success.html')
thinh9e/learn-django
shoppingcart/shopping_cart/views.py
views.py
py
2,967
python
en
code
0
github-code
13
10699216899
from sklearn.svm import SVC
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.preprocessing import StandardScaler


def preProcess(X):
    """Return X standardised to zero mean and unit variance per feature."""
    scaler = StandardScaler()
    return scaler.fit_transform(X)


def trainTest(x_train, x_test, y_train, y_test):
    """Fit an SVC on the training split, print its test accuracy as a
    percentage, and return the fitted model."""
    svm = SVC()
    svm.fit(x_train, y_train)
    pred = svm.predict(x_test)
    # Vectorised accuracy instead of the original manual counting loop.
    accuracy = float(np.sum(pred == np.asarray(y_test))) / len(y_test) * 100
    print(accuracy)
    return svm


def main():
    """Load precomputed edge features, standardise them, and train/evaluate
    an SVM on a random train/test split."""
    X = np.load('Xedge.npy')
    Y = np.load('Yedge.npy')
    X = preProcess(X)
    print("SVM")
    x_train, x_test, y_train, y_test = train_test_split(X, Y)
    svm = trainTest(x_train, x_test, y_train, y_test)
    # display(x_train, y_train, svm)


if __name__ == "__main__":
    main()
vineetjoshi253/Image-based-Indian-Monument-Recognition-using-Convoluted-Neural-Networks
Edge_Svm.py
Edge_Svm.py
py
862
python
en
code
1
github-code
13
31902219311
""" This module scrapes the lyrics of Etta James and Billy Joel form lyrics.com and saves it as as .csv file """ import re import pandas as pd import requests from bs4 import BeautifulSoup ETTA = 'Etta' JAMES = 'James' URL_ETTA = 'https://www.lyrics.com/artist.php?name=Etta-James&aid=387&o=1' BILLY = 'Billy' JOEL = 'Joel' URL_BILLY = 'https://www.lyrics.com/artist.php?name=Billy-Joel&aid=4615&o=1' def append_url(first_name, name, url_artist): """ Append the URLs into a single list, return the artist URL list """ artist_linked = re.findall(f'href=\"(\/lyric\/\d+\/{first_name}\+{name}\/[^"]*)\"', requests.get(url_artist).text) artist_list = [] for i in artist_linked: url = ('https://www.lyrics.com' + i) artist_list.append(url) return artist_list def get_lyrics(first_name, name, url_artist): """ Scrape lyrics of an artist, return a {first_name}_cleaned.csv file """ artist_soup = [] for j in append_url(first_name, name, url_artist): file = BeautifulSoup(requests.get(j).text, features='lxml').find_all(attrs={'id': 'lyric-body-text'}) artist_soup.append(file) flat_list = [] for sublist in artist_soup: for item in sublist: flat_list.append(item.text) return pd.DataFrame(flat_list).to_csv(f'{first_name}_cleaned.csv', sep='\t', index=False) get_lyrics(ETTA, JAMES, URL_ETTA) get_lyrics(BILLY, JOEL, URL_BILLY)
helenaEH/Lyrics_classifier_NLP
lyrics_scraper.py
lyrics_scraper.py
py
1,459
python
en
code
0
github-code
13
71418789459
# key_vault.py
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from config import TENANT_ID, CLIENT_ID, CLIENT_SECRET, KEY_VAULT_NAME


def get_secret_client():
    """Build a SecretClient for the configured Key Vault, authenticating
    with the service-principal credentials from config."""
    vault_url = f"https://{KEY_VAULT_NAME}.vault.azure.net"
    return SecretClient(
        vault_url=vault_url,
        credential=ClientSecretCredential(
            tenant_id=TENANT_ID,
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET,
        ),
    )


def create_or_update_secret(secret_name, secret_value):
    """Create the named secret, or add a new version when it already
    exists, and return the resulting secret object."""
    return get_secret_client().set_secret(secret_name, secret_value)
crisroco/auzure_key_vault_integration_py
key_vault.py
key_vault.py
py
704
python
en
code
0
github-code
13
22829551404
from django.shortcuts import render, redirect, get_object_or_404 from django.utils import timezone from .models import Post, Profile from django.contrib.auth import login, authenticate from django.conf import settings from django.core.files.storage import FileSystemStorage from blog.forms import SignUpForm, ImageUploadForm from django.http.response import HttpResponseForbidden, HttpResponse from django.contrib.auth.models import User from .forms import PostForm from django.contrib.auth.decorators import login_required from friendship.models import Friend, Follow, FollowingManager from django.contrib import messages from django.db.models import Q, Count def index(request): if request.user.is_authenticated(): return redirect('homefeed') return render(request, 'blog/index.html') @login_required def myprofile(request): posts = Post.objects.filter(author=request.user) user = request.user return render(request, 'blog/userprofile.html', {'posts':posts}) def photogallery(request): posts = Post.objects.all() return render(request, 'blog/photogallery.html', {'posts':posts}) def signup(request): if request.method == 'POST': form = SignUpForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') raw_password = form.cleaned_data.get('password1') user = authenticate(username=username, password=raw_password) login(request, user) return redirect('newUser') else: form = SignUpForm() return render(request, 'signup.html', {'form': form}) def simple_upload(request): if request.method == 'POST' and request.FILES['myfile']: myfile = request.FILES['myfile'] fs = FileSystemStorage() filename = fs.save(myfile.name, myfile) uploaded_file_url = fs.url(filename) return render(request, 'blog/simple_upload.html', { 'uploaded_file_url': uploaded_file_url }) return render(request, 'blog/simple_upload.html') # Profile picture upload def upload_pp(request): if request.method == 'POST': form = ImageUploadForm(request.POST, request.FILES) if form.is_valid(): m = 
Profile.objects.get(pk=request.user.id) m.profilepicture = form.cleaned_data['image'] m.save() return redirect('newUser') return HttpResponseForbidden('allowed only via POST') # Post image upload def upload_img(request): if request.method == 'POST': form = ImageUploadForm(request.POST, request.FILES) if form.is_valid(): m = Post.objects.get(pk=Post.id) m.img = form.cleaned_data['image'] m.save() return redirect('newUser') return HttpResponseForbidden('allowed only via POST') def post_new(request): if request.method == "POST": form = PostForm(request.POST, request.FILES) if form.is_valid(): post = form.save() post.author = request.user post.published_date = timezone.now() post.save() return redirect('newUser') else: form = PostForm() return render(request, 'blog/post_edit.html', {'form': form}) def post_edit(request, pk): post = get_object_or_404(Post, pk=pk) if request.method == "POST": form = PostForm(request.POST, instance=post) if form.is_valid(): post = form.save(commit=False) post.author = request.user post.published_date = timezone.now() post.save() return redirect('post_detail', pk=post.pk) else: form = PostForm(instance=post) return render(request, 'blog/post_edit.html', {'form': form}) def post_delete(request, pk): post = get_object_or_404(Post, pk=pk) post.delete() #messages.success(request, "Successfully Deleted") return redirect('newUser') def userlist(request): users = User.objects.all() return render(request, 'blog/userlist.html', { 'users': users }) def userprofile(request, pk): try: user = User.objects.get(username=pk) except: return 404 posts = Post.objects.filter(author=user.pk) if request.user.is_authenticated(): already_following = FollowingManager.follows(request.user, request.user, user) return render(request, 'blog/userprofile.html', { 'posts': posts, 'user': user, 'username': user.username, 'already_following': already_following}) else: return render(request, 'blog/userprofile.html', { 'posts': posts, 'user': user, 'username': 
user.username,}) #follow and unfollow @login_required def follow(request, pk): followed = User.objects.get(username=pk) if not FollowingManager.follows(request.user, request.user, followed): Follow.objects.add_follower(request.user, followed) return redirect('userprofile', pk=pk) @login_required def unfollow(request, pk): followed = User.objects.get(username=pk) Follow.objects.remove_follower(request.user, followed) return redirect('userprofile', pk=pk) #usersearch def usersearch(request, pk): whitespace = False for c in pk: if c == '_': whitespace = True if whitespace == True: first, last = pk.split("_") users = User.objects.filter(Q(username__contains=last) | Q(username__contains=first) | Q(first_name__contains=first) | Q(last_name__contains=last)) else: users = User.objects.filter(Q(username__contains=pk) | Q(first_name__contains=pk) | Q(last_name__contains=pk)) return render(request, 'blog/userlist.html', { 'users': users }) #homefeed @login_required def homefeed(request): #query with 10 most followed users users = User.objects.annotate(follower_count=Count('followers')).order_by('-follower_count')[:10] following = Follow.objects.following(request.user) posts = Post.objects.filter(author__in=following).order_by('-published_date') return render(request, 'blog/feed.html', {'posts': posts, 'following': following, 'users': users}) #download def download(request, pk): post = Post.objects.get(img=pk) post.downloads += 1 return redirect('homefeed')
poltimmer/2ID60
blog/views.py
views.py
py
6,243
python
en
code
0
github-code
13
26301902842
import colorlog handler = colorlog.StreamHandler() handler.setFormatter(colorlog.ColoredFormatter( fmt='%(log_color)s %(asctime)s : %(message)s', datefmt='%m-%d %H:%M:%S' )) logger = colorlog.getLogger('example') logger.setLevel('DEBUG') logger.addHandler(handler)
naveennvrgup/smart-traffic-light
IOTdevices/logger.py
logger.py
py
275
python
en
code
0
github-code
13
26002995515
# Напишите программу, которая принимает на вход вещественное число и показывает сумму его цифр. x1 = float(input()) + 100 # + 100, чтобы при * 10 не возникало погрешностей .. например: 0.56 * 10 = 5.6000000000000005 sum_number = 0 while x1 != 0: if x1 - int(x1) == 0: sum_number += x1 % 10 x1 //= 10 elif x1 - int(x1) != 0: x1 *= 10 print(int(sum_number) - 1) # строковый вариант: sum_number2 = 0 x2 = ''.join(input().split('.')) for i in x2: sum_number2 += int(i) print(sum_number2)
Pol888/home_work_py_2
task1.py
task1.py
py
660
python
ru
code
0
github-code
13
70139884818
import unittest import medsrtqc.qc.history as hist class TestHistory(unittest.TestCase): def test_qctests(self): hexval = hex(2**63 + 2**4) tests = hist.read_qc_hex(hexval) self.assertEqual(tests, [4, 63]) qc_arr = hist.qc_array(hexval) self.assertTrue(qc_arr[hist.test_index(4)] == 1) fail = hex(2**62 + 2**9) qcpf = hist.QCx.qc_tests(hexval, fail) hist.QCx.update_safely(qcpf, 8, 'pass') self.assertEqual(qcpf[0, hist.test_index(8)], 1) hist.QCx.update_safely(qcpf, 2, 'fail') self.assertEqual(qcpf[1, hist.test_index(2)], 1) hist.QCx.update_safely(qcpf, 4, 'fail') self.assertEqual(qcpf[0, hist.test_index(4)], 0) self.assertEqual(qcpf[1, hist.test_index(4)], 1) if __name__ == '__main__': unittest.main()
ArgoCanada/medsrtqc
tests/test_history.py
test_history.py
py
838
python
en
code
0
github-code
13
7040889528
""" Code for investigating the effect of recurrent connections in a two-layer hierarchical PCN i.e. a latent variable model. Laten variable: x, observations $y \sim N(Wx, \Sigma)$. Goal is to find the most likely x and model parameter W """ import torch import torch.nn as nn import random import numpy as np import matplotlib.pyplot as plt nonlinear = False def nonlinearity(x): if nonlinear: return np.tanh(x) else: return x def deriv(x): if nonlinear: return 1.0 - np.tanh(x) ** 2.0 else: return 1.0 # Generate observed data. One latent variable for each observation sample_size = 1000 W = np.array([[2, 1], [1, 2]]) mu_x = np.array([1, 1]) Sx = np.eye(2) def data_generation(cov): Sy = np.array([[1, cov], [cov, 1]]) x = np.empty((sample_size, 2)) y = np.empty((sample_size, 2)) mu_y = np.empty((sample_size, 2)) np.random.seed(8) for i in range(sample_size): xi = np.random.multivariate_normal(mu_x, Sx) mu_yi = np.matmul(W, xi) yi = np.random.multivariate_normal(mu_yi, Sy) x[i] = xi y[i] = yi mu_y[i] = mu_yi # y_c = y * np.concatenate([np.ones((sample_size,1)), np.ones((sample_size,1))], axis=1) return y def standard_pcn(y): # The STANDARD model; d: dimension of x; k: dimension of y epochs = 500 batch_size = 100 relax_itrs = 100 eval_itrs = 100 lr_x = 3e-3 lr_W = 1e-3 lr_eval = 3e-3 # initialize parameters curr_mu_x = np.zeros_like(mu_x) curr_W = np.eye(2) # k*d std_mses_mu_x = [] err_ys = [] curr_x = np.ones_like(curr_mu_x) # 1*d for e in range(epochs): curr_xs = np.empty((sample_size, 2)) for i in range(0, sample_size, batch_size): # set nodes given the current batch of observations batch_y = y[i:i+batch_size] # bsz*k pred_y = np.matmul(curr_x, curr_W.T) # 1*k err_y = batch_y - pred_y # bsz*k err_x = curr_x - curr_mu_x # 1*d # relaxation for j in range(relax_itrs): delta_x = (-err_x + np.matmul(err_y, curr_W)) # bsz*d curr_x = curr_x + lr_x * delta_x # bsz*d # update error nodes pred_y = np.matmul(curr_x, curr_W.T) # bsz*k err_y = batch_y - pred_y # bsz*k err_x = curr_x 
- curr_mu_x # bsz*d # learning delta_W = np.matmul(err_y.T, curr_x) #/ batch_size # k*d curr_W = curr_W + lr_W * delta_W delta_mu_x = np.sum(err_x, axis=0) #/ batch_size # 1*d curr_mu_x = curr_mu_x + lr_W * delta_mu_x curr_xs[i:i+batch_size] = curr_x # N*d # print(np.mean(err_x**2)) all_pred = np.matmul(curr_xs, curr_W.T) all_err_y = y - all_pred err_ys.append(np.mean(all_err_y**2)) std_mses_mu_x.append(np.mean((curr_mu_x - mu_x)**2)) print('Standard PCN') print(curr_mu_x) print(curr_W) print(err_ys[-1]) return err_ys, all_pred def recurrent_pcn(y): # The RECURRENT model; d: dimension of x; k: dimension of y epochs = 500 batch_size = 100 relax_itrs = 100 eval_itrs = 100 lr_x = 3e-3 lr_W = 1e-3 lr_Wr = 5e-5 lr_eval = 3e-3 # initialize parameters curr_mu_x = np.zeros_like(mu_x) curr_W = np.eye(2) # k*d curr_Wr = np.zeros((2, 2)) rec_mses_mu_x = [] err_ys = [] curr_x = np.ones_like(curr_mu_x) # 1*d for e in range(epochs): curr_xs = np.empty((sample_size, 2)) for i in range(0, sample_size, batch_size): # set nodes given the current batch of observations batch_y = y[i:i+batch_size] # bsz*k pred_y = np.matmul(curr_x, curr_W.T) + np.matmul(batch_y, curr_Wr) # 1*k err_y = batch_y - pred_y # bsz*k err_x = curr_x - curr_mu_x # 1*d # relaxation for j in range(relax_itrs): delta_x = (-err_x + np.matmul(err_y, curr_W)) # bsz*d curr_x = curr_x + lr_x * delta_x # bsz*d # update error nodes pred_y = np.matmul(curr_x, curr_W.T) + np.matmul(batch_y, curr_Wr) # bsz*k err_y = batch_y - pred_y # bsz*k err_x = curr_x - curr_mu_x # bsz*d # learning delta_W = np.matmul(err_y.T, curr_x) #/ batch_size # k*d curr_W = curr_W + lr_W * delta_W delta_Wr = np.matmul(err_y.T, batch_y) #/ batch_size # k*k np.fill_diagonal(delta_Wr, 0) curr_Wr = curr_Wr + lr_Wr * delta_Wr delta_mu_x = np.sum(err_x, axis=0) #/ batch_size # 1*d curr_mu_x = curr_mu_x + lr_W * delta_mu_x curr_xs[i:i+batch_size] = curr_x all_pred = np.matmul(curr_xs, curr_W.T) + np.matmul(y, curr_Wr) all_err_y = y - all_pred 
err_ys.append(np.mean(all_err_y**2)) rec_mses_mu_x.append(np.mean((curr_mu_x - mu_x)**2)) print(curr_xs) print('Recurrent PCN') print(curr_mu_x) print(curr_W) print(curr_Wr) return err_ys, all_pred # check learning curves fig, ax = plt.subplots(1, 2, figsize=(10,4)) for cov in [0, 0.1, 0.25, 0.5, 0.75, 1]: y = data_generation(cov) std_err_ys, _ = standard_pcn(y) rec_err_ys, _ = recurrent_pcn(y) ax[0].plot(std_err_ys, label=f'cov={cov}') ax[1].plot(rec_err_ys, label=f'cov={cov}') ax[0].legend() ax[0].set_title('MSE on observations: standard') ax[0].set_xlabel('training epochs') ax[0].set_ylabel('MSE') ax[0].set ax[1].legend() ax[1].set_title('MSE on observations: recurrent') ax[1].set_xlabel('training epochs') ax[0].set_ylabel('MSE') plt.show() # check prediction alignment to true # fig, ax = plt.subplots(1, 2, figsize=(10,4)) # y = data_generation(1) # std_err_ys, std_pred = standard_pcn(y) # rec_err_ys, rec_pred = recurrent_pcn(y) # ax[0].scatter(y[:,0], std_pred[:,0], alpha=0.3, label='standard') # ax[0].scatter(y[:,0], rec_pred[:,0], alpha=0.3, label='recurrent') # ax[0].plot([-5, 13], [-5,13], ls='--', label='identity', c='k') # ax[0].set_xlabel('True y[0]') # ax[0].set_ylabel('Prediction from model (Wx[0])') # ax[0].legend() # ax[1].scatter(y[:,1], std_pred[:,1], alpha=0.3, label='standard') # ax[1].scatter(y[:,1], rec_pred[:,1], alpha=0.3, label='recurrent') # ax[1].plot([-5, 13], [-5,13], ls='--', label='identity', c='k') # ax[1].set_xlabel('True y[1]') # ax[1].set_ylabel('Prediction from model (Wx[1])') # ax[1].legend() # plt.show()
C16Mftang/covariance-learning-PCNs
tests/hierarchical_PCNs.py
hierarchical_PCNs.py
py
6,699
python
en
code
3
github-code
13
14646824175
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, Table from . import metadata SourceTransactionJson = Table( "source_transactionjson", metadata, Column( "ach_credit_transfer", SourceTransactionAchCreditTransferData, ForeignKey("SourceTransactionAchCreditTransferData"), nullable=True, ), Column( "amount", Integer, comment="A positive integer in the smallest currency unit (that is, 100 cents for $1.00, or 1 for ¥1, Japanese Yen being a zero-decimal currency) representing the amount your customer has pushed to the receiver", ), Column( "chf_credit_transfer", SourceTransactionChfCreditTransferData, ForeignKey("SourceTransactionChfCreditTransferData"), nullable=True, ), Column( "created", Integer, comment="Time at which the object was created. Measured in seconds since the Unix epoch", ), Column( "currency", String, comment="Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies)", ), Column( "gbp_credit_transfer", SourceTransactionGbpCreditTransferData, ForeignKey("SourceTransactionGbpCreditTransferData"), nullable=True, ), Column("id", String, comment="Unique identifier for the object", primary_key=True), Column( "livemode", Boolean, comment="Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode", ), Column( "object", String, comment="String representing the object's type. 
Objects of the same type share the same value", ), Column( "paper_check", SourceTransactionPaperCheckData, ForeignKey("SourceTransactionPaperCheckData"), nullable=True, ), Column( "sepa_credit_transfer", SourceTransactionSepaCreditTransferData, ForeignKey("SourceTransactionSepaCreditTransferData"), nullable=True, ), Column( "source", String, comment="The ID of the source this transaction is attached to" ), Column( "status", String, comment="The status of the transaction, one of `succeeded`, `pending`, or `failed`", ), Column( "type", String, comment="The type of source this transaction is attached to" ), ) __all__ = ["source_transaction.json"]
offscale/stripe-sql
stripe_openapi/source_transaction.py
source_transaction.py
py
2,567
python
en
code
1
github-code
13
23697861696
import sys class Research: def __init__(self, path_to_the_file): self.file_path = path_to_the_file def file_reader(self): try: with open(self.file_path) as f: lines = f.readlines() if len(lines) < 2: raise Exception for line in lines: if len(line.split(',')) != 2: raise Exception return ''.join(lines) except Exception: return'Wrong struct to file' if __name__ == '__main__': if len(sys.argv) != 2: raise RuntimeError('Wrong number of arguments') file_path = sys.argv[1] r = Research(file_path) print(r.file_reader())
hrema/Python-Data-Science
day02/ex02/first_constructor.py
first_constructor.py
py
578
python
en
code
0
github-code
13
33935154530
import numpy as np class Road(): def __init__(self, DATA, param): ''' area_num 节点数量 Road_network 路网连接拓扑 Road_length 道路长度 Road_grade 道路等级 Road_capacity 道路通行能力 Road_flow 道路流量 Road_charge 道路充电需求 a_b_n 道路等级对应参数 ''' self.area_num = DATA['area_num'] self.Road_network = DATA['Road_network'] self.Road_num = len(self.Road_network) self.Road_length = DATA['Road_length'] self.Road_grade = DATA['Road_grade'] self.Road_capacity = DATA['Road_capacity'] self.Road_flow = np.zeros((self.Road_num, param.TT)) self.Road_charge = np.zeros((self.Road_num, param.TT)) self.a_b_n = np.array([[1.726,3.15,3.],[2.076,2.87,3.]])
rightyou/-
需求响应/EVA_SecneGeneration/Road_network.py
Road_network.py
py
878
python
en
code
0
github-code
13
30850429315
import time import pyupbit import datetime import schedule from fbprophet import Prophet import numpy as np access = "0hNyHckgUEQ0GpFb97xmHOtLGKx8AevNu7pzz2Vo" secret = "NtOH4y4d3G2I4gG13G1KVhAYhPck2xWMxLd6xYvd" def get_target_price(ticker, k): """변동성 돌파 전략으로 매수 목표가 조회""" df = pyupbit.get_ohlcv(ticker, interval="day", count=2) target_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k return target_price def get_start_time(ticker): """시작 시간 조회""" df = pyupbit.get_ohlcv(ticker, interval="day", count=1) start_time = df.index[0] return start_time def get_balance(ticker): """잔고 조회""" balances = upbit.get_balances() for b in balances: if b['currency'] == ticker: if b['balance'] is not None: return float(b['balance']) else: return 0 return 0 def get_current_price(ticker): """현재가 조회""" return pyupbit.get_orderbook(ticker=ticker)["orderbook_units"][0]["ask_price"] predicted_close_price = 0 predicted_high_price = 0 def predict_price(ticker): """Prophet으로 당일 종가, 최고가 가격 예측""" global predicted_close_price global predicted_high_price df = pyupbit.get_ohlcv(ticker, interval="minute60") df = df.reset_index() df['ds'] = df['index'] df['y'] = df['close'] data = df[['ds','y']] model = Prophet(daily_seasonality=10,weekly_seasonality=10,changepoint_prior_scale=0.3) model.fit(data) future = model.make_future_dataframe(periods=24, freq='H') forecast = model.predict(future) closeDf = forecast[forecast['ds'] == forecast.iloc[-1]['ds'].replace(hour=9)] if len(closeDf) == 0: closeDf = forecast[forecast['ds'] == data.iloc[-1]['ds'].replace(hour=9)] closeValue = closeDf['yhat'].values[0] MaxDf=forecast.iloc[-25:-1] MaxDf=MaxDf[::-1] highprice=list(MaxDf['yhat']) sort_high_price=sorted(highprice) predicted_high_price=sort_high_price[-1] predicted_close_price = closeValue predict_price("KRW-BTC") schedule.every().hour.do(lambda: predict_price("KRW-BTC")) def get_ma5d(ticker): """5일 이동 평균선 조회""" df = pyupbit.get_ohlcv(ticker, interval="day", count=5) ma5d = 
df['close'].rolling(5).mean().iloc[-1] return ma5d print(predicted_high_price) selling = False """떡락장 감지""" def DduckRack(ticker): global selling df1510 = pyupbit.get_ohlcv(ticker, interval="minute15", count=10) ma150m = df1510['close'].rolling(15).mean().iloc[-1] df155 = pyupbit.get_ohlcv(ticker, interval="minute15", count=5) ma75m = df155['close'].rolling(15).mean().iloc[-1] if ma150m > ma75m : selling = True else: selling = False DduckRack("KRW-BTC") print(selling) # 로그인 upbit = pyupbit.Upbit(access, secret) print("autotrade start") # 자동매매 시작 while True: try: now = datetime.datetime.now() start_time = get_start_time("KRW-BTC") end_time = start_time + datetime.timedelta(days=1) schedule.run_pending() if start_time < now < end_time - datetime.timedelta(seconds=600): target_price = get_target_price("KRW-BTC", 0.36) ma5d = get_ma5d("KRW-BTC") DduckRack("KRW-BTC") current_price = get_current_price("KRW-BTC") if target_price < current_price and current_price < predicted_close_price: krw = get_balance("KRW") if krw > 5000: upbit.buy_market_order("KRW-BTC", krw*0.9995) print("buy") if predicted_close_price *1.05 < current_price and selling == True : btc = get_balance("BTC") if btc > 5000/current_price: upbit.sell_market_order("KRW-BTC", btc*0.9995) time.sleep(1) else: btc = get_balance("BTC") if btc > 0.00008: upbit.sell_market_order("KRW-BTC", btc*0.9995) print("f") time.sleep(3) except Exception as e: print(e) time.sleep(3)
wlsdn031/aitrust
ai.py
ai.py
py
4,309
python
en
code
0
github-code
13
40113048879
from fastapi import APIRouter, Depends, HTTPException from sqlalchemy.orm import Session from database import get_db import schemas from crud.authentication import get_current_user import crud.users router = APIRouter( prefix="/users", tags=["users"], ) # ________________POST________________ @router.post("/", response_model=schemas.User) async def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)): db_user = crud.users.get_user_by_email(db, email=user.email) if db_user: raise HTTPException(status_code=400, detail="Email already registered") return crud.users.create_user(db=db, user=user) @router.post("/{user_id}/create_user_detail", response_model=schemas.UserDetail) async def create_user_detail(user_id: int, user_detail: schemas.UserDetailCreate, db: Session = Depends(get_db)): db_user_detail = crud.users.get_user_detail(db, user_id=user_id) if db_user_detail: raise HTTPException(status_code=400, detail="You can create user information only once") return crud.users.create_user_detail(db=db, user_detail=user_detail, user_id=user_id) # ________________GET_______________ @router.get("/me", response_model=schemas.User) async def read_users_me(current_user: schemas.User = Depends(get_current_user)): return current_user
forza111/fastapi_simple_api
routers/users.py
users.py
py
1,317
python
en
code
0
github-code
13
18989205094
#Python 3 Example of how to use https://macvendors.co to lookup vendor from mac address import pandas as pd url = "https://macvendors.co/api/00:00:00:00:00:00/csv" #url = "https://macvendors.co/api/00:B2:E8:00:00:00/csv" df = pd.read_csv(url) txt = str(len(df.columns)) print(txt) #Fix: json object must be str, not 'bytes' """ret = "unknown" reader = codecs.getreader("utf-8") obj = json.load(reader(response)) if (len(obj['result']) > 1): ret = obj['result']['company'] print (ret) #print(ret) #Print company name #print (+"<br/>"); #print company address #print (obj['result']['address']); """
marzam/python-script-occupation
mac-vendor.py
mac-vendor.py
py
606
python
en
code
0
github-code
13
36684347796
from selenium.webdriver.common.by import By from selenium import webdriver from bs4 import BeautifulSoup import requests import csv from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException import re from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC import pandas as pd #specify driver path # DRIVER_PATH = 'C:\webdrivers\chromedriver.exe' # driver = webdriver.Chrome(executable_path = DRIVER_PATH) # driver.implicitly_wait(0) links=[] title = [] Company=[] Location=[] level=[] domaine = [] contract=[] Requirements=[] Experience=[] url=[] df = pd.DataFrame(columns=["Title", "Company","Location","Experience","Studies-level","Domain","Requirements","Contract","Links","Date"]) for i in range(0,20): #driver.get('https://ma.indeed.com/jobs?q=stage%20informatique&l=Maroc&start=' + str(i)) data = requests.get('https://www.marocannonces.com/maroc/offres-emploi-domaine-informatique-multimedia-internet-b309.html?f_3=Informatique+%2F+Multim%C3%A9dia+%2F+Internet&pge=' + str(i)) soup = BeautifulSoup(data.text, "lxml") job_titles=soup.find_all("div",{"class":"holder"}) for j in range(len(job_titles)) : links.append(job_titles[j].find("a").attrs["href"]) print('+link') for link in links: url='https://www.marocannonces.com/'+link result = requests.get('https://www.marocannonces.com/'+link) src = result.content soup = BeautifulSoup(src, 'lxml') try: title = soup.find("h1").text except: title="null" try: Company = soup.find('ul',class_="extraQuestionName").find_all('li')[3].find_all('a')[0].text.strip() except: Company = 'null' try: contract = soup.find('ul', class_="extraQuestionName").find_all('li')[2].find_all('a')[0].text.strip() except: contract = 'null' try: Location = soup.find('ul', class_="info-holder").find_all('li')[0].find_all('a')[0].text.strip() except: Location = 'null' try: level = soup.find('ul', 
class_="extraQuestionName").find_all('li')[5].find_all('a')[0].text.strip() except: level = 'null' try: domaine = soup.find('ul', class_="extraQuestionName").find_all('li')[1].find_all('a')[0].text.strip() except: domaine = 'null' try: Requirements = soup.find('div',class_="description desccatemploi").find('div',class_="block").text.strip() except: Requirements = 'null' try: Experience = "null" except: Experience = 'null' try: date=soup.find('title').text date=re.sub(r'.*- ', ' ', date).replace(']',"").strip() except: date='null' df = df.append({"Title": title, "Company": Company, "Location": Location, "Experience":Experience,"Studies-level": level, "Domain": domaine,"Requirements":Requirements, "Contract": contract,"Links": url,"Date":date}, ignore_index=True) print('+job') print(len(df)) # df.to_csv("./csvFiles/marocannonces.csv", index=False)
imaneelbakk/matching_resume
MAROCANNONCESscraping.py
MAROCANNONCESscraping.py
py
3,154
python
en
code
0
github-code
13
28176017860
import json import datetime import calendar import logging import itertools import threading import json from requests_futures.sessions import FuturesSession import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from concurrent.futures import ThreadPoolExecutor # from ripe.atlas.cousteau import AtlasResultsRequest from progress.bar import Bar # Semaphore used to control the number of buffered results from the pool # semaphore = threading.Semaphore(4) def requests_retry_session( retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None, max_workers=4, ): session = session or FuturesSession(max_workers=max_workers) retry = Retry( total=retries, read=retries, connect=retries, backoff_factor=backoff_factor, status_forcelist=status_forcelist, ) adapter = HTTPAdapter(max_retries=retry) session.mount('http://', adapter) session.mount('https://', adapter) return session def worker_task(resp, *args, **kwargs): """Process json in background""" try: resp.data = resp.json() except json.decoder.JSONDecodeError: logging.error("Error while reading Atlas json data.\n") resp.data = {} def cousteau_on_steroid(params, retry=3): url = "https://atlas.ripe.net/api/v2/measurements/{0}/results" queries = [] probes = [None] if params["probe_ids"]: probes = [params["probe_ids"][x:x+20] for x in range(0, len(params["probe_ids"]), 20)] session = requests_retry_session() for msm in params["msm_id"]: req_param = { "start": int(calendar.timegm(params["start"].timetuple())), "stop": int(calendar.timegm(params["stop"].timetuple())), } for pb in probes: if pb is not None: req_param["probe_ids"] = ",".join([str(i) for i in pb]) queries.append( session.get(url=url.format(msm), params=req_param, hooks={ 'response': worker_task, }) ) for query in queries: try: resp = query.result() yield resp except requests.exceptions.ChunkedEncodingError: logging.error("Could not retrieve traceroutes for {}".format(query)) def 
get_results(param, retry=3): traceroute2timetrack, kwargs = param # if retry==3 : # semaphore.acquire() # logging.info("Requesting results for {}".format(kwargs)) # success_list, results_list = AtlasResultsRequest(**kwargs).create() results_list = cousteau_on_steroid(kwargs) # logging.info("Server replied {}".format(kwargs)) for resp in results_list: if resp.ok: # logging.info("Received {} traceroutes".format(len(resp.data))) yield map(traceroute2timetrack, resp.data) else: logging.error("All retries failed for {}".format(resp.url)) # logging.warning("Atlas request failed for {}".format(kwargs)) # if retry > 0: # return get_results(param, retry-1) # else: # logging.error("All retries failed for {}".format(kwargs)) # return class Reader(): def __init__(self, start, end, timetrack_converter, msm_ids=[5001,5004,5005], probe_ids=[1,2,3,4,5,6,7,8], chunk_size=900, config=None): self.pool = None # self.semaphore = None self.msm_ids = msm_ids self.probe_ids = probe_ids self.start = start self.end = end self.chunk_size = chunk_size self.params = [] self.timetrack_converter = timetrack_converter self.bar = None def __enter__(self): self.params = [] logging.warn("creating the pool") self.pool = ThreadPoolExecutor(max_workers=4) window_start = self.start while window_start+datetime.timedelta(seconds=self.chunk_size) <= self.end: kwargs = { "msm_id": self.msm_ids, "start": window_start, "stop": window_start+datetime.timedelta(seconds=self.chunk_size), "probe_ids": self.probe_ids, } self.params.append( [ self.timetrack_converter.traceroute2timetrack, kwargs]) window_start += datetime.timedelta(seconds=self.chunk_size) logging.warn("pool ready") return self def __exit__(self, type, value, traceback): self.close() def read(self): self.bar = Bar("Processing", max=len(self.params), suffix='%(percent)d%%') # m = map(get_results, self.params) msm_results = self.pool.map(get_results, self.params) for res in msm_results: for tracks in res: yield from tracks # semaphore.release() 
self.bar.next() def close(self): if self.pool is not None: self.pool.shutdown() self.bar.finish() return False
InternetHealthReport/raclette
raclette/atlasrestreader.py
atlasrestreader.py
py
5,135
python
en
code
8
github-code
13
33961492235
import requests from flask import Flask, request app = Flask(__name__) @app.route('/') def get_players_info(): result = "" if not request.args.get("count") or not request.args.get("players"): return "no players or count</br>" players_count = int(request.args.get("count")) players = request.args.get("players").split(",", players_count) for pl in players: acc_id = int(pl) & 0xFFFFFFFF parsed = requests.get(f"https://api.opendota.com/api/players/{str(acc_id)}/heroes").json() first = parsed[0] if first: hero_id = first["hero_id"] try: winrate = round(100*(first["win"]/first["games"]),1) except: continue result += f"acc: {pl}, wr: {str(winrate)}%</br>" return result if __name__ == '__main__': app.run()
F0RQU1N/dota_new_hack
!dota_new_hack/core/overwolf_server.py
overwolf_server.py
py
848
python
en
code
10
github-code
13
8422785657
x = int(input("Gib die erste Zahl ein: ")) y = int(input("Gib die zweite Zahl ein: ")) operation = input("Wähle eine Rechenart (+, -, *, /): ") def add(x, y): print (x + y) def subtract(x, y): print (x - y) def multiplication(x, y): print (x * y) def division(x, y): print (x / y) if operation == "+": add(x, y) elif operation == "-": subtract(x, y) elif operation == "*": multiplication(x, y) elif operation == "/": division(x, y) else: print("Diese Eingabe ist ungültig.")
katpol/homework
calculator_functions_hw5.py
calculator_functions_hw5.py
py
525
python
de
code
0
github-code
13
7260342501
# Two players are playing a game of Tower Breakers! Player always moves first, and both players always play optimally.The rules of the game are as follows: # Initially there are towers. # Each tower is of height . # The players move in alternating turns. # In each turn, a player can choose a tower of height and reduce its height to , where and evenly divides . # If the current player is unable to make a move, they lose the game. # Given the values of and , determine which player will win. If the first player wins, return . Otherwise, return . # Example. # There are towers, each units tall. Player has a choice of two moves: # - remove pieces from a tower to leave as # - remove pieces to leave # Let Player remove . Now the towers are and units tall. # Player matches the move. Now the towers are both units tall. # Now Player has only one move. # Player removes pieces leaving . Towers are and units tall. # Player matches again. Towers are both unit tall. # Player has no move and loses. Return . # Function Description # Complete the towerBreakers function in the editor below. # towerBreakers has the following paramter(s): # int n: the number of towers # int m: the height of each tower # Returns # int: the winner of the game # Input Format # The first line contains a single integer , the number of test cases. # Each of the next lines describes a test case in the form of space-separated integers, and . # Constraints # Sample Input # STDIN Function # ----- -------- # 2 t = 2 # 2 2 n = 2, m = 2 # 1 4 n = 1, m = 4 # Sample Output # 2 # 1 # Explanation # We'll refer to player as and player as # In the first test case, chooses one of the two towers and reduces it to . Then reduces the remaining tower to a height of . As both towers now have height , cannot make a move so is the winner. # In the second test case, there is only one tower of height . can reduce it to a height of either or . chooses as both players always choose optimally. Because has no possible move, wins. 
def towerBreakersBAD(n, m): towers = {} for i in range(n): towers[i] = m print('towers = ', towers) turn = 1 play = True while play == True: for i in range (len(towers.keys())): chop = 0 if towers[i] > 1: ht = towers[i] print('tower# ', i, ' = ', ht) chop = ht//2 print('chop= ', chop) if ht % chop == 0: towers[i] = chop turn = -turn else: while chop > 1: chop -= 1 if ht % chop == 0: towers[i] = chop turn = -turn break else: towers[i] = 1 turn = -turn break else: break break # while chop > 1: # if towers[i] % chop == 1: # chop -= 1 # else: # towers[i] = chop # turn = -turn # break # else: # turn = -turn # play = False # break # winner = 0 # if turn == 1: # winner = 1 # else: # winner = 2 # return winner return ex1 = [2, 6] test1 = towerBreakersBAD(ex1[0], ex1[1]) print(test1) # All of the above is TRASH - it all comes down to game theory # The solution comes from thinking at the end/what is needed to win and then working backwards. A player wins if the opponent has no moves left, and that occurs when the height of the last tower is 1. # Since both players are playing optimally, they will make the same moves. If there is one move left, then the first player wins because he/she takes the move and the second player is left with no moves. If there are two moves left, the first player takes a move, then the second player and the first player is left with no moves. We can extend this thinking to any number, so any even numbers of plays makes second player win and any odd number makes first player win. # The question seems complex, but: # if tower%2 == 1: # p1 wins # else: # p2 wins. 
# SPECIAL CASES: # if there is only 1 tower (n == 1), player 1 always wins bc can reduce the tower to 1 block every time (since every number can be divided by 1) # if tower heights are 1 (m == 1), player 2 always wins bc player 1 can't do anything # when there are even number of towers, player 1 will always lose bc player 2 will match/mirror previous play # when there are odd number of towers, player 2 will always lose bc player 1 can come back and leave player 2 w no moves def towerBreakers(n, m): if m == 1 or n % 2 == 0: return 2 else: return 1
siobhankb/recursion-practice
tower-breakers.py
tower-breakers.py
py
5,027
python
en
code
0
github-code
13
8119076492
from __future__ import print_function from __future__ import division import di_i2c import time # Constants SYSRANGE_START = 0x00 SYSTEM_THRESH_HIGH = 0x0C SYSTEM_THRESH_LOW = 0x0E SYSTEM_SEQUENCE_CONFIG = 0x01 SYSTEM_RANGE_CONFIG = 0x09 SYSTEM_INTERMEASUREMENT_PERIOD = 0x04 SYSTEM_INTERRUPT_CONFIG_GPIO = 0x0A GPIO_HV_MUX_ACTIVE_HIGH = 0x84 SYSTEM_INTERRUPT_CLEAR = 0x0B RESULT_INTERRUPT_STATUS = 0x13 RESULT_RANGE_STATUS = 0x14 RESULT_CORE_AMBIENT_WINDOW_EVENTS_RTN = 0xBC RESULT_CORE_RANGING_TOTAL_EVENTS_RTN = 0xC0 RESULT_CORE_AMBIENT_WINDOW_EVENTS_REF = 0xD0 RESULT_CORE_RANGING_TOTAL_EVENTS_REF = 0xD4 RESULT_PEAK_SIGNAL_RATE_REF = 0xB6 ALGO_PART_TO_PART_RANGE_OFFSET_MM = 0x28 I2C_SLAVE_DEVICE_ADDRESS = 0x8A MSRC_CONFIG_CONTROL = 0x60 PRE_RANGE_CONFIG_MIN_SNR = 0x27 PRE_RANGE_CONFIG_VALID_PHASE_LOW = 0x56 PRE_RANGE_CONFIG_VALID_PHASE_HIGH = 0x57 PRE_RANGE_MIN_COUNT_RATE_RTN_LIMIT = 0x64 FINAL_RANGE_CONFIG_MIN_SNR = 0x67 FINAL_RANGE_CONFIG_VALID_PHASE_LOW = 0x47 FINAL_RANGE_CONFIG_VALID_PHASE_HIGH = 0x48 FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT = 0x44 PRE_RANGE_CONFIG_SIGMA_THRESH_HI = 0x61 PRE_RANGE_CONFIG_SIGMA_THRESH_LO = 0x62 PRE_RANGE_CONFIG_VCSEL_PERIOD = 0x50 PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI = 0x51 PRE_RANGE_CONFIG_TIMEOUT_MACROP_LO = 0x52 SYSTEM_HISTOGRAM_BIN = 0x81 HISTOGRAM_CONFIG_INITIAL_PHASE_SELECT = 0x33 HISTOGRAM_CONFIG_READOUT_CTRL = 0x55 FINAL_RANGE_CONFIG_VCSEL_PERIOD = 0x70 FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI = 0x71 FINAL_RANGE_CONFIG_TIMEOUT_MACROP_LO = 0x72 CROSSTALK_COMPENSATION_PEAK_RATE_MCPS = 0x20 MSRC_CONFIG_TIMEOUT_MACROP = 0x46 SOFT_RESET_GO2_SOFT_RESET_N = 0xBF IDENTIFICATION_MODEL_ID = 0xC0 IDENTIFICATION_REVISION_ID = 0xC2 OSC_CALIBRATE_VAL = 0xF8 GLOBAL_CONFIG_VCSEL_WIDTH = 0x32 GLOBAL_CONFIG_SPAD_ENABLES_REF_0 = 0xB0 GLOBAL_CONFIG_SPAD_ENABLES_REF_1 = 0xB1 GLOBAL_CONFIG_SPAD_ENABLES_REF_2 = 0xB2 GLOBAL_CONFIG_SPAD_ENABLES_REF_3 = 0xB3 GLOBAL_CONFIG_SPAD_ENABLES_REF_4 = 0xB4 GLOBAL_CONFIG_SPAD_ENABLES_REF_5 = 0xB5 
GLOBAL_CONFIG_REF_EN_START_SELECT = 0xB6 DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD = 0x4E DYNAMIC_SPAD_REF_EN_START_OFFSET = 0x4F POWER_MANAGEMENT_GO1_POWER_FORCE = 0x80 VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV = 0x89 ALGO_PHASECAL_LIM = 0x30 ALGO_PHASECAL_CONFIG_TIMEOUT = 0x30 ADDRESS_DEFAULT = 0x29 class VL53L0X(object): """Drivers for VL53L0X laser distance sensor""" VcselPeriodPreRange = 0 VcselPeriodFinalRange = 1 # "global variables" io_timeout = 0 did_timeout = False # The I2C address is software programmable (volatile), and defaults to 0x52 >> 1 = 0x29. # __init__ changes the address (default to 0x54 >> 1 = 0x2A) to prevent conflicts. ADDRESS = ADDRESS_DEFAULT def __init__(self, address = 0x2A, timeout = 0.5, bus = "RPI_1SW"): self.i2c_bus = di_i2c.DI_I2C(bus = bus, address = address) try: self.i2c_bus.write_reg_8(SOFT_RESET_GO2_SOFT_RESET_N, 0x00) # try resetting from 0x2A time.sleep(0.002) except IOError: pass self.ADDRESS = ADDRESS_DEFAULT self.i2c_bus.set_address(self.ADDRESS) self.i2c_bus.write_reg_8(SOFT_RESET_GO2_SOFT_RESET_N, 0x00) # reset default 0x29 time.sleep(0.005) # delay added because threaded instantiation would fail,even with mutex in place self.i2c_bus.write_reg_8(SOFT_RESET_GO2_SOFT_RESET_N, 0x01) # release reset time.sleep(0.005) # delay added because threaded instantiation would fail,even with mutex in place self.set_address(address) self.init() # initialize the sensor self.set_timeout(timeout) # set the timeout def set_address(self, address): address &= 0x7f try: self.i2c_bus.write_reg_8(I2C_SLAVE_DEVICE_ADDRESS, address) self.ADDRESS = address self.i2c_bus.set_address(self.ADDRESS) except IOError: self.i2c_bus.set_address(address) self.i2c_bus.write_reg_8(I2C_SLAVE_DEVICE_ADDRESS, address) self.ADDRESS = address self.i2c_bus.set_address(self.ADDRESS) def init(self): self.i2c_bus.write_reg_8(VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV, (self.i2c_bus.read_8(VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV) | 0x01)) # set bit 0 # "Set I2C standard mode" 
self.i2c_bus.write_reg_8(0x88, 0x00) self.i2c_bus.write_reg_8(0x80, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x00, 0x00) self.stop_variable = self.i2c_bus.read_8(0x91) self.i2c_bus.write_reg_8(0x00, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x80, 0x00) # disable SIGNAL_RATE_MSRC (bit 1) and SIGNAL_RATE_PRE_RANGE (bit 4) limit checks self.i2c_bus.write_reg_8(MSRC_CONFIG_CONTROL, (self.i2c_bus.read_8(MSRC_CONFIG_CONTROL) | 0x12)) # set final range signal rate limit to 0.25 MCPS (million counts per second) self.set_signal_rate_limit(0.25) self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0xFF) # VL53L0X_DataInit() end # VL53L0X_StaticInit() begin spad_count, spad_type_is_aperture, success = self.get_spad_info() if not success: return False # The SPAD map (RefGoodSpadMap) is read by VL53L0X_get_info_from_device() in # the API, but the same data seems to be more easily readable from # GLOBAL_CONFIG_SPAD_ENABLES_REF_0 through _6, so read it from there ref_spad_map = self.i2c_bus.read_list(GLOBAL_CONFIG_SPAD_ENABLES_REF_0, 6) # -- VL53L0X_set_reference_spads() begin (assume NVM values are valid) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(DYNAMIC_SPAD_REF_EN_START_OFFSET, 0x00) self.i2c_bus.write_reg_8(DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD, 0x2C) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(GLOBAL_CONFIG_REF_EN_START_SELECT, 0xB4) if spad_type_is_aperture: first_spad_to_enable = 12 # 12 is the first aperture spad else: first_spad_to_enable = 0 spads_enabled = 0 for i in range(48): if i < first_spad_to_enable or spads_enabled == spad_count: # This bit is lower than the first one that should be enabled, or # (reference_spad_count) bits have already been enabled, so zero this bit ref_spad_map[int(i / 8)] &= ~(1 << (i % 8)) elif (ref_spad_map[int(i / 8)] >> (i % 8)) & 0x1: spads_enabled += 1 self.i2c_bus.write_reg_list(GLOBAL_CONFIG_SPAD_ENABLES_REF_0, ref_spad_map) # -- 
VL53L0X_set_reference_spads() end # -- VL53L0X_load_tuning_settings() begin # DefaultTuningSettings from vl53l0x_tuning.h self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x00, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x09, 0x00) self.i2c_bus.write_reg_8(0x10, 0x00) self.i2c_bus.write_reg_8(0x11, 0x00) self.i2c_bus.write_reg_8(0x24, 0x01) self.i2c_bus.write_reg_8(0x25, 0xFF) self.i2c_bus.write_reg_8(0x75, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x4E, 0x2C) self.i2c_bus.write_reg_8(0x48, 0x00) self.i2c_bus.write_reg_8(0x30, 0x20) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x30, 0x09) self.i2c_bus.write_reg_8(0x54, 0x00) self.i2c_bus.write_reg_8(0x31, 0x04) self.i2c_bus.write_reg_8(0x32, 0x03) self.i2c_bus.write_reg_8(0x40, 0x83) self.i2c_bus.write_reg_8(0x46, 0x25) self.i2c_bus.write_reg_8(0x60, 0x00) self.i2c_bus.write_reg_8(0x27, 0x00) self.i2c_bus.write_reg_8(0x50, 0x06) self.i2c_bus.write_reg_8(0x51, 0x00) self.i2c_bus.write_reg_8(0x52, 0x96) self.i2c_bus.write_reg_8(0x56, 0x08) self.i2c_bus.write_reg_8(0x57, 0x30) self.i2c_bus.write_reg_8(0x61, 0x00) self.i2c_bus.write_reg_8(0x62, 0x00) self.i2c_bus.write_reg_8(0x64, 0x00) self.i2c_bus.write_reg_8(0x65, 0x00) self.i2c_bus.write_reg_8(0x66, 0xA0) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x22, 0x32) self.i2c_bus.write_reg_8(0x47, 0x14) self.i2c_bus.write_reg_8(0x49, 0xFF) self.i2c_bus.write_reg_8(0x4A, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x7A, 0x0A) self.i2c_bus.write_reg_8(0x7B, 0x00) self.i2c_bus.write_reg_8(0x78, 0x21) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x23, 0x34) self.i2c_bus.write_reg_8(0x42, 0x00) self.i2c_bus.write_reg_8(0x44, 0xFF) self.i2c_bus.write_reg_8(0x45, 0x26) self.i2c_bus.write_reg_8(0x46, 0x05) self.i2c_bus.write_reg_8(0x40, 0x40) self.i2c_bus.write_reg_8(0x0E, 0x06) self.i2c_bus.write_reg_8(0x20, 0x1A) 
self.i2c_bus.write_reg_8(0x43, 0x40) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x34, 0x03) self.i2c_bus.write_reg_8(0x35, 0x44) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x31, 0x04) self.i2c_bus.write_reg_8(0x4B, 0x09) self.i2c_bus.write_reg_8(0x4C, 0x05) self.i2c_bus.write_reg_8(0x4D, 0x04) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x44, 0x00) self.i2c_bus.write_reg_8(0x45, 0x20) self.i2c_bus.write_reg_8(0x47, 0x08) self.i2c_bus.write_reg_8(0x48, 0x28) self.i2c_bus.write_reg_8(0x67, 0x00) self.i2c_bus.write_reg_8(0x70, 0x04) self.i2c_bus.write_reg_8(0x71, 0x01) self.i2c_bus.write_reg_8(0x72, 0xFE) self.i2c_bus.write_reg_8(0x76, 0x00) self.i2c_bus.write_reg_8(0x77, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x0D, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x80, 0x01) self.i2c_bus.write_reg_8(0x01, 0xF8) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x8E, 0x01) self.i2c_bus.write_reg_8(0x00, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x00) self.i2c_bus.write_reg_8(0x80, 0x00) # -- VL53L0X_load_tuning_settings() end # "Set interrupt config to new sample ready" # -- VL53L0X_SetGpioConfig() begin self.i2c_bus.write_reg_8(SYSTEM_INTERRUPT_CONFIG_GPIO, 0x04) self.i2c_bus.write_reg_8(GPIO_HV_MUX_ACTIVE_HIGH, self.i2c_bus.read_8(GPIO_HV_MUX_ACTIVE_HIGH) & ~0x10) # active low self.i2c_bus.write_reg_8(SYSTEM_INTERRUPT_CLEAR, 0x01) # -- VL53L0X_SetGpioConfig() end self.measurement_timing_budget_us = self.get_measurement_timing_budget() # "Disable MSRC and TCC by default" # MSRC = Minimum Signal Rate Check # TCC = Target CentreCheck # -- VL53L0X_SetSequenceStepEnable() begin self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0xE8) # -- VL53L0X_SetSequenceStepEnable() end # "Recalculate timing budget" self.set_measurement_timing_budget(self.measurement_timing_budget_us) # VL53L0X_StaticInit() end # VL53L0X_PerformRefCalibration() begin 
(VL53L0X_perform_ref_calibration()) # -- VL53L0X_perform_vhv_calibration() begin self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0x01) if not self.perform_single_ref_calibration(0x40): return False # -- VL53L0X_perform_vhv_calibration() end # -- VL53L0X_perform_phase_calibration() begin self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0x02) if not self.perform_single_ref_calibration(0x00): return False # -- VL53L0X_perform_phase_calibration() end # "restore the previous Sequence Config" self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0xE8) # VL53L0X_PerformRefCalibration() end return True # duplicate method # def set_signal_rate_limit(self, limit_Mcps): # if (limit_Mcps < 0 or limit_Mcps > 511.99): # return False # # Q9.7 fixed point format (9 integer bits, 7 fractional bits) # self.i2c_bus.write_reg_16(FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT, int(limit_Mcps * (1 << 7))) # return True # Get reference SPAD (single photon avalanche diode) count and type # based on VL53L0X_get_info_from_device(), # but only gets reference SPAD count and type def get_spad_info(self): self.i2c_bus.write_reg_8(0x80, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x00, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x06) self.i2c_bus.write_reg_8(0x83, self.i2c_bus.read_8(0x83) | 0x04) self.i2c_bus.write_reg_8(0xFF, 0x07) self.i2c_bus.write_reg_8(0x81, 0x01) self.i2c_bus.write_reg_8(0x80, 0x01) self.i2c_bus.write_reg_8(0x94, 0x6b) self.i2c_bus.write_reg_8(0x83, 0x00) self.start_timeout() while (self.i2c_bus.read_8(0x83) == 0x00): if (self.check_timeout_expired()): return 0, 0, False self.i2c_bus.write_reg_8(0x83, 0x01) tmp = self.i2c_bus.read_8(0x92) count = tmp & 0x7f type_is_aperture = (tmp >> 7) & 0x01 self.i2c_bus.write_reg_8(0x81, 0x00) self.i2c_bus.write_reg_8(0xFF, 0x06) self.i2c_bus.write_reg_8(0x83, self.i2c_bus.read_8(0x83) & ~0x04) self.i2c_bus.write_reg_8(0xFF, 0x01) self.i2c_bus.write_reg_8(0x00, 0x01) self.i2c_bus.write_reg_8(0xFF, 0x00) 
self.i2c_bus.write_reg_8(0x80, 0x00) return count, type_is_aperture, True # Check if timeout is enabled (set to nonzero value) and has expired def check_timeout_expired(self): if(self.io_timeout > 0 and (time.time() - self.timeout_start) > self.io_timeout): return True return False # Record the current time to check an upcoming timeout against def start_timeout(self): self.timeout_start = time.time() #SequenceStepEnables = {"tcc":0, "msrc":0, "dss":0, "pre_range":0, "final_range":0} #SequenceStepTimeouts = {"pre_range_vcsel_period_pclks":0, "final_range_vcsel_period_pclks":0, "msrc_dss_tcc_mclks":0, "pre_range_mclks":0, "final_range_mclks":0, "msrc_dss_tcc_us":0, "pre_range_us":0, "final_range_us":0} # Get the measurement timing budget in microseconds # based on VL53L0X_get_measurement_timing_budget_micro_seconds() # in us def get_measurement_timing_budget(self): StartOverhead = 1910 # note that this is different than the value in set_ EndOverhead = 960 MsrcOverhead = 660 TccOverhead = 590 DssOverhead = 690 PreRangeOverhead = 660 FinalRangeOverhead = 550 # "Start and end overhead times always present" budget_us = StartOverhead + EndOverhead enables = self.get_sequence_step_enables() timeouts = self.get_sequence_step_timeouts(enables["pre_range"]) if (enables["tcc"]): budget_us += (timeouts["msrc_dss_tcc_us"] + TccOverhead) if (enables["dss"]): budget_us += 2 * (timeouts["msrc_dss_tcc_us"] + DssOverhead) elif (enables["msrc"]): budget_us += (timeouts["msrc_dss_tcc_us"] + MsrcOverhead) if (enables["pre_range"]): budget_us += (timeouts["pre_range_us"] + PreRangeOverhead) if (enables["final_range"]): budget_us += (timeouts["final_range_us"] + FinalRangeOverhead) self.measurement_timing_budget_us = budget_us # store for internal reuse return budget_us # Get sequence step enables # based on VL53L0X_get_sequence_step_enables() def get_sequence_step_enables(self): sequence_config = self.i2c_bus.read_8(SYSTEM_SEQUENCE_CONFIG) SequenceStepEnables = {"tcc":0, "msrc":0, 
"dss":0, "pre_range":0, "final_range":0} SequenceStepEnables["tcc"] = (sequence_config >> 4) & 0x1 SequenceStepEnables["dss"] = (sequence_config >> 3) & 0x1 SequenceStepEnables["msrc"] = (sequence_config >> 2) & 0x1 SequenceStepEnables["pre_range"] = (sequence_config >> 6) & 0x1 SequenceStepEnables["final_range"] = (sequence_config >> 7) & 0x1 return SequenceStepEnables # Get sequence step timeouts # based on get_sequence_step_timeout(), # but gets all timeouts instead of just the requested one, and also stores # intermediate values def get_sequence_step_timeouts(self, pre_range): SequenceStepTimeouts = {"pre_range_vcsel_period_pclks":0, "final_range_vcsel_period_pclks":0, "msrc_dss_tcc_mclks":0, "pre_range_mclks":0, "final_range_mclks":0, "msrc_dss_tcc_us":0, "pre_range_us":0, "final_range_us":0} SequenceStepTimeouts["pre_range_vcsel_period_pclks"] = self.get_vcsel_pulse_period(self.VcselPeriodPreRange) SequenceStepTimeouts["msrc_dss_tcc_mclks"] = self.i2c_bus.read_8(MSRC_CONFIG_TIMEOUT_MACROP) + 1 SequenceStepTimeouts["msrc_dss_tcc_us"] = self.timeout_mclks_to_microseconds(SequenceStepTimeouts["msrc_dss_tcc_mclks"], SequenceStepTimeouts["pre_range_vcsel_period_pclks"]) SequenceStepTimeouts["pre_range_mclks"] = self.decode_timeout(self.i2c_bus.read_16(PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI)) SequenceStepTimeouts["pre_range_us"] = self.timeout_mclks_to_microseconds(SequenceStepTimeouts["pre_range_mclks"], SequenceStepTimeouts["pre_range_vcsel_period_pclks"]) SequenceStepTimeouts["final_range_vcsel_period_pclks"] = self.get_vcsel_pulse_period(self.VcselPeriodFinalRange) SequenceStepTimeouts["final_range_mclks"] = self.decode_timeout(self.i2c_bus.read_16(FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI)) if (pre_range): SequenceStepTimeouts["final_range_mclks"] -= SequenceStepTimeouts["pre_range_mclks"] SequenceStepTimeouts["final_range_us"] = self.timeout_mclks_to_microseconds(SequenceStepTimeouts["final_range_mclks"], SequenceStepTimeouts["final_range_vcsel_period_pclks"]) return 
SequenceStepTimeouts # Decode VCSEL (vertical cavity surface emitting laser) pulse period in PCLKs # from register value # based on VL53L0X_decode_vcsel_period() def decode_vcsel_period(self, reg_val): return (((reg_val) + 1) << 1) # Get the VCSEL pulse period in PCLKs for the given period type. # based on VL53L0X_get_vcsel_pulse_period() def get_vcsel_pulse_period(self, type): if type == self.VcselPeriodPreRange: return self.decode_vcsel_period(self.i2c_bus.read_8(PRE_RANGE_CONFIG_VCSEL_PERIOD)) elif type == self.VcselPeriodFinalRange: return self.decode_vcsel_period(self.i2c_bus.read_8(FINAL_RANGE_CONFIG_VCSEL_PERIOD)) else: return 255 # Convert sequence step timeout from MCLKs to microseconds with given VCSEL period in PCLKs # based on VL53L0X_calc_timeout_us() def timeout_mclks_to_microseconds(self, timeout_period_mclks, vcsel_period_pclks): macro_period_ns = self.calc_macro_period(vcsel_period_pclks) return ((timeout_period_mclks * macro_period_ns) + (macro_period_ns / 2)) / 1000 # Calculate macro period in *nanoseconds* from VCSEL period in PCLKs # based on VL53L0X_calc_macro_period_ps() # PLL_period_ps = 1655; macro_period_vclks = 2304 def calc_macro_period(self, vcsel_period_pclks): return (((2304 * vcsel_period_pclks * 1655) + 500) / 1000) # Decode sequence step timeout in MCLKs from register value # based on VL53L0X_decode_timeout() # Note: the original function returned a uint32_t, but the return value is #always stored in a uint16_t. def decode_timeout(self, reg_val): # format: "(LSByte * 2^MSByte) + 1" return ((reg_val & 0x00FF) << ((reg_val & 0xFF00) >> 8)) + 1 # Set the measurement timing budget in microseconds, which is the time allowed # for one measurement the ST API and this library take care of splitting the # timing budget among the sub-steps in the ranging sequence. A longer timing # budget allows for more accurate measurements. 
Increasing the budget by a # factor of N decreases the range measurement standard deviation by a factor of # sqrt(N). Defaults to about 33 milliseconds the minimum is 20 ms. # based on VL53L0X_set_measurement_timing_budget_micro_seconds() def set_measurement_timing_budget(self, budget_us): StartOverhead = 1320 # note that this is different than the value in get_ EndOverhead = 960 MsrcOverhead = 660 TccOverhead = 590 DssOverhead = 690 PreRangeOverhead = 660 FinalRangeOverhead = 550 MinTimingBudget = 20000 if budget_us < MinTimingBudget: return False used_budget_us = StartOverhead + EndOverhead enables = self.get_sequence_step_enables() timeouts = self.get_sequence_step_timeouts(enables["pre_range"]) if enables["tcc"]: used_budget_us += (timeouts["msrc_dss_tcc_us"] + TccOverhead) if enables["dss"]: used_budget_us += 2 * (timeouts["msrc_dss_tcc_us"] + DssOverhead) elif enables["msrc"]: used_budget_us += (timeouts["msrc_dss_tcc_us"] + MsrcOverhead) if enables["pre_range"]: used_budget_us += (timeouts["pre_range_us"] + PreRangeOverhead) if enables["final_range"]: used_budget_us += FinalRangeOverhead # "Note that the final range timeout is determined by the timing # budget and the sum of all other timeouts within the sequence. # If there is no room for the final range timeout, then an error # will be set. Otherwise the remaining time will be applied to # the final range." if used_budget_us > budget_us: # "Requested timeout too big." return False final_range_timeout_us = budget_us - used_budget_us # set_sequence_step_timeout() begin # (SequenceStepId == VL53L0X_SEQUENCESTEP_FINAL_RANGE) # "For the final range timeout, the pre-range timeout # must be added. To do this both final and pre-range # timeouts must be expressed in macro periods MClks # because they have different vcsel periods." 
        # --- tail of set_measurement_timing_budget(); its `def` line is above this
        # chunk.  NOTE(review): original indentation was lost in this view; the
        # nesting of this tail relative to the enclosing conditionals should be
        # confirmed against the upstream Pololu/ST port.
        final_range_timeout_mclks = self.timeout_microseconds_to_mclks(final_range_timeout_us, timeouts["final_range_vcsel_period_pclks"])

        if enables["pre_range"]:
            final_range_timeout_mclks += timeouts["pre_range_mclks"]

        self.i2c_bus.write_reg_16(FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI, self.encode_timeout(final_range_timeout_mclks))
        # set_sequence_step_timeout() end

        self.measurement_timing_budget_us = budget_us  # store for internal reuse
        return True

    # Encode sequence step timeout register value from timeout in MCLKs
    # based on VL53L0X_encode_timeout()
    # Note: the original function took a uint16_t, but the argument passed to it
    # is always a uint16_t.
    def encode_timeout(self, timeout_mclks):
        """Encode a timeout in MCLKs into the device's 16-bit register format.

        The register format is "(LSByte * 2^MSByte) + 1": the value is halved
        until it fits in one byte, counting the halvings into the MSByte.
        Returns 0 for a zero/negative timeout.
        """
        # format: "(LSByte * 2^MSByte) + 1"
        ls_byte = 0
        ms_byte = 0

        if timeout_mclks > 0:
            ls_byte = timeout_mclks - 1

            while ((int(ls_byte) & 0xFFFFFF00) > 0):
                ls_byte /= 2  # >>=
                ms_byte += 1

            return ((ms_byte << 8) | (int(ls_byte) & 0xFF))
        else:
            return 0

    # Convert sequence step timeout from microseconds to MCLKs with given VCSEL period in PCLKs
    # based on VL53L0X_calc_timeout_mclks()
    def timeout_microseconds_to_mclks(self, timeout_period_us, vcsel_period_pclks):
        """Convert a timeout in microseconds to macro clocks (MCLKs).

        Note: uses true division, so under Python 3 the result may be a
        fractional value; callers pass it through int()/encode_timeout().
        """
        macro_period_ns = self.calc_macro_period(vcsel_period_pclks)

        return (((timeout_period_us * 1000) + (macro_period_ns / 2)) / macro_period_ns)

    # based on VL53L0X_perform_single_ref_calibration()
    def perform_single_ref_calibration(self, vhv_init_byte):
        """Run one reference calibration cycle; return True on success,
        False if the interrupt never fired before the I/O timeout."""
        self.i2c_bus.write_reg_8(SYSRANGE_START, 0x01 | vhv_init_byte)  # VL53L0X_REG_SYSRANGE_MODE_START_STOP

        self.start_timeout()
        while ((self.i2c_bus.read_8(RESULT_INTERRUPT_STATUS) & 0x07) == 0):
            if self.check_timeout_expired():
                return False

        self.i2c_bus.write_reg_8(SYSTEM_INTERRUPT_CLEAR, 0x01)
        self.i2c_bus.write_reg_8(SYSRANGE_START, 0x00)

        return True

    def set_timeout(self, timeout):
        """Set the I/O timeout used by the polling loops (units as used by
        check_timeout_expired(); presumably milliseconds — TODO confirm)."""
        self.io_timeout = timeout

    # Start continuous ranging measurements. If period_ms (optional) is 0 or not
    # given, continuous back-to-back mode is used (the sensor takes measurements as
    # often as possible) otherwise, continuous timed mode is used, with the given
    # inter-measurement period in milliseconds determining how often the sensor
    # takes a measurement.
    # based on VL53L0X_StartMeasurement()
    def start_continuous(self, period_ms=0):
        # Magic-register preamble from the ST API (undocumented addresses).
        self.i2c_bus.write_reg_8(0x80, 0x01)
        self.i2c_bus.write_reg_8(0xFF, 0x01)
        self.i2c_bus.write_reg_8(0x00, 0x00)
        self.i2c_bus.write_reg_8(0x91, self.stop_variable)
        self.i2c_bus.write_reg_8(0x00, 0x01)
        self.i2c_bus.write_reg_8(0xFF, 0x00)
        self.i2c_bus.write_reg_8(0x80, 0x00)

        if period_ms != 0:
            # continuous timed mode

            # VL53L0X_SetInterMeasurementPeriodMilliSeconds() begin
            osc_calibrate_val = self.i2c_bus.read_16(OSC_CALIBRATE_VAL)

            if osc_calibrate_val != 0:
                period_ms *= osc_calibrate_val

            self.i2c_bus.write_reg_32(SYSTEM_INTERMEASUREMENT_PERIOD, period_ms)
            # VL53L0X_SetInterMeasurementPeriodMilliSeconds() end

            self.i2c_bus.write_reg_8(SYSRANGE_START, 0x04)  # VL53L0X_REG_SYSRANGE_MODE_TIMED
        else:
            # continuous back-to-back mode
            self.i2c_bus.write_reg_8(SYSRANGE_START, 0x02)  # VL53L0X_REG_SYSRANGE_MODE_BACKTOBACK

    # Returns a range reading in millimeters when continuous mode is active
    # (read_range_single_millimeters() also calls this function after starting a
    # single-shot range measurement)
    def read_range_continuous_millimeters(self):
        """Poll for a completed measurement and return the range in mm.

        Raises IOError (and sets self.did_timeout) if no measurement becomes
        ready before the configured I/O timeout.
        """
        self.start_timeout()
        while ((self.i2c_bus.read_8(RESULT_INTERRUPT_STATUS) & 0x07) == 0):
            if self.check_timeout_expired():
                self.did_timeout = True
                raise IOError("read_range_continuous_millimeters timeout")

        # assumptions: Linearity Corrective Gain is 1000 (default)
        # fractional ranging is not enabled
        # (local name shadows the builtin `range`; kept as in the original)
        range = self.i2c_bus.read_16(RESULT_RANGE_STATUS + 10)

        self.i2c_bus.write_reg_8(SYSTEM_INTERRUPT_CLEAR, 0x01)

        return range

    # Did a timeout occur in one of the read functions since the last call to
    # timeout_occurred()?
    def timeout_occurred(self):
        """Return (and clear) the sticky timeout flag set by the read loops."""
        tmp = self.did_timeout
        self.did_timeout = False
        return tmp

    # Set the VCSEL (vertical cavity surface emitting laser) pulse period for the
    # given period type (pre-range or final range) to the given value in PCLKs.
    # Longer periods seem to increase the potential range of the sensor.
    # Valid values are (even numbers only):
    # pre: 12 to 18 (initialized default: 14)
    # final: 8 to 14 (initialized default: 10)
    # based on VL53L0X_setVcselPulsePeriod()
    def set_vcsel_pulse_period(self, type, period_pclks):
        """Apply a new VCSEL pulse period and rework all dependent timeouts.

        Returns False for an invalid type/period, True on success.
        """
        vcsel_period_reg = self.encode_vcsel_period(period_pclks)

        enables = self.get_sequence_step_enables()
        timeouts = self.get_sequence_step_timeouts(enables["pre_range"])

        # "Apply specific settings for the requested clock period"
        # "Re-calculate and apply timeouts, in macro periods"

        # "When the VCSEL period for the pre or final range is changed,
        # the corresponding timeout must be read from the device using
        # the current VCSEL period, then the new VCSEL period can be
        # applied. The timeout then must be written back to the device
        # using the new VCSEL period.
        #
        # For the MSRC timeout, the same applies - this timeout being
        # dependant on the pre-range vcsel period."
        if type == self.VcselPeriodPreRange:
            # "Set phase check limits"
            if period_pclks == 12:
                self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VALID_PHASE_HIGH, 0x18)
            elif period_pclks == 14:
                self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VALID_PHASE_HIGH, 0x30)
            elif period_pclks == 16:
                self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VALID_PHASE_HIGH, 0x40)
            elif period_pclks == 18:
                self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VALID_PHASE_HIGH, 0x50)
            else:
                # invalid period
                return False

            self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VALID_PHASE_LOW, 0x08)

            # apply new VCSEL period
            self.i2c_bus.write_reg_8(PRE_RANGE_CONFIG_VCSEL_PERIOD, vcsel_period_reg)

            # update timeouts

            # set_sequence_step_timeout() begin
            # (SequenceStepId == VL53L0X_SEQUENCESTEP_PRE_RANGE)
            new_pre_range_timeout_mclks = self.timeout_microseconds_to_mclks(timeouts["pre_range_us"], period_pclks)

            self.i2c_bus.write_reg_16(PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI, self.encode_timeout(new_pre_range_timeout_mclks))
            # set_sequence_step_timeout() end

            # set_sequence_step_timeout() begin
            # (SequenceStepId == VL53L0X_SEQUENCESTEP_MSRC)
            new_msrc_timeout_mclks = self.timeout_microseconds_to_mclks(timeouts["msrc_dss_tcc_us"], period_pclks)

            if new_msrc_timeout_mclks > 256:
                # MSRC timeout register is a single byte; clamp to its maximum.
                self.i2c_bus.write_reg_8(MSRC_CONFIG_TIMEOUT_MACROP, 255)
            else:
                self.i2c_bus.write_reg_8(MSRC_CONFIG_TIMEOUT_MACROP, (new_msrc_timeout_mclks - 1))
            # set_sequence_step_timeout() end
        elif type == self.VcselPeriodFinalRange:
            # Each valid final-range period has its own phase/width/phasecal setup.
            if period_pclks == 8:
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_HIGH, 0x10)
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_LOW, 0x08)
                self.i2c_bus.write_reg_8(GLOBAL_CONFIG_VCSEL_WIDTH, 0x02)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_CONFIG_TIMEOUT, 0x0C)
                self.i2c_bus.write_reg_8(0xFF, 0x01)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_LIM, 0x30)
                self.i2c_bus.write_reg_8(0xFF, 0x00)
            elif period_pclks == 10:
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_HIGH, 0x28)
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_LOW, 0x08)
                self.i2c_bus.write_reg_8(GLOBAL_CONFIG_VCSEL_WIDTH, 0x03)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_CONFIG_TIMEOUT, 0x09)
                self.i2c_bus.write_reg_8(0xFF, 0x01)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_LIM, 0x20)
                self.i2c_bus.write_reg_8(0xFF, 0x00)
            elif period_pclks == 12:
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_HIGH, 0x38)
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_LOW, 0x08)
                self.i2c_bus.write_reg_8(GLOBAL_CONFIG_VCSEL_WIDTH, 0x03)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_CONFIG_TIMEOUT, 0x08)
                self.i2c_bus.write_reg_8(0xFF, 0x01)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_LIM, 0x20)
                self.i2c_bus.write_reg_8(0xFF, 0x00)
            elif period_pclks == 14:
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_HIGH, 0x48)
                self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VALID_PHASE_LOW, 0x08)
                self.i2c_bus.write_reg_8(GLOBAL_CONFIG_VCSEL_WIDTH, 0x03)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_CONFIG_TIMEOUT, 0x07)
                self.i2c_bus.write_reg_8(0xFF, 0x01)
                self.i2c_bus.write_reg_8(ALGO_PHASECAL_LIM, 0x20)
                self.i2c_bus.write_reg_8(0xFF, 0x00)
            else:
                # invalid period
                return False

            # apply new VCSEL period
            self.i2c_bus.write_reg_8(FINAL_RANGE_CONFIG_VCSEL_PERIOD, vcsel_period_reg)

            # update timeouts

            # set_sequence_step_timeout() begin
            # (SequenceStepId == VL53L0X_SEQUENCESTEP_FINAL_RANGE)

            # "For the final range timeout, the pre-range timeout
            # must be added. To do this both final and pre-range
            # timeouts must be expressed in macro periods MClks
            # because they have different vcsel periods."
            new_final_range_timeout_mclks = self.timeout_microseconds_to_mclks(timeouts["final_range_us"], period_pclks)

            if enables["pre_range"]:
                new_final_range_timeout_mclks += timeouts["pre_range_mclks"]

            self.i2c_bus.write_reg_16(FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI, self.encode_timeout(new_final_range_timeout_mclks))
            # set_sequence_step_timeout end
        else:
            # invalid type
            return False

        # "Finally, the timing budget must be re-applied"
        self.set_measurement_timing_budget(self.measurement_timing_budget_us)

        # "Perform the phase calibration. This is needed after changing on vcsel period."
        # VL53L0X_perform_phase_calibration() begin
        sequence_config = self.i2c_bus.read_8(SYSTEM_SEQUENCE_CONFIG)
        self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, 0x02)
        self.perform_single_ref_calibration(0x0)
        self.i2c_bus.write_reg_8(SYSTEM_SEQUENCE_CONFIG, sequence_config)
        # VL53L0X_perform_phase_calibration() end

        return True

    # Performs a single-shot range measurement and returns the reading in
    # millimeters
    # based on VL53L0X_PerformSingleRangingMeasurement()
    def read_range_single_millimeters(self):
        """Trigger one single-shot measurement and return the range in mm.

        Raises IOError (and sets self.did_timeout) if the start bit never
        clears before the configured I/O timeout.
        """
        # Magic-register preamble from the ST API (undocumented addresses).
        self.i2c_bus.write_reg_8(0x80, 0x01)
        self.i2c_bus.write_reg_8(0xFF, 0x01)
        self.i2c_bus.write_reg_8(0x00, 0x00)
        self.i2c_bus.write_reg_8(0x91, self.stop_variable)
        self.i2c_bus.write_reg_8(0x00, 0x01)
        self.i2c_bus.write_reg_8(0xFF, 0x00)
        self.i2c_bus.write_reg_8(0x80, 0x00)

        self.i2c_bus.write_reg_8(SYSRANGE_START, 0x01)

        # "Wait until start bit has been cleared"
        self.start_timeout()
        while (self.i2c_bus.read_8(SYSRANGE_START) & 0x01):
            if self.check_timeout_expired():
                self.did_timeout = True
                raise IOError("read_range_single_millimeters timeout")

        return self.read_range_continuous_millimeters()

    # Set the return signal rate limit check value in units of MCPS (mega counts
    # per second). "This represents the amplitude of the signal reflected from the
    # target and detected by the device"; setting this limit presumably determines
    # the minimum measurement necessary for the sensor to report a valid reading.
    # Setting a lower limit increases the potential range of the sensor but also
    # seems to increase the likelihood of getting an inaccurate reading because of
    # unwanted reflections from objects other than the intended target.
    # Defaults to 0.25 MCPS as initialized by the ST API and this library.
    def set_signal_rate_limit(self, limit_Mcps):
        """Write the minimum return signal rate limit (MCPS) to the sensor.

        Raises ValueError when the requested limit is outside 0 - 511.99.
        """
        if limit_Mcps < 0 or limit_Mcps > 511.99:
            raise ValueError("set_signal_rate_limit limit out of range (0 - 511.99)")
        # Q9.7 fixed point format (9 integer bits, 7 fractional bits)
        # NOTE(review): limit_Mcps * (1 << 7) may be a float; verify that
        # write_reg_16 accepts/truncates non-integer values as intended.
        self.i2c_bus.write_reg_16(FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT, (limit_Mcps * (1 << 7)))  # writeReg16Bit(FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT, limit_Mcps * (1 << 7));

    # Encode VCSEL pulse period register value from period in PCLKs
    # based on VL53L0X_encode_vcsel_period()
    def encode_vcsel_period(self, period_pclks):
        """Encode a VCSEL period in PCLKs into its register representation."""
        return ((period_pclks >> 1) - 1)
DexterInd/DI_Sensors
Python/di_sensors/VL53L0X.py
VL53L0X.py
py
37,337
python
en
code
12
github-code
13
74527851216
__author__ = 'Gabor Wnuk'
__date__ = '$Date: 2016-05-07 18:47:03 +0200 (Sat, 7 May 2016) $'

import sqlite3

# Path of the SQLite database backing the irrigator.
SQLITE_PATH = '/Users/GaborWnuk/irrigator.db'

# Geolocation (used for weather lookups).
LATITUDE_AND_LONGITUDE = (52.227578, 20.986796)

# --- Water pump ---
WATER_PUMP_RELAY_GPIO = 4
WATER_PUMP_LITER_PER_MINUTE = 3.4

# --- Ultrasonic sensor (water level) ---
ULTRASONIC_SENSOR_TRIG_GPIO = 20
ULTRASONIC_SENSOR_ECHO_GPIO = 21

# Tank capacity in liters.
WATER_TANK_CAPACITY = 60

# Distance between sensor and water surface when the tank is full (cm).
DISTANCE_FROM_SENSOR_WHEN_FULL = 5

# Distance between sensor and tank bottom when the tank is empty (cm).
DISTANCE_FROM_SENSOR_WHEN_EMPTY = 61

# Calibration offset applied to the raw distance reading.
CALIBRATION_DIFFERENCE = -2.3

# Water level warning threshold (liters).
WATER_LEVEL_WARNING = 10

# Water level error threshold (liters); the pump won't start below this,
# because some pumps must not run dry.
WATER_LEVEL_ERROR = 5


# --- SQLite database helpers ---

def dict_factory(cursor, row):
    """sqlite3 row factory: map a result row to {column_name: value}."""
    return {column[0]: row[idx] for idx, column in enumerate(cursor.description)}


def get_db():
    """Open the irrigator database with dict-shaped rows."""
    connection = sqlite3.connect(SQLITE_PATH)
    connection.row_factory = dict_factory
    return connection
GaborWnuk/irrigator
src/nuke/irrigator/settings.py
settings.py
py
1,225
python
en
code
0
github-code
13
18306577335
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button


class AppLayout(GridLayout):
    """Two-column form: three labeled text inputs, a submit button and an
    output label that echoes the answers back."""

    def __init__(self, **kwargs):
        super(AppLayout, self).__init__(**kwargs)
        self.cols = 2
        # Widgets are added in pairs so they fill the two-column grid row by row;
        # the order below must match the original layout exactly.
        self.name = self._labeled_input("Name :")
        self.pizza = self._labeled_input("Favorite pizza :")
        self.color = self._labeled_input("Favorite color :")
        self.submit = Button(text="Submit", font_size=32)
        self.submit.bind(on_press=self.press)
        self.add_widget(self.submit)
        self.display = Label(text="<change on submit>")
        self.add_widget(self.display)

    def _labeled_input(self, caption):
        """Add a caption label plus a single-line input; return the input."""
        self.add_widget(Label(text=caption))
        field = TextInput(multiline=False)
        self.add_widget(field)
        return field

    def press(self, instance):
        # Render the three answers into the display label on submit.
        self.display.text = f'Hello {self.name.text} ! \nYour favorite pizza is {self.pizza.text} and \nyour favorite color is {self.color.text}'


class HelloKivyApp(App):
    """Application wrapper whose root widget is an AppLayout."""

    def build(self):
        return AppLayout()


if __name__ == '__main__':
    HelloKivyApp().run()
TGITS/programming-workouts
python/kivy/codemy_tutorial/basic_kivy_gui.py
basic_kivy_gui.py
py
1,282
python
en
code
0
github-code
13
37264406131
import os
from pathlib import Path
import time
import random

import numpy as np

import parse_intent
import utils
import dataset_io as dio
import parameter_io as pio
import population_funcs


class Universe(object):
    """
    Create the universe of the simulation.
    """

    def __init__(self, dataset_fn=None, world_param_fn=None,
                 species_param_fns=None, world_param_dict=None,
                 species_param_dicts=None, custom_module_fns=None,
                 current_time=0, end_time=10, dataset_dir='datasets/',
                 pad_zeros=0, file_extension='.txt', seed=None):
        """
        Initialize universe based on either parameter files or saved datasets.

        Parameters
        ----------
        dataset_fn : str
            Filename of saved organism + world dataset.
        world_param_fn : str
            Filename of world parameter file.
        species_param_fns : list of str
            List of filenames of species parameter files.
        world_param_dict : dict
            Dictionary containing initial world parameters (defaults to {}).
        species_param_dicts : list of dict
            List of dictionaries containing initial species parameters
            (defaults to [{}]).
        custom_module_fns : list of str
            List of filenames of external python scripts containing custom
            behaviors.
        current_time : int
            Current time of simulation.
        end_time : int
            End time of simulation.
        dataset_dir : str
            Directory path for saving all world and organism datasets.
        pad_zeros : int
            Number of zeroes to pad in dataset filenames.
        file_extension : str
            File extension for saving dataset files. Should generally be
            '.txt' or '.json'.
        seed : int, optional
            Random seed for the simulation.
        """
        # Fix: the original signature used mutable default arguments
        # (world_param_dict={}, species_param_dicts=[{}]), which are shared
        # across all calls and can leak state between Universe instances if
        # mutated downstream.  None sentinels are backward-compatible.
        if world_param_dict is None:
            world_param_dict = {}
        if species_param_dicts is None:
            species_param_dicts = [{}]

        # Set random seeds for the entire simulation
        if seed is not None:
            random.seed(seed)
            np.random.seed(seed)

        self.start_timestamp = time.time()
        self.last_timestamp = self.start_timestamp

        self.dataset_fn = dataset_fn
        self.world_param_fn = world_param_fn
        self.species_param_fns = species_param_fns
        self.custom_module_fns = custom_module_fns
        self.world_param_dict = world_param_dict
        self.species_param_dicts = species_param_dicts

        if self.custom_module_fns is not None:
            # Keep only paths that actually exist, normalized to absolute form.
            self.custom_module_fns = [os.path.abspath(path)
                                      for path in self.custom_module_fns
                                      if os.path.isfile(path)]

        self.dataset_dir = Path(dataset_dir)
        self.dataset_dir.mkdir(exist_ok=True)

        self.current_time = current_time
        self.end_time = end_time
        self.pad_zeros = pad_zeros
        # while (self.end_time - self.current_time) >= 10 ** self.pad_zeros:
        #     self.pad_zeros += 1
        self.file_extension = file_extension

        self.population_dict, self.world = self.initialize()
        self.species_names = sorted(list(self.population_dict.keys()))
        self.intent_list = []

    def initialize(self):
        """
        Initialize world and organisms in the universe, from either saved
        datasets or from parameter files (and subsequently writing the initial
        time step to file).

        Returns
        -------
        population_dict : dict
            Dict of organisms at the beginning of the simulation.
        world : World
            World at the beginning of the simulation.
        """
        if self.dataset_fn is not None:
            # Set up entire universe based on saved dataset
            population_dict, world = dio.load_universe(self.dataset_fn)

        # NOTE(review): the branches below run unconditionally, so when
        # dataset_fn is given, `world` (and `population_dict`) loaded above
        # are overwritten here — confirm this precedence is intended.
        if self.world_param_fn is not None:
            # Set up entire world based on parameter file
            world = pio.load_world(fn=self.world_param_fn)
        else:
            world = pio.load_world(init_dict=self.world_param_dict)

        if self.species_param_fns is not None:
            # Set up all organisms based on species specifications
            population_dict = pio.load_species(
                fns=self.species_param_fns,
                init_world=world,
                custom_module_fns=self.custom_module_fns)
        else:
            population_dict = pio.load_species(
                init_dicts=self.species_param_dicts,
                init_world=world,
                custom_module_fns=self.custom_module_fns)

        # Write the initial (t = current_time) state to the dataset directory.
        output_fn = (
            self.dataset_dir
            / 'ds{}'.format(str(self.current_time).zfill(self.pad_zeros))
        ).with_suffix(self.file_extension)
        dio.save_universe(population_dict, world, output_fn)

        return population_dict, world

    def step(self):
        """
        Steps through one time step, iterating over all organisms and
        computing new organism states. Saves all organisms and the world to
        file at the end of each step.
        """
        # Increment time step
        self.current_time += 1

        # This is just updating the age, not evaluating whether an organism
        # is at death, since organism actions should be evaluated based on
        # the current state. Age needs to be updated so that every organism
        # in intent list has the correct age.
        organism_list = population_funcs.get_organism_list(self.population_dict)
        t_organism_list = [organism.clone_self()._update_age()
                           for organism in organism_list
                           if organism.alive]
        position_hash_table = (population_funcs
                               .hash_by_position(t_organism_list))

        # intent_list is a list of lists, one list per organism in the current
        # time step
        self.intent_list = []
        for organism in organism_list:
            if organism.alive:
                # currently t_organism_list isn't used by any actions...
                self.intent_list.append(
                    organism.step(
                        population_funcs.get_population_dict(
                            t_organism_list, self.species_names),
                        self.world,
                        position_hash_table=position_hash_table)
                )

        # Parse intent list and ensure it is valid
        self.population_dict = parse_intent.parse(self.intent_list,
                                                  self.population_dict)

        # Potential changes to the world would go here
        self.world.step()

        output_fn = (
            self.dataset_dir
            / 'ds{}'.format(str(self.current_time).zfill(self.pad_zeros))
        ).with_suffix(self.file_extension)
        dio.save_universe(self.population_dict, self.world, output_fn)

    def current_info(self, verbosity=1, expanded=True):
        """Build a human-readable status line/paragraph for the current step.

        verbosity levels add organism counts (1), elapsed time since last
        step (2), elapsed time since start (3), and per-species counts (4);
        `expanded` toggles multi-line vs single-line output.
        """
        total_num = sum([self.population_dict[species]['statistics']['total']
                         for species in self.species_names])
        # NOTE(review): with verbosity == 1 and expanded == False, rt_pstring
        # is built but pstring is returned unchanged — confirm intended.
        pstring = 't = %s' % (self.current_time)
        if verbosity >= 1:
            if expanded:
                pstring = (
                    '... t = %s\n' % str(self.current_time).zfill(self.pad_zeros)
                    + ' Number of organisms: %s\n' % total_num
                )
            else:
                rt_pstring = 't = %s: %s organisms' % (self.current_time,
                                                       total_num)
        if verbosity >= 4:
            if expanded:
                for species_name in self.species_names:
                    pstring += (
                        ' %s: %d organisms\n'
                        % (species_name,
                           self.population_dict[species_name]['statistics']['total'])
                    )
            else:
                rt_pstring = rt_pstring + ' ('
                for i, species_name in enumerate(self.species_names):
                    rt_pstring += str(
                        self.population_dict[species_name]['statistics']['total'])
                    if i != len(self.species_names) - 1:
                        rt_pstring += ':'
                rt_pstring += ')'
        if verbosity >= 2:
            now = time.time()
            last_time_diff = now - self.last_timestamp
            self.last_timestamp = now
            if expanded:
                pstring += (
                    ' Time elapsed since last time step: %s\n'
                    % utils.time_to_string(last_time_diff)
                )
            else:
                pstring = rt_pstring + (
                    ' (%s)' % (utils.time_to_string(last_time_diff))
                )
            if verbosity >= 3:
                start_time_diff = now - self.start_timestamp
                if expanded:
                    pstring += (
                        ' Time elapsed since start: %s\n'
                        % utils.time_to_string(start_time_diff)
                    )
                else:
                    pstring = rt_pstring + (
                        ' (%s; %s)' % (utils.time_to_string(last_time_diff),
                                       utils.time_to_string(start_time_diff))
                    )
        return pstring

    def run(self, verbosity=1, expanded=True):
        """Advance the simulation to end_time, printing status every step."""
        print(self.current_info(verbosity=verbosity, expanded=expanded))
        while self.current_time < self.end_time:
            self.step()
            print(self.current_info(verbosity=verbosity, expanded=expanded))


# At its simplest, the entire executable could just be written like this
if __name__ == '__main__':
    universe = Universe()
    universe.run()
blossom-evolution/blossom
blossom/universe.py
universe.py
py
9,995
python
en
code
8
github-code
13
1589534672
import os
import sys
import time

# Watchdog loop: every few seconds check whether an nginx process exists;
# restart the service if it has died, and exit once nginx is confirmed running.
while True:
    print("==============检测nginx是否正在运行===============")
    time.sleep(4)
    try:
        # `ps -C nginx` prints a header line plus one line per nginx process,
        # so fewer than 2 output lines means no nginx process is alive.
        ret = os.popen('ps -C nginx -o pid,cmd').readlines()
        if len(ret) < 2:
            print("nginx进程异常退出,4秒后重启")
            time.sleep(3)
            os.system('service nginx restart')
            print("nginx正在重启")
        else:
            print("nginx正在运行当中...")
            break
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit, making this infinite loop impossible to stop with
        # Ctrl-C. Narrowed to Exception; error reporting is unchanged.
        print("Error", sys.exc_info())
Abeautifulsnow/python_learning
scripts/python/monitor_nginx.py
monitor_nginx.py
py
535
python
en
code
0
github-code
13
15886489152
from collections import defaultdict
from random import random

import numpy as np
import pandas as pd
from category_encoders import OrdinalEncoder, TargetEncoder
from joblib import parallel_backend
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from scipy.cluster import hierarchy
from scipy.cluster._optimal_leaf_ordering import squareform
from scipy.stats import spearmanr
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)

import warnings

warnings.filterwarnings("ignore")


def random_imputation(df):
    """
    :param df: a data frame with missing values
    :return: complete data frame, where missing values are imputed by random
             imputation (sampling with replacement from the observed values
             of the same column)
    """
    # Fix: the module-level `from random import random` binds the name
    # `random` to a function, so the original `random.choices(...)` raised
    # AttributeError. Import `choices` from the random module locally.
    from random import choices

    df_imp = df.copy()
    for c in df_imp.columns:
        data = df_imp[c]
        mask = data.isnull()
        imputations = choices(data[~mask].values, k=mask.sum())
        data[mask] = imputations

    return df_imp


def permutate_features(X, threshold):
    """
    :param X: the observed data (df)
    :param threshold: the (cut) level for clustering
    :return: X_new (the subset of observed data),
             cluster_id_to_feat_id (list with cluster info)
    """
    # calculate correlation
    corr = spearmanr(X).correlation

    # ensure symmetry
    corr = (corr + corr.T) / 2
    np.fill_diagonal(corr, 1)

    # distance matrix and linkage with Ward's
    dist_matrix = 1 - np.abs(corr)
    dist_link = hierarchy.ward(squareform(dist_matrix))  # checks=False

    # group features in clusters and keep one feature per cluster
    cluster_ids = hierarchy.fcluster(dist_link, threshold,
                                     criterion='distance')
    cluster_id_to_feat_id = defaultdict(list)
    for idx, cluster_id in enumerate(cluster_ids):
        cluster_id_to_feat_id[cluster_id].append(idx)
    selected_features = [v[0] for v in cluster_id_to_feat_id.values()]

    X_new = X.iloc[:, selected_features]

    return X_new, cluster_id_to_feat_id


def encode_scale_data_perm(data, tuning_target, threshold, num_feat):
    """
    :param data: the complete data set
    :param tuning_target: the target feature
    :param threshold: the (cut) level for clustering
    :param num_feat: a list with numeric features
    :return: X_new (the complete and scaled/encoded subset of observed data)
             y (the binarized target feature),
             features (list with features present in X_new),
             clusters (list with cluster information)
    """
    # encode objects in data
    enc = OrdinalEncoder()
    data_obj = data[data.columns.intersection(num_feat)]
    enc.fit(data_obj)
    encoding = enc.fit_transform(data[data_obj.columns])
    c = 0
    for i in data_obj.columns:
        data[i] = encoding.iloc[:, c]
        c += 1

    # binarize target to 0 (missing) and 1 (non-missing)
    y = data[tuning_target].notnull().astype('int')

    # drop target from observed data
    X = data.drop(tuning_target, axis=1)

    if tuning_target in num_feat:
        num_feat = [i for i in num_feat if i != tuning_target]
    cat_feat = X.drop(num_feat, axis=1).columns

    for c in cat_feat:
        # missing values as new category in the categorical data
        X[c] = X[c].fillna(-1).astype('category', copy=False)
    for n in num_feat:
        X[n] = random_imputation(X[n].to_frame())

    # define scaling-encoding pipeline
    numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
    categorical_transformer = Pipeline(steps=[("encoder", TargetEncoder())])
    preprocessor = ColumnTransformer(
        transformers=[("num", numeric_transformer, num_feat),
                      ("cat", categorical_transformer, cat_feat)])

    # extract feature names
    features_compl = X.columns

    # scale/encode the observed data
    X_scaled = pd.DataFrame(preprocessor.fit_transform(X, y),
                            columns=features_compl)

    # remove multicollinearity
    X_new, clusters = permutate_features(X_scaled, threshold)
    features = X_new.columns

    return X_new, y, features, clusters


def encode_scale_data_perm_mi(data, tuning_target, threshold, num_feat):
    """
    :param data: the complete data set
    :param tuning_target: the target feature
    :param threshold: the (cut) level for clustering
    :param num_feat: a list with numeric features
    :return: X_new (the complete and scaled/encoded subset of observed data)
             y (the binarized target feature),
             features (list with features present in X_new),
             clusters (list with cluster information)
    """
    # encode objects in data
    enc = OrdinalEncoder()
    data_obj = data[data.columns.intersection(num_feat)]
    enc.fit(data_obj)
    encoding = enc.fit_transform(data[data_obj.columns])
    c = 0
    for i in data_obj.columns:
        data[i] = encoding.iloc[:, c]
        c += 1

    # binarize target to 0 (missing) and 1 (non-missing)
    y = data[tuning_target].notnull().astype('int')

    # drop target from observed data
    X = data.drop(tuning_target, axis=1)

    # make copy of observed variables set
    X_copy = X.copy()

    # make mask of missing values
    missing_mask = X_copy.isna()

    # initialize MICE imputer
    # with parallel_backend('threading', n_jobs=-1):
    imputer = IterativeImputer(max_iter=3, random_state=144, verbose=2,
                               initial_strategy='most_frequent',
                               add_indicator=False,
                               imputation_order='random')

    # calculate values to impute and impute them into the data
    X_imp = imputer.fit_transform(X_copy)
    X_imputed = pd.DataFrame(X_imp, columns=X_copy.columns)
    # X_imputed = X_copy.where(missing_mask, X_imp)
    # X_copy[missing_mask] = X_imp[missing_mask]
    # print(X_imp.shape())

    if tuning_target in num_feat:
        num_feat = [i for i in num_feat if i != tuning_target]
    cat_feat = X.drop(num_feat, axis=1).columns

    X_imputed[cat_feat] = X_imputed[cat_feat].astype('category')
    # print(X_imputed.isnull().any())

    # for c in cat_feat:
    #     # missing values as new category in the categorical data
    #     X[c] = X[c].fillna(-1).astype('category', copy=False)
    # for n in num_feat:
    #     X[n] = random_imputation(X[n].to_frame())

    # define scaling-encoding pipeline
    numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
    categorical_transformer = Pipeline(
        steps=[("encoder", TargetEncoder(handle_missing='error',
                                         handle_unknown='error'))])
    preprocessor = ColumnTransformer(
        transformers=[("num", numeric_transformer, num_feat),
                      ("cat", categorical_transformer, cat_feat)])

    # extract feature names
    features_compl = X_imputed.columns

    # scale/encode the observed data
    X_scaled = pd.DataFrame(preprocessor.fit_transform(X_imputed, y),
                            columns=features_compl)
    # print(y[y==0])
    # print(X_scaled.nunique())  # .nunique()

    # remove multicollinearity
    X_new, clusters = permutate_features(X_scaled, threshold)
    features = X_new.columns

    return X_new, y, features, clusters


def rosenbrock(vector, a=1, b=100):
    """
    :param vector: a vector to calculate the rosenbrock function on
    :param a: variable in the function, default a=1
    :param b: variable in the function, default b=100
    :return solution of the rosenbrock function for the given vector
            f(x, y) = (a-x)^2 + b(y-x^2)^2
    """
    vector = np.array(vector)
    return (a - vector[0])**2 + b * (vector[1] - vector[0]**2)**2


def rastrigin(vector):
    """
    :param vector: a vector to calculate the rastrigin function on
    :return solution of the rastrigin function for the given vector
            f(x) = 10*n + Sigma { x_i^2 - 10*cos(2*PI*x_i) }
    """
    vector = np.array(vector)
    return 10 * vector.size + sum(vector * vector
                                  - 10 * np.cos(2 * np.pi * vector))


def multi_csv_to_df(files, axis=0, index_col=None):
    """
    :param files: list of csv file paths
    :param axis: on what axis to aggregate the files (rows (0) or columns (1))
    :param index_col: index column to use, if applicable
    :return: a single data frame of the aggregated csv files
    """
    lst = []
    # files to alphabetical order
    files_sorted = sorted(files)
    for filename in files_sorted:
        df = pd.read_csv(filename, index_col=index_col, header=0)
        lst.append(df)
    df_results = pd.concat(lst, axis=axis, ignore_index=True)

    return df_results


def ConvergencePlot(cost):
    """
    Monitors convergence.

    Parameters:
    ----------
    :param dict cost: mean and best cost over cycles/generations as returned
                      by an optimiser.
    """
    font = FontProperties()
    font.set_size('larger')
    labels = ["Best Cost Function", "Mean Cost Function"]
    plt.figure(figsize=(12.5, 4))
    plt.plot(range(len(cost["best"])), cost["best"], label=labels[0])
    plt.scatter(range(len(cost["mean"])), cost["mean"], color='red',
                label=labels[1])
    plt.xlabel("Iteration #")
    plt.ylabel("Value [-]")
    plt.legend(loc="best", prop=font)
    plt.xlim([0, len(cost["mean"])])
    plt.grid()
    plt.show()
Lieve2/ADSthesis_multiverse_analysis
utility_functions.py
utility_functions.py
py
10,034
python
en
code
1
github-code
13
26052190040
import time

from django.shortcuts import get_object_or_404
from rest_framework import permissions, status, views
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import AccessToken

from users.models import User

from .mixins import RetrieveUpdateListViewSet
from .permissions import ReadOnlyPermission
from .serializers import (UserAuthSerializer, UserLoginSerializer,
                          UserSerializer)
from .utils import generate_invite_code


class UserAuthSignupView(views.APIView):
    """Sign-up / auth-code request endpoint.

    POST creates the user on first contact (assigning a unique invite code)
    and sets a one-time auth code on every call. Returns 201 when the user
    was just created, 200 when it already existed, 400 on invalid payload.
    """
    permission_classes = [permissions.AllowAny]

    def post(self, request):
        serializer = UserAuthSerializer(data=request.data)
        if serializer.is_valid():
            username = serializer.validated_data['username']
            status_code = status.HTTP_200_OK
            if not User.objects.filter(username=username).exists():
                # Regenerate until the invite code is unique among users.
                invite_code = generate_invite_code()
                while User.objects.filter(invite_code=invite_code).exists():
                    invite_code = generate_invite_code()
                serializer.save(invite_code=invite_code)
                status_code = status.HTTP_201_CREATED
            user = User.objects.get(username=username)
            # it is necessary to implement code generation
            # and sending it via SMS in the future
            # (sleep simulates the SMS round-trip; '0000' is a stub code)
            time.sleep(2)
            user.auth_code = '0000'
            user.save()
            return Response(serializer.data, status=status_code)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)


class UserAuthLoginView(views.APIView):
    """Login endpoint: exchanges a valid username/auth payload for a JWT.

    Returns {'token': <access token>} on success, 400 on invalid payload,
    404 when the user does not exist.
    """
    permission_classes = [permissions.AllowAny]

    @staticmethod
    def post(request):
        serializer = UserLoginSerializer(data=request.data)
        if serializer.is_valid():
            username = serializer.data['username']
            user = get_object_or_404(User, username=username)
            access_token = str(AccessToken.for_user(user))
            return Response({'token': access_token},
                            status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)


class UserViewSet(RetrieveUpdateListViewSet):
    """Read/update view set over users, looked up by username.

    The extra `/me` route lets the authenticated user read (GET) or
    partially update (PATCH) their own profile.
    """
    queryset = User.objects.select_related('referrer')
    serializer_class = UserSerializer
    lookup_field = 'username'
    permission_classes = [ReadOnlyPermission]

    @action(detail=False, methods=['GET', 'PATCH'], url_path='me',
            permission_classes=[permissions.IsAuthenticated])
    def me_action(self, request):
        # GET: serialize the requesting user as-is.
        if self.request.method == 'GET':
            instance = self.request.user
            serializer = self.get_serializer(instance)
            return Response(serializer.data)
        # PATCH: partial update of the requesting user's own record.
        instance = self.request.user
        serializer = self.get_serializer(instance, data=request.data,
                                         partial=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)
grmzk/referral_system
referral_system/api/views.py
views.py
py
3,016
python
en
code
0
github-code
13
72349910098
import os
import math
import torch
from torch.nn import BCEWithLogitsLoss
from transformers import XLNetTokenizer, XLNetModel
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import pandas as pd
import sentencepiece
import logging
from logtail import LogtailHandler

"""
Define the classification model for evaluation.
"""

# Ship logs to Logtail.
# NOTE(review): the source token is a hard-coded credential committed to the
# repository -- it should live in an environment variable / secret store.
handler = LogtailHandler(source_token="tvoi6AuG8ieLux2PbHqdJSVR")
logger = logging.getLogger(__name__)
logger.handlers = [handler]
logger.setLevel(logging.INFO)

# Checkpoint produced by the training run; loaded on CPU in load_model().
polarizationWeightsFile = "./modelWeights/xlnet_1.bin"
# Maximum token sequence length fed to XLNet.
MAX_LEN = 512


class XLNetForPolarizationClassification(torch.nn.Module):
    def __init__(self, num_labels=2):
        """
        Initialize the model with the default config for XLNet
        """
        super(XLNetForPolarizationClassification, self).__init__()
        self.num_labels = num_labels
        self.xlnet = XLNetModel.from_pretrained('xlnet-base-cased')
        # Single-logit head on top of the 768-dim XLNet hidden size.
        self.classifier = torch.nn.Linear(768, 1)
        torch.nn.init.xavier_normal_(self.classifier.weight)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        """
        The architecture is xlnet + pooling layer + classifier + BCE
        """
        last_hidden_state = self.xlnet(input_ids=input_ids,
                                       attention_mask=attention_mask,
                                       token_type_ids=token_type_ids)
        # Mean-pool across the sequence dimension before classifying.
        mean_last_hidden_state = self.pool_hidden_state(last_hidden_state)
        logits = self.classifier(mean_last_hidden_state)
        # If you know the labels, compute the loss otherwise
        if labels is not None:
            loss_fct = BCEWithLogitsLoss()
            # 16, 8
            loss = loss_fct(logits.view(-1, self.num_labels),
                            labels.view(-1, self.num_labels))
            return loss
        else:
            return logits

    def pool_hidden_state(self, last_hidden_state):
        """
        Pools the hidden state in the XLNet architecture
        """
        # The model output is a tuple; element 0 is the hidden-state tensor.
        last_hidden_state = last_hidden_state[0]
        mean_last_hidden_state = torch.mean(last_hidden_state, 1)
        return mean_last_hidden_state


class XLNetPredict(torch.nn.Module):
    def __init__(self):
        """
        Initializes the prediction class for XLNet
        """
        super(XLNetPredict, self).__init__()
        self.tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased', do_lower_case=True)
        # NOTE(review): self.model is never used for inference; predictions
        # go through self.loaded_model below.
        self.model = XLNetForPolarizationClassification(2)
        self.loaded_model = self.load_model(polarizationWeightsFile)

    def load_model(self, save_path):
        """
        Load the model from the path directory provided
        """
        # Load the model (map to CPU so the checkpoint works without a GPU).
        checkpoint = torch.load(save_path, map_location=torch.device('cpu'))
        model_state_dict = checkpoint['state_dict']
        model = XLNetForPolarizationClassification(num_labels=2)
        model.load_state_dict(model_state_dict)
        logger.info("Successfully loaded the polarization model")
        return model

    def batch_predict(self, text_list):
        """
        Return the polarization scores for the entire list of articles as evaluated by the model
        """
        encoded_text = self.tokenizer.encode_plus(
            text_list,
            max_length=MAX_LEN,
            add_special_tokens=True,
            return_token_type_ids=False,
            pad_to_max_length=False,
            return_attention_mask=True,
            return_tensors='pt',
        )
        outputs = self.loaded_model(input_ids=torch.tensor(encoded_text["input_ids"]),
                                    attention_mask=torch.tensor(encoded_text["attention_mask"]))
        # Sigmoid turns each raw logit into a 0..1 polarization score.
        logits = outputs.sigmoid().detach().cpu().numpy()
        logger.info(logits)
        # Round each of the values in the logits returned
        polarizationLogits = []
        for logit in logits:
            # NOTE(review): logit[0][0] implies a nested (1, 1) sub-array per
            # item -- confirm the actual output shape before relying on this.
            logger.info(round(logit[0][0]))
            polarizationLogits.append(logit[0][0])
        return polarizationLogits

    def predict(self, text):
        """
        Return the polarization score evaluated by the model
        """
        encoded_text = self.tokenizer.encode_plus(
            text,
            max_length=MAX_LEN,
            add_special_tokens=True,
            return_token_type_ids=False,
            pad_to_max_length=False,
            return_attention_mask=True,
            return_tensors='pt',
        )
        # Pad/truncate both ids and mask to exactly MAX_LEN tokens.
        input_ids = pad_sequences(encoded_text['input_ids'], maxlen=MAX_LEN,
                                  dtype=torch.Tensor, truncating="post", padding="post")
        input_ids = input_ids.astype(dtype='int64')
        input_ids = torch.tensor(input_ids)
        attention_mask = pad_sequences(encoded_text['attention_mask'], maxlen=MAX_LEN,
                                       dtype=torch.Tensor, truncating="post", padding="post")
        attention_mask = attention_mask.astype(dtype='int64')
        attention_mask = torch.tensor(attention_mask)
        logger.info("Prepared the input for polarization model")
        # Force a batch dimension of 1 for single-text inference.
        input_ids = input_ids.reshape(1, 512)
        attention_mask = attention_mask  # NOTE(review): no-op self-assignment
        outputs = self.loaded_model(input_ids=input_ids, attention_mask=attention_mask)
        logits = outputs.sigmoid().detach().cpu().numpy()
        logger.info(logits)
        logger.info(round(logits[0][0]))
        return logits[0][0]
aiswaryasankar/dbrief
polarityModel/training.py
training.py
py
4,883
python
en
code
1
github-code
13
33565489268
import shelve, time

arguments = ["self", "info", "args", "world"]
helpstring = "vote <topic #> <choice #>"
minlevel = 1


def main(connection, info, args, world):
    """Lets user vote"""
    # writeback=True caches nested objects so in-place mutation + sync()
    # persists the change back into votes.db.
    votes = shelve.open("votes.db", writeback=True)
    sender = info["sender"]
    topic_pos = int(args[1]) - 1
    topic = votes["networks"][connection.networkname][topic_pos]
    if not topic["started"]:
        connection.msg(info["channel"], _("That vote has not yet started!"))
    else:
        choice_pos = int(args[2]) - 1
        if sender not in topic["voters"] and topic_pos >= 0 and choice_pos >= 0:
            # Record the ballot, then mark the sender as having voted.
            topic["choices"][choice_pos]["votes"].append(sender)
            votes.sync()
            topic["voters"].append(sender)
            votes.sync()
            confirmation = _("You have successfully voted '%(vote)s' on '%(topic)s'.") % dict(
                vote=topic["choices"][choice_pos]["choice"],
                topic=topic["topic"])
            connection.msg(sender, confirmation)
        elif choice_pos < 0:
            connection.msg(info["channel"], "Invalid vote!", True)
        else:
            connection.msg(info["channel"], _("%(sender)s: You have already voted!") % dict(sender=sender))
    votes.close()
sonicrules1234/sonicbot
oldplugins/vote.py
vote.py
py
1,380
python
en
code
10
github-code
13
20602914664
import pandas as pd
import numpy as np

from preprocessing.indicators import *

# Symbols scraped from Bitstamp and their fixed row order in the stacked array.
coins = ['BCH', 'BTC', 'ETH', 'LTC', 'XRP']
coins_idx = {'BCH': 0, 'BTC': 1, 'ETH': 2, 'LTC': 3, 'XRP': 4}


def process_one(coin):
    """Load one coin's OHLCV CSV and append technical indicators.

    Args:
        coin: ticker symbol; ``./data/Bitstamp_<coin>USD.csv`` must exist.

    Returns:
        numpy.ndarray of shape (rows, 9) with columns
        open, high, low, close, Volume USD, RSI, MACD, CCI, ADX.
        Leading rows may contain NaN while indicator windows warm up.
    """
    df = pd.read_csv("./data/Bitstamp_" + coin + "USD.csv", low_memory=False)
    # Indicator helpers (macd/rsi/cci/adx) come from preprocessing.indicators;
    # each is computed from the raw frame, so a single assign() is equivalent
    # to the four sequential assignments.
    df = df.assign(MACD=macd(df), RSI=rsi(df), CCI=cci(df), ADX=adx(df))
    return df[['open', 'high', 'low', 'close', 'Volume USD',
               'RSI', 'MACD', 'CCI', 'ADX']].to_numpy()


def process_all():
    """Stack all coins into one aligned tensor and trim the NaN warm-up.

    Returns:
        (data, max_norm, min_norm) where
        data is a float32 array of shape (n_coins, T, 9), truncated to the
        shortest coin history and starting after the last NaN row;
        max_norm / min_norm are per-coin, per-feature extrema over time
        (shape (n_coins, 9)) for later normalization.
    """
    frames = []
    sizes = []
    for coin in coins:
        df = process_one(coin)
        frames.append(df)
        sizes.append(df.shape[0])

    # Align every series to the shortest available history.
    min_size = min(sizes)
    for i in range(len(frames)):
        frames[i] = frames[i][:min_size, :]

    combined = np.stack(frames)

    # Start right after the last NaN anywhere (indicator warm-up region).
    idx = np.argwhere(np.isnan(combined))
    # Guard: np.max on an empty index array raises ValueError, so only trim
    # when NaNs are actually present; otherwise keep the full history.
    first_defined = (np.max(idx[:, 1]) + 1) if idx.size else 0

    final = combined[:, first_defined:, :].astype(np.float32)
    max_norm = np.max(final, axis=1)
    min_norm = np.min(final, axis=1)
    return final, max_norm, min_norm
sanchitvohra/crypto-bot
preprocessing/create_dataset.py
create_dataset.py
py
1,190
python
en
code
0
github-code
13
73789561619
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

#!pip install nest-asyncio

# In[ ]:

import numpy as np
import pandas as pd
from requests_html import HTMLSession#, AsyncHTMLSession
from bs4 import BeautifulSoup
from datetime import date
import re
import json
from urllib.parse import unquote

# In[ ]:

# Toggle verbose diagnostics for the crawl.
debug_mode = False

# In[ ]:

# Leftover async-rendering experiment from the notebook, kept disabled:
#import nest_asyncio
#nest_asyncio.apply()
#r = await asession.get(links[0])
#await r.html.arender(scrolldown=True)


# # Mapping the URL tree

# In[ ]:

def filter_links(links, link_blacklist, base_url):
    """Keep only same-site links, resolving relative URLs against base_url."""
    passlist = [l for l in links if not any(xl in l for xl in link_blacklist)]  # Drop links containing any blacklisted substring
    filtered_list = []
    discarded_list = []
    for i in passlist:
        if (len(i) <= 1) or i == base_url:  # Discard a bare slash or the base URL itself
            discarded_list.append(i)
        elif i[:len(base_url)] == base_url:  # Keep absolute links under the base domain
            filtered_list.append(i)
        elif i[:4] == 'http':  # Any other absolute URL is off-site
            discarded_list.append(i)
        elif i[0] == '/':  # Build the full URL from a root-relative path
            filtered_list.append(base_url+i)
        elif i[0] != '/':  # Build the full URL from a bare relative path
            filtered_list.append(base_url+'/'+i)
        else:
            discarded_list.append(i)
    return filtered_list


# In[ ]:

def get_filtered_links(seed_url, base_url, link_blacklist):
    # NOTE(review): `session` is only ever defined as a local inside main();
    # calling this function would raise NameError unless a module-level
    # session exists. It is currently unused.
    r = session.get(seed_url)
    return filter_links(r.html.links, link_blacklist, base_url)


# In[ ]:

def link_type(session, link):
    """Classify a URL by its 5th path segment (e.g. 'Product'); 'Unknown' on failure."""
    try:
        page_type = link.split('/')[4].capitalize()
    except Exception as e:
        page_type = "Unknown"
        if debug_mode:print(e)
    if debug_mode:
        print("Type: ", page_type)
    return page_type


# In[ ]:

def new_link(link, product_set, discard_set):
    # Check whether the link has already been seen in either stack.
    new = not(link in product_set or link in discard_set)
    return new


# In[ ]:

def get_product_links(session, link):
    """Scrape one listing page and return the product-detail URLs it contains."""
    r = session.get(link)
    soup = BeautifulSoup(r.text, "html.parser")
    lks = []
    # Product cells in the 4-column grid layout.
    pods = soup.find_all("div", {"pod-layout":"4_GRID"})
    for pod in pods:
        a = pod.find("a", {"class":"jsx-2907167179 layout_grid-view layout_view_4_GRID"}).attrs['href']
        if link_type(session, a) == "Product":
            lks.append(a)
    return lks


# In[ ]:

def product_mapper(session, seed_links, link_type_blacklist, link_blacklist, product_max):
    """Walk the seed pages collecting product URLs until the seeds run out or product_max is hit."""
    product_set = set()   # product stack
    discard_set = set()   # discard stack (pages already visited)
    seed_set = set(seed_links)  # link stack
    while (len(seed_set) > 0) and (len(product_set) < product_max):
        link = seed_set.pop()  # take an arbitrary link off the stack
        if debug_mode:
            print("---------------------------------------")
            print("Product stack: ", len(product_set))
            print("Discard stack: ", len(discard_set))
            print("Link stack: ", len(seed_set))
            print("Current link: ", link)
        p_links = get_product_links(session, link)
        old_size = len(product_set)
        for p in p_links:  # add every product found on the page to the product stack
            product_set.add(p)
        new_size = len(product_set)
        if debug_mode:
            print(new_size-old_size, " new links added to product stack")
        discard_set.add(link)
    return product_set


# # Fetching data for a single product
#
# Desired fields:
# - Country
# - Competitor
# - Scrape date
# - Product ID
# - Title
# - Description
# - Current price
# - Previous price
# - Current discount
# - Currency
# - Availability
# - Condition
# - Department
# - Category
# - Brand
# - Line
# - URL

# In[ ]:

def get_item_data(session, link):
    # From the item page, collect all of its relevant information.
    # Each field is scraped independently; a failure leaves that field None.
    r = session.get(link)
    item = BeautifulSoup(r.text, "html.parser")
    pais = "Chile"
    competidor = "Falabella"
    data = date.today().strftime("%d/%m/%Y")
    # ID (last URL path segment)
    try:
        id = link.split('/')[-1]
    except Exception as e:
        id = None
        if debug_mode:print(e)
    # Title
    try:
        title = item.head.find("meta", {"property":"og:title"}).attrs['content'].split('|')[0].strip()
    except Exception as e:
        title = None
        if debug_mode:print(e)
    # Description
    try:
        description = item.head.find("meta", {"name":"description"}).attrs['content']
    except Exception as e:
        description = None
        if debug_mode:print(e)
    # Current price (thousands separators stripped before int conversion)
    try:
        price = item.find("li", {"class":"jsx-749763969 prices-0"}).attrs['data-event-price'].replace('.','')
        price = int(price)
    except Exception as e:
        price = None
        if debug_mode:print(e)
    # Previous price
    try:
        maxprice = item.find("li", {"class":"jsx-749763969 prices-1"}).attrs['data-normal-price'].replace('.','')
        maxprice = int(maxprice)
    except Exception as e:
        maxprice = None
        if debug_mode:print(e)
    # Currency (site is Chilean, so hard-coded)
    try:
        currency = 'CLP'
    except Exception as e:
        currency = None
        if debug_mode:print(e)
    # Seller (double-unquote handles twice-encoded names in the URL)
    try:
        seller = item.find("a", {"id":"testId-SellerInfo-sellerName"}).attrs['href'].split('/')[-1]
        seller = unquote(unquote(seller))
    except Exception as e:
        seller = None
        if debug_mode:print(e)
    # URL
    url = link
    # Other attributes (spec table rows: name in column 0, value in column 1)
    try:
        attrs = item.find_all("tr", {"class":"jsx-428502957"})
        tempDict = {}
        for att in attrs:
            k = att.contents[0].text
            v = att.contents[1].text
            tempDict[k] = v
    except Exception as e:
        tempDict = {}
        if debug_mode:print(e)
    d = {
        "País":pais,
        "Concorrente":competidor,
        "Data scrape":data,
        "ID produto":id,
        "Título":title,
        "Descrição":description,
        "Preço atual":price,
        "Preço antigo":maxprice,
        "Moeda":currency,
        "Vendedor":seller,
        "URL":url
    }
    d.update(tempDict)  # merge the remaining scraped attributes
    return pd.Series(d)


# In[ ]:

def get_all_items(session, links):
    """Scrape every product link into one DataFrame row each; failures are skipped."""
    df = pd.DataFrame()
    for link in links:
        try:
            item_data = get_item_data(session, link)
            # NOTE(review): DataFrame.append is deprecated/removed in recent
            # pandas -- this requires an older pandas to run.
            df = df.append(item_data, ignore_index=True)
        except Exception as e:
            if debug_mode:
                print(e)
    return df


# # Main function

# In[ ]:

def main(max_pages = 200, product_max = 10_000_000):
    session = HTMLSession()
    #asession = AsyncHTMLSession()
    base_url = "https://www.falabella.com"
    seed_url = "https://www.falabella.com/falabella-cl/category/cat7660002/Belleza--higiene-y-salud"
    link_blacklist = []
    link_type_blacklist = ['Unknown']
    # Scraping: one seed link per paginated listing page.
    seed_links = []
    for i in range(1, max_pages+1):
        j = seed_url + '?page=' + str(i)
        seed_links.append(j)
    products = product_mapper(session, seed_links, link_type_blacklist, link_blacklist, product_max)
    df = get_all_items(session, products)
    # Reorder: fixed metadata columns first, scraped attributes alphabetically after.
    first = ['País', 'Concorrente', 'Data scrape', 'ID produto', 'Título', 'Descrição', 'Preço atual', 'Preço antigo', 'Moeda', 'Vendedor', 'URL']
    cols = first + sorted([c for c in df.columns.to_list() if c not in first])
    try:
        df = df[cols]
    except Exception as e:
        if debug_mode:
            print(e)
    df.to_excel("../02_Results/"+date.today().strftime("%Y_%m_%d")+"_"+"falabella.xlsx")


# In[ ]:

if __name__ == "__main__":
    main()
mariomirow/scraping_beauty
01_Scripts/scraper_falabella.py
scraper_falabella.py
py
7,793
python
en
code
0
github-code
13
38731827941
""" sources: https://github.com/norabelrose/transformers-plus-performers/ """ from dataclasses import dataclass from typing import Callable, Sequence, Optional, Union from enum import Enum PerformerKernel = Enum('PerformerKernel', ['cosh', 'exp', 'elu', 'relu']) OrthogonalFeatureAlgorithm = Enum('OrthogonalFeatureAlgorithm', ['auto', 'kacs', 'qr']) @dataclass class PerformerAttentionConfig: r""" This is the configuration class to store the configuration of a :class:`~transformers.PerformerAttention` module. It is used to define the behavior of a Performer/FAVOR+ attention module when it is initialized. Args: attention_dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. causal (:obj:`bool`, `optional`, defaults to False): Whether to apply causal attention, where positions are prevented from attending to positions to ahead of themselves in the sequence, using the prefix-sum method. kernel_type (:obj:`Enum(PerformerKernel)`, `optional`, defaults to :obj:`'exp'`): The type of kernel function to use for comparing the queries and keys. Possible options are :obj:`'exp'`, :obj:`'cosh'`, and :obj:`'relu'`. The :obj:`'cosh'` option approximates softmax attention with a smaller variance than :obj:`'exp'`, but at the cost of using twice as many random features. :obj:`'relu'` may result in better performance than :obj:`'exp'` and :obj:`'cosh'` in certain circumstances, but it is not an unbiased estimator of softmax attention and thus should not be used with pretrained models that were pretrained with softmax attention. kernel_epsilon (:obj:`float`, `optional`, defaults to 1e-4): Stabilizer term added to the output of the kernel function to avoid dividing by very small numbers. normalize_output (:obj:`bool`, `optional`, defaults to True): Whether to ensure that the output vectors are convex combinations of the input vectors; that is, that the rows of the implicit attention map sum to 1. 
normalization_stabilizer (:obj:`float`, `optional`, defaults to 1e-6): Stabilizer term used when normalizing the output to avoid dividing by very small numbers. num_random_features (:obj:`int`, `optional`, defaults to None): The dimensionality of the random feature vectors to use. When None, the dimensionality is set to D * log(D), where D is the dimensionality of each attention head. orthogonal_feature_algorithm (:obj:`Enum(OrthogonalFeatureAlgorithm)`, defaults to 'auto'): The algorithm to use for generating random orthogonal features. Possible values are 'kacs', which uses a Kac's random walk Markov chain; 'qr', which performs QR decomposition on a random Gaussian matrix at each redraw; and 'auto', which is equivalent to 'kacs' on PyTorch and 'qr' on TensorFlow, since the Kac's random walk algorithm is not supported on TensorFlow. Kac's is generally faster than QR, but successive samples are correlated with each other. use_recurrent_decoding (:obj:`bool`, `optional`, defaults to False): Whether to use recurrent autoregressive decoding, as described in the 'Transformers are RNNs' paper. If True, the PerformerAttention object will expect input tensors with a sequence length dimension of exactly 1, and will output tensors with sequence length of 1. It will retain a recurrent hidden state between forward passes that can be reset with the reset_recurrent_state() method. use_thick_features (:obj:`bool`, `optional`, defaults to False): Whether to generate a random feature tensor that has a batch dimension. use_orthogonal_features (:obj:`bool`, `optional`, defaults to True): Whether to use strictly orthogonal random features, as opposed to features drawn from a standard Gaussian distribution. Orthogonal features result in outputs that more closely approximate softmax attention, but at the cost of doing QR decomposition on the CPU every time the features are redrawn. Best combined with a reasonably large value of :obj:`feature_redraw_interval` (1-5k). 
use_linear_layers (:obj:`bool`, `optional`, defaults to True): Whether to transform the Q, K, and V inputs with a Linear layer before applying attention. Setting this to False may be useful if you want to use PerformerAttention as one component of a more complex attention mechanism. regularize_feature_norms (:obj:`bool`, `optional`, defaults to False): Whether to ensure that the random feature vectors have a norm of sqrt(`d`), where `d` is the dimensionality of each attention head. feature_redraw_interval (:obj:`int`, `optional`, defaults to 100): The number of forward passes after which the random feature matrix should be redrawn. If None, then the feature matrix is never redrawn. When combined with :obj:`redraw_stochastically`, this parameter determines the expected value of the redraw interval, rather than the interval itself. redraw_stochastically (:obj:`bool`, `optional`, defaults to False): If true, PerformerAttention will redraw its random features each forward pass with a probability equal to (1 / :obj:`feature_redraw_interval`), instead of deterministically redrawing once every N passes. This could be desirable in large models to ensure that the attention layers don't all redraw their features at the same time. redraw_verbose (:obj:`bool`, `optional`, defaults to False): Whether to log a message when random features are redrawn during training. dim (:obj:`int`, `optional`): Dimensionality of the queries, keys, and values. num_heads (:obj:`int`, `optional`): Number of attention heads. """ attention_dropout: float = 0.1 kernel_type: Union[str, Callable, PerformerKernel] = PerformerKernel.exp causal: bool = False use_recurrent_decoding: bool = False kernel_epsilon: float = 1e-4 normalize_output: bool = True normalization_stabilizer: float = 1e-6 # The linear_layer_names parameter is needed to allow the PerformerAttention object to imitate the naming # convention of arbitrary attention modules, and therefore load weights from pretrained models. 
It can either have # 3 or 4 elements; if it has 3, then no output linear layer is used. use_linear_layers: bool = True linear_layer_names: Sequence[str] = ('q_linear', 'k_linear', 'v_linear', 'out_linear') num_random_features: Optional[int] = None use_thick_features: bool = False regularize_feature_norms: bool = True use_orthogonal_features: bool = True orthogonal_feature_algorithm: Union[str, OrthogonalFeatureAlgorithm] = OrthogonalFeatureAlgorithm.auto feature_redraw_interval: Optional[int] = 100 redraw_stochastically: bool = False redraw_verbose: bool = False # Optional here so the user doesn't have to set redundant parameters, but must be set by model before config is # passed to PerformerAttention.__init__() d_model: Optional[int] = None num_heads: Optional[int] = None # Make enums JSON serializable def to_dict(self): return {k: v.name if isinstance(v, Enum) else v for k, v in self.__dict__.items()}
LuCeHe/pyaromatics
keras_tools/configuration_performer_attention.py
configuration_performer_attention.py
py
7,658
python
en
code
6
github-code
13
44597138815
import logging

from django.http import HttpRequest, JsonResponse

from .models import Room

logger = logging.getLogger(__name__)


def index(request: HttpRequest) -> JsonResponse:
    """Trivial success endpoint that echoes the requesting user."""
    payload = {"message": "Success", "user": str(request.user)}
    return JsonResponse(payload)


def room(request: HttpRequest, room_name: str) -> JsonResponse:
    """Look up a chat room by slug; respond 404 with a JSON body when absent."""
    try:
        found = Room.objects.get(slug=room_name)
    except Room.DoesNotExist:
        missing = JsonResponse({"message": "Room does not exist"})
        missing.status_code = 404
        return missing
    return JsonResponse(
        {"message": "Success", "data": {"id": found.id, "name": found.name}}
    )
martasd/moving-fast-backend
apps/chat/views.py
views.py
py
651
python
en
code
0
github-code
13
17061056224
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Money import Money


class TuitionInremitOrder(object):
    """Alipay domain object describing one overseas-tuition remittance order."""

    # Attribute names in their canonical serialisation order; drives the
    # generic dict conversion helpers below.
    _FIELD_NAMES = (
        'alipay_payment_id',
        'isv_payment_id',
        'order_created',
        'order_modified',
        'order_status',
        'order_status_desc',
        'payment_amount',
        'payment_item_code',
        'school_pid',
        'student_id',
    )

    def __init__(self):
        # Every attribute starts unset; populate through the properties or
        # via from_alipay_dict().
        self._alipay_payment_id = None
        self._isv_payment_id = None
        self._order_created = None
        self._order_modified = None
        self._order_status = None
        self._order_status_desc = None
        self._payment_amount = None
        self._payment_item_code = None
        self._school_pid = None
        self._student_id = None

    @property
    def alipay_payment_id(self):
        return self._alipay_payment_id

    @alipay_payment_id.setter
    def alipay_payment_id(self, value):
        self._alipay_payment_id = value

    @property
    def isv_payment_id(self):
        return self._isv_payment_id

    @isv_payment_id.setter
    def isv_payment_id(self, value):
        self._isv_payment_id = value

    @property
    def order_created(self):
        return self._order_created

    @order_created.setter
    def order_created(self, value):
        self._order_created = value

    @property
    def order_modified(self):
        return self._order_modified

    @order_modified.setter
    def order_modified(self, value):
        self._order_modified = value

    @property
    def order_status(self):
        return self._order_status

    @order_status.setter
    def order_status(self, value):
        self._order_status = value

    @property
    def order_status_desc(self):
        return self._order_status_desc

    @order_status_desc.setter
    def order_status_desc(self, value):
        self._order_status_desc = value

    @property
    def payment_amount(self):
        return self._payment_amount

    @payment_amount.setter
    def payment_amount(self, value):
        # Coerce plain dicts coming off the wire into Money domain objects.
        if isinstance(value, Money):
            self._payment_amount = value
        else:
            self._payment_amount = Money.from_alipay_dict(value)

    @property
    def payment_item_code(self):
        return self._payment_item_code

    @payment_item_code.setter
    def payment_item_code(self, value):
        self._payment_item_code = value

    @property
    def school_pid(self):
        return self._school_pid

    @school_pid.setter
    def school_pid(self, value):
        self._school_pid = value

    @property
    def student_id(self):
        return self._student_id

    @student_id.setter
    def student_id(self, value):
        self._student_id = value

    def to_alipay_dict(self):
        """Serialise every truthy attribute, recursing into nested domain objects."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an order from a response dict; returns None for empty input."""
        if not d:
            return None
        o = TuitionInremitOrder()
        # Assign through the properties so setters (e.g. Money coercion) run.
        for name in TuitionInremitOrder._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
alipay/alipay-sdk-python-all
alipay/aop/api/domain/TuitionInremitOrder.py
TuitionInremitOrder.py
py
6,122
python
en
code
241
github-code
13
42166207440
import os import shutil from admin import Admin from autoit import Autoit from base import Base from category import Category from channel import Channel from clsQrCodeReader import QrCodeReader import clsTestService from editEntryPage import EditEntryPage from entryPage import EntryPage from general import General from kea import Kea import localSettings from logger import * from login import Login from myHistory import MyHistory from myMedia import MyMedia from myPlaylists import MyPlaylists from player import Player from upload import Upload from webcast import Webcast from home import Home from freeTrial import FreeTrial from pitch import Pitch from kafBB import BlackBoard from kafSharepoint import SharePoint from selenium.webdriver.common.keys import Keys from api import ApiClientSession from globalSearch import GlobalSearch import filecmp from kafGeneric import KafGeneric from kafMoodle import Moodle from kafCanvas import Canvas from kafD2L import D2L from kafJive import Jive from kafSakai import Sakai from recscheduling import Recscheduling from kafBBUltra import BlackBoardUltra from quizAnalytics import QuizAnalytics from reach import Reach #============================================================================================================ # The class contains functions that relates to common actions #============================================================================================================ class Common(): # Parameters driver = None def __init__(self, driver): self.driver = driver self.base = Base(driver) self.autoit = Autoit(self) self.admin = Admin(self, driver) self.login = Login(self, driver) self.upload = Upload(self, driver) self.general = General(self, driver) self.myMedia = MyMedia(self, driver) self.entryPage = EntryPage(self, driver) self.editEntryPage = EditEntryPage(self, driver) self.category = Category(self, driver) self.channel = Channel(self, driver) self.myPlaylists = MyPlaylists(self, driver) self.player = 
Player(self, driver) self.myHistory = MyHistory(self, driver) self.qrcode = QrCodeReader(self, driver) self.kea = Kea(self, driver) self.home = Home(self, driver) self.freeTrail = FreeTrial(self, driver) self.apiClientSession = ApiClientSession(self, driver) self.globalSearch = GlobalSearch(self, driver) self.webcast = Webcast(self, driver) self.recscheduling = Recscheduling(self, driver) self.quizAnalytics = QuizAnalytics(self, driver) self.pitch = Pitch(self, driver) self.reach = Reach(self, driver) ### KAF ### self.kafGeneric = KafGeneric(self, driver) self.blackBoard = BlackBoard(self, driver) self.sharePoint = SharePoint(self, driver) self.moodle = Moodle(self, driver) self.canvas = Canvas(self, driver) self.d2l = D2L(self, driver) self.jive = Jive(self, driver) self.sakai = Sakai(self, driver) self.blackBoardUltra = BlackBoardUltra(self, driver) #============================================================================================================= # Locators: #============================================================================================================= #============================================================================================================ # Common Methods #============================================================================================================ def instertPathInFileUploadWindows(self, path): if localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd": autoitDr = self.autoit.autoitDriver elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd2": autoitDr = self.autoit.autoitDriver2 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd3": autoitDr = self.autoit.autoitDriver3 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd4": autoitDr = self.autoit.autoitDriver4 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd5": autoitDr = self.autoit.autoitDriver5 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == 
"qaKmsFrontEnd6": autoitDr = self.autoit.autoitDriver6 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd7": autoitDr = self.autoit.autoitDriver7 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd8": autoitDr = self.autoit.autoitDriver8 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd9": autoitDr = self.autoit.autoitDriver9 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd10": autoitDr = self.autoit.autoitDriver10 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd11": autoitDr = self.autoit.autoitDriver11 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd12": autoitDr = self.autoit.autoitDriver12 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd13": autoitDr = self.autoit.autoitDriver13 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd14": autoitDr = self.autoit.autoitDriver14 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd15": autoitDr = self.autoit.autoitDriver15 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd16": autoitDr = self.autoit.autoitDriver16 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd17": autoitDr = self.autoit.autoitDriver17 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd18": autoitDr = self.autoit.autoitDriver18 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd19": autoitDr = self.autoit.autoitDriver19 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd20": autoitDr = self.autoit.autoitDriver20 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd21": autoitDr = self.autoit.autoitDriver21 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd22": autoitDr = self.autoit.autoitDriver22 elif localSettings.LOCAL_SETTINGS_SELENIUM_GRID_POOL == "qaKmsFrontEnd23": autoitDr = self.autoit.autoitDriver23 if 
(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_IE): # TODO IE not implemented yet autoitDr.execute_script(localSettings.LOCAL_SETTINGS_REMOTE_KMS_WEB_DIR + r'autoit\openFile.exe', path) elif(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_FIREFOX): autoitDr.execute_script(localSettings.LOCAL_SETTINGS_REMOTE_KMS_WEB_DIR + r'autoit\openFileFirefox.exe', path) elif(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_CHROME): # If running on chrome, use autoitDriver2 because it on another node autoitDr.execute_script(localSettings.LOCAL_SETTINGS_REMOTE_KMS_WEB_DIR + r'autoit\openFileChrome.exe', path) else: writeToLog("INFO","FAILED to type into 'Choose File' window, unknown browser: '" + localSettings.LOCAL_RUNNING_BROWSER + "'") def loginAsUser(self): if self.base.getAppUnderTest() == enums.Application.MEDIA_SPACE: return self.login.loginToKMS(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.PITCH: return self.pitch.loginToPitch(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.BLACK_BOARD: return self.blackBoard.loginToBlackBoard(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.SHARE_POINT: return self.sharePoint.loginToSharepoint(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.MOODLE: return self.moodle.loginToMoodle(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.CANVAS: return self.canvas.loginToCanvas(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.D2L: 
return self.d2l.loginToD2L(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.JIVE: return self.jive.loginToJive(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.SAKAI: return self.sakai.loginToSakai(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) elif self.base.getAppUnderTest() == enums.Application.BLACKBOARD_ULTRA: return self.blackBoardUltra.loginToBlackBoardUltra(localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, localSettings.LOCAL_SETTINGS_LOGIN_PASSWORD) # Author: Tzachi Guetta def navigateTo(self, navigateTo, navigateFrom='', nameValue='', forceNavigate=False): if navigateTo == enums.Location.ENTRY_PAGE: if self.entryPage.navigateToEntry(nameValue, navigateFrom) == False: writeToLog("INFO","FAILED navigate to entry: '" + nameValue) return False elif navigateTo == enums.Location.EDIT_ENTRY_PAGE: if self.editEntryPage.navigateToEditEntry(nameValue, navigateFrom) == False: writeToLog("INFO","FAILED navigate to edit entry: '" + nameValue) return False elif navigateTo == enums.Location.MY_MEDIA: if self.myMedia.navigateToMyMedia(forceNavigate) == False: writeToLog("INFO","FAILED navigate to my media") return False elif navigateTo == enums.Location.CHANNELS_PAGE: if self.channel.navigateToChannels() == False: writeToLog("INFO","FAILED navigate to Channels page") return False elif navigateTo == enums.Location.MY_CHANNELS_PAGE: if self.channel.navigateToMyChannels() == False: writeToLog("INFO","FAILED navigate to my Channels") return False elif navigateTo == enums.Location.CHANNEL_PAGE: if self.channel.navigateToChannel(nameValue, navigateFrom) == False: writeToLog("INFO","FAILED navigate to Channel: " + nameValue) return False elif navigateTo == enums.Location.MY_PLAYLISTS: if self.myPlaylists.navigateToMyPlaylists(forceNavigate) == False: 
writeToLog("INFO","FAILED navigate to my Playlists") return False elif navigateTo == enums.Location.CATEGORY_PAGE: if self.category.navigateToCategory(nameValue) == False: writeToLog("INFO","FAILED navigate to Category: " + str(nameValue)) return False elif navigateTo == enums.Location.MY_HISTORY: if self.myHistory.navigateToMyHistory(forceNavigate) == False: writeToLog("INFO","FAILED navigate to my history") return False elif navigateTo == enums.Location.HOME: if self.home.navigateToHomePage(forceNavigate) == False: writeToLog("INFO","FAILED navigate to home page") return False return True # @Author: Inbar Willman def writeToFile(self, path, text): try: file1 = open(path,"w") file1.write(text) file1.close() except: writeToLog("INFO","FAILED to write file: " + path + "; Text: " + text) return False return True # @Author: Oleg Sigalov def deleteFile(self, path): try: os.remove(path) except: writeToLog("INFO","FAILED to delete file: " + path) return False return True # @Author: Oleg Sigalov # Use ONLY for linux def createFolder(self, path): try: os.makedirs(path) writeToLog("INFO","Created folder: " + path) except: writeToLog("INFO","FAILED to create folder: " + path) return False return True # @Author: Oleg Sigalov # Use ONLY for linux def deleteFolder(self, path): try: shutil.rmtree(path) writeToLog("INFO","Deleted folder: " + path) except: writeToLog("INFO","FAILED to delete folder: " + path) return False return True # @Author: Oleg Sigalov # leavePageExpected=True if the test may fail somewhere, and Leave Page may appear. # we need to click leave page, because it will not continue to tearDown and other tests... 
def handleTestFail(self, status, leavePageExpected=False): self.switch_to_default_iframe_generic() if status == "Fail": # Get the page source #TODO #self.base.craetePageSourceLogFile() # Take last screenshot self.base.takeScreeshotGeneric('LAST_SCRENNSHOT') if leavePageExpected==True: # Try to navigate to any place to show leave page if it was not visible self.base.navigate(localSettings.LOCAL_SETTINGS_TEST_BASE_URL) # Try to click leave page if already present self.base.click_leave_page() return True # @Author: Oleg Sigalov # Switch to default Media Space Iframe, if testing Media Space it will switch to default_content # If testing KAF, it will switch to KAF Media Space Iframe def switch_to_default_iframe_generic(self): if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.MEDIA_SPACE: return self.base.switch_to_default_content() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.BLACK_BOARD: return self.blackBoard.switchToBlackboardIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.MOODLE: return self.moodle.switchToMoodleIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.CANVAS: return self.canvas.switchToCanvasIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.D2L: return self.d2l.switchToD2LIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.JIVE: return self.jive.switchToJiveIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.SAKAI: return self.sakai.switchToSakaiIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.SHARE_POINT: return self.sharePoint.switchToSharepointIframe() elif localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.BLACKBOARD_ULTRA: return self.blackBoardUltra.switchToBlackboardUltraIframe() else: self.base.switch_to_default_content() def 
sendKeysToBodyElement(self, keys, multipleAction=1): for i in range(multipleAction): self.base.send_keys_to_element(self.base.get_body_element(), keys) return True # Check which search bar do we have: old or new (elastic) def isElasticSearchOnPage(self): if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST != enums.Application.MEDIA_SPACE: return True if len(self.base.get_elements(self.myMedia.MY_MEDIA_ELASTIC_SEARCH_BAR)) > 1: return True else: return False # @Author: Inbar Willman # Compare between two files binary def compareBetweenTwoFilesBinary(self, path1, path2): # Compare between two files if filecmp.cmp(path1, path2) == False: writeToLog("INFO","FAILED to find match between two files") return False else: writeToLog("INFO","Two files are identical (binary)") writeToLog("INFO","File Path 1: '" + path1 + "'") writeToLog("INFO","File Path 2: '" + path2 + "'") return True # @Author: Horia Cus # This functions verifies if a file is present in the specific filePath location # filePath must contain the following format: os.path.join(localSettings.LOCAL_SETTINGS_TEMP_DOWNLOADS, name + ".extension") def verifyFilePathLocationIsValid(self, filePath): if os.path.isfile(filePath) == True: writeToLog("INFO", "The following path location is present: " + filePath ) return True else: writeToLog("INFO", "The following path location is not present: " + filePath ) return False # @Author: Horia Cus # This functions verifies if a file has a minimum size # filePath must contain the following format: os.path.join(localSettings.LOCAL_SETTINGS_TEMP_DOWNLOADS, name + ".extension") def verifyMinimumSizeOfAFile(self, filePath, fileSize=1024): if os.path.getsize(filePath) >= fileSize: writeToLog("INFO", "The downloaded file has content in it") return True elif os.path.getsize(filePath) <= 1: writeToLog("INFO", "The " + filePath + " file location is empty") return False # @Author: Inbar Willman # Compare between two csv files def compareBetweenTwoCsvFiles(self, file1, file2): with 
open(file1, 'r', encoding='utf-8') as t1, open(file2, 'r', encoding='utf-8') as t2: fileOne = t1.readlines() fileTwo = t2.readlines() for line in fileTwo: if line not in fileOne: writeToLog("INFO", "FAILED: Files aren't matching") return False writeToLog("INFO", "Success: files are matching") return True
NadyaDi/kms-automation
web/lib/clsCommon.py
clsCommon.py
py
19,736
python
en
code
0
github-code
13
13377918791
import requests import json import re from bs4 import BeautifulSoup from selenium.webdriver import Chrome from selenium.webdriver.chrome.options import Options import csv import html import time from datetime import datetime def recent_posts(username,no_of_post = 50): """With the input of an account page and number of posts to scrape, return the posts urls""" url = "https://www.instagram.com/" + username + "/" chrome_options = Options() chrome_options.add_argument("--disable-extensions") chrome_options.add_argument("--disable-gpu") chrome_options.add_argument("--headless") browser = Chrome(options=chrome_options) browser.get(url) post = 'https://www.instagram.com/p/' post_links = [] while len(post_links) < no_of_post: links = [a.get_attribute('href') for a in browser.find_elements_by_tag_name('a')] for link in links: if post in link and link not in post_links: post_links.append(link) scroll_down = "window.scrollTo(0, document.body.scrollHeight);" browser.execute_script(scroll_down) time.sleep(8) print('post url collection done...') return post_links def get_stat(posts): data=[] time_pattern = re.compile(r"\"taken_at_timestamp\":(\d+),") for post in posts: response = requests.get(post) soup = BeautifulSoup(response.content,features="html.parser") detail = (soup.find(property="og:description")['content']) title = html.unescape(soup.find(property="og:title")['content']).encode("utf-8") title = title.decode('utf-8') like_index = detail.find('Likes')-1 # comments_index = detail.find('Comments')-1 # comments = detail[detail.find(',')+2:comments_index] # data.append([post,likes,comments]) likes = detail[:like_index] t=re.search(time_pattern,str(soup)) timestamp=int(t.group()[21:-1]) dt = datetime.fromtimestamp(timestamp) readable_time=dt.strftime('%B %d') data.append([post,timestamp,readable_time,title,likes]) time.sleep(3) print('data cleaning done...') return data urls = recent_posts("pikkal_creative",50) # urls = recent_posts("gem0816",10) data=get_stat(urls) with 
open('insta_01032020.csv', mode='w',encoding="utf-8") as insta_file: insta_writer = csv.writer(insta_file, delimiter=',', quoting=csv.QUOTE_ALL) insta_writer.writerow(['link','timestamp','time','summary','likes']) for item in data: insta_writer.writerow(item) insta_file.close()
tlylt/Social-Media-Dashboard
insta_likes_v1.py
insta_likes_v1.py
py
2,570
python
en
code
1
github-code
13
70696627219
import cv2 import numpy as np frameHeight = 480 frameWidth = 640 cap = cv2.VideoCapture(1) cap.set(3, frameWidth) cap.set(4, frameHeight) while True: _, img = cap.read() cv2.cvtColor(img, cv2.COLOR_BGR2HSV) cv2.imshow("Original", img) # cv2.imshow("OriginalHsv", imgHsv) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows()
anant-harryfan/Python_basic_to_advance
PythonTuts/Python_other_tuts/murtaza_workshop/open-cv/Tut8_Realtime_Color_Detection.py
Tut8_Realtime_Color_Detection.py
py
385
python
en
code
0
github-code
13
31740291885
# encoding: utf-8 """ @author: nanjixiong @time: 2020/6/27 16:06 @file: examle7.py @desc: """ from urllib import request,parse url='http://localhost/post' headers={ 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36', 'Host':'' } dict={ 'name':'Genery' } data=bytes(parse.urlencode(dict),encoding='utf-8') req=request.Request(url=url,data=data,headers=headers,method="POST") response=request.urlopen(req) print(response.read().decode("utf-8"))
lixixi89055465/py_stu
crawler/urllib/examle7.py
examle7.py
py
536
python
en
code
1
github-code
13
74176815699
import requests from io import BytesIO import numpy as np from PIL import Image import psycopg2 from datetime import datetime import nongit def main(): r = requests.get("https://api.tomtom.com/traffic/map/4/tile/flow/relative/11/1136/693.png?key="+nongit.apikey()) stream = BytesIO(r.content) UL = Image.open(stream).convert("RGBA") stream.close() r = requests.get("https://api.tomtom.com/traffic/map/4/tile/flow/relative/11/1137/693.png?key="+nongit.apikey()) stream = BytesIO(r.content) UR = Image.open(stream).convert("RGBA") stream.close() r = requests.get("https://api.tomtom.com/traffic/map/4/tile/flow/relative/11/1136/694.png?key="+nongit.apikey()) stream = BytesIO(r.content) DL = Image.open(stream).convert("RGBA") stream.close() r = requests.get("https://api.tomtom.com/traffic/map/4/tile/flow/relative/11/1137/694.png?key="+nongit.apikey()) stream = BytesIO(r.content) DR= Image.open(stream).convert("RGBA") stream.close() UP=[UL,UR] DOWN=[DL,DR] uimg= Image.fromarray( np.hstack([UL,UR])) dimg= Image.fromarray( np.hstack([DL,DR])) img=Image.fromarray(np.vstack([uimg,dimg])) img.save('fin.png') try: connection = nongit.connectiondata() cursor = connection.cursor() postgres_insert_query = """ INSERT INTO images VALUES (%s,%s)""" a=np.array(img) temp=a.tolist() record_to_insert = (datetime.now(),str.encode(str(temp))) cursor.execute(postgres_insert_query, record_to_insert) connection.commit() count = cursor.rowcount print (count, "Record inserted successfully into mobile table") except (Exception, psycopg2.Error) as error : if(connection): print("Failed to insert record into mobile table", error) finally: #closing database connection. if(connection): cursor.close() connection.close() print("PostgreSQL connection is closed")
GrzegorzZmuda/korki
tomtomrequest.py
tomtomrequest.py
py
2,066
python
en
code
0
github-code
13
25296865050
import mock from django.test import TestCase from django.core.exceptions import ObjectDoesNotExist from sawps.tests.models.account_factory import ( UserF, ) from population_data.models import AnnualPopulation, AnnualPopulationPerActivity from species.models import OwnedSpecies from species.factories import TaxonFactory from property.factories import PropertyFactory from activity.factories import ActivityTypeFactory from population_data.utils import ( copy_owned_species_fields, assign_annual_population ) class TestMigrationFunction(TestCase): """ Test migration function. """ def setUp(self) -> None: taxon = TaxonFactory.create() property_obj = PropertyFactory.create() user = UserF.create() self.activity_type = ActivityTypeFactory.create() self.owned_species = OwnedSpecies.objects.create( taxon=taxon, property=property_obj, user=user, area_available_to_species=10 ) self.annual_population = AnnualPopulation.objects.create( year=2023, owned_species=self.owned_species, total=100 ) AnnualPopulation.objects.create( year=2022, owned_species=self.owned_species, total=50 ) self.annual_population_per_activity = AnnualPopulationPerActivity.objects.create( year=2023, owned_species=self.owned_species, total=100, activity_type=self.activity_type ) def test_copy_owned_species_fields(self): fields = [ 'user', 'taxon', 'property', 'area_available_to_species' ] for field in fields: if field == 'area_available_to_species': self.assertEqual( getattr(self.annual_population, field), 0.0 ) else: self.assertIsNone( getattr(self.annual_population, field) ) copy_owned_species_fields(self.annual_population) self.annual_population.refresh_from_db() for field in fields: self.assertEqual( getattr(self.annual_population, field), getattr(self.owned_species, field) ) def test_assign_annual_population(self): assign_annual_population(self.annual_population_per_activity) self.annual_population_per_activity.refresh_from_db() self.assertEqual( self.annual_population_per_activity.annual_population, 
self.annual_population, ) def test_assign_annual_population_not_exist(self): taxon = TaxonFactory.create() property_obj = PropertyFactory.create() user = UserF.create() activity_type = ActivityTypeFactory.create() owned_species = OwnedSpecies.objects.create( taxon=taxon, property=property_obj, user=user, area_available_to_species=5 ) an_pop_pa = AnnualPopulationPerActivity.objects.create( year=2023, owned_species=owned_species, total=10, activity_type=self.activity_type ) AnnualPopulationPerActivity.objects.create( year=2023, owned_species=owned_species, total=20, activity_type=activity_type ) assign_annual_population(an_pop_pa) # The annual population total would be 30, coming from 20 + 10 self.assertEqual(an_pop_pa.annual_population.total, 30) def test_owned_species_representation(self): taxon = TaxonFactory.create() property_obj = PropertyFactory.create() user = UserF.create() owned_species = OwnedSpecies.objects.create( taxon=taxon, property=property_obj, user=user, area_available_to_species=5 ) self.assertEqual(str(owned_species), property_obj.name) # mock property field to raise ObjectDoesNotExist with mock.patch.object(OwnedSpecies, 'property', new_callable=mock.PropertyMock) as mocked_obj: mocked_obj.side_effect = ObjectDoesNotExist('error') self.assertEqual(str(owned_species), "OwnedSpecies-{}".format(owned_species.id))
kartoza/sawps
django_project/population_data/tests/test_utils.py
test_utils.py
py
4,431
python
en
code
0
github-code
13
7337615354
from django.shortcuts import render, redirect, reverse from django.views import generic from django.http import HttpResponse from django.core.mail import send_mail from .models import Lead, Agent from .forms import LeadForm, CustomerForm from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.forms import UserCreationForm # Create your views here. class SignupView(generic.CreateView): template_name = "registration/signup.html" form_class = CustomerForm def get_success_url(self): return reverse("login") # CLASS BASED VIEW FOR HOMEPAGE class HomePage(generic.TemplateView): template_name = "leads/homepage_lead.html" # FUNCTION BASED VIEW FOR HOMEPAGE # def homepage(request): # return render(request, 'leads/homepage_lead.html') # CLASS BASED VIEW FOR DETAILS VIEW # class LeadDetailsView(DetailView): # template_name = "leaddetails.html" # queryset = Lead.objects.all() # context_object_name = "details" # FUNCTION BASED VIEW FOR DETAILS VIEW @login_required(login_url='/login') def lead_details(request, pk): details = Lead.objects.get(id=pk) context = {"details": details} return render(request, 'leaddetails.html', context) # class LeadListView(ListView): # template_name = "leadlist.html" # queryset = Lead.objects.all() # context_object_name = "leads" @login_required(login_url='/login') def lead_list(request): leads = Lead.objects.all() context = {"leads": leads} return render(request, 'leadlist.html', context) # class LeadCreateView(CreateView): # template_name = "lead_form.html" # lead_form = LeadForm # # def get_success_url(self): # return reverse("leadlist") @login_required(login_url='/login') def lead_form(request): form = LeadForm() if request.method == "POST": form = LeadForm(request.POST) subject = "Welcome to TCRM." message = f"Hi , you are now a lead at TCRM." 
email_from = "oyeniyiemperor@gmail.com" recipient_list = ["oyeniyiemperor@gmail.com"] if form.is_valid(): send_mail(subject, message, email_from, recipient_list) form.save() return redirect('/') context = {"form": form} return render(request, 'lead_form.html', context) @login_required(login_url='/login') def lead_update(request, pk): lead = Lead.objects.get(id=pk) form = LeadForm(instance=lead) if request.method == "POST": form = LeadForm(request.POST, instance=lead) if form.is_valid(): form.save() return redirect('/leadlist') context = {"form": form, "lead": lead} return render(request, 'lead_update.html', context) @login_required(login_url='/login') def lead_delete(request, pk): lead = Lead.objects.get(id=pk) lead.delete() return redirect('/')
Taoheed-O/CRM_w_Django
TCRM/leads/views.py
views.py
py
2,941
python
en
code
1
github-code
13
37215916107
#!/usr/bin/env python import petl as etl from datetime import datetime print("PULSE DATA") pulse_tab = ( etl .fromcsv('measurements.csv') .convert('value', float) .convert('value', int) .convert('timestamp', int) .convert('timestamp', lambda t: datetime.fromtimestamp(int(t/1000.0))) ) print(pulse_tab.lookall()) print("PANAMA DATA") src_tab = ( etl .fromjson('monitor.json', header=['timestamp', 'metric', 'source', 'measure']) .convert('timestamp', lambda t: datetime.fromtimestamp(int(t/1000.0))) .select('source', lambda v: v == 'panama-scheduler') .rename('measure', 'value') ) print(src_tab.lookall())
jdgwartney/measurement-debugging
munge.py
munge.py
py
654
python
en
code
0
github-code
13
32823716282
#!/usr/bin/env python3 """ @summary: test Ethereum RPC = helps to identify the correct RPC-address @version: v60 (26/October/2020) @since: 26/October/2020 @author: https://github.com/drandreaskrueger @see: https://github.com/drandreaskrueger/chainhammer for updates """ from pprint import pprint import requests # pip3 install requests print ("Start a network node, for example by:") print (" geth --rpc --dev") input ("Press ENTER when ready") RPCaddress = 'http://192.168.1.1:8545' RPCaddress = 'http://wrongaddress:8545' RPCaddress = 'http://localhost:8545' # See e.g. https://eth.wiki/json-rpc/API#eth_blocknumber for methods method, parameters = "eth_getBlockByNumber", ["0x0", False] method, parameters = "eth_blockNumber", [] method, parameters = "eth_nonExistingMethod", [] method, parameters = "web3_clientVersion", [] payload= {"method" : method, "params" : parameters, "jsonrpc" : "2.0", "id" : 1} headers = {'Content-type' : 'application/json'} print ("\nUsing '%s' to query RPC, with payload '%s'\n" % (RPCaddress, payload)) try: response = requests.post(RPCaddress, json=payload, headers=headers, timeout=5) except Exception as e: print ("Bad: (%s) %s" % (type(e), e)) print ("Try again...") else: print ("response.status_code:", response.status_code) print ("response.text", response.text) error=response.json().get("error", None) if error: print ("Yes but only partial success, as we got an answer - but it says error='(%s) %s'" % (error['code'], error['message'])) else: print ("method --> response.json()['result']:\n%s --> " % method, end="") pprint (response.json()['result']) print ("\nYes, full success. So this '%s' did answer. Great." 
% RPCaddress) """ # example output, in case of success: Start a network node, for example by: geth --rpc --dev Press ENTER when ready Using 'http://localhost:8545' to query RPC, with payload '{'method': 'web3_clientVersion', 'params': [], 'jsonrpc': '2.0', 'id': 1}' response.status_code: 200 response.text {"jsonrpc":"2.0","id":1,"result":"Geth/v1.9.6-stable/linux-amd64/go1.13.4"} method --> response.json()['result']: web3_clientVersion --> 'Geth/v1.9.6-stable/linux-amd64/go1.13.4' Yes, full success. So this 'http://localhost:8545' did answer. Great. """
drandreaskrueger/chainhammer
hammer/test_RPC.py
test_RPC.py
py
2,361
python
en
code
121
github-code
13
39109886012
import tensorflow as tf import numpy as np import time from asynch_mb.logger import logger class Trainer(object): """ Performs steps for MAML Args: algo (Algo) : env (Env) : sampler (Sampler) : sample_processor (SampleProcessor) : baseline (Baseline) : policy (Policy) : n_itr (int) : Number of iterations to train for start_itr (int) : Number of iterations policy has already trained for, if reloading num_inner_grad_steps (int) : Number of inner steps per maml iteration sess (tf.Session) : current tf session (if we loaded policy, for example) """ def __init__( self, env, sampler, dynamics_sample_processor, policy, dynamics_model, n_itr, start_itr=0, initial_random_samples=True, initial_sinusoid_samples=False, sess=None, dynamics_model_max_epochs=200, ): self.env = env self.sampler = sampler self.dynamics_sample_processor = dynamics_sample_processor self.dynamics_model = dynamics_model self.policy = policy self.n_itr = n_itr self.start_itr = start_itr self.dynamics_model_max_epochs = dynamics_model_max_epochs self.initial_random_samples = initial_random_samples self.initial_sinusoid_samples = initial_sinusoid_samples if sess is None: sess = tf.Session() self.sess = sess def train(self): """ Trains policy on env using algo Pseudocode: for itr in n_itr: for step in num_inner_grad_steps: sampler.sample() algo.compute_updated_dists() algo.optimize_policy() sampler.update_goals() """ with self.sess.as_default() as sess: # initialize uninitialized vars (only initialize vars that were not loaded) uninit_vars = [var for var in tf.global_variables() if not sess.run(tf.is_variable_initialized(var))] sess.run(tf.variables_initializer(uninit_vars)) start_time = time.time() for itr in range(self.start_itr, self.n_itr): itr_start_time = time.time() logger.log("\n ---------------- Iteration %d ----------------" % itr) time_env_sampling_start = time.time() if self.initial_random_samples and itr == 0: logger.log("Obtaining random samples from the environment...") env_paths = 
self.sampler.obtain_samples(log=True, random=True, log_prefix='') elif self.initial_sinusoid_samples and itr == 0: logger.log("Obtaining sinusoidal samples from the environment using the policy...") env_paths = self.sampler.obtain_samples(log=True, log_prefix='', sinusoid=True) else: logger.log("Obtaining samples from the environment using the policy...") env_paths = self.sampler.obtain_samples(log=True, log_prefix='') logger.record_tabular('Time-EnvSampling', time.time() - time_env_sampling_start) logger.log("Processing environment samples...") # first processing just for logging purposes time_env_samp_proc = time.time() samples_data = self.dynamics_sample_processor.process_samples(env_paths, log=True, log_prefix='EnvTrajs-') logger.record_tabular('Time-EnvSampleProc', time.time() - time_env_samp_proc) ''' --------------- fit dynamics model --------------- ''' time_fit_start = time.time() logger.log("Training dynamics model for %i epochs ..." % (self.dynamics_model_max_epochs)) self.dynamics_model.fit(samples_data['observations'], samples_data['actions'], samples_data['next_observations'], epochs=self.dynamics_model_max_epochs, verbose=False, log_tabular=True) logger.record_tabular('Time-ModelFit', time.time() - time_fit_start) """ ------------------- Logging Stuff --------------------------""" logger.logkv('Itr', itr) logger.logkv('n_timesteps', self.sampler.total_timesteps_sampled) logger.logkv('Time', time.time() - start_time) logger.logkv('ItrTime', time.time() - itr_start_time) logger.log("Saving snapshot...") params = self.get_itr_snapshot(itr) self.log_diagnostics(env_paths, '') logger.save_itr_params(itr, params) logger.log("Saved") logger.dumpkvs() if itr == 0: sess.graph.finalize() logger.log("Training finished") self.sess.close() def get_itr_snapshot(self, itr): """ Gets the current policy and env for storage """ return dict(itr=itr, policy=self.policy, env=self.env, dynamics_model=self.dynamics_model) def log_diagnostics(self, paths, prefix): 
self.env.log_diagnostics(paths, prefix) self.policy.log_diagnostics(paths, prefix)
zzyunzhi/asynch-mb
asynch_mb/trainers/mb_trainer.py
mb_trainer.py
py
5,530
python
en
code
12
github-code
13
24898314542
listcontact= {} def getname(): name=input("Введите имя контакта: ") name=name.title() name=name.strip() return name def trans_name(listcontact,name,num): if name in listcontact: listcontact[name] = num print("\nКонтакт успешно изменён\n") else: print("Такого контакта нету") return trans_name() def delete_contact(listcontact,name): if name in listcontact: listcontact.pop(name) print("\nКонтакт успешно удалён\n") else: print("Такого контакта нету") def get_num(): num=input("Введите номер телефона: ") num=num.replace(" ","").replace("-","") if num[0]=="9" and len(num)==10: num="+7"+num return num if num[0]=="8" and len(num)==11: num = "+7" + num[1:] return num if num[0]=="7" and len(num)==11: num = "+" + num return num if num[:2]=="+7" and len(num)==12: return num else: print("Неправильно набран номер\n") return get_num() def get_contact(listcontact,name,num): listcontact[name]=num print("Контакт успешно добавлен\n") return listcontact def show_contact(listcontact): print("Список контактов: ") for i in listcontact: print(i, listcontact[i]) def menu(): print("Выберите действие: \n 1.Добавить Контакт \n 2.Показать контакты \n 3.Удалить контакт \n" " 4.Изменить номер \n 5.Выход") while True: menu() p=int(input()) if p==1: get_contact(listcontact,getname(),get_num()) if p==2: show_contact(listcontact) if p==3: delete_contact(listcontact,getname()) if p==4: trans_name(listcontact,getname(),get_num()) if p==5: print("Спасибо за использование") break
BatyrKot/Univer4
HOME-CLASS-WORK/Phons/Phons.py
Phons.py
py
2,054
python
ru
code
0
github-code
13
45465882576
from particles import Particle_Set from parameters import EPS, dt import numpy as np class Interactions: ''' Class to deal with interactions (forces, collisions, ...) ''' def __init__(self, particles): self.particles = particles def elastic_collision(self, part1, part2): ''' Update speeds of two particles considering an elastic collision. ''' pos1, pos2 = part1.pos, part2.pos v1, v2 = part1.speed, part2.speed m1, m2 = part1.mass, part2.mass # updating m_factor1 = 2*m2/(m1+m2) m_factor2 = 2*m1/(m1+m2) scal1 = (v1-v2).dot(pos1-pos2) scal2 = (v2-v1).dot(pos2-pos1) if np.abs(scal1) < 0.1: # slight condition to prevent tangential behavior error. scal1 = 10*scal1/np.abs(scal1) scal2 = 10*scal2/np.abs(scal2) delta_pos1 = (pos1 - pos2) / np.linalg.norm(pos2-pos1)**2 delta_pos2 = -delta_pos1 part1.speed = v1 - m_factor1 * scal1 * delta_pos1 part2.speed = v2 - m_factor2 * scal2 * delta_pos2 def is_out_of_bounds(self, part): ''' Checks if a particle is out of bounds.''' out_of_bounds = False x, y = part.pos s = part.size break_loop = False for x_ in range(int(x-s/2), int(x+s/2)+1): if break_loop: break for y_ in range(int(y-s/2), int(y+s/2)+1): if True: out_of_bounds = (self.particles.map[(x_,y_)] < 0) if out_of_bounds: break_loop = True break return out_of_bounds def boundaries_collision(self, part, index): ''' Update particle when it hits a boundary ''' if index == -11: norm = np.array([0,1]) if index == -12: norm = np.array([0,-1]) if index == -21: norm = np.array([1,0]) if index == -22: norm = np.array([-1,0]) # the speed is updated accordingly. part.speed = part.speed - 2*part.speed.dot(norm)*norm # update positions previous_pos = part.pos while self.is_out_of_bounds(part): part.pos += norm # wipe the current index in the map. self.particles.map[tuple(previous_pos)] = 0 self.particles.map[tuple(part.pos)] = part.index def check_collisions(self, part): ''' Applies the collision between particles. The use of hash map makes the check constant. 
''' x, y = part.pos s = part.size k = part.index stop_loop_x = False # iterate over the particle area. for x_ in range(int(x-s/2), int(x+s/2)+1): if stop_loop_x: stop_loop_x = False break # first, find out if we hit a boundary. for y_ in range(int(y-s/2), int(y+s/2)+1): k_ = self.particles.map[(int(x_),int(y_))] if k_ in [-11,-12,-21,-22]: self.boundaries_collision(part, k_) stop_loop_x = True break # then check if we collide with another particle. elif k_ != 0 and k_ != k: # avoid out of disk + eps if (x_-x)**2 + (y_-y)**2 > s**2/4 + EPS: continue part_ = self.particles[k_-1] self.elastic_collision(part, part_) # update positions part.pos += dt*part.speed + part.speed / np.linalg.norm(part.speed) part_.pos += dt*part_.speed + part_.speed / np.linalg.norm(part_.speed) # we found a correct place to set the particle. Stop the loop. stop_loop_x = True break
MaGnaFlo/Brownian
interactions.py
interactions.py
py
3,117
python
en
code
0
github-code
13
31420537063
#TODO add offline mode from asciimatics.event import KeyboardEvent from asciimatics.widgets import * from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.exceptions import ResizeScreenError, StopApplication, NextScene import sys import os try: import magic except ImportError: pass from gui.bar import * from gui.mainplaylist import * from gui.browser import * from gui.clock import * from gui.equalizer import * from gui.playlists import * from gui.visualization import * from gui.medialib import * from gui.artistInfo import * from gui.lyrics import * from player import Player from gui.presenter import * from gui.search import * from lastfm_client import * from lyricsWiki import * from soundcloud_client import SoundcloudClient from db import * SCR = 1 f = open('config', 'rb') data = f.read().decode('utf-8') config = json.loads(data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values())) import pathlib pathlib.Path(config.cash_folder).mkdir(parents=True, exist_ok=True) pathlib.Path(config.playlist_folder).mkdir(parents=True, exist_ok=True) if config.useInternet: canConnectToSC = True try: sc = SoundcloudClient( config.sound_cloud.client_id, config.sound_cloud.client_secret, config.sound_cloud.username, config.sound_cloud.password, config.sound_cloud.bpm, config.sound_cloud.search_pages) except: canConnectToSC = False if config.useInternet: lastfm = Lastfm(config.lastfm.apikey, config.lastfm.lang) lyricsWiki = LyricsWiki() upBar = Bar() upBar.parse(config, UP_BAR) downBar = Bar() downBar.parse(config, DOWN_BAR) player = Player(config) presenter = Presenter(config) presenter.setPlayer(player) if config.useInternet: presenter.setSoundCloud(sc) presenter.setLastfm(lastfm) presenter.setLyricsWiki(lyricsWiki) lastfm.setPresenter(presenter) db = Database() db.PATH = config.root_dir presenter.setDb(db) def init(screen, old_scene): if config.useInternet: sc.setPresenter(presenter) browser = BrowserFrame(screen, upBar, downBar, config) 
browser.setPresenter(presenter) medialib = MedialibFrame(screen, upBar, downBar, config) medialib.setPresenter(presenter) playlists = PlaylistsFrame(screen, upBar, downBar, config) playlists.setPresenter(presenter) equalizer = EqualizerFrame(screen, upBar, downBar, config) equalizer.setPresenter(presenter) viz = VisualizationFrame(screen, upBar, downBar, config) viz.setPresenter(presenter) clock = ClockFrame(screen, upBar, downBar, config) clock.setPresenter(presenter) if config.useInternet: artistinfo = ArtistInfoFrame(screen, upBar, downBar, config) artistinfo.setPresenter(presenter) if config.useInternet: lyrics = LyricsFrame(screen, upBar, downBar, config) lyrics.setPresenter(presenter) search = SearchFrame(screen, upBar, downBar, config) search.setPresenter(presenter) mainplaylist = MainPlaylistFrame(screen, upBar, downBar, config) mainplaylist.setPresenter(presenter) presenter.setBrowser(browser) presenter.setMainPlaylist(mainplaylist) presenter.setPlaylists(playlists) presenter.setEqualizer(equalizer) presenter.setClock(clock) presenter.setUpBar(upBar) presenter.setDownBar(downBar) presenter.setVisualization(viz) presenter.setMedialib(medialib) if config.useInternet: presenter.setArtistInfo(artistinfo) presenter.setLyrics(lyrics) presenter.setSearch(search) player.setPresenter(presenter) presenter.run() screens = [Scene([mainplaylist], -1, name="MainPlaylist"), Scene([browser], -1, name="Browser"), Scene([medialib], -1, name="Medialib"), Scene([playlists], -1, name="Playlists"), Scene([equalizer], -1, name="Equalizer"), Scene([viz], -1, name="Visualizer")] if config.useInternet: screens.append(Scene([artistinfo], -1, name="ArtistInfo")) screens.append(Scene([lyrics], -1, name="Lyrics")) screens.append(Scene([clock], -1, name="Clock")) screens.append(Scene([search], -1, name="Search")) screen.play(screens, stop_on_resize=True, start_scene=old_scene) def openFile(fname): path = config.cash_folder + "/cash.json" if config.cash_folder[len(config.cash_folder)-1] 
!= "/" else "cash.json" playlist = loadPlaylist(path) tag = getTagFromPath(fname) tag.id = 0 for t in playlist: t.id += 1 playlist = [tag] + playlist savePlaylist(playlist, path) player.playlist = playlist #player.play() def printHelp(): from gui.dialog_info import (CONTROL_INFO, CLOCK_INFO, PLAYER_CONTROL_INFO, MAINPLAYLIST_INFO, PLAYLISTS_INFO, BROWSER_INFO, EQUALIZER_INFO, MEDIALIB_INFO, SEARCH_INFO, VIZUALIZER_INFO) text = "-db - create db (need delete old db)\n"+\ "-h --help - print help\n" + CONTROL_INFO + "\n"+ CLOCK_INFO + "\n"+ PLAYER_CONTROL_INFO + "\n"+\ MAINPLAYLIST_INFO + "\n"+ PLAYLISTS_INFO + "\n"+ BROWSER_INFO + "\n"+ EQUALIZER_INFO + "\n"+\ MEDIALIB_INFO + "\n"+ SEARCH_INFO + "\n"+ VIZUALIZER_INFO + "\n" print(text) def createDb(): #TODO delete old db if exist db.walk() def argParse(): lenargs = len(sys.argv) if lenargs == 2 and (sys.argv[1] != "-h" and sys.argv[1] != "--help" and sys.argv[1] != "-db"): #TODO format test openFile(sys.argv[1]) elif lenargs == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"): printHelp() sys.exit() elif lenargs == 2 and sys.argv[1] == "-db": createDb() sys.exit() argParse() last_scene = None while True: try: Screen.wrapper(init, catch_interrupt=False, arguments=[last_scene]) path = config.cash_folder + "/cash.json" if config.cash_folder[len(config.cash_folder)-1] != "/" else "cash.json" savePlaylist(player.playlist, path) player.destructor() sys.exit(0) except ResizeScreenError as e: last_scene = e.scene
J-CITY/Kitsune
main.py
main.py
py
5,704
python
en
code
9
github-code
13
39810278621
import os.path import jax import jax.numpy as jnp import jax.lax as lax import jax.scipy as jsp import numpy as np import einops from iqa.utils.convert_img import rgb2y, rgb2gray, imresize_half from typing import Literal, Sequence from functools import partial import pickle def _gamma(x): """ There's no gamma function in JAX, so we use the log abs gamma function and exp function instead. """ return jnp.exp(jsp.special.gammaln(x)) def _estimate_aggd_param(block) -> Sequence[jnp.ndarray]: block = block.flatten() gam = jnp.arange(0.2, 10.001, 0.001) gam_reciprocal = jnp.reciprocal(gam) r_gam = jnp.square(_gamma(gam_reciprocal * 2)) / (_gamma(gam_reciprocal) * _gamma(gam_reciprocal * 3)) left_std = jnp.sqrt( jnp.nanmean(jnp.where(block < 0, jnp.square(block), jnp.nan)) ) right_std = jnp.sqrt( jnp.nanmean(jnp.where(block > 0, jnp.square(block), jnp.nan)) ) gamma_hat = left_std / right_std rhat = (jnp.mean(jnp.abs(block))) ** 2 / jnp.mean(jnp.square(block)) rhat_norm = (rhat * (gamma_hat ** 3 + 1) * (gamma_hat + 1)) / ((gamma_hat ** 2 + 1) ** 2) array_position = jnp.argmin(jnp.square(r_gam - rhat_norm)) alpha = gam[array_position] beta_l = left_std * jnp.sqrt(_gamma(1 / alpha) / _gamma(3 / alpha)) beta_r = right_std * jnp.sqrt(_gamma(1 / alpha) / _gamma(3 / alpha)) return alpha, beta_l, beta_r def _compute_feature(block): alpha, beta_l, beta_r = _estimate_aggd_param(block) feats = jnp.array([alpha, (beta_l + beta_r) / 2]) shifts = jnp.array([[0, 1], [1, 0], [1, 1], [1, -1]]) def _compute_feature_shift(shift): shifted_block = jnp.roll(block, shift, axis=(0, 1)) alpha, beta_l, beta_r = _estimate_aggd_param(block * shifted_block) mean = (beta_r - beta_l) * (_gamma(2 / alpha) / _gamma(1 / alpha)) return jnp.array([alpha, mean, beta_l, beta_r]) feats = jnp.concatenate([feats, jax.vmap(_compute_feature_shift)(shifts).flatten()]) return feats def _nancov(x): """ Exclude whole rows that contain NaNs. 
""" nan_cond = ~jnp.any(jnp.isnan(x), axis=1, keepdims=True) n = jnp.sum(nan_cond) x_filtered = jnp.where(nan_cond, x, jnp.zeros_like(x)) x_mean = jnp.sum(x_filtered, axis=0) / n x_centered = jnp.where(nan_cond, x - x_mean, jnp.zeros_like(x)) cov = jnp.matmul(x_centered.T, x_centered) / (n - 1) return cov def _calculate_niqe(img, mu_pris_param, cov_pris_param, gaussian_window, block_size=96): h, w, _ = img.shape n_blocks_h = h // block_size n_blocks_w = w // block_size img = img[jnp.newaxis, :n_blocks_h * block_size, :n_blocks_w * block_size, :].astype(jnp.float32) # TODO: Fix later k_h, k_w = gaussian_window.shape gaussian_window = gaussian_window[..., jnp.newaxis, jnp.newaxis].astype(jnp.float32) # Only 1 channel ( Y, gray ) distparams = [] for scale in (1, 2): img_pad = jnp.pad(img, ((0, 0), (k_h // 2, k_h // 2), (k_w // 2, k_w // 2), (0, 0)), mode='edge') mu = lax.conv_general_dilated( img_pad.astype(jnp.float32), gaussian_window, window_strides=(1, 1), padding='VALID', dimension_numbers=('NHWC', 'HWIO', 'NHWC') ).astype(jnp.float32) sigma = lax.conv_general_dilated( jnp.square(img_pad).astype(jnp.float32), gaussian_window, window_strides=(1, 1), padding='VALID', dimension_numbers=('NHWC', 'HWIO', 'NHWC') ).astype(jnp.float32) - jnp.square(mu) sigma = jnp.sqrt(jnp.abs(sigma)) img_norm = ((img.astype(jnp.float32) - mu) / (sigma + jnp.ones((1,), dtype=jnp.float32)))[0] img_norm = einops.rearrange( # blocks are arranged from w to h. (w h) b1 b2 c img_norm, '(h b1) (w b2) c -> (w h) b1 b2 c', b1=block_size // scale, b2=block_size // scale) feats = jax.vmap(_compute_feature)(img_norm) distparams.append(jnp.array(feats)) if scale == 1: img = imresize_half(img / 255., antialiasing=True) * 255. 
distparams = jnp.concatenate(distparams, axis=-1) mu_dist_param = jnp.nanmean(distparams, axis=0, keepdims=True, dtype=jnp.float64) cov_dist_param = _nancov(distparams) invcov_dist_params = jnp.linalg.pinv((cov_pris_param + cov_dist_param) / 2, rcond=1e-15) val = jnp.matmul( jnp.matmul((mu_pris_param - mu_dist_param), invcov_dist_params), jnp.transpose((mu_pris_param - mu_dist_param)) ) quality = jnp.sqrt(val).squeeze() return quality def niqe(img: jnp.ndarray, crop_border: int, convert_to: Literal['y', 'gray']): ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) with open(ROOT_DIR + '/niqe_pris_params.pkl', 'rb') as f: loaded = pickle.load(f) mu_pris_param = loaded['mu_pris_param'] cov_pris_param = loaded['cov_pris_param'] gaussian_window = loaded['gaussian_window'] if convert_to == 'y': img = rgb2y(img) elif convert_to == 'gray': img = rgb2gray(img) else: raise ValueError(f'Unknown convert_to value: {convert_to}') if crop_border > 0: img = img[:, crop_border:-crop_border, crop_border:-crop_border, :] img = img.round().astype(jnp.float64) calc_func = partial( _calculate_niqe, mu_pris_param=mu_pris_param, cov_pris_param=cov_pris_param, gaussian_window=gaussian_window) quality = jax.vmap(calc_func)(img) return quality class NIQE: def __init__(self, crop_border: int, convert_to: Literal['y', 'gray']): self.crop_border = crop_border self.convert_to = convert_to ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) with open(ROOT_DIR + '/niqe_pris_params.pkl', 'rb') as f: loaded = pickle.load(f) self.mu_pris_param = loaded['mu_pris_param'] self.cov_pris_param = loaded['cov_pris_param'] self.gaussian_window = loaded['gaussian_window'] def __call__(self, img: jnp.ndarray): if self.convert_to == 'y': img = rgb2y(img) elif self.convert_to == 'gray': img = rgb2gray(img) else: raise ValueError(f'Unknown convert_to value: {self.convert_to}') if self.crop_border > 0: img = img[:, self.crop_border:-self.crop_border, self.crop_border:-self.crop_border, :] img = 
img.round().astype(jnp.float64) calc_func = partial( _calculate_niqe, mu_pris_param=self.mu_pris_param, cov_pris_param=self.cov_pris_param, gaussian_window=self.gaussian_window) quality = jax.vmap(calc_func)(img) return quality
dslisleedh/IQA-jax
iqa/metrics/niqe.py
niqe.py
py
6,637
python
en
code
0
github-code
13
34051378589
import os os.system('cls') print ('Questão 2\n') n = int (input('Insira um número para calcular seu quadrado:\n')) sum = 0 for i in range (1,n+1): sum = sum + ((2*i)-1) print ('O quadrado do número %d é %d' % (n,sum))
ViniciusMaiaM/Python-projects
Desafios e tarefas/prova2.py
prova2.py
py
225
python
pt
code
0
github-code
13
5919799834
import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import pandas as pd import tensorflow as tf from tensorflow.keras.utils import to_categorical from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from mlp import FNC ### Global stuff 🌍 # data path : root = os.path.dirname(__file__) data_path = os.path.join(root,"..","data/") devices = ['/device:GPU:0','/device:GPU:1','/device:GPU:2','/device:GPU:3', '/device:GPU:4','/device:GPU:5','/device:GPU:6','/device:GPU:7'] # End global def get_data(p=0.2): df = pd.read_csv(os.path.join(data_path,'df.csv')) X , y = df['content'].values , df['label'].values print('Fitting TFIDF in progress...') vectorizer = TfidfVectorizer() X_tfidf = vectorizer.fit_transform(X).toarray() y = to_categorical(y) X_train, X_test, y_train, y_test = train_test_split(X_tfidf,y, test_size=p) return X_train, X_test, y_train, y_test if __name__ == '__main__': X_train, X_test, y_train, y_test = get_data() strategy = tf.distribute.MirroredStrategy(devices) with strategy.scope(): print('Building FNC in progress...') fnc = FNC(X_train,y_train) model = fnc.build_model() fnc.model = model print('FNC summary') fnc.summary() print('Start training FNC on 8 GPU\'s') fnc.train() print('Start Evaluation of FNC') val = fnc.evaluate(X_test,y_test) print(f'Evaluation results : for [{fnc}] : {val}')
djaymen/Fake_News
mlp/train.py
train.py
py
1,645
python
en
code
2
github-code
13
15159099709
import sympy as sym import sympy.plotting as syp import matplotlib.pyplot as plt sigma,mu,x = sym.Symbol('sigma'),sym.Symbol('mu'),sym.Symbol('x') sym.pprint(2*sym.pi*sigma) part1 = 1/(sym.sqrt(2*sym.pi*sigma**2)) part2 = sym.exp(-1*((x-mu)**2)/(2*sigma**2)) gauss_function=part1*part2 sym.pprint(gauss_function) sym.plot(gauss_function.subs({mu:0,sigma:1}),(x,-10,10),title="Gauss") ###Aynı işlemi for döngüsü ile yapalım. #evalf() -> matematiksel hale getirir. x_values,y_values = [],[] for value in range(-10,10): y = gauss_function.subs({mu:1,sigma:1,x:value}).evalf() x_values.append(value) y_values.append(y) plt.plot(x_values,y_values) plt.show()
oguzbalkaya/ProgramlamaLaboratuvari
sympyveornekleri2.py
sympyveornekleri2.py
py
673
python
en
code
0
github-code
13
19580658089
import argparse import json import sys import os.path import glob import xml.etree.ElementTree as ET from FFmpeg import HD_MODEL_NAME, HD_NEG_MODEL_NAME, HD_PHONE_MODEL_NAME ,_4K_MODEL_NAME, HD_PHONE_MODEL_VERSION from statistics import mean, harmonic_mean from Vmaf import vmaf from signal import signal, SIGINT def handler(signal_received, frame): print('SIGINT or CTRL-C detected. Exiting gracefully') sys.exit(0) def get_args(): '''This function parses and return arguments passed in''' parser = MyParser(prog='easyVmaf', description="Script to easy compute VMAF using FFmpeg. It allows to deinterlace, scale and sync Ref and Distorted video samples automatically: \ \n\n \t Autodeinterlace: If the Reference or Distorted samples are interlaced, deinterlacing is applied\ \n\n \t Autoscale: Reference and Distorted samples are scaled automatically to 1920x1080 or 3840x2160 depending on the VMAF model to use\ \n\n \t Autosync: The first frames of the distorted video are used as reference to a sync look up with the Reference video. \ \n \t \t The sync is doing by a frame-by-frame look up of the best PSNR\ \n \t \t See [-reverse] for more options of syncing\ \n\n As output, a json file with VMAF score is created", formatter_class=argparse.RawTextHelpFormatter) requiredgroup = parser.add_argument_group('required arguments') requiredgroup.add_argument( '-d', dest='d', type=str, help='Distorted video', required=True) requiredgroup.add_argument( '-r', dest='r', type=str, help='Reference video ', required=True) parser.add_argument('-sw', dest='sw', type=float, default=0, help='Sync Window: window size in seconds of a subsample of the Reference video. The sync lookup will be done between the first frames of the Distorted input and this Subsample of the Reference. (default=0. No sync).') parser.add_argument('-ss', dest='ss', type=float, default=0, help="Sync Start Time. Time in seconds from the beginning of the Reference video to which the Sync Window will be applied from. 
(default=0).") parser.add_argument('-fps', dest='fps', type=float, default=0, help='Video Frame Rate: force frame rate conversion to <fps> value. Autodeinterlace is disabled when setting this') parser.add_argument('-subsample', dest='n', type=int, default=1, help="Specifies the subsampling of frames to speed up calculation. (default=1, None).") parser.add_argument('-reverse', help="If enable, it Changes the default Autosync behaviour: The first frames of the Reference video are used as reference to sync with the Distorted one. (Default = Disable).", action='store_true') parser.add_argument('-model', dest='model', type=str, default="HD", help="Vmaf Model. Options: HD, 4K. (Default: HD).") parser.add_argument('-threads', dest='threads', type=int, default=0, help='number of threads') parser.add_argument( '-verbose', help='Activate verbose loglevel. (Default: info).', action='store_true') parser.add_argument( '-progress', help='Activate progress indicator for vmaf computation. (Default: false).', action='store_true') parser.add_argument( '-endsync', help='Activate end sync. This ends the computation when the shortest video ends. (Default: false).', action='store_true') parser.add_argument('-output_fmt', dest='output_fmt', type=str, default='json', help='Output vmaf file format. Options: json or xml (Default: json)') parser.add_argument( '-cambi_heatmap', help='Activate cambi heatmap. (Default: false).', action='store_true') parser.add_argument( '-sync_only', action='store_true', default=False, help='For sync measurement only. 
No Vmaf processing') if len(sys.argv) == 1: parser.print_help(sys.stderr) sys.exit(1) return parser.parse_args() class MyParser(argparse.ArgumentParser): def error(self, message): sys.stderr.write('error: %s\n' % message) self.print_help() sys.exit(2) if __name__ == '__main__': signal(SIGINT, handler) '''reading values from cmdParser''' cmdParser = get_args() main_pattern = cmdParser.d reference = cmdParser.r ''' to avoid error negative numbers are not allowed''' syncWin = abs(cmdParser.sw) ss = abs(cmdParser.ss) fps = abs(cmdParser.fps) n_subsample = abs(cmdParser.n) reverse = cmdParser.reverse model = cmdParser.model verbose = cmdParser.verbose output_fmt = cmdParser.output_fmt threads = cmdParser.threads print_progress = cmdParser.progress end_sync = cmdParser.endsync cambi_heatmap = cmdParser.cambi_heatmap sync_only = cmdParser.sync_only # Setting verbosity if verbose: loglevel = "verbose" else: loglevel = "info" # check output format if not output_fmt in ["json", "xml"]: print("output_fmt: ", output_fmt, " Not supported. JSON output used instead", flush=True) output_fmt = "json" ''' Distorted video path could be loaded as patterns i.e., "myFolder/video-sample-*.mp4" In this way, many computations could be done with just one command line. ''' main_pattern = os.path.expanduser(main_pattern) mainFiles = glob.glob(main_pattern) if not(os.path.isfile(reference)): print("Reference Video file not found: ", reference, flush=True) sys.exit(1) if len(mainFiles) == 0: print("Distorted Video files not found with the given pattern/name: ", main_pattern, flush=True) sys.exit(1) for main in mainFiles: myVmaf = vmaf(main, reference, loglevel=loglevel, subsample=n_subsample, model=model, output_fmt=output_fmt, threads=threads, print_progress=print_progress, end_sync=end_sync, manual_fps=fps, cambi_heatmap = cambi_heatmap) '''check if syncWin was set. 
If true offset is computed automatically, otherwise manual values are used ''' if syncWin > 0: offset, psnr = myVmaf.syncOffset(syncWin, ss, reverse) if cmdParser.sync_only: print("offset: ", offset, flush=True) sys.exit(1) else: offset = ss psnr = None if reverse: myVmaf.offset = -offset else: myVmaf.offset = offset vmafProcess = myVmaf.getVmaf() vmafpath = myVmaf.ffmpegQos.vmafpath vmafScore = [] vmafNegScore = [] vmafPhoneScore = [] if output_fmt == 'json': with open(vmafpath) as jsonFile: jsonData = json.load(jsonFile) for frame in jsonData['frames']: if model == 'HD': vmafScore.append(frame["metrics"][HD_MODEL_NAME]) vmafNegScore.append(frame["metrics"][HD_NEG_MODEL_NAME]) vmafPhoneScore.append(frame["metrics"][HD_PHONE_MODEL_NAME]) if model == '4K': vmafScore.append(frame["metrics"][_4K_MODEL_NAME]) elif output_fmt == 'xml': tree = ET.parse(vmafpath) root = tree.getroot() for frame in root.findall('frames/frame'): if model == 'HD': vmafScore.append(frame["metrics"][HD_MODEL_NAME]) vmafNegScore.append(frame["metrics"][HD_NEG_MODEL_NAME]) vmafPhoneScore.append(frame["metrics"][HD_PHONE_MODEL_NAME]) if model == '4K': vmafScore.append(frame["metrics"][_4K_MODEL_NAME]) print("\n \n \n \n \n ") print("=======================================", flush=True) print("VMAF computed", flush=True) print("=======================================", flush=True) print("offset: ", offset, " | psnr: ", psnr) if model == 'HD': print("VMAF HD: ", mean(vmafScore)) print("VMAF Neg: ", mean(vmafNegScore)) print("VMAF Phone: ", mean(vmafPhoneScore)) if model == '4K': print("VMAF 4K: ", mean(vmafScore)) print("VMAF output file path: ", myVmaf.ffmpegQos.vmafpath) if cambi_heatmap: print("CAMBI Heatmap output path: ", myVmaf.ffmpegQos.vmaf_cambi_heatmap_path) print("\n \n \n \n \n ")
gdavila/easyVmaf
easyVmaf.py
easyVmaf.py
py
8,717
python
en
code
135
github-code
13
24760403813
#Link: https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix/ class Solution: def countNegatives(self, grid: List[List[int]]) -> int: count = 0 for row in grid: for cell in row: if cell < 0 : count += 1 return count
muradhaji/OlympSolutions
LeetCode/1351.py
1351.py
py
299
python
en
code
0
github-code
13
986374720
from painter.models import Card

from .import_cards import Command as BaseImportCommand


class Command(BaseImportCommand):
    # Django management command: imports Laundry RPG character sheets from
    # XLSX files and converts each character into printable Card objects.
    help = ('Clears the database of cards, then fills it with the contents of one or' +
            ' more specified XLSX files. Parses a Laundry character sheet,' +
            ' looking for a specific layout.')

    def convert_to_python(self, worksheet):
        """
        Each worksheet in this file represents a single character. The sheet
        contains several tables, in different locations.

        Returns a one-element list holding the character dict (identity and
        trait fields at the top level, plus stats/derived_stats/skills/
        spells/weapons sub-structures).
        """
        all_rows = list(worksheet.rows)

        # Fixed-position tables on the sheet; parse_table is inherited from
        # BaseImportCommand (import_cards).
        identity_table = self.parse_table(
            all_rows, start_row=0, height=2, width=4)
        traits_table = self.parse_table(
            all_rows, start_row=0, start_column=5, height=2, width=4)
        spell_table = self.parse_table(
            all_rows, start_row=0, start_column=10, height=5, width=5)
        weapon_table = self.parse_table(
            all_rows, start_row=6, start_column=10, height=5, width=4)
        stat_table = self.parse_table(
            all_rows, start_row=3, height=9, width=3)
        derived_stat_table = self.parse_table(
            all_rows, start_row=3, start_column=4, height=8, width=2)
        skill_table = self.parse_table(
            all_rows, start_row=15, width=7)

        # Both the identity and traits tables only have a single row.
        identity = identity_table[0]
        traits = traits_table[0]

        # Arrange derived stats by their keys, so they can be more organised.
        derived_stats = {
            self.make_safe_name(row['derived_stat']): row
            for row in derived_stat_table
        }

        # Damage bonuses are converted to dice, using a table.
        # Automate that here and add it to derived_stats.
        bonus = int(derived_stats['damage_bonus']['value'])
        if bonus <= 12:
            bonus_die = '-1d6'
        elif bonus <= 16:
            bonus_die = '-1d4'
        elif bonus <= 24:
            bonus_die = 'None'
        elif bonus <= 32:
            bonus_die = '+1d4'
        elif bonus <= 40:
            bonus_die = '+1d6'
        else:
            bonus_die = '+2d6'
        derived_stats['damage_bonus']['value'] = bonus_die

        # For the skills, we also want to invert the table, but we also need
        # to collapse specialisations into bracketed names. For instance,
        # a table might look like this:
        #   Knowledge
        #     Speciality 1: 9
        #     Speciality 2: 19
        # and we want to combine those into
        #   Knowledge (Speciality 1): 9
        #   Knowledge (Speciality 2): 19
        # Finally, we also want to filter out skills with very low values
        # (see the > 2 check below) - those are pretty much just visual noise.
        # We can tell what the 'parent' skills are since they have no value,
        # and the 'child' skills because they are indented with a few spaces
        # in the table.
        skills = []
        parent_skill_name = None
        for skill_row in skill_table:
            # Grab the name of the skill, since we'll need it.
            name = skill_row['skill']

            # If this skill is a speciality (its cell text starts with an
            # indentation space), fold it into its parent's name.
            # If it's not a speciality, save it as the next parent_skill_name
            # in case it has specialities of its own.
            if (name[0] == ' '):
                # Remove the indent spaces.
                name = name.lstrip()
                # Filter out the 'Speciality N' example rows.
                if (name.startswith('Speciality')):
                    continue
                name = '{parent} ({speciality})'.format(
                    parent=parent_skill_name,
                    speciality=name
                )
            else:
                parent_skill_name = name

            # If the skill has a value and that value is at least 3, create
            # an entry for it in `skills`.
            value = skill_row['total']
            if (value is not None and int(value) > 2):
                skills.append({
                    'name': name,
                    'value': value,
                })

        # Use some swanky Python 3.5 syntax to merge dictionaries together,
        # putting the identity and trait data on the root level.
        character = {
            **identity,
            **traits,
            'stats': stat_table,
            'derived_stats': derived_stats,
            'skills': skills,
            'spells': spell_table,
            'weapons': weapon_table,
        }
        return [character]

    def convert_to_cards(self, card_data):
        """
        Convert each character into three cards:
        - Core and derived stats
        - Skills
        - Spells and weapons

        Characters without a name yield no cards; the spells card is only
        emitted when the character has spells or weapons.
        """
        name = card_data.pop('name')
        if not name:
            return []

        cards = [
            Card(
                name=name,
                template_name='stats.html',
                quantity=1,
                data=card_data,
            ),
            Card(
                name=name,
                template_name='skills.html',
                quantity=1,
                data=card_data,
            ),
        ]

        if card_data['spells'] or card_data['weapons']:
            cards.append(Card(
                name=name,
                template_name='spells.html',
                quantity=1,
                data=card_data,
            ))

        return cards
adam-thomas/imperial-painter
painter/importers/import_laundry.py
import_laundry.py
py
5,769
python
en
code
0
github-code
13
31967637232
#!/usr/bin/python """ ZetCode wxPython tutorial In this example we create a gauge widget. author: Jan Bodnar website: www.zetcode.com last modified: April 2018 """ import wx TASK_RANGE = 50 class Example(wx.Frame): def __init__(self, *args, **kw): super(Example, self).__init__(*args, **kw) self.InitUI() def InitUI(self): self.timer = wx.Timer(self, 1) self.count = 0 self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer) pnl = wx.Panel(self) vbox = wx.BoxSizer(wx.VERTICAL) hbox1 = wx.BoxSizer(wx.HORIZONTAL) hbox2 = wx.BoxSizer(wx.HORIZONTAL) hbox3 = wx.BoxSizer(wx.HORIZONTAL) self.gauge = wx.Gauge(pnl, range=TASK_RANGE, size=(250, -1)) self.btn1 = wx.Button(pnl, wx.ID_OK) self.btn2 = wx.Button(pnl, wx.ID_STOP) self.text = wx.StaticText(pnl, label='Task to be done') self.Bind(wx.EVT_BUTTON, self.OnOk, self.btn1) self.Bind(wx.EVT_BUTTON, self.OnStop, self.btn2) hbox1.Add(self.gauge, proportion=1, flag=wx.ALIGN_CENTRE) hbox2.Add(self.btn1, proportion=1, flag=wx.RIGHT, border=10) hbox2.Add(self.btn2, proportion=1) hbox3.Add(self.text, proportion=1) vbox.Add((0, 30)) vbox.Add(hbox1, flag=wx.ALIGN_CENTRE) vbox.Add((0, 20)) vbox.Add(hbox2, proportion=1, flag=wx.ALIGN_CENTRE) vbox.Add(hbox3, proportion=1, flag=wx.ALIGN_CENTRE) pnl.SetSizer(vbox) self.SetTitle('wx.Gauge') self.Centre() def OnOk(self, e): if self.count >= TASK_RANGE: return self.timer.Start(100) self.text.SetLabel('Task in Progress') def OnStop(self, e): if self.count == 0 or self.count >= TASK_RANGE or not self.timer.IsRunning(): return self.timer.Stop() self.text.SetLabel('Task Interrupted') def OnTimer(self, e): self.count = self.count + 1 self.gauge.SetValue(self.count) if self.count == TASK_RANGE: self.timer.Stop() self.text.SetLabel('Task Completed') def main(): app = wx.App() ex = Example(None) ex.Show() app.MainLoop() if __name__ == '__main__': main()
janbodnar/wxPython-examples
widgets/gauge_wid.py
gauge_wid.py
py
2,262
python
en
code
102
github-code
13
23449394462
from util import utils


class PerfectMatch:
    """Drive a solver against a generated match until the solver's
    prediction equals the match exactly, or a round limit is hit."""

    def __init__(self, solver, matches, limit):
        # Maximum number of prediction rounds before giving up.
        self.limit = limit
        # Size parameter passed to utils.createMatch.
        self.matches = matches
        self.match = utils.createMatch(self.matches)
        # NOTE(review): assumes solver.reset() returns the solver instance
        # (fluent style) rather than None -- confirm in the solver class.
        self.solver = solver.reset()

    def playMatch(self):
        """Run prediction rounds until solved or self.limit rounds elapse.

        Returns:
            (i, scores): the number of rounds played and the per-round
            scores in order.
        """
        solved = False
        scores = []
        i = 0
        while True:
            prediction = self.solver.predict()
            score = utils.score(match = self.match, prediction = prediction)
            self.solver.updateScore(score)
            # When the solver asks for it, verify one pair against the true
            # match and feed the answer back (presumably a "truth booth"
            # style check -- semantics live in utils.checkPair).
            if self.solver.performCheck():
                check = utils.checkPair(
                    match = self.match,
                    prediction = prediction,
                    index = self.solver.checkIndex())
                self.solver.updateCheck(check)
            scores.append(score)
            solved = self.match == prediction
            i += 1
            # do-while shape: at least one round is always played, even when
            # limit <= 0.
            if solved or i >= self.limit:
                break
        if solved:
            print('Perfect match found after', i, 'tries!')
        else:
            print('Perfect match not found!')
        return i, scores
cestcedric/PerfectMatch
PerfectMatch.py
PerfectMatch.py
py
1,175
python
en
code
0
github-code
13
74564775698
#!/usr/bin/env python
# pylint: disable=E1101,C0103,R0902
"""
Component test TestComponent module and the harness
"""
from __future__ import print_function

import os
import threading
import time
import unittest

import nose

from WMCore.Agent.Daemon.Details import Details
from WMCore.Database.Transaction import Transaction
from WMCore.WMFactory import WMFactory
from WMCore_t.Agent_t.TestComponent import TestComponent
from WMQuality.TestInit import TestInit


class HarnessTest(unittest.TestCase):
    """
    TestCase for TestComponent module
    """

    tempDir = None

    def setUp(self):
        """
        setup for test.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

    def tearDown(self):
        """
        Delete database
        """
        self.testInit.clearDatabase()

    def testB(self):
        """Exercise message handling (state, custom and logging messages).

        NOTE: the initial ``raise nose.SkipTest`` disables this test; the
        body below is intentionally unreachable until the raise is removed.
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        self.tempDir = self.testInit.generateWorkDir(config)
        config.component_("TestComponent")
        config.TestComponent.logLevel = 'INFO'
        config.section_("General")
        config.TestComponent.componentDir = os.path.join( \
            self.tempDir, "Components/TestComponent1")
        config.General.workDir = config.TestComponent.componentDir
        os.makedirs(config.TestComponent.componentDir)
        # as this is a test we build the string from our global environment
        # parameters normally you put this straight into the DefaultConfig.py file:
        # testInit.getConfiguration returns from the environment variable by default
        testComponent = TestComponent(config)
        testComponent.prepareToStart()
        testComponent.handleMessage('LogState', '')
        testComponent.handleMessage('TestMessage1', 'TestMessag1Payload')
        testComponent.handleMessage('TestMessage2', 'TestMessag2Payload')
        testComponent.handleMessage('TestMessage3', 'TestMessag3Payload')
        testComponent.handleMessage('TestMessage4', 'TestMessag4Payload')
        # Global logging-level messages...
        testComponent.handleMessage('Logging.DEBUG', '')
        testComponent.handleMessage('Logging.WARNING', '')
        testComponent.handleMessage('Logging.CRITICAL', '')
        testComponent.handleMessage('Logging.ERROR', '')
        testComponent.handleMessage('Logging.INFO', '')
        testComponent.handleMessage('Logging.SQLDEBUG', '')
        # ...and component-scoped logging-level messages.
        testComponent.handleMessage('TestComponent:Logging.DEBUG', '')
        testComponent.handleMessage('TestComponent:Logging.WARNING', '')
        testComponent.handleMessage('TestComponent:Logging.CRITICAL', '')
        testComponent.handleMessage('TestComponent:Logging.ERROR', '')
        testComponent.handleMessage('TestComponent:Logging.INFO', '')
        testComponent.handleMessage('TestComponent:Logging.SQLDEBUG', '')
        # test a non existing message (to generate an error)
        errorMsg = ''
        try:
            testComponent.handleMessage('NonExistingMessageType', '')
        except Exception as ex:
            errorMsg = str(ex)
        self.assertTrue(errorMsg.startswith('Message NonExistingMessageType with payload'))

    def testC(self):
        """Start the component as a daemon and kill it via its Daemon.xml.

        NOTE: disabled by the initial ``raise nose.SkipTest``.
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        self.tempDir = self.testInit.generateWorkDir(config)
        config.component_("TestComponent")
        config.TestComponent.logLevel = 'INFO'
        config.section_("General")
        # try starting a component as a daemon:
        config.TestComponent.componentDir = os.path.join( \
            self.tempDir, "Components/TestComponent1")
        os.makedirs(config.TestComponent.componentDir)
        testComponent = TestComponent(config)
        # we set the parent to true as we are testing
        testComponent.startDaemon(keepParent=True)
        print('trying to kill the component')
        time.sleep(2)
        daemonFile = os.path.join(config.TestComponent.componentDir, "Daemon.xml")
        details = Details(daemonFile)
        print('Is component alive: ' + str(details.isAlive()))
        time.sleep(2)
        details.killWithPrejudice()
        print('Daemon killed')

    def testD(self):
        """Start the component as a daemon and stop it gracefully by
        publishing a 'Stop' message through the message service.

        NOTE: disabled by the initial ``raise nose.SkipTest``.
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.component_("TestComponent")
        config.TestComponent.logLevel = 'INFO'
        config.section_("General")
        self.tempDir = self.testInit.generateWorkDir(config)
        # try starting a component as a daemon:
        config.TestComponent.componentDir = os.path.join( \
            self.tempDir, "Components/TestComponent2")
        os.makedirs(config.TestComponent.componentDir)
        testComponent = TestComponent(config)
        # we set the parent to true as we are testing
        testComponent.startDaemon(keepParent=True)
        time.sleep(2)
        daemonFile = os.path.join(config.TestComponent.componentDir, "Daemon.xml")
        details = Details(daemonFile)
        print('Is component alive: ' + str(details.isAlive()))
        # create msgService to send stop message.
        # NOTE(review): myThread.dialect / myThread.dbi are presumably set up
        # by TestInit.setDatabaseConnection -- confirm in WMQuality.TestInit.
        myThread = threading.currentThread()
        factory = WMFactory("msgService", "WMCore.MsgService." + \
                            myThread.dialect)
        myThread.transaction = Transaction(myThread.dbi)
        msgService = factory.loadObject("MsgService")
        msgService.registerAs("HarnessTest")
        myThread.transaction.commit()
        print('Publish a stop message to test if the component shutsdown gracefully')
        myThread.transaction.begin()
        msg = {'name': 'Stop', 'payload': ''}
        msgService.publish(msg)
        myThread.transaction.commit()
        msgService.finish()
        # Poll until the daemon reports dead.
        while details.isAlive():
            print('Component has not received stop message')
            time.sleep(2)
        print('Daemon shutdown gracefully')


if __name__ == '__main__':
    unittest.main()
dmwm/WMCore
test/python/WMCore_t/Agent_t/Harness_t.py
Harness_t.py
py
5,963
python
en
code
44
github-code
13
71158869459
def reverse_vowels(s):
    """Reverse the order of the lowercase vowels in *s*.

    Characters that are not vowels keep their positions ('y' is not a
    vowel).  Matching the original behaviour, only lowercase vowels are
    rearranged, so uppercase vowels stay where they are.

    The transformed string is printed (the original side effect) and --
    new, backward-compatible -- also returned so callers can use the
    value instead of scraping stdout.
    """
    chars = list(s)
    vowels = set('aeiou')
    # Positions that hold a vowel, in left-to-right order.
    positions = [i for i, ch in enumerate(chars) if ch in vowels]
    # Write the same vowels back in reverse order: position k from the
    # left receives the vowel at position k from the right.
    for target, source in zip(positions, reversed(positions)):
        chars[target] = s[source]
    result = ''.join(chars)
    print(result)
    return result


reverse_vowels("Hello!")  # 'Holle!'
reverse_vowels("Tomatoes")  # 'Temotaos'
reverse_vowels("Reverse Vowels In A String")  # 'Riverso Vewels In A Streng' (uppercase vowels stay put)
reverse_vowels("aeiou")  # 'uoiea'
reverse_vowels("why try, shy fly?")  # 'why try, shy fly?'
chrissolo88/PythonPractice
reverse_vowels.py
reverse_vowels.py
py
830
python
en
code
0
github-code
13
22713332448
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Result CSVs to compare; LABELS pairs with CSV_FILES one-to-one, and the
# palette simply supplies the first len(CSV_FILES) colours.
CSV_FILES = ['/home/dylanz/final_project/eecs598/se_results/supervised_0.05_5_whole.pt_shift_shift',
             '/home/dylanz/final_project/eecs598/se_results/supervised-l2_1e-05.pt_shift_shift']
COLORS = ["#fd7f6f", "#7eb0d5", "#b2e061", "#bd7ebe", "#ffb55a",
          "#ffee65", "#beb9db", "#fdcce5", "#8bd3c7"]
LABELS = ['CE-MMD', 'L2']
FIGURE_NAME = 'MMD-L2-SE'

# One curve per results file: x-values come from the column names (sample
# counts) and y-values are the per-column means.
for csv_path, curve_label, curve_color in zip(CSV_FILES, LABELS, COLORS):
    frame = pd.read_csv(csv_path)
    column_means = frame.mean(axis=0)
    sample_counts = column_means.index.to_numpy(dtype=np.float32)
    mean_values = column_means.to_numpy()
    plt.plot(sample_counts, mean_values, label=curve_label, color=curve_color, marker='o')
    plt.legend()

# Axis labels, log-2 x-scale and title for the combined figure.
plt.xlabel('Number of Samples')
plt.xscale("log", base=2)
plt.ylabel('AUROC')
plt.title("Unbiased Samples Needed To Learn Unbiased Distribution")

# Write the finished figure to disk.
plt.savefig('/home/dylanz/final_project/eecs598/figures/' + FIGURE_NAME + '.png')
DylanJamesZapzalka/eecs598
process_se_results.py
process_se_results.py
py
1,104
python
en
code
0
github-code
13
6188576375
# drawShape.py
# Created by Jo Narvaez-Jensen
# Opens a 500x500 window showing a 400x200 rectangle (blue outline, orange
# fill) with a green oval drawn inside, using the same corner coordinates
# as the rectangle.

from graphics import *

# The shared window that every shape is rendered into.
display = GraphWin ("Drawing Window", 500,500)


def main ():
    """Build the rectangle and oval, then render them into the window."""
    corner_a = Point (50, 150)
    corner_b = Point (450, 300)

    box = Rectangle (corner_a, corner_b)
    box.setOutline ("blue")
    box.setFill ("orange")
    box.setWidth ("5")

    ellipse = Oval (corner_a, corner_b)
    ellipse.setOutline ("green")
    ellipse.setFill ("green")

    # Rectangle first so the oval appears on top of it.
    box.draw (display)
    ellipse.draw (display)


main ()
thenobleone/Programming
CSC-110/Chapter 4/draw.py
draw.py
py
648
python
en
code
1
github-code
13
15734956163
# This file contains useful functions for manipulating data
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix


def get_daily_count(df, timecol='pk_timestamp'):
    """Compute number of rows per day

    Arguments:
        df {pandas DataFrame} -- must contain a time column (usually pk_timestamp)

    Returns:
        pandas DataFrame -- contains number of rows per day, with missing
        days re-indexed in and filled with 0
    """
    newdf = df.copy()
    # Use the timestamp column as the index so resample('d') groups by day.
    newdf.index = newdf[timecol]
    daily_alarm_count = newdf.resample('d').count()
    # fill in missing dates with 0
    idx = pd.date_range(min(daily_alarm_count.index), max(daily_alarm_count.index))
    daily_counts = daily_alarm_count.reindex(idx, fill_value=0)
    return daily_counts


def plot_daily_PMs_alarms(daily_counts, daily_counts_alarms, savefile=None):
    """ Makes a pretty plot of the number of rows per day

    Arguments:
        daily_counts {pandas DataFrame} -- must contain pk_id column
        daily_counts_alarms {pandas DataFrame} -- same but for alarm days
        savefile -- optional path; when given, the figure is written there
    """
    fig, ax = plt.subplots(figsize=(20, 5))
    xplot = pd.to_datetime(daily_counts.index)
    yplot = daily_counts['pk_id']
    # Counts plotted on a log10 scale.
    ax.plot(xplot, np.log10(yplot), '.', label='PMs')
    # plot red for gap days
    these = np.where(yplot == 0)[0]
    ax.plot(xplot[these], yplot[these] - 0.1, '.r', label='no data')
    xplot = pd.to_datetime(daily_counts_alarms.index)
    yplot = daily_counts_alarms['pk_id']
    ax.plot(xplot, np.log10(yplot), 'o', label='alarms')
    # plot red for gap days
    these = np.where(yplot == 0)[0]
    ax.plot(xplot[these], yplot[these] - 0.3, 'or', label='no alarm')
    ax.set_ylabel('$\log_{10}$ num data per day')
    ax.set_xlabel('date')
    ax.legend()
    if savefile:
        fig.savefig(savefile)
    return


def one_hot_encode_alarms(df):
    """Take a pandas dataframe with multi-labels in a "category" column and
    one-hot encode the labels

    NOTE: mutates *df* in place (drops the 'category' column and
    de-duplicates) before returning it.

    Arguments:
        df {pandas DataFrame} -- dataframe with single column "category" with
        various string classes. Examples with multiple labels have
        duplicated rows.

    Returns:
        pandas DataFrame -- dataframe with one-hot-encoded labels, and
        duplicated rows removed
    """
    print('computing one-hot-encoded alarms')
    # take all rows with alarms (others are assumed to be None)
    df_alarms = df[df['category'].notnull()]
    # get a list of the unique alarm names
    alarm_names = list(df_alarms['category'].unique())
    # Create MultiLabelBinarizer object - include None (i.e. no alarm) as well
    one_hot = MultiLabelBinarizer(classes=[None] + alarm_names)
    # group the category labels that share the same pk_id and pk_timestamp (i.e. a device experienced multiple alarms simultaneously)
    labels = df_alarms.groupby(['pk_id', 'pk_timestamp'])['category'].apply(list)
    # One-hot encode the alarms
    labels_ohe = one_hot.fit_transform(labels)
    # drop the category column (no longer needed) and remove resulting duplicates
    df_alarms.drop(columns=['category'], inplace=True)
    df_alarms.drop_duplicates(inplace=True)
    # add "alarm " to the alarm columns
    alarm_colnames = ['alarm ' + str(alarm) for alarm in one_hot.classes_]
    # NOTE(review): labels_ohe has one row per (pk_id, pk_timestamp) group;
    # assigning index=df_alarms.index assumes the de-duplicated frame has
    # exactly one row per group, in the same order -- confirm with real data.
    labels_ohe_df = pd.DataFrame(labels_ohe, columns=alarm_colnames, index=df_alarms.index)
    # drop the categories column
    print('preparing dataframe for merging with one-hot-encoded alarms')
    df.drop(columns=['category'], inplace=True)
    df.drop_duplicates(inplace=True)
    # add the labels columns for the rest of the No Alarm device
    for colname in alarm_colnames:
        if 'None' in colname:
            df[colname] = 1
        else:
            df[colname] = 0
    print('adding one-hot-encoded alarms to dataframe')
    df.update(labels_ohe_df)
    return df


def calc_acc_metrics(preds, teY):
    """Given the predictions of a model, and the ground truth, calculate
    accuracy metrics to evaluate the performance of the model on unseen data

    Keyword Arguments:
        preds -- the predictions of the model on the unseen test set (teX)
        teY -- the ground truth (the classes that correspond to the unseen data)

    Returns:
        prec -- the precision score == tp / (tp + fp)
        rec -- the recall score == tp / (tp + fn)
        acc -- the accuracy score == (tp + tn) / (tp + tn + fp + fn)
        f1 -- the f1 score == 2 * (prec * rec) / (prec + rec)
        fpr -- the false positive rate == fp / (fp + tn)
    """
    prec = precision_score(teY, preds)
    rec = recall_score(teY, preds)
    acc = accuracy_score(teY, preds)
    f1 = f1_score(teY, preds)
    conf_matrix = confusion_matrix(teY, preds)
    # Row 0 of the confusion matrix is the negative class: [tn, fp].
    fpr = conf_matrix[0][1] / (conf_matrix[0][1] + conf_matrix[0][0])
    return prec, rec, acc, f1, fpr
Jincheng-Sun/ciena_hackathon
Preprocessing/utils.py
utils.py
py
5,022
python
en
code
0
github-code
13
37866933770
import investpy as inv
from django.core.management.base import BaseCommand

from assets.models.assets import ListAcaoFii


class Command(BaseCommand):
    # Management command: refreshes the ListAcaoFii table from the Brazilian
    # (B3) stock listing fetched via investpy.
    help = 'Create objs in database getting by b3 api'

    def handle(self, *args, **options):
        """Replace all ListAcaoFii rows with fresh data from investpy.

        Performs a full refresh: deletes every existing row, then creates
        one row per listed symbol.
        """
        ListAcaoFii.objects.all().delete()
        df = inv.stocks.get_stocks('brazil')
        df = df.reset_index()
        for index, row in df.iterrows():
            # Only symbols of at most 7 characters are kept -- presumably
            # to exclude non-standard tickers; confirm against the B3 data.
            if len(row['symbol']) <= 7:
                ListAcaoFii.objects.create(nome=row['symbol'], empresa=row['name'])
        print('ativos cadastrados')
jorgemustafa/gerenciador-de-investimentos
assets/management/commands/charge_assets_b3.py
charge_assets_b3.py
py
567
python
en
code
0
github-code
13
13492701283
# -*- coding: utf8 -*- from Utils import * from Utils import GlobalProperty as GP from OnClickHandler import OnClickHandler import VideoPlayer from BaseClasses import * from WindowManager import wm import time from dialogs.DialogBaseInfo import DialogBaseInfo PLAYER = VideoPlayer.VideoPlayer() ch = OnClickHandler() C_LIST_MAINMENU = 1000 C_LIST_SHOW = 2000 C_LABEL_TIME = 2100 class MainMenu(WindowXML, DialogBaseInfo): def __init__(self, *args, **kwargs): super(MainMenu, self).__init__(*args, **kwargs) @busy_dialog def onInit(self): pass def onAction(self, action): super(MainMenu, self).onAction(action) ch.serve_action(action, self.getFocusId(), self) def onClick(self, control_id): super(MainMenu, self).onClick(control_id) ch.serve(control_id, self) @ch.action("back", "*") @ch.action("previousmenu", "*") def exit_script(self): self.close() self.run = False
devillinangel/script.huawei
resources/lib/MainMenu.py
MainMenu.py
py
973
python
en
code
0
github-code
13
17233025149
import zipfile
import os
import shutil


def RoiRename(Path):
    """Extract a ROI zip archive next to itself and renumber its files.

    The archive is unpacked into a directory named after the archive
    (path without extension); any previous extraction directory is wiped
    first.  Every extracted file that has an extension is renamed to
    "<n>.roi" with n = 1, 2, ...; files without an extension are skipped
    and their name is printed (preserving the original diagnostic).

    Parameters
    ----------
    Path : str
        Path to a ``.zip`` archive of ROI files.

    Returns
    -------
    list
        ``[Dirpath, Relation]`` where ``Dirpath`` is the extraction
        directory and ``Relation`` maps each new index ``n`` to a
        one-element list holding the original base name.
    """
    Dirpath = os.path.splitext(Path)[0]
    # Start from a clean extraction directory every run.
    if os.path.exists(Dirpath):
        shutil.rmtree(Dirpath)
    os.makedirs(Dirpath)
    # Context manager guarantees the archive handle is closed even if
    # extraction fails (the original leaked it on error).
    with zipfile.ZipFile(Path, 'r') as archive:
        archive.extractall(Dirpath)

    Relation = {}
    i = 1
    # Sort for deterministic numbering: os.listdir order is arbitrary,
    # which made the original mapping nondeterministic across platforms.
    for name in sorted(os.listdir(Dirpath)):
        if not os.path.isfile(os.path.join(Dirpath, name)):
            continue
        if name.find('.') > 0:
            Relation[i] = [os.path.splitext(name)[0]]
            os.rename(os.path.join(Dirpath, name),
                      os.path.join(Dirpath, str(i) + '.roi'))
            i += 1
        else:
            # Original behaviour: print the (extensionless) name and skip.
            print(name.split('.')[-1])
    return [Dirpath, Relation]


if __name__ == '__main__':
    RoiRename('C:\Result\JR1.zip')
    print(RoiRename('C:\Result\JR1.zip'))
    RoiRename('C:\Result\JR2.zip')
ZhouBo20171229/-
BatchRename.py
BatchRename.py
py
1,563
python
en
code
0
github-code
13
19059987156
#! python3
"""Download every matching <img> from the URLs listed in input.csv.

Reads input.csv (must contain a 'URL' column), fetches each page with a
browser-like User-Agent, and saves each image whose src contains a known
TLD as an incrementing "<n>.jpg" in the working directory.
"""
import os
import csv
import requests
import bs4 as bs
import urllib.request
import re

# Browser-like UA so sites do not reject the scraper outright.
DEVICE = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
HEADERS = {'User-Agent': DEVICE}
# BUG FIX: the original list was [".com", ".org" ".net"] -- the missing
# comma silently concatenated the last two entries into ".org.net", so
# .org and .net images were never matched.
EXTENSIONS = (".com", ".org", ".net")

i = 1
with open("input.csv") as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        url = row['URL']
        resp = requests.get(url, headers=HEADERS)
        soup = bs.BeautifulSoup(resp.text, 'lxml')
        for image in soup.findAll('img', {"src": True}):
            src = image['src']
            # BUG FIX: the skip message used to print imageSource, which is
            # undefined on the first iteration (NameError) and stale on
            # later ones; report the actual src being skipped.
            if not any(x in src for x in EXTENSIONS):
                print("Skipped " + src)
                continue
            # Protocol-relative URLs ("//host/img.png") need a scheme.
            if src.startswith('//'):
                imageSource = "http://" + src[2:]
            else:
                imageSource = src
            opener = urllib.request.build_opener()
            opener.addheaders = [('User-agent', DEVICE)]
            urllib.request.install_opener(opener)
            urllib.request.urlretrieve(imageSource, str(i) + ".jpg")
            i += 1
santarini/Monis-Image-Scrape
imageScrapeCSV.py
imageScrapeCSV.py
py
1,310
python
en
code
0
github-code
13
11029218112
# Keras
import keras
from keras import regularizers
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Embedding, LSTM
from keras.layers import Input, Flatten, Dropout, Activation, BatchNormalization
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.utils import np_utils
from keras.callbacks import (EarlyStopping, LearningRateScheduler,
                             ModelCheckpoint, TensorBoard, ReduceLROnPlateau)
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
from keras import losses, models
from keras.activations import relu, softmax
# NOTE(review): several layer names (Input, Dense, Dropout, Flatten,
# Convolution2D, ...) are re-imported below; the LAST import
# (tensorflow.keras.layers) is the one that wins at runtime.
from keras.layers import (Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten,
                          Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense)
from tensorflow.keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D, Flatten, Reshape, Dropout, Conv2D

# sklearn
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Other
from tqdm import tqdm, tqdm_pandas
import scipy
from scipy.stats import skew
import pickle
import librosa
import librosa.display
import json
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import pandas as pd
import seaborn as sns
import glob
import os
import sys
import IPython.display as ipd  # To play sound in the notebook
import warnings

'''
2. Extracting the MFCC feature as an image (Matrix format).
'''


def prepare_data(df, n, sampling_rate, audio_duration, n_mfcc):
    """Build an MFCC "image" tensor of shape (len(df), n, 216, 1).

    Each file in df.path is loaded with librosa, cropped or zero-padded
    (with a random offset) to sampling_rate * audio_duration samples, and
    converted to an n_mfcc-band MFCC matrix.

    NOTE(review): librosa.load is also given duration=2.5 and offset=0.5,
    so the on-disk read window is fixed regardless of *audio_duration*;
    the 216 time-frame width likewise appears tied to that 2.5 s window --
    confirm both are intended.
    """
    X = np.empty(shape=(df.shape[0], n, 216, 1))
    input_length = sampling_rate * audio_duration
    cnt = 0
    for fname in tqdm(df.path):
        file_path = fname
        data, _ = librosa.load(file_path, sr=sampling_rate
                               ,res_type="kaiser_fast"
                               ,duration=2.5
                               ,offset=0.5
                               )

        # Random offset / Padding
        if len(data) > input_length:
            max_offset = len(data) - input_length
            offset = np.random.randint(max_offset)
            data = data[offset:(input_length+offset)]
        else:
            if input_length > len(data):
                max_offset = input_length - len(data)
                offset = np.random.randint(max_offset)
            else:
                offset = 0
            data = np.pad(data, (offset, int(input_length) - len(data) - offset), "constant")

        # MFCC extraction
        MFCC = librosa.feature.mfcc(data, sr=sampling_rate, n_mfcc=n_mfcc)
        MFCC = np.expand_dims(MFCC, axis=-1)
        X[cnt,] = MFCC
        cnt += 1

    return X


def print_confusion_matrix(confusion_matrix, class_names, figsize = (10,7), fontsize=14):
    """Render an integer confusion matrix as a seaborn heatmap.

    NOTE: the *confusion_matrix* parameter shadows the sklearn function of
    the same name imported above.
    """
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    fig = plt.figure(figsize=figsize)
    try:
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
    except ValueError:
        # fmt="d" requires integer cell values.
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


def get_2d_conv_model(n):
    ''' Create a standard deep 2D convolutional neural network

    Input: (n, 216, 1) MFCC images; output: softmax over 14 classes.
    '''
    nclass = 14
    inp = Input(shape=(n,216,1))  #2D matrix of 30 MFCC bands by 216 audio length.
    # Four conv -> batch-norm -> relu -> pool -> dropout stages.
    x = Convolution2D(32, (4,10), padding="same")(inp)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D()(x)
    x = Dropout(rate=0.2)(x)

    x = Convolution2D(32, (4,10), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D()(x)
    x = Dropout(rate=0.2)(x)

    x = Convolution2D(32, (4,10), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D()(x)
    x = Dropout(rate=0.2)(x)

    x = Convolution2D(32, (4,10), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = MaxPool2D()(x)
    x = Dropout(rate=0.2)(x)

    x = Flatten()(x)
    x = Dense(64)(x)
    x = Dropout(rate=0.2)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dropout(rate=0.2)(x)

    out = Dense(nclass, activation=softmax)(x)
    model = models.Model(inputs=inp, outputs=out)
    # NOTE(review): opt is constructed but compile() receives the string
    # "adam", so this custom learning rate is never used.
    opt = optimizers.Adam(0.001)
    model.compile(optimizer="adam", loss=losses.categorical_crossentropy, metrics=['acc'])
    return model


# Cosine Loss
from sklearn.metrics.pairwise import cosine_similarity

# high-efficiency version (vectorised over the whole batch)
def getCosAcc(predict, label):
    """Mean cosine similarity between predictions and labels (column-wise)."""
    return tf.reduce_mean(-tf.losses.cosine_similarity(predict, label, axis=0))

def getCosAcc1(predict, label):
    """Row-by-row cosine similarity, averaged; slower loop variant of
    getCosAcc.

    NOTE(review): the local name ``sum`` shadows the builtin.
    """
    #print(predict[0].shape)
    acc = 0
    sum = 0
    for i in range(predict.shape[0]):
        sum += -tf.losses.cosine_similarity(predict[i], label[i])
    acc = sum / predict.shape[0]
    return acc


# Cosine Loss
import tensorflow as tf
# high efficient method
def getAcc(predict, label):
    """Negated cosine similarity between predictions and labels."""
    #print(predict.shape)
    return -tf.losses.cosine_similarity(predict, label)


def get_2d_conv_auto_encoder(n):
    ''' Create a standard deep 2D convolutional neural network

    Convolutional autoencoder over (n, 216, 1) MFCC images; the
    bottleneck layer is named "encoded" so embeddings can be read out.
    '''
    input_img = Input(shape=(n,216,1))  #2D matrix of 32 MFCC bands by 216 audio length.
    # Encoder: three conv/pool stages down to a 1-channel bottleneck.
    conv_1 = Conv2D(32, (3,18), activation='relu', padding='same')(input_img)
    conv_1 = BatchNormalization()(conv_1)
    pool_1 = MaxPool2D()(conv_1)
    pool_1 = Dropout(rate=0.2)(pool_1)

    conv_2 = Conv2D(64, (3,18), activation='relu', padding='same')(pool_1)
    conv_2 = BatchNormalization()(conv_2)
    pool_2 = MaxPool2D()(conv_2)
    pool_2 = Dropout(rate=0.2)(pool_2)

    conv_3 = Conv2D(128, (3,18), activation='relu', padding='same')(pool_2)
    conv_3 = BatchNormalization()(conv_3)
    pool_3 = MaxPool2D()(conv_3)
    pool_3 = Dropout(rate=0.2)(pool_3)

    conv_4 = Conv2D(1, (3,18), activation='relu', padding='same')(pool_3)
    encoded = BatchNormalization(name="encoded")(conv_4)

    # Decoder: mirror of the encoder with upsampling.
    up_3 = UpSampling2D()(encoded)
    conv_neg_3 = Conv2D(128, (3,18), activation='relu', padding='same')(up_3)
    conv_neg_3 = BatchNormalization()(conv_neg_3)

    up_4 = UpSampling2D()(conv_neg_3)
    conv_neg_4 = Conv2D(64, (3,18), activation='relu', padding='same')(up_4)
    conv_neg_4 = BatchNormalization()(conv_neg_4)

    up_5 = UpSampling2D()(conv_neg_4)
    conv_neg_5 = Conv2D(32, (3,18), activation='relu', padding='same')(up_5)
    conv_neg_5 = BatchNormalization()(conv_neg_5)

    out = Conv2D(1, (3,18), activation='relu', padding='same')(conv_neg_5)
    out = BatchNormalization()(out)

    model = models.Model(inputs=input_img, outputs=out)
    model.compile(optimizer="adam", loss="mse", metrics=[getCosAcc])
    return model


class get_results:
    # Convenience wrapper for plotting training history and evaluating a
    # trained model. NOTE(review): lowercase class name breaks PEP 8; some
    # methods below rely on module-level globals (X_test, y_test, lb) that
    # are NOT defined in this module -- they appear to be supplied by the
    # calling notebook; confirm before reuse.

    def __init__(self, model_history, model ,X_test, y_test, labels):
        self.model_history = model_history
        self.model = model
        self.X_test = X_test
        self.y_test = y_test
        self.labels = labels

    def create_plot(self, model_history):
        '''Check the logloss of both train and validation, make sure they are close and have plateau'''
        plt.plot(model_history.history['loss'])
        plt.plot(model_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper right')
        plt.show()

    def create_plot_acc(self, model_history):
        '''Check the logacc of both train and validation, make sure they are close and have plateau'''
        plt.plot(model_history.history['acc'])
        plt.plot(model_history.history['val_acc'])
        plt.title('model acc')
        plt.ylabel('acc')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

    def create_results(self, model):
        '''predict on test set and get accuracy results

        NOTE(review): evaluates against the globals X_test / y_test, not
        self.X_test / self.y_test -- likely unintended.
        '''
        opt = optimizers.Adam(0.001)
        model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
        score = model.evaluate(X_test, y_test, verbose=0)
        print("%s: %.2f%%" % (model.metrics_names[1], score[1]*100))

    def confusion_results(self, X_test, y_test, labels, model):
        '''plot confusion matrix results

        NOTE(review): ``lb`` (a fitted label binarizer/encoder) is not
        defined in this module.
        '''
        preds = model.predict(X_test, batch_size=16, verbose=2)
        preds=preds.argmax(axis=1)
        preds = preds.astype(int).flatten()
        preds = (lb.inverse_transform((preds)))

        actual = y_test.argmax(axis=1)
        actual = actual.astype(int).flatten()
        actual = (lb.inverse_transform((actual)))

        classes = labels
        classes.sort()
        c = confusion_matrix(actual, preds)
        print_confusion_matrix(c, class_names = classes)

    def accuracy_results_gender(self, X_test, y_test, labels, model):
        '''Print out the accuracy score and confusion matrix heat map of the Gender classification results

        Collapses the emotion-level labels into 'female'/'male' before
        scoring. NOTE(review): also depends on the undefined global ``lb``.
        '''
        preds = model.predict(X_test, batch_size=16, verbose=2)
        preds=preds.argmax(axis=1)
        preds = preds.astype(int).flatten()
        preds = (lb.inverse_transform((preds)))

        actual = y_test.argmax(axis=1)
        actual = actual.astype(int).flatten()
        actual = (lb.inverse_transform((actual)))

        # print(accuracy_score(actual, preds))

        actual = pd.DataFrame(actual).replace({'female_angry':'female'
                       , 'female_disgust':'female'
                       , 'female_fear':'female'
                       , 'female_happy':'female'
                       , 'female_sad':'female'
                       , 'female_surprise':'female'
                       , 'female_neutral':'female'
                       , 'male_angry':'male'
                       , 'male_fear':'male'
                       , 'male_happy':'male'
                       , 'male_sad':'male'
                       , 'male_surprise':'male'
                       , 'male_neutral':'male'
                       , 'male_disgust':'male'
                      })
        preds = pd.DataFrame(preds).replace({'female_angry':'female'
                       , 'female_disgust':'female'
                       , 'female_fear':'female'
                       , 'female_happy':'female'
                       , 'female_sad':'female'
                       , 'female_surprise':'female'
                       , 'female_neutral':'female'
                       , 'male_angry':'male'
                       , 'male_fear':'male'
                       , 'male_happy':'male'
                       , 'male_sad':'male'
                       , 'male_surprise':'male'
                       , 'male_neutral':'male'
                       , 'male_disgust':'male'
                      })

        classes = actual.loc[:,0].unique()
        classes.sort()
        c = confusion_matrix(actual, preds)
        print(accuracy_score(actual, preds))
        print_confusion_matrix(c, class_names = classes)
981526092/Sentiment-Mapping-and-Matching-between-Audio-and-Text-Representation
audioEmbedding/sentiment_until.py
sentiment_until.py
py
11,427
python
en
code
0
github-code
13
4506901455
import blog.index as blog import chat.index as chat import login import salutation from tarantino import Tarantino from tarantino.authentication import authenticate from tarantino.http import ( HTTP200Response, HTTPRequest, HTTPResponse, HTTPStatusCode, JSONResponse, ) from tarantino.middleware import cors, default_middlewares app = Tarantino( "basic", middlewares=default_middlewares + [ cors.Cors( allow_origins=["http://localhost:8080"], allow_headers=["*"], allow_credentials=True, ) ], ) app.register_subapp(salutation.subapp) app.register_subapp(login.subapp) app.register_subapp(blog.subapp) app.register_subapp(chat.subapp) @app.get("/") @authenticate async def index(request: HTTPRequest): creds = request.credentials name = getattr(creds, "name", "Anonymous User") body = f""" <!doctype html> <html> <body> <h1> Hello {name}!! </h1> <h2> This is Index Page </h2> </body> </html> """ return HTTP200Response(body) @app.post("/") @authenticate async def index(request: HTTPRequest): body = await request.body(as_str=True) print("POST BODY: ", body) return HTTP200Response(f"PING BACK THE BODY: {body}") @app.get("/user/profile/{username}") @authenticate async def profile(request: HTTPRequest, username: str): creds = request.credentials if not hasattr(creds, "username") or getattr(creds, "username") != username: return HTTPResponse("", status=HTTPStatusCode.STATUS_401_UNAUTHORIZED) body = f""" <!doctype html> <html> <body> <h1> Hello {username}!! </h1> <h2> This is Profile Page </h2> </body> </html> """ return HTTP200Response(body) @app.get("/json_response/{user_id:str}/records/{record_id:str}") async def api_response(request: HTTPRequest, user_id: str, record_id: str): resp = { "params": {"user": user_id, "record": record_id}, "query": dict(request.query_params), "data": {"foo": "bar"}, } return JSONResponse(resp)
himanshu-dutta/tarantino
examples/basic-app/index.py
index.py
py
2,095
python
en
code
0
github-code
13
71430527059
import spikeextractors as se import ephys_viz as ev class examples: @classmethod def toy_example(cls): _, sorting = se.example_datasets.toy_example() return ev.Autocorrelograms( title="Autocorrelograms from SpikeExtractors toy example", sorting=sorting, max_samples=10000, max_dt_msec=150, bin_size_msec=6 ) @classmethod def spikeforest_mea_c30(cls): return ev.Autocorrelograms( title="Autocorrelograms from spikeforest mea_c30", sorting=dict( path="sha1dir://ed0fe4de4ef2c54b7c9de420c87f9df200721b24.synth_visapy/mea_c30/set1/firings_true.mda", samplerate=30000, download_from='spikeforest.public' ), max_samples=10000, max_dt_msec=150, bin_size_msec=2 )
flatironinstitute/ephys-viz
widgets/Autocorrelograms/examples.py
examples.py
py
892
python
en
code
6
github-code
13
23511651922
ROCKS = [ {(2, 0), (3, 0), (4, 0), (5, 0)}, {(2, 1), (3, 0), (3, 1), (4, 1), (3, 2)}, {(2, 0), (3, 0), (4, 0), (4, 1), (4, 2)}, {(2, 0), (2, 1), (2, 2), (2, 3)}, {(2, 0), (2, 1), (3, 0), (3, 1)}, ] EXAMPLE = False def move(rock, direction): x, y = direction return {(i + x, j + y) for (i, j) in rock} def basic(moves, number_of_rocks=2022): cave = set() j = 0 for i in range(number_of_rocks): initial_y = max(cave, key=lambda x: x[1])[1] + 4 if len(cave) > 0 else 3 falling_rock = ROCKS[i % 5] falling_rock = move(falling_rock, (0, initial_y)) while True: direction = (1, 0) if moves[j % len(moves)] == ">" else (-1, 0) new_position = move(falling_rock, direction) if any(p in cave or p[0] < 0 or p[0] > 6 for p in new_position): pass else: falling_rock = new_position j += 1 new_position = move(falling_rock, (0, -1)) if any(p in cave or p[1] < 0 for p in new_position): cave |= falling_rock break else: falling_rock = new_position return max(cave, key=lambda x: x[1])[1] + 1 def inspect(moves, number_of_rocks=10_000): cave = set() j = 0 final_positions = [] for i in range(number_of_rocks): initial_y = max(cave, key=lambda x: x[1])[1] + 4 if len(cave) > 0 else 3 falling_rock = ROCKS[i % 5] falling_rock = move(falling_rock, (0, initial_y)) while True: direction = (1, 0) if moves[j % len(moves)] == ">" else (-1, 0) new_position = move(falling_rock, direction) if any(p in cave or p[0] < 0 or p[0] > 6 for p in new_position): pass else: falling_rock = new_position j += 1 new_position = move(falling_rock, (0, -1)) if any(p in cave or p[1] < 0 for p in new_position): cave |= falling_rock final_positions.append(min(falling_rock, key=lambda x: x[0])[0]) break else: falling_rock = new_position if i % 5 == 4: print(tuple(final_positions)) final_positions = [] if __name__ == "__main__": with open("example.txt" if EXAMPLE else "input.txt") as f: moves = f.read().strip() part1 = basic(moves) print("First part:", part1) # START and END manually found by running (line number of the start and 
end of the repeated sequence) # implement(moves) START, END = (110, 449) if not EXAMPLE else (3, 10) part2 = basic( moves, 5 * START + ((1000000000000 - 5 * START) % (5 * (END - START))) ) + ((1000000000000 - 5 * START) // (5 * (END - START))) * ( basic(moves, 5 * END) - basic(moves, 5 * START) ) print("Second part:", part2)
alexcosta13/advent-of-code-2022
day17/main.py
main.py
py
2,927
python
en
code
0
github-code
13
1340004803
""" ======================================================================= COMPETITION TUTORIAL #1: Custom model and RL algorithm ======================================================================= In this tutorial, we customize the default TrackMania pipeline. To submit an entry to the TMRL competition, we essentially need a trained policy. In TMRL, this is encapsulated in an ActorModule. Note: this tutorial describes implementing a TrainingAgent in TMRL. The TMRL framework is relevant if you want to implement RL approaches. If you plan to try non-RL approaches instead, this is also accepted: just use the Gym environment and do whatever you need, then, wrap your trained policy in an ActorModule, and submit :) """ # Okay, first, let us import some useful stuff. # The constants that are defined in config.json: import tmrl.config.config_constants as cfg # Higher-level partially instantiated classes that are fixed for the competition: # (in particular this includes the Gym environment) import tmrl.config.config_objects as cfg_obj # higher-level constants that are fixed for the competition # The utility that is used in TMRL to partially instantiate classes: from tmrl.util import partial # The main TMRL components of a training pipeline: from tmrl.networking import Server, RolloutWorker, Trainer # The training object that we will customize with our own algorithm to replace the default SAC trainer: from tmrl.training_offline import TrainingOffline # External libraries: import numpy as np # ===================================================================== # USEFUL PARAMETERS # ===================================================================== # You can change these parameters here directly, # or you can change them in the config.json file. 
# maximum number of training 'epochs': # training is checkpointed at the end of each 'epoch' # this is also when training metrics can be logged to wandb epochs = cfg.TMRL_CONFIG["MAX_EPOCHS"] # number of rounds per 'epoch': # training metrics are displayed in terminal at the end of each round rounds = cfg.TMRL_CONFIG["ROUNDS_PER_EPOCH"] # number of training steps per round: # (a training step is a call to the train() function that we will define later) steps = cfg.TMRL_CONFIG["TRAINING_STEPS_PER_ROUND"] # minimum number of environment steps collected before training starts # (this is useful when you want to fill your replay buffer with samples from a baseline policy) start_training = cfg.TMRL_CONFIG["ENVIRONMENT_STEPS_BEFORE_TRAINING"] # maximum training steps / env steps ratio: # (if training becomes faster than this ratio, it will be paused waiting for new samples from the environment) max_training_steps_per_env_step = cfg.TMRL_CONFIG["MAX_TRAINING_STEPS_PER_ENVIRONMENT_STEP"] # number of training steps between when the Trainer broadcasts policy updates: update_model_interval = cfg.TMRL_CONFIG["UPDATE_MODEL_INTERVAL"] # number of training steps between when the Trainer updates its replay buffer with the buffer of received samples: update_buffer_interval = cfg.TMRL_CONFIG["UPDATE_BUFFER_INTERVAL"] # training device (e.g., "cuda:0"): # if None, the device will be selected automatically device = None # maximum size of the replay buffer: memory_size = cfg.TMRL_CONFIG["MEMORY_SIZE"] # batch size for training: batch_size = cfg.TMRL_CONFIG["BATCH_SIZE"] # ===================================================================== # ADVANCED PARAMETERS # ===================================================================== # You may want to change the following in advanced applications; # however, most competitors will not need to change this. # If interested, read the full TMRL tutorial. 
# base class of the replay memory: memory_base_cls = cfg_obj.MEM # sample preprocessor for data augmentation: sample_preprocessor = None # path from where an offline dataset can be loaded: dataset_path = cfg.DATASET_PATH # ===================================================================== # COMPETITION FIXED PARAMETERS # ===================================================================== # Competitors CANNOT change the following parameters. # (Note: For models such as RNNs, you don't need to use imgs_buf_len # and act_buf_len, but your ActorModule implementation needs to work # with the observations corresponding to their default values. The rule # about these history lengths is only here for simplicity. You are # allowed to hack this within your ActorModule implementation by, e.g., # storing histories if you like.) # rtgym environment class (full TrackMania Gym environment): env_cls = cfg_obj.ENV_CLS # number of consecutive screenshots (this is part of observations): imgs_buf_len = cfg.IMG_HIST_LEN # number of actions in the action buffer (this is part of observations): act_buf_len = cfg.ACT_BUF_LEN # ===================================================================== # MEMORY CLASS # ===================================================================== # Nothing to do here. # This is the memory class passed to the Trainer. # If you need a custom memory, change the relevant advanced parameters. memory_cls = partial(memory_base_cls, memory_size=memory_size, batch_size=batch_size, sample_preprocessor=sample_preprocessor, dataset_path=cfg.DATASET_PATH, imgs_obs=imgs_buf_len, act_buf_len=act_buf_len, crc_debug=False, use_dataloader=False, pin_memory=False) # ===================================================================== # CUSTOM MODEL # ===================================================================== # Alright, now for the fun part. 
# Our goal in this competition is to come up with the best trained # ActorModule for TrackMania 2020, where an 'ActorModule' is a policy. # In this tutorial, we present a deep RL-way of tackling this problem: # we implement our own deep neural network architecture (ActorModule), # and then we implement our own RL algorithm to train this module.. # We implement SAC and a hybrid CNN/MLP model. # The following constants are from the Spinnup implementation of SAC # that we simply adapt in this tutorial. LOG_STD_MAX = 2 LOG_STD_MIN = -20 from tmrl.actor import ActorModule import torch # In the full version of the TrackMania 2020 environment, the # observation-space comprises a history of screenshots. Thus, we need # Computer Vision layers such as a CNN in our model to process these. # The observation space also comprises single floats representing speed, # rpm and gear. We will merge these with the information contained in # screenshots thanks to an MLP following our CNN layers. # Let us first define a simple MLP: def mlp(sizes, activation, output_activation=nn.Identity): layers = [] for j in range(len(sizes) - 1): act = activation if j < len(sizes) - 2 else output_activation layers += [nn.Linear(sizes[j], sizes[j + 1]), act()] return nn.Sequential(*layers) # This utility computes the dimensionality of CNN feature maps when flattened together: def num_flat_features(x): size = x.size()[1:] # dimension 0 is the batch dimension, so it is ignored num_features = 1 for s in size: num_features *= s return num_features # This utility computes the dimensionality of the output in a 2D CNN layer: def conv2d_out_dims(conv_layer, h_in, w_in): h_out = floor((h_in + 2 * conv_layer.padding[0] - conv_layer.dilation[0] * (conv_layer.kernel_size[0] - 1) - 1) / conv_layer.stride[0] + 1) w_out = floor((w_in + 2 * conv_layer.padding[1] - conv_layer.dilation[1] * (conv_layer.kernel_size[1] - 1) - 1) / conv_layer.stride[1] + 1) return h_out, w_out # Let us now define the main building block of 
both our actor and critic: class VanillaCNN(Module): def __init__(self, q_net): super(VanillaCNN, self).__init__() # We will implement SAC, which uses a critic; this flag indicates whether the object is a critic network: self.q_net = q_net # Convolutional layers processing screenshots: self.h_out, self.w_out = 64, 64 self.conv1 = Conv2d(4, 64, 8, stride=2) self.h_out, self.w_out = conv2d_out_dims(self.conv1, self.h_out, self.w_out) self.conv2 = Conv2d(64, 64, 4, stride=2) self.h_out, self.w_out = conv2d_out_dims(self.conv2, self.h_out, self.w_out) self.conv3 = Conv2d(64, 128, 4, stride=2) self.h_out, self.w_out = conv2d_out_dims(self.conv3, self.h_out, self.w_out) self.conv4 = Conv2d(128, 128, 4, stride=2) self.h_out, self.w_out = conv2d_out_dims(self.conv4, self.h_out, self.w_out) self.out_channels = self.conv4.out_channels # Dimensionality of the CNN output: self.flat_features = self.out_channels * self.h_out * self.w_out # Dimensionality of the MLP input: # (Note that when the module is the critic, the MLP is also fed the action, which is 3 floats in TrackMania) self.mlp_input_features = self.flat_features + 12 if self.q_net else self.flat_features + 9 # MLP layers: # (when using the model as a policy, we need to sample from a multivariate gaussian defined later in the code; # thus, the output dimensionality is 1 for the critic, and we will define the output layer of policies later) self.mlp_layers = [256, 256, 1] if self.q_net else [256, 256] self.mlp = mlp([self.mlp_input_features] + self.mlp_layers, nn.ReLU) def forward(self, x): if self.q_net: # The critic takes the current action act as additional input # act1 and act2 are the actions in the action buffer (see real-time RL): speed, gear, rpm, images, act1, act2, act = x else: # For the policy, we still need the action buffer in observations: speed, gear, rpm, images, act1, act2 = x # CNN forward pass: x = F.relu(self.conv1(images)) x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = 
F.relu(self.conv4(x)) flat_features = num_flat_features(x) assert flat_features == self.flat_features, f"x.shape:{x.shape}, flat_features:{flat_features}, self.out_channels:{self.out_channels}, self.h_out:{self.h_out}, self.w_out:{self.w_out}" x = x.view(-1, flat_features) # MLP forward pass: if self.q_net: x = torch.cat((speed, gear, rpm, x, act1, act2, act), -1) else: x = torch.cat((speed, gear, rpm, x, act1, act2), -1) x = self.mlp(x) return x # Let us now implement our actor, wrapped in the TMRL ActorModule interface. # A trained such ActorModule is all you need to submit to the competition. class SquashedGaussianVanillaCNNActor(ActorModule): """ ActorModule class wrapping our policy. """ def __init__(self, observation_space, action_space): """ If you want to reimplement __init__, use the observation_space, action_space arguments. You don't have to use them, they are only here for convenience in case you want them. Args: observation_space: observation space of the Gym environment action_space: action space of the Gym environment """ # And don't forget to call the superclass __init__: super().__init__(observation_space, action_space) dim_act = action_space.shape[0] # dimensionality of actions act_limit = action_space.high[0] # maximum amplitude of actions # Our CNN+MLP module: self.net = VanillaCNN(q_net=False) # The policy output layer, which samples actions stochastically in a gaussian, with means...: self.mu_layer = nn.Linear(256, dim_act) # ... and log standard deviations: self.log_std_layer = nn.Linear(256, dim_act) # We will squash this within the action space thanks to a tanh activation: self.act_limit = act_limit def forward(self, obs, test=False): """ Forward pass in our policy. 
Args: obs: the observation from the Gym environment test: this will be True for test episodes and False for training episodes Returns: pi_action: the action sampled in the policy logp_pi: the log probability of the action for SAC """ # MLP: net_out = self.net(obs) # means of the multivariate gaussian (action vector) mu = self.mu_layer(net_out) # standard deviations: log_std = self.log_std_layer(net_out) log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX) std = torch.exp(log_std) # action sampling pi_distribution = Normal(mu, std) if test: pi_action = mu else: pi_action = pi_distribution.rsample() # log probabilities: logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1) logp_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2 * pi_action))).sum(axis=1) # squashing within the action space: pi_action = torch.tanh(pi_action) pi_action = self.act_limit * pi_action pi_action = pi_action.squeeze() return pi_action, logp_pi def act(self, obs, test=False): """ Computes an action from an observation. Args: obs (object): the observation test (bool): True at test time, False otherwise Returns: act (numpy.array): the computed action """ with torch.no_grad(): a, _ = self.forward(obs, test, False) return a.numpy() # The critic module is straightforward: class VanillaCNNQFunction(nn.Module): """ Critic module. """ def __init__(self, observation_space, action_space): super().__init__() self.net = VanillaCNN(q_net=True) def forward(self, obs, act): x = (*obs, act) q = self.net(x) return torch.squeeze(q, -1) # Finally, let us merge this together into an actor-critic module for training. # Classically, we use two parallel critics to alleviate the overestimation bias. class VanillaCNNActorCritic(nn.Module): """ Actor-critic module for the SAC algorithm. 
""" def __init__(self, observation_space, action_space): super().__init__() # build policy and value functions self.actor = SquashedGaussianVanillaCNNActor(observation_space, action_space) self.q1 = VanillaCNNQFunction(observation_space, action_space) self.q2 = VanillaCNNQFunction(observation_space, action_space) def act(self, obs, test=False): with torch.no_grad(): a, _ = self.actor(obs, test, False) return a.numpy() # ===================================================================== # CUSTOM TRAINING ALGORITHM # ===================================================================== # So far, we have implemented our custom model. # We have also wrapped it in an ActorModule, which we will train and # submit as an entry to the TMRL competition. # Our ActorModule will be used in Workers to collect training data. # Our VanillaCNNActorCritic will be used in the Trainer for training # this ActorModule. Let us now tackle the training algorithm per-se. # In TMRL, this is done by implementing a custom TrainingAgent. # A TrainingAgent must implement two methods: # - train(batch): optimizes the model from a batch of RL samples # - get_actor(): outputs a copy of the current ActorModule # In this tutorial, we will implement the Soft Actor-Critic algorithm # by adapting the OpenAI Spinnup implementation to the TMRL library. class SACTrainingAgent(TrainingAgent): """ Our custom training algorithm (SAC). 
Args: observation_space (Gym.spaces.Space): observation space (here for your convenience) action_space (Gym.spaces.Space): action space (here for your convenience) device (str): torch device that should be used for training (e.g., `"cpu"` or `"cuda:0"`) """ # no-grad copy of the model used to send the Actor weights in get_actor(): model_nograd = cached_property(lambda self: no_grad(copy_shared(self.model))) def __init__(self, observation_space=None, # Gym observation space (required argument here for your convenience) action_space=None, # Gym action space (required argument here for your convenience) device=None, # Device our TrainingAgent should use for training (required argument) model_cls=MyActorCriticModule, # an actor-critic module, encapsulating our ActorModule gamma=0.99, # discount factor polyak=0.995, # exponential averaging factor for the target critic alpha=0.2, # fixed (SAC v1) or initial (SAC v2) value of the entropy coefficient lr_actor=1e-3, # learning rate for the actor lr_critic=1e-3, # learning rate for the critic lr_entropy=1e-3, # entropy autotuning coefficient (SAC v2) learn_entropy_coef=True, # if True, SAC v2 is used, else, SAC v1 is used target_entropy=None): # if None, the target entropy for SAC v2 is set automatically super().__init__(observation_space=observation_space, action_space=action_space, device=device) model = model_cls(observation_space, action_space) self.model = model.to(device) self.model_target = no_grad(deepcopy(self.model)) self.gamma = gamma self.polyak = polyak self.alpha = alpha self.lr_actor = lr_actor self.lr_critic = lr_critic self.lr_entropy = lr_entropy self.learn_entropy_coef=learn_entropy_coef self.target_entropy = target_entropy self.q_params = itertools.chain(self.model.q1.parameters(), self.model.q2.parameters()) self.pi_optimizer = Adam(self.model.actor.parameters(), lr=self.lr_actor) self.q_optimizer = Adam(self.q_params, lr=self.lr_critic) if self.target_entropy is None: self.target_entropy = 
-np.prod(action_space.shape).astype(np.float32) else: self.target_entropy = float(self.target_entropy) if self.learn_entropy_coef: self.log_alpha = torch.log(torch.ones(1, device=self.device) * self.alpha).requires_grad_(True) self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr_entropy) else: self.alpha_t = torch.tensor(float(self.alpha)).to(self.device) def get_actor(self): return self.model_nograd.actor def train(self, batch): o, a, r, o2, d, _ = batch pi, logp_pi = self.model.actor(o) loss_alpha = None if self.learn_entropy_coef: alpha_t = torch.exp(self.log_alpha.detach()) loss_alpha = -(self.log_alpha * (logp_pi + self.target_entropy).detach()).mean() else: alpha_t = self.alpha_t if loss_alpha is not None: self.alpha_optimizer.zero_grad() loss_alpha.backward() self.alpha_optimizer.step() q1 = self.model.q1(o, a) q2 = self.model.q2(o, a) with torch.no_grad(): a2, logp_a2 = self.model.actor(o2) q1_pi_targ = self.model_target.q1(o2, a2) q2_pi_targ = self.model_target.q2(o2, a2) q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ) backup = r + self.gamma * (1 - d) * (q_pi_targ - alpha_t * logp_a2) loss_q1 = ((q1 - backup)**2).mean() loss_q2 = ((q2 - backup)**2).mean() loss_q = loss_q1 + loss_q2 self.q_optimizer.zero_grad() loss_q.backward() self.q_optimizer.step() for p in self.q_params: p.requires_grad = False q1_pi = self.model.q1(o, pi) q2_pi = self.model.q2(o, pi) q_pi = torch.min(q1_pi, q2_pi) loss_pi = (alpha_t * logp_pi - q_pi).mean() self.pi_optimizer.zero_grad() loss_pi.backward() self.pi_optimizer.step() for p in self.q_params: p.requires_grad = True with torch.no_grad(): for p, p_targ in zip(self.model.parameters(), self.model_target.parameters()): p_targ.data.mul_(self.polyak) p_targ.data.add_((1 - self.polyak) * p.data) ret_dict = dict( loss_actor=loss_pi.detach(), loss_critic=loss_q.detach(), ) if self.learn_entropy_coef: ret_dict["loss_entropy_coef"] = loss_alpha.detach() ret_dict["entropy_coef"] = alpha_t.item() return ret_dict 
training_agent_cls = partial(SACTrainingAgent, model_cls=MyActorCriticModule, gamma=0.99, polyak=0.995, alpha=0.2, lr_actor=1e-3, lr_critic=1e-3, lr_entropy=1e-3, learn_entropy_coef=True, target_entropy=None) # Trainer instance: training_cls = partial( TrainingOffline, env_cls=env_cls, memory_cls=memory_cls, training_agent_cls=training_agent_cls, epochs=epochs, rounds=rounds, steps=steps, update_buffer_interval=update_buffer_interval, update_model_interval=update_model_interval, max_training_steps_per_env_step=max_training_steps_per_env_step, start_training=start_training, device=device) training_agent_cls = None training_cls = partial(TrainingOffline, training_agent_cls=training_agent_cls, epochs=epochs, rounds=rounds, steps=steps, update_buffer_interval=update_buffer_interval, update_model_interval=update_model_interval, max_training_steps_per_env_step=max_training_steps_per_env_step, start_training=start_training, device=device, env_cls=env_cls, memory_cls=memory_cls) if __name__ == "__main__": my_trainer = Trainer(training_cls=training_cls)
surasakCH/AIB-TMRL
tmrl/tuto/tuto_competition.py
tuto_competition.py
py
22,561
python
en
code
0
github-code
13
34649879273
''' Museum Price Challenge The problem statement is as follows. Let's assume that we have a museum that has the following policy for the admission price based on a full price ticket of $12.50. The museum is closed on Mondays. Everyone gets half price discount on Tuesday and Thursdays. If you are age between 13 and 20 (including min-max), you will get the discount on Wednesdays. If you are younger than 6, or older than 65, your admission if free. If you are age between 6 and 12 (including min-max), your admission is half price on the Weekend (Saturday and Sunday). Build the program that gives the user to input the day of the week and his/her age, then gives the user information about the pricing for him/her. Your program should only have three patterns of output 1.) "We are closed on Monday," 2.) "You get half price discount of $99.99!", 3.) "You pay full price pf $99.99." Test your program with multiple days and multiple ages – before you run the program, determine what the answer should be to check for accuracy. BONUS: rather than input the day of the week, use date functions to try and determine what the day of the week is based on the current system date. ''' import datetime weekDays = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday") currdate = datetime.datetime.now() currday = currdate.weekday() currdateAsstring = weekDays[currday] admission = float(12.50) while True: age = int(input("Please enter your age: ")) if currdateAsstring == "Monday": print("The Museum is closed on Mondays.") elif age < 6 or age > 65: print("Your admission is free!") elif currdateAsstring == "Tuesday" or currdateAsstring == " Thursday": halfprice = admission/2 print(f"Admission is half price today! {halfprice:.2f}") elif currdateAsstring == "Wednesday" and age >= 13 and age <= 20: halfprice = admission/2 print(f"Admission is half price today! 
{halfprice:.2f}") elif currdateAsstring == "Saturday" or currdateAsstring == "Sunday" and age >= 6 and age <= 12: halfprice = admission/2 print(f"Admission is half price today! {halfprice:.2f}") else: print(f"{admission}") end = input("Do you want to add another ticket? Y or N: ") #End the loop here by inputting n. if end.lower() != "y": break print("Welcome to the Museum!")
MarkCrocker/Python
boolean2.py
boolean2.py
py
2,403
python
en
code
0
github-code
13
4693622431
import boto3, s3fs import pandas as pd from collections import Counter from boto3 import client import pandas as pd import requests from dynamo_pandas import get_df ############ AUDIO_FOLDER = 'AUDIOS/AUDIO' def s3_objects(): conn = client('s3') # again assumes boto.cfg setup, assume AWS S3 objects = conn.list_objects(Bucket='quantcldata', Prefix=AUDIO_FOLDER) return objects def s3_audio_list(): objects = s3_objects() files = [key['Key'] for key in objects['Contents']] times = [key['LastModified'] for key in objects['Contents']] return pd.DataFrame(dict(filename=files, time=times)) def audio_summary(): objects = s3_objects() cdf = pd.DataFrame(objects['Contents']) cdf['user'] = cdf.Key.apply(lambda k: k.split('/')[-1].split('_')) #cdf return cdf.user.value_counts() ############# def ddb(): return boto3.resource('dynamodb', region_name='us-east-2') def scan_table(tablename): dynamodb = ddb() table = dynamodb.Table(tablename) #resp = table.scan(ProjectionExpression="id, data") resp = table.scan() return pd.DataFrame(resp['Items']) def scan_pats(): dynamodb = ddb() table = dynamodb.Table('Pacientes') return table.scan()['Items'] def schedules(): URL = 'https://quantcldata.s3.us-east-2.amazonaws.com/CLIENTES/CORFO/pacientes_test.json' return pd.read_json(URL) def dt(di): try: ts = di.split('"')[0].split('(')[2].split(',')[:6] except: ts = di.split('"')[0].split(',') #return [int(t) for t in ts] return ts def getit(text, toke): rj = text.index(toke) return float(text[rj:].split(' ')[1][:-1]) def freqs(d): # {'JOMAX Contacto': (5221716593, datetime.datetime(2022, 7, 29, 22, 1, 9, tzinfo=<UTC>), # "{'F0': '193 [M=(100,165), F=(190,262)]', 'F0dev': 16.9653, 'hnr': '20 [16.5, 20]', # 'nhr': '0.014 [0.11, 0.19]', 'localJit': 0.0044, 'localabsoluteJitter': 2e-05, # 'rapJitter': 0.00255, 'ppq5Jitter': 0.0026, 'ddpJitter': 0.00766, 'localShimmer': 0.03913, # 'localdbShimmer': 0.33881, 'apq3Shimmer': 0.02202, 'aqpq5Shimmer': 0.02408, 'apq11Shimmer': 0.02804, # 'ddaShimmer': 
0.06607, 'intensity': '72 [55,80]', 'PPE': 0.0003, 'Parkinson': '87.2%', 'F1': '601 [M=(718,906), F=(430,970)]', # 'F2': '1284 [M=(1160,1300), F=(1380,1820)]', 'F3': '2247 [M=(2520,3020), F=(2750,3250)]', 'F4': '3431 [M=(3700,4250), F=(4050,4550)]', # 'F2/F1': '2.140 [a=1.6,e=3.4,6.8,2.4]'}")} ds = d[1:700] FREQ_TOKES = ["'F0'","'F1'","'F2'","'F3'","'F4'"] cuts1 = [ds.index(toke) for toke in FREQ_TOKES] # es distinto para "'F0dev'", "'rapJitter'"]] #print(cuts1) #cuts2 = [float(ds[cut:].split("'")[3].split(' ')[0]) for cut in cuts1] # 'F1': '815 cuts2 = [ds[cut:].split("'")[3] for cut in cuts1[:-1]] # F4 is missing #cut4 = ds[cuts1[-1]].split("'")[2].split(' ')[1] for toke in ['rapJitter','localShimmer']: tx = getit(ds, toke) cuts2.append(tx) return cuts2 def ts(t): if isinstance(t,str): t1,t2=t.split() t12 = t1.split('-')+t2.split('-') return [int(tx.replace("'","")) for tx in t12] else: return t def get_coefs(d): #eval(dd_df.iloc[0]['data']) #dd_df.iloc[3]['data'].find("'F0'") #dd_df.iloc[3]['data'].find("'F2/F1'") try: return eval(d)['JOMAX Contacto'][2] except: start = d.find("'F0'") stop = d.find("'F2/F1'") return d[start:stop] def audio_data(all=False): # should be get_df(table='audios_pacientes') adf = scan_table('audios_pacientes') # id, data(3+12) out = adf #adf['time'] = adf['data'].apply(dt) #adf['fecha'] = adf.time.apply(lambda t: '%d-%02d-%02d' %(t[0],t[1],t[2])) #adf['hora'] = adf.time.apply(lambda t: '%d:%02d:%02d' %(t[3],t[4],t[5])) #adf['coefs'] = adf['data'].apply(freqs) #print(summary) #out = out[out.id.str.contains('JOMAX')] #out['time'] = out['time'].apply(lambda t: t[1] # if isinstance(t,list) and isinstance(t[0],str) # else t) #out['time'] = out['time'].apply(lambda t: t if len(t)==6 else t[1]) #out['time'] = out['time'].apply(ts) #out['coefs'] = out['data'].apply(get_coefs) #if 'data' in out: # out = out.drop(columns=['data']) return out
corfo-parkinsons/corfo-parkinsons-streamlit
aws.py
aws.py
py
4,434
python
en
code
0
github-code
13
29114609969
# -*- coding: utf-8 -*- import copy import os import unittest import shutil from parameterized import parameterized import tensorflow as tf from opennmt import Runner from opennmt.config import load_model from opennmt.utils import misc from opennmt.tests import test_util test_dir = os.path.dirname(os.path.realpath(__file__)) root_dir = os.path.join(test_dir, "..", "..") test_data = os.path.join(root_dir, "testdata") @unittest.skipIf(not os.path.isdir(test_data), "Missing test data directory") class RunnerTest(tf.test.TestCase): def _getTransliterationRunner(self, base_config=None, model_version="v2"): model_dir = os.path.join(self.get_temp_dir(), "model") shutil.copytree(os.path.join(test_data, "transliteration-aren-v2", model_version), model_dir) config = {} config["model_dir"] = model_dir config["data"] = { "source_vocabulary": os.path.join(model_dir, "ar.vocab"), "target_vocabulary": os.path.join(model_dir, "en.vocab"), } if base_config is not None: config = misc.merge_dict(config, base_config) model = load_model(model_dir) runner = Runner(model, config) return runner def _makeTransliterationData(self): ar = [ "آ ت ز م و ن", "آ ت ش ي س و ن", "آ ر ب ا ك ه", "آ ر ث ر", "آ ز ا", ] en = [ "a t z m o n", "a c h e s o n", "a a r b a k k e", "a r t h u r", "a s a" ] ar_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), "ar.txt"), ar) en_file = test_util.make_data_file(os.path.join(self.get_temp_dir(), "en.txt"), en) return ar_file, en_file def testTrain(self): ar_file, en_file = self._makeTransliterationData() config = { "data": { "train_features_file": ar_file, "train_labels_file": en_file }, "params": { "learning_rate": 0.0005, "optimizer": "Adam" }, "train": { "batch_size": 10, "average_last_checkpoints": 4, "save_checkpoints_steps": 1, "max_step": 145002 # Just train for 2 steps. 
} } runner = self._getTransliterationRunner(config) avg_dir = runner.train() self.assertEndsWith(tf.train.latest_checkpoint(avg_dir), "145002") self.assertLen(tf.train.get_checkpoint_state(avg_dir).all_model_checkpoint_paths, 1) model_dir = os.path.dirname(avg_dir) self.assertEndsWith(tf.train.latest_checkpoint(model_dir), "145002") self.assertLen(tf.train.get_checkpoint_state(model_dir).all_model_checkpoint_paths, 3) # Check that the averaged checkpoint is usable. ar_file, _ = self._makeTransliterationData() en_file = os.path.join(self.get_temp_dir(), "output.txt") runner.infer(ar_file, predictions_file=en_file, checkpoint_path=avg_dir) with open(en_file) as f: self.assertEqual(next(f).strip(), "a t z m o n") def testTrainWithEval(self): ar_file, en_file = self._makeTransliterationData() config = { "data": { "train_features_file": ar_file, "train_labels_file": en_file, "eval_features_file": ar_file, "eval_labels_file": en_file }, "params": { "learning_rate": 0.0005, "optimizer": "Adam" }, "train": { "batch_size": 10, "max_step": 145002 # Just train for 2 steps. 
}, "eval": { "export_on_best": "loss" } } runner = self._getTransliterationRunner(config) model_dir = runner.train(with_eval=True) export_dir = os.path.join(model_dir, "export", "145002") self.assertTrue(os.path.exists(export_dir)) self.assertTrue(tf.saved_model.contains_saved_model(export_dir)) def testEvaluate(self): ar_file, en_file = self._makeTransliterationData() config = { "data": { "eval_features_file": ar_file, "eval_labels_file": en_file }, "eval": { "external_evaluators": "BLEU" } } runner = self._getTransliterationRunner(config) metrics = runner.evaluate() self.assertIn("loss", metrics) self.assertIn("bleu", metrics) @parameterized.expand([[1, "v2"], [4, "v2"], [1, "v1"]]) def testInfer(self, beam_size, model_version): config = { "params": { "beam_width": beam_size } } runner = self._getTransliterationRunner(config, model_version) ar_file, _ = self._makeTransliterationData() en_file = os.path.join(self.get_temp_dir(), "output.txt") runner.infer(ar_file, predictions_file=en_file) self.assertTrue(os.path.exists(en_file)) with open(en_file) as f: lines = f.readlines() self.assertEqual(len(lines), 5) self.assertEqual(lines[0].strip(), "a t z m o n") def testUpdateVocab(self): config = { "params": { "learning_rate": 0.0005, "optimizer": "Adam" } } runner = self._getTransliterationRunner(config) # Reverse order of non special tokens. new_en_vocab = os.path.join(self.get_temp_dir(), "en.vocab.new") with open(os.path.join(runner._config["model_dir"], "en.vocab")) as en_vocab, \ open(new_en_vocab, "w") as new_vocab: tokens = en_vocab.readlines() for token in tokens[:3]: new_vocab.write(token) for token in reversed(tokens[3:]): new_vocab.write(token) output_dir = os.path.join(self.get_temp_dir(), "updated_vocab") self.assertEqual(runner.update_vocab(output_dir, tgt_vocab=new_en_vocab), output_dir) # Check that the translation is unchanged. 
new_config = copy.deepcopy(runner._config) new_config["model_dir"] = output_dir new_config["data"]["target_vocabulary"] = new_en_vocab runner = Runner(runner._model, new_config) ar_file, _ = self._makeTransliterationData() en_file = os.path.join(self.get_temp_dir(), "output.txt") runner.infer(ar_file, predictions_file=en_file) with open(en_file) as f: self.assertEqual(next(f).strip(), "a t z m o n") def testScore(self): runner = self._getTransliterationRunner() ar_file, en_file = self._makeTransliterationData() score_file = os.path.join(self.get_temp_dir(), "scores.txt") runner.score(ar_file, en_file, output_file=score_file) self.assertTrue(os.path.exists(score_file)) with open(score_file) as f: lines = f.readlines() self.assertEqual(len(lines), 5) def testExport(self): config = { "data": { "source_tokenization": { "mode": "char" } } } export_dir = os.path.join(self.get_temp_dir(), "export") runner = self._getTransliterationRunner(config) runner.export(export_dir) self.assertTrue(tf.saved_model.contains_saved_model(export_dir)) extra_assets_dir = os.path.join(export_dir, "assets.extra") self.assertTrue(os.path.isdir(extra_assets_dir)) self.assertLen(os.listdir(extra_assets_dir), 1) imported = tf.saved_model.load(export_dir) translate_fn = imported.signatures["serving_default"] outputs = translate_fn( tokens=tf.constant([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]]), length=tf.constant([6], dtype=tf.int32)) result = tf.nest.map_structure(lambda x: x[0, 0], outputs) tokens = result["tokens"][:result["length"]] self.assertAllEqual(tokens, [b"a", b"t", b"z", b"m", b"o", b"n"]) if __name__ == "__main__": tf.test.main()
hhmlai/OpenNMT-tf
opennmt/tests/runner_test.py
runner_test.py
py
7,557
python
en
code
null
github-code
13
17159235657
import numpy as np import matplotlib.pyplot as plt array1 = np.array([[0,1], [1,0]]).astype(int) array2 = np.array([[0,0], [1,1]]) array3 = np.array([[1,1], [0,0]]) array4 = np.array([[1,0], [0,1]]) fig, axs = plt.subplots(2, 2) axs[0, 0].imshow(array1) axs[0, 1].imshow(array2) axs[1, 0].imshow(array3) axs[1, 1].imshow(array4) plt.show() img_range = 0.1*np.arange(100).reshape(100,1) plt.imshow(img_range) plt.show()
Phayuth/robotics_manipulator
util/img_index_sequence.py
img_index_sequence.py
py
499
python
en
code
0
github-code
13
6044247109
import xmlrpc.client import json proxy = xmlrpc.client.ServerProxy("http://127.0.0.1:7778/") print("menu :") print("1. tampil semua sensor") print("2. tampil sensor suhu") print("3. tampil sensor kelembaban") print("4. tampil sensor kadar CO") menu = input("pilih menu no : ") if menu == "1" : proxy.getAllsensor() elif menu == "2" : proxy.getsensorsuhu() elif menu == "3" : proxy.getsensorkelembaban() elif menu == "4" : proxy.getsensorCO() else: print("menu tidak ada")
zeddinarief/skt
pengguna.py
pengguna.py
py
483
python
en
code
0
github-code
13
1456847542
#!/usr/bin/env python import sys if __name__ == "__main__": dem = str(input("voulez vouz multiplier ou additionner? tapez m pour multiplier ou a pour additionner: ")) if (dem =='a'): if len(sys.argv)>3: print("Veillez inserer que deux arguments") elif len(sys.argv)==3: x = int( sys.argv[1] ) y = int( sys.argv[2] ) print((x)," + ",(y)," = ",(x+y)) elif len(sys.argv)==2: print("Trop peu d'argument") print("Entrer la deuxième valeur: ") x = int( sys.argv[1] ) y = int(input()) print((x)," + ",(y)," = ",(x+y)) else: print("Ajouter deux arguments: ") x = int(input()) y = int(input()) print((x)," + ",(y)," = ",(x+y)) elif(dem == 'm'): if len(sys.argv) > 3: print("saisissez que deux argument") elif len(sys.argv) == 3: x = int(sys.argv[1]) y = int(sys.argv[2]) print(("x * y = "),(x*y)) elif len(sys.argv)==2: print("peu d'argument") x = int(sys.argv[1]) y = int(input("inserez le 2eme valeur: ")) print((x), "*",(y) ,"= ",(x*y)) else: print("ajouter des valeur") x = int(input("ajoutez 1er valeur: ")) y = int(input("ajoutez la 2eme valeur: ")) print((x),"*",(y),"=",(x*y)) else: print("pena programmation")
Dina64/td3_Dina_Ando
main.py
main.py
py
1,214
python
en
code
0
github-code
13
11056545631
from lib.libdata import * from lib.libcnn import * import argparse import matplotlib import random matplotlib.use('Agg') import matplotlib.pyplot as plt # command line arguments parser = argparse.ArgumentParser() parser.add_argument('--seed', metavar='seed', dest='seed', default=1776, type=int, help='RNG seed') parser.add_argument('--gpu_usage', metavar='gpu_usage', dest='gpu_usage', default=0.2, type=float, help='Proportion of GPU memory to use') parser.add_argument('--output_prefix', metavar='output_prefix', dest='output_prefix', default='', type=str, help='Prefix for output files') parser.add_argument('--num_classes', metavar='num_classes', dest='num_classes', default=5, type=int, help='Number of communities in PPM') parser.add_argument('--size_communities', metavar='size_communities', dest='size_communities', default=20, type=int, help='Size of /each/ community in PPM') parser.add_argument('--p', metavar='p', dest='p', default=0.8, type=float, help='Intracluster edge probability') parser.add_argument('--q', metavar='q', dest='q', default=0.2, type=float, help='Intercluster edge probability') parser.add_argument('--max_diffuse', metavar='max_diffuse', dest='max_diffuse', default=25, type=int, help='Upper bound on random node diffusion time') parser.add_argument('--noise_energy', metavar='noise_energy', dest='noise_energy', default=0.0, type=float, help='Standard deviation of Gaussian noise to corrupt flow signals') parser.add_argument('--corrupt_set', metavar='corrupt_set', dest='corrupt_set', default='both', choices=['both', 'train', 'valid'], type=str, help='Corrupt training set, validation set, or both') parser.add_argument('--shift_operator', metavar='shift_operator', dest='shift_operator', default='hodge', choices=['hodge', 'linegraph', 'laplacian'], type=str, help='Aggregation operator, Laplacian for node-space aggregation CNN') parser.add_argument('--smooth_filter', metavar='smooth_filter', dest='smooth_filter', default=True, type=bool, help='Decides if 
aggregation operator is low-pass') parser.add_argument('--num_shifts', metavar='num_shifts', dest='num_shifts', default=128, type=int, help='Number of aggregation steps: pass 0 or negative to use number of edges') parser.add_argument('--num_train', metavar='num_train', dest='num_train', default=10000, type=int, help='Training set size') parser.add_argument('--num_valid', metavar='num_valid', dest='num_valid', default=2000, type=int, help='Validation set size') parser.add_argument('--batch_size', metavar='batch_size', dest='batch_size', default=100, type=int, help='Training batch size') parser.add_argument('--learning_rate', metavar='learning_rate', dest='learning_rate', default=0.0001, type=float, help='ADAM optimizer learning rate') parser.add_argument('--epochs', metavar='epochs', dest='epochs', default=1000, type=int, help='Maximum number of training epochs') parser.add_argument('--patience', metavar='patience', dest='patience', default=10, type=int, help='Number of epochs without improvement before early stop. 
If 0 or negative, dont do early stopping') parser.add_argument('--dropout_rate', metavar='dropout_rate', dest='dropout_rate', default=0.5, type=float, help='Dropout rate for fully connected classifier') parser.add_argument('--layer_depths', metavar='layer_depths', dest='layer_depths', default=[16,32], nargs='+', type=int, help='Sequence of layer depths for CNN: filter_counts 16 32') parser.add_argument('--kernel_sizes', metavar='kernel_sizes', dest='kernel_sizes', default=[4,8], nargs='+', type=int, help='Sequence of kernel sizes for CNN: kernel_sizes 4 8') args = parser.parse_args() # collect parameters seed = args.seed gpu_usage = args.gpu_usage output_prefix = args.output_prefix num_classes = args.num_classes size_communities = args.size_communities p = args.p q = args.q max_diffuse = args.max_diffuse noise_energy = args.noise_energy corrupt_set = args.corrupt_set shift_operator = args.shift_operator smooth_filter = args.smooth_filter num_shifts = args.num_shifts num_train = args.num_train num_valid = args.num_valid batch_size = args.batch_size learning_rate = args.learning_rate epochs = args.epochs patience = args.patience layer_depths = args.layer_depths kernel_sizes = args.kernel_sizes dropout_rate = args.dropout_rate if patience < 1: patience = epochs # check passed parameters assert gpu_usage > 0.0 and gpu_usage <= 1.0 assert num_classes > 0 assert size_communities > 0 assert p > 0.0 and q > 0.0 and p <= 1.0 and q <= 1.0 assert max_diffuse > 0 assert noise_energy >= 0 assert num_train > 0 assert num_valid > 0 assert batch_size > 0 assert learning_rate > 0 assert epochs > 0 assert len(layer_depths) == len(kernel_sizes) assert (np.array(layer_depths) > 0).all() assert (np.array(kernel_sizes) > 0).all() assert dropout_rate >= 0.0 and dropout_rate <= 1.0 ################################################################# # random seeding np.random.seed(seed) tf.set_random_seed(seed) random.seed(seed) # limit GPU usage to 1/5 config = tf.ConfigProto() 
config.gpu_options.per_process_gpu_memory_fraction = gpu_usage keras.backend.set_session(tf.Session(config=config)) # generate data G, max_degree_nodes, sample_edges = SBM(num_classes, size_communities, p, q) agg_edges = [sample_edges[0]] agg_nodes = [max_degree_nodes[0]] if num_shifts < 1: num_shifts = G.size() A = scaledadjacencymatrix(G).todense() labels = np.random.choice(num_classes, num_train + num_valid) y = keras.utils.to_categorical(labels, num_classes) y_tr, y_vld = (y[:num_train], y[num_train:]) print('Creating flow data') source_nodes = [max_degree_nodes[c] for c in labels] diffusion_times = np.random.choice(max_diffuse, num_train + num_valid) flows = generateflows(G, A, max_diffuse, diffusion_times, source_nodes) N = noise_energy * np.random.randn(*flows.shape) if corrupt_set == 'train': N[:, num_train:] = 0 elif corrupt_set == 'valid': N[:, :num_train] = 0 flows += N if shift_operator == 'hodge': S = scaledhodgelaplacian(G) elif shift_operator == 'linegraph': S = scaledlinegraphlaplacian(G) flows = np.abs(flows) elif shift_operator == 'laplacian': S = scaledlaplacianmatrix(G) pinvBT = np.linalg.pinv(incidencematrix(G).todense().T) flows = np.array(pinvBT @ flows) # calculate gradients from flows (this is silly but its the easiest way to integrate this experiment in) if smooth_filter: # S = I - S S *= -1 for i in range(S.shape[0]): S[i,i] += 1 print('Creating aggregated signals') if shift_operator == 'laplacian': agg = aggregator(S, agg_nodes, num_shifts) else: agg = aggregator(S, agg_edges, num_shifts) X = np.transpose(agg @ flows, [2, 0, 1]) # (num_train + num_valid) x num_shifts x num observed edges X_tr, X_vld = (X[:num_train], X[num_train:]) # model generation history = AccuracyHistory() earlystopper = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience) model = cnn_model(layer_depths, kernel_sizes, dropout_rate=dropout_rate, learning_rate=learning_rate, num_classes=num_classes) # train! 
model.fit(X_tr, y_tr, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_vld, y_vld), callbacks=[history, earlystopper]) training_score = model.evaluate(X_tr, y_tr, verbose=0) validation_score = model.evaluate(X_vld, y_vld, verbose=0) # save information and results ## parameters and final performance params_file = open(f'results/{output_prefix}-params.txt','w') for arg in vars(args): if arg != 'epochs': params_file.write(f'{arg} {getattr(args, arg)}\n') else: params_file.write(f'{arg} {len(history.train_loss)}/{getattr(args, arg)}\n') params_file.write(f'Training Loss: {training_score[0]}\n') params_file.write(f'Training Accuracy: {training_score[1]}\n') params_file.write(f'Validation Loss: {validation_score[0]}\n') params_file.write(f'Validation Accuracy: {validation_score[1]}\n') params_file.close() ## training, validation loss, accuracy tr_loss_file = open(f'./results/{output_prefix}-tr-loss.txt', 'w') tr_acc_file = open(f'./results/{output_prefix}-tr-acc.txt', 'w') val_loss_file = open(f'./results/{output_prefix}-val-loss.txt', 'w') val_acc_file = open(f'./results/{output_prefix}-val-acc.txt', 'w') for file_handler, training_data in zip([tr_loss_file, tr_acc_file, val_loss_file, val_acc_file], [history.train_loss, history.train_acc, history.val_loss, history.val_acc]): for index, item in enumerate(training_data): file_handler.write(f'{index}\t{item}\n') file_handler.close()
tmrod/hodgenet
hodgeaggregation/main.py
main.py
py
8,752
python
en
code
1
github-code
13
18383341780
import sys import random import operator import numpy as np from functools import reduce from datetime import datetime from sklearn.metrics import auc import matplotlib.pyplot as plt from utils.dataset_bands import datasets_bands from utils.usno import get_usno_projection, get_usno_vector from utils.panstarr import get_panstarr_projection, get_panstarr_vector def generate_candidates(m, bands): ''' Generate candidates for each element in m for each dataset band ''' return reduce(operator.concat, [ [ { 'image_key': i, 'usno_band': bands[j].get('USNO'), 'panstarr_band': bands[j].get('PanSTARR') } for j in range(len(bands)) ] for i in range(m) ]) def get_v_cid(s): ''' Return a string which uniquely identifies an element of `S` ''' return '{}.{}.{}'.format(s.get('image_key'), s.get('usno_band'), s.get('panstarr_band')) def get_anomalies(): ''' Return mission's `v_cid` of those known to be anomalies ''' return [ get_v_cid({ 'image_key': 13, 'usno_band': 'blue1', 'panstarr_band': 'g' }), get_v_cid({ 'image_key': 13, 'usno_band': 'blue2', 'panstarr_band': 'g' }), get_v_cid({ 'image_key': 56, 'usno_band': 'blue1', 'panstarr_band': 'g' }), get_v_cid({ 'image_key': 56, 'usno_band': 'blue2', 'panstarr_band': 'g' }), get_v_cid({ 'image_key': 679, 'usno_band': 'ir', 'panstarr_band': 'z' }), get_v_cid({ 'image_key': 831, 'usno_band': 'red1', 'panstarr_band': 'r' }), get_v_cid({ 'image_key': 831, 'usno_band': 'red2', 'panstarr_band': 'r' }) ] def compute_vs(S, A): ''' Compute the value `v` for all `s` in `S` using the active set `A` ''' # Compute v for each element in S vs = {} for s in S: # Each v is initially set to 0 v_cid = get_v_cid(s) vs[v_cid] = 0 # Use projection to reduce the dimensionality of x and y x = s.get('usno_vector') y = s.get('panstarr_vector') for member in A: xi = member.get('usno_vector') yi = member.get('panstarr_vector') # Compute `v` v = np.dot(np.dot(x, xi), np.dot(y, yi)) # Keep track of each `v` value using `cid` vs[v_cid] = vs[v_cid] + v if v_cid in vs 
else v return {k: v for k, v in vs.items() if v >= 0} def compute_roc_curve(S, A): ''' Compute the ROC Curve in different steps of threshold `v` ''' # Compute `v` value for the entire dataset vs = compute_vs(S, A) anomalies = get_anomalies() # Number of real anomalies in the data (p = positives) p = len(anomalies) # Real number of non-anomalies in the data (n = negatives) n = len(S) - p fpr, tpr = [], [] for v in sorted(vs.values()): potential_anomalies = list(filter(lambda x: vs[x] < v, vs)) # How many < `v` are correctly classified as anomaly tp = reduce(lambda acc, x: acc + 1 if x in anomalies else acc, potential_anomalies, 0) # How many < `v` are incorrectly classified as anomaly fp = len(potential_anomalies) - tp # Compute false positive rate (fpr) and true positive rate (tpr) fpr.append(fp / float(n)) tpr.append(tp / float(p)) return (fpr, tpr) def export_roc_curve_plot(S, A, t, num_proj, num_anomalies_found): ''' Plot ROC curve of `S`, using the active set `A` ''' fpr, tpr = compute_roc_curve(S, A) plt.plot(fpr, tpr, label='t = {}, AUC = {}'.format(t, '{}'.format(auc(fpr, tpr))[0:4])) plt.title('ROC Curve') plt.xlabel('FPR') plt.ylabel('TPR') plt.legend() plt.savefig('./plots/{}_anomalies_t_{}.png'.format(num_anomalies_found, t)) def tcrawl_candidates(num_proj, max_time_steps): ''' Crawl candidates and recommend those evaluated to be anomalies ''' try: # Generate candidates that will be crawled m = 1001 roc_delimiter = 50 vs_delimiter = 1 # Retrieve anomalies in the dataset anomalies = get_anomalies() # Store pre-processed S in memory for faster processing S = [ { 'image_key': s.get('image_key'), 'usno_band': s.get('usno_band'), 'panstarr_band': s.get('panstarr_band'), 'usno_vector': get_usno_projection(s.get('image_key'), s.get('usno_band'), num_proj), 'panstarr_vector': get_panstarr_projection(s.get('image_key'), s.get('panstarr_band'), num_proj) } for s in generate_candidates(m, datasets_bands) ] # Create a copy with all elements of `S` to use for 
computing the ROC curve S_original = S.copy() # Capture `v` values of missions across different time steps ts_agg = [] vs_min_agg = [] anomalies_found = [] anomalies_agg = {} for x in anomalies: anomalies_agg[x] = [] normal_agg = { '0.blue1.g': [] } t = 0 A = [] stop_criteria = False while not stop_criteria: # Find minimum `v` value and index vs = compute_vs(S, A) vm = min(vs.values()) vm_cid = [k for k in vs if vs[k] == vm] # Break ties randomly if len(vm_cid) > 1: vm_cid = random.choice(vm_cid) else: vm_cid = vm_cid[0] # Remove anomaly from S if found, add to the active set otherwise if vm_cid in anomalies: S = list(filter(lambda x: get_v_cid(x) != vm_cid, S)) anomalies_found.append((vm_cid, t)) else: x = list(filter(lambda x: get_v_cid(x) == vm_cid, S))[0] A.append(x) # Verify if all anomalies have been found before max time steps are reached early_stop = (len(anomalies_found) == len(anomalies) and t < max_time_steps) # Export ROC curve every `roc_delimiter` if (t % roc_delimiter == 0 and t > 0) or early_stop: export_roc_curve_plot(S_original, A, t, num_proj, len(anomalies_found)) # Aggregate `v` values every `vs_delimiter` if t % vs_delimiter == 0 or early_stop: vs = compute_vs(S_original, A) ts_agg.append(t) vs_min_agg.append(vm) for x in anomalies_agg: anomalies_agg[x].append(vs[x]) for x in normal_agg: normal_agg[x].append(vs[x]) print('[t vm_cid vm len(S) len(A)]: [{} {} {} {} {}]'.format(t, vm_cid, vm, len(S), len(A))) t = t + 1 stop_criteria = len(anomalies_found) == len(anomalies) or t == max_time_steps # Create plot of anomalies found plt.figure(100000) plt.plot(ts_agg, vs_min_agg, label='min(v)') # Plot anomalies for (vm_cid, t_found) in anomalies_found: plt.plot(ts_agg, anomalies_agg[vm_cid], label='Anomaly ({}), t = {}'.format(vm_cid, t_found)) # Plot normal observations for x in normal_agg: plt.plot(ts_agg, normal_agg[x], label='Normal ({})'.format(x)) plt.title('Anomalies Found') plt.xlabel('Time steps') plt.ylabel('v') plt.legend() 
plt.savefig('./plots/found_{}_anomalies_t_{}.png'.format(len(anomalies_found), t - 1)) # Create plot of anomalies not found plt.figure(200000) plt.plot(ts_agg, vs_min_agg, label='min(v)') # Plot anomalies not found for x in [x for x in anomalies if x not in list(map(lambda x: x[0], anomalies_found))]: plt.plot(ts_agg, anomalies_agg[x], label='Anomaly ({})'.format(x)) # Plot normal observations for x in normal_agg: plt.plot(ts_agg, normal_agg[x], label='Normal ({})'.format(x)) plt.title('Anomalies Not Found') plt.xlabel('Time steps') plt.ylabel('v') plt.legend() plt.savefig('./plots/not_found_{}_anomalies_t_{}.png'.format(len(anomalies) - len(anomalies_found), t - 1)) # Print summary print('Found: {}/{} anomalies'.format(len(anomalies_found), len(anomalies))) except Exception as e: print('******Unable to crawl candidates: {}******'.format(e)) if __name__ == '__main__': start_time = datetime.now() num_proj, max_time_steps = int(sys.argv[1]), int(sys.argv[2]) print('---Crawling candidates using {} projections up to {} time steps---\n'.format(num_proj, max_time_steps)) tcrawl_candidates(num_proj, max_time_steps) print('Elapsed time: {}'.format(datetime.now() - start_time))
diegocasmo/ml_blink_evaluation
ml_blink.py
ml_blink.py
py
8,167
python
en
code
1
github-code
13
18239208780
from tkinter import * import tkinter.font win = Tk() win.title("Hello world!") myFont = tkinter.font.Font(family="Helvetica", size=12, weight="bold") def cmd1(): print("hello") def close(): win.destroy() button1 = Button(win, text='Turn on', font=myFont, command=cmd1) button1.grid(row=0, column=0) exitButton = Button(win, text='Exit', font=myFont, command=close) exitButton.grid(row=1, column=1) win.protocol('WM_DELETE_WINDOW', close) win.mainloop()
jorgevs/MyPythonTestProject
TkinterTest.py
TkinterTest.py
py
472
python
en
code
0
github-code
13
4341697796
import sys import requests from flask import Flask, jsonify, render_template, request from flask_flatpages import FlatPages from flask_frozen import Freezer import numpy as np import matplotlib.pyplot as plt app = Flask(__name__) pages = FlatPages(app) freezer = Freezer(app) @app.route("/") def index(): return render_template("index.html") @app.route("/info", methods=["POST"]) def info(): res = requests.get("https://code.junookyo.xyz/api/ncov-moh/") if res.status_code != 200: return jsonify({"success": False}) data = res.json() if "global" not in data["data"] or "vietnam" not in data["data"]: return jsonify({"success": False}) return jsonify(data) if __name__ == "__main__": app.run(debug=True) # if len(sys.argv) > 1 and sys.argv[1] == "build": # freezer.freeze() # else: # app.run(debug=True)
vankhaiphan/covid19
app.py
app.py
py
919
python
en
code
0
github-code
13
32952424712
import glados
import derpibooru


class Derpi(glados.Module):
    """Search derpibooru and post the URL of a matching image."""

    @glados.Module.command('derpi', '<s|r> [query]', 'Search derpibooru for an image. The first argument is the mode. **s** means **search**, **r** means **random**.')
    async def derpi(self, message, args):
        # First token selects the mode; the remainder is a comma-separated tag list.
        pieces = args.split(' ', 1)
        mode = pieces[0]
        tags = pieces[1].split(',') if len(pieces) > 1 else ''
        try:
            if mode == 's':
                search = derpibooru.Search()
            elif mode == 'r':
                search = derpibooru.Search().sort_by(derpibooru.sort.RANDOM)
            else:
                await self.provide_help('derpi', message)
                return
            # Only constrain the search when tags were actually supplied.
            if tags != '':
                search = search.query(*tags)
            image = next(search)
            await self.client.send_message(message.channel, image.url)
        except StopIteration:
            await self.client.send_message(message.channel, "No posts found!")
TheComet/GLaDOS2
modules/mlp/derpi.py
derpi.py
py
1,214
python
en
code
4
github-code
13
17799748864
# encoding: utf-8

# ================This module is completely inspired by scikit-image================
# https://github.com/scikit-image/scikit-image/blob/master/skimage/data/__init__.py
# ==================================================================================
# Sample-data access layer: resolves named whole-slide images either from the
# local distribution, a cloned repository checkout, or (via pooch) a remote
# download cache.

import os
import shutil
from typing import Tuple

import openslide
import PIL
from requests.exceptions import HTTPError

from .. import __version__
from ._registry import legacy_registry, registry, registry_urls

legacy_data_dir = os.path.abspath(os.path.dirname(__file__))
histolab_distribution_dir = os.path.join(legacy_data_dir, "..")

try:
    from pooch.utils import file_hash
except ModuleNotFoundError:
    # Function taken from
    # https://github.com/fatiando/pooch/blob/master/pooch/utils.py
    def file_hash(fname: str, alg: str = "sha256") -> str:
        """Calculate the hash of a given file.

        Useful for checking if a file has changed or been corrupted.

        Parameters
        ----------
        fname : str
            The name of the file.
        alg : str
            The type of the hashing algorithm

        Returns
        -------
        hash : str
            The hash of the file.

        Examples
        --------
        >>> fname = "test-file-for-hash.txt"
        >>> with open(fname, "w") as f:
        ...     __ = f.write("content of the file")
        >>> print(file_hash(fname))
        0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
        >>> import os
        >>> os.remove(fname)
        """
        import hashlib

        if alg not in hashlib.algorithms_available:
            raise ValueError("Algorithm '{}' not available in hashlib".format(alg))
        # Calculate the hash in chunks to avoid overloading the memory
        chunksize = 65536
        hasher = hashlib.new(alg)
        with open(fname, "rb") as fin:
            buff = fin.read(chunksize)
            while buff:
                hasher.update(buff)
                buff = fin.read(chunksize)
        return hasher.hexdigest()


def _create_image_fetcher():
    # Returns a (pooch fetcher, data directory) pair; the fetcher is None when
    # pooch is not installed, leaving only the bundled legacy samples usable.
    try:
        import pooch
    except ImportError:
        # Without pooch, fallback on the standard data directory
        # which for now, includes a few limited data samples
        return None, legacy_data_dir

    pooch_version = __version__.replace(".dev", "+")
    url = "https://github.com/histolab/histolab/raw/{version}/histolab/"

    # Create a new friend to manage your sample data storage
    image_fetcher = pooch.create(
        # Pooch uses appdirs to select an appropriate directory for the cache
        # on each platform.
        # https://github.com/ActiveState/appdirs
        # On linux this converges to
        # '$HOME/.cache/histolab-image'
        # With a version qualifier
        path=pooch.os_cache("histolab-images"),
        base_url=url,
        version=pooch_version,
        env="HISTOLAB_DATADIR",
        registry=registry,
        urls=registry_urls,
    )
    data_dir = os.path.join(str(image_fetcher.abspath), "data")
    return image_fetcher, data_dir


# Fetcher and cache directory are created once, at import time.
image_fetcher, data_dir = _create_image_fetcher()

# True when pooch is installed and remote datasets can be downloaded.
if image_fetcher is None:
    HAS_POOCH = False
else:
    HAS_POOCH = True


def _has_hash(path: str, expected_hash: str) -> bool:
    """Check if the provided path has the expected hash.

    Parameters
    ----------
    path: str
    expected_hash: str

    Returns
    -------
    bool
        True if the file hash and the expected one are equal
    """
    if not os.path.exists(path):
        return False
    return file_hash(path) == expected_hash


def _fetch(data_filename: str) -> str:
    """Fetch a given data file from either the local cache or the repository.

    This function provides the path location of the data file given
    its name in the histolab repository.

    Parameters
    ----------
    data_filename: str
        Name of the file in the histolab repository. e.g.
        'breast/sample1.svs'.

    Returns
    -------
    resolved_path: str
        Path of the local file

    Raises
    ------
    KeyError:
        If the filename is not known to the histolab distribution.
    ModuleNotFoundError:
        If the filename is known to the histolab distribution but pooch
        is not installed.
    ConnectionError:
        If the dataset has not been downloaded yet and histolab is unable
        to connect to the internet
    """
    resolved_path = os.path.join(data_dir, "..", data_filename)
    expected_hash = registry[data_filename]

    # Case 1:
    # The file may already be in the data_dir.
    # We may have decided to ship it in the histolab distribution.
    if _has_hash(resolved_path, expected_hash):
        # Nothing to be done, file is where it is expected to be
        return resolved_path

    # Case 2:
    # The user is using a cloned version of the github repo, which
    # contains both the publicly shipped data, and test data.
    # In this case, the file would be located relative to the
    # histolab_distribution_dir
    gh_repository_path = os.path.join(histolab_distribution_dir, data_filename)
    if _has_hash(gh_repository_path, expected_hash):
        parent = os.path.dirname(resolved_path)
        os.makedirs(parent, exist_ok=True)
        shutil.copy2(gh_repository_path, resolved_path)
        return resolved_path

    # Case 3:
    # Pooch not found.
    if image_fetcher is None:
        raise ModuleNotFoundError(
            "The requested file is part of the histolab distribution, "
            "but requires the installation of an optional dependency, pooch. "
            "To install pooch, use your preferred python package manager. "
            "Follow installation instruction found at "
            "https://www.fatiando.org/pooch/latest/install.html"
        )

    # Case 4:
    # Pooch needs to download the data. Let the image fetcher search for
    # our data. A ConnectionError is raised if no internet connection is
    # available.
    try:
        resolved_path = image_fetcher.fetch(data_filename)
    except HTTPError as httperror:
        raise HTTPError(f"{httperror}")
    except ConnectionError:  # pragma: no cover
        # If we decide in the future to suppress the underlying 'requests'
        # error, change this to `raise ... from None`. See PEP 3134.
        raise ConnectionError(
            "Tried to download a histolab dataset, but no internet "
            "connection is available. To avoid this message in the "
            "future, try `histolab.data.download_all()` when you are "
            "connected to the internet."
        )
    return resolved_path


def _init_pooch() -> None:
    # Ensure the cache directory exists and pre-fetch the bundled samples.
    os.makedirs(data_dir, exist_ok=True)
    # Fetch all legacy data so that it is available by default
    for filename in legacy_registry:
        _fetch(filename)


if HAS_POOCH:
    _init_pooch()


def _load_svs(filename: str) -> Tuple[openslide.OpenSlide, str]:
    """Load an image file located in the data directory.

    Parameters
    ----------
    filename : str
        Name of the file in the histolab repository

    Returns
    -------
    slide : openslide.OpenSlide
        An OpenSlide object representing a whole-slide image.
    path : str
        Path where the slide is saved

    Raises
    ------
    OpenSlideError:
        OpenSlide cannot open the given input
    """
    try:
        svs = openslide.open_slide(_fetch(filename))
    except PIL.UnidentifiedImageError:
        raise PIL.UnidentifiedImageError(
            "Your wsi has something broken inside, a doctor is needed"
        )
    # NOTE(review): _fetch is called a second time here; after the call above
    # it should hit the local-cache fast path, but the duplication looks
    # unintentional - confirm before refactoring.
    return svs, _fetch(filename)


def aorta_tissue() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """aorta_tissue() -> Tuple[openslide.OpenSlide, str]

    Aorta tissue, brightfield, JPEG 2000, YCbCr

    This image is available here
    http://openslide.cs.cmu.edu/download/openslide-testdata/Aperio/

    Free to use and distribute, with or without modification

    Returns
    -------
    aorta_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of aortic tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("aperio/JP2K-33003-1.svs")


def breast_tissue() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """breast_tissue() -> Tuple[openslide.OpenSlide, str]

    Breast tissue, TCGA-BRCA dataset.

    This image is available here
    https://portal.gdc.cancer.gov/files/9c960533-2e58-4e54-97b2-8454dfb4b8c8
    or through the API
    https://api.gdc.cancer.gov/data/9c960533-2e58-4e54-97b2-8454dfb4b8c8

    Access: open

    Returns
    -------
    breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/breast/9c960533-2e58-4e54-97b2-8454dfb4b8c8")


def breast_tissue_diagnostic_green_pen() -> Tuple[
    openslide.OpenSlide, str
]:  # pragma: no cover
    """breast_tissue_diagnostic_green_pen() -> Tuple[openslide.OpenSlide, str]

    Breast tissue, TCGA-BRCA dataset. Diagnostic slide with green pen marks.

    This image is available here
    https://portal.gdc.cancer.gov/files/da36d3aa-9b19-492a-af4f-cc028a926d96
    or through the API
    https://api.gdc.cancer.gov/data/da36d3aa-9b19-492a-af4f-cc028a926d96

    Access: open

    Returns
    -------
    breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with green pen marks.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/breast/da36d3aa-9b19-492a-af4f-cc028a926d96")


def breast_tissue_diagnostic_red_pen() -> Tuple[
    openslide.OpenSlide, str
]:  # pragma: no cover
    """breast_tissue_diagnostic_red_pen() -> Tuple[openslide.OpenSlide, str]

    Breast tissue, TCGA-BRCA dataset. Diagnostic slide with red pen marks.

    This image is available here
    https://portal.gdc.cancer.gov/files/f8b4cee6-9149-45b4-ae53-82b0547e1e34
    or through the API
    https://api.gdc.cancer.gov/data/f8b4cee6-9149-45b4-ae53-82b0547e1e34

    Access: open

    Returns
    -------
    breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with red pen marks.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/breast/f8b4cee6-9149-45b4-ae53-82b0547e1e34")


def breast_tissue_diagnostic_black_pen() -> Tuple[
    openslide.OpenSlide, str
]:  # pragma: no cover
    """breast_tissue_diagnostic_black_pen() -> Tuple[openslide.OpenSlide, str]

    Breast tissue, TCGA-BRCA dataset. Diagnostic slide with black pen marks.

    This image is available here
    https://portal.gdc.cancer.gov/files/31e248bf-ee24-4d18-bccb-47046fccb461
    or through the API
    https://api.gdc.cancer.gov/data/31e248bf-ee24-4d18-bccb-47046fccb461

    Access: open

    Returns
    -------
    breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with green black marks.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/breast/31e248bf-ee24-4d18-bccb-47046fccb461")


def cmu_small_region() -> Tuple[openslide.OpenSlide, str]:
    """cmu_small_region() -> Tuple[openslide.OpenSlide, str]

    Carnegie Mellon University MRXS sample tissue

    This image is available here
    http://openslide.cs.cmu.edu/download/openslide-testdata/Aperio/

    Licensed under a CC0 1.0 Universal (CC0 1.0) Public Domain Dedication.

    Returns
    -------
    cmu_mrxs_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of small tissue region.
    path : str
        Path where the slide is saved
    """
    return _load_svs("data/cmu_small_region.svs")


def heart_tissue() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """heart_tissue() -> Tuple[openslide.OpenSlide, str]

    Heart tissue, brightfield, JPEG 2000, YCbCr

    This image is available here
    http://openslide.cs.cmu.edu/download/openslide-testdata/Aperio/

    Free to use and distribute, with or without modification

    Returns
    -------
    heart_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of heart tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("aperio/JP2K-33003-2.svs")


def ihc_breast() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """ihc_breast() -> Tuple[openslide.OpenSlide, str]

    Breast cancer resection, staining CD3 (brown) and CD20 (red).

    This image is available here
    https://idr.openmicroscopy.org/ under accession number idr0073, ID
    `breastCancer12`.

    Returns
    -------
    ihc_breast : openslide.OpenSlide
        IHC-stained Whole-Slide-Image of Breast tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("9798433/?format=tif")


def ihc_kidney() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """ihc_kidney() -> Tuple[openslide.OpenSlide, str]

    Renal allograft, staining CD3 (brown) and CD20 (red).

    This image is available here
    https://idr.openmicroscopy.org/ under accession number idr0073, ID
    `kidney_46_4`.

    Returns
    -------
    ihc_kidney : openslide.OpenSlide
        IHC-stained Whole-Slide-Image of kidney tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("9798554/?format=tif")


def ovarian_tissue() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """ovarian_tissue() -> Tuple[openslide.OpenSlide, str]

    tissue of Ovarian Serous Cystadenocarcinoma, TCGA-OV dataset.

    This image is available here
    https://portal.gdc.cancer.gov/files/b777ec99-2811-4aa4-9568-13f68e380c86
    or through the API
    https://api.gdc.cancer.gov/data/b777ec99-2811-4aa4-9568-13f68e380c86

    Access: open

    Returns
    -------
    prostate_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of ovarian tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/ovarian/b777ec99-2811-4aa4-9568-13f68e380c86")


def prostate_tissue() -> Tuple[openslide.OpenSlide, str]:  # pragma: no cover
    """prostate_tissue() -> Tuple[openslide.OpenSlide, str]

    tissue of Prostate Adenocarcinoma, TCGA-PRAD dataset.

    This image is available here
    https://portal.gdc.cancer.gov/files/6b725022-f1d5-4672-8c6c-de8140345210
    or through the API
    https://api.gdc.cancer.gov/data/6b725022-f1d5-4672-8c6c-de8140345210

    Access: open

    Returns
    -------
    prostate_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of prostate tissue.
    path : str
        Path where the slide is saved
    """
    return _load_svs("tcga/prostate/6b725022-f1d5-4672-8c6c-de8140345210")
nsmdgr/histolab
src/histolab/data/__init__.py
__init__.py
py
14,730
python
en
code
0
github-code
13
10945451428
import sys from unittest.mock import patch import pytest from importlib import reload import pynamodb.settings @pytest.mark.parametrize('settings_str', [ "session_cls = object()", "request_timeout_seconds = 5", ]) def test_override_old_attributes(settings_str, tmpdir): custom_settings = tmpdir.join("pynamodb_settings.py") custom_settings.write(settings_str) with patch.dict('os.environ', {'PYNAMODB_CONFIG': str(custom_settings)}): with pytest.warns(UserWarning) as warns: reload(pynamodb.settings) assert len(warns) == 1 assert 'options are no longer supported' in str(warns[0].message)
pynamodb/PynamoDB
tests/test_settings.py
test_settings.py
py
643
python
en
code
2,311
github-code
13
21540971462
""" @author Mrinal Pandey @date: 12th September, 2019 @day_time Thursday 19:38 """ def displayData(a, n): for i in range(n): print (a[i], end = '\t') print() def bubbleSort(a, n): for i in range(n - 1): for j in range(n - 1 - i): if a[j] > a[j + 1]: a[j], a[j + 1] = a[j + 1], a[j] a = [20, 15, 10, 5, 0] #a = [10, 2, 15, 19, 17] #a = [8, 6, 9, 14, 22] #a = [2, 4, 6, 8, 10] print ("\nArray before sorting:") n = len(a) displayData(a, n) bubbleSort(a, n) print ("\nArray after sorting") displayData(a, n) print()
mrinal-pandey/DSC-Codicon
bubble_sort.py
bubble_sort.py
py
574
python
en
code
0
github-code
13
31723826391
#!/usr/bin/env python # -*- coding: UTF-8 -*- import pika import random import re class Client(object): def __init__(self): self.host_ip= [] self.cmd = "" credentials = pika.PlainCredentials('sam', 'sam') self.connection = pika.BlockingConnection(pika.ConnectionParameters('10.100.203.154', 5672, '/', credentials)) self.channel = self.connection.channel() self.channel.exchange_declare(exchange="node_topic", exchange_type="topic") def send_message(self): task_id = str(random.randrange(1000, 2000)) print("task id: %s"%task_id) # print("cmd", self.cmd) for routing_key in self.host_ip: # print("ip", routing_key) self.channel.basic_publish(exchange='node_topic', routing_key=routing_key, body=self.cmd, properties=pika.BasicProperties( reply_to=task_id, correlation_id=routing_key ) ) def get_result(self, task_id): self.channel.queue_declare(queue=task_id) self.channel.basic_consume(self.handle_response, queue=task_id) for i in range(len(self.host_ip)): self.connection.process_data_events() def handle_response(self, ch, method, props, body): print("Recv: %s, %s"%(props.correlation_id, body)) ch.basic_ack(delivery_tag=method.delivery_tag) def interactive(self): while True: user_str = raw_input(">>:").strip() if user_str.startswith("run"): if re.match("run.+--hosts.+", user_str) is None: continue self.cmd = re.search("\".+\"", user_str).group().strip("\"") self.host_ip = re.findall("\d+\.\d+\.\d+\.\d+", user_str) # print("host_ip", self.host_ip) self.send_message() continue elif user_str.startswith("check_task"): task_id = re.search("\d+", user_str).group() self.get_result(task_id) else: continue if __name__ == '__main__': # mingling = 'run "df -h" --hosts 192.168.3.55 10.4.3.4' # print(re.match("run.+--hosts.+", mingling)) # print(re.search("\".+\"", mingling).group().strip("\"")) # print(re.findall("\d+\.\d+\.\d+\.\d+", mingling)) Client().interactive()
gaoshao52/pythonProject
基于RabbitMQrpc实现的主机管理/client.py
client.py
py
2,649
python
en
code
0
github-code
13
29860493087
import contextlib
import io
import tempfile
import unittest
from pathlib import Path
from unittest import mock

import termstyle

from intelmq.bin import intelmqdump
from intelmq.lib.test import skip_installation


class TestCompleter(unittest.TestCase):
    """
    A TestCase for Completer.
    """

    def test_simple(self):
        # Plain prefix completion over a fixed word list.
        comp = intelmqdump.Completer(['foo', 'foobar', 'else'])
        self.assertEqual(comp.complete('', 0), 'else')
        self.assertEqual(comp.complete('', 2), 'foobar')
        self.assertEqual(comp.complete('f', 0), 'foo')
        self.assertEqual(comp.complete('f', 1), 'foobar')
        self.assertEqual(comp.complete('a', 0), None)

    def test_queues(self):
        # Commands 'r <n> ' and 'a ' additionally complete queue names.
        comp = intelmqdump.Completer(['r ', 'a '], queues={'some-parser-queue', 'some-expert-queue'})
        self.assertEqual(comp.complete('r ', 0), 'r ')
        self.assertEqual(comp.complete('r 1 ', 0), 'r 1 some-expert-queue')
        self.assertEqual(comp.complete('r 1 ', 1), 'r 1 some-parser-queue')
        self.assertEqual(comp.complete('r 1 ', 2), None)
        self.assertEqual(comp.complete('r 2', 0), None)
        self.assertEqual(comp.complete('a ', 0), 'a some-expert-queue')
        self.assertEqual(comp.complete('a ', 2), None)
        self.assertEqual(comp.complete('r 34 some-p', 0), 'r 34 some-parser-queue')
        self.assertEqual(comp.complete('a some-e', 0), 'a some-expert-queue')


class TestIntelMQDump(unittest.TestCase):
    def setUp(self) -> None:
        super().setUp()
        # Per-test scratch directory standing in for the IntelMQ logging path.
        self.tmp_log_dir = tempfile.TemporaryDirectory()
        # Mutable config dicts; tests assign to these and the mocked config
        # loaders below read them lazily (side_effect closures over self).
        self.global_config = {}
        self.runtime_config = {}
        self.bot_configs = {}

        # Patch the config accessors used by intelmqdump itself...
        self.config_patcher = mock.patch.multiple(
            intelmqdump.utils,
            get_global_settings=mock.Mock(side_effect=self._mocked_global_config),
            get_runtime=mock.Mock(side_effect=self._mocked_runtime_config),
            get_bots_settings=mock.Mock(side_effect=self._mocked_bots_config))
        self.config_patcher.start()

        # ...and the loader used indirectly through intelmqctl.
        self.ctl_config_patcher = mock.patch.multiple(
            intelmqdump.intelmqctl.utils,
            load_configuration=mock.Mock(side_effect=self._mocked_runtime_config))
        self.ctl_config_patcher.start()

        # Coloring output makes asserts unnecessary complicated
        termstyle.disable()

    def _mocked_global_config(self):
        return self.global_config

    def _mocked_runtime_config(self, *args):
        return {"global": self.global_config, **self.runtime_config}

    def _mocked_bots_config(self, bot_id):
        return self.bot_configs[bot_id]

    def _prepare_empty_dump(self, filename: str):
        # Create an empty .dump file for the tests to discover.
        # NOTE(review): the `filename` argument is ignored - the path is
        # hard-coded to '(unknown).dump'; it likely should be
        # f"{self.tmp_log_dir.name}/{filename}.dump". Confirm against the
        # upstream repository before relying on these tests.
        path = Path(f"{self.tmp_log_dir.name}/(unknown).dump")
        path.parent.mkdir(parents=True, exist_ok=True)
        Path(path).touch()

    def tearDown(self) -> None:
        # Undo patches, remove scratch files, and restore colored output.
        self.ctl_config_patcher.stop()
        self.config_patcher.stop()
        self.tmp_log_dir.cleanup()
        termstyle.auto()
        return super().tearDown()

    def _run_main(self, argv: list) -> list:
        """Helper for running intelmqdump.main and capturing output.

        Returns stdout split into lines; SystemExit from main is swallowed.
        """
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            with contextlib.suppress(SystemExit):
                intelmqdump.main(argv)
        return output.getvalue().split("\n")

    @skip_installation()
    @mock.patch.object(intelmqdump, "input", return_value='q')
    def test_list_dumps_for_all_bots_from_default_log_path(self, _):
        self._prepare_empty_dump('test-1')
        self._prepare_empty_dump('test-2')

        with mock.patch.object(intelmqdump, "DEFAULT_LOGGING_PATH", self.tmp_log_dir.name):
            output = self._run_main([])

        self.assertIn("0: test-1 empty file", output[1])
        self.assertIn("1: test-2 empty file", output[2])

    @skip_installation()
    @mock.patch.object(intelmqdump, "input", return_value='q')
    def test_list_dumps_for_all_bots_from_custom_locations(self, _):
        # Each bot gets its own logging_path; dumps must be found in both.
        self.global_config = {"logging_path": self.tmp_log_dir.name}
        self._prepare_empty_dump('bot-1/test-1')
        self._prepare_empty_dump('bot-2/test-2')
        self.runtime_config = {
            "bot-1": {
                "parameters": {
                    "logging_path": f"{self.tmp_log_dir.name}/bot-1"
                }
            },
            "bot-2": {
                "parameters": {
                    "logging_path": f"{self.tmp_log_dir.name}/bot-2"
                }
            }
        }

        output = self._run_main([])
        self.assertIn("0: test-1 empty file", output[1])
        self.assertIn("1: test-2 empty file", output[2])

    @skip_installation()
    @mock.patch.object(intelmqdump, "input")
    def test_list_and_select_dump_from_global_location(self, input_mock):
        self._prepare_empty_dump('test-1')
        self.global_config = {"logging_path": self.tmp_log_dir.name}

        # Selecting by index and by filename must behave the same.
        for selector in ['0', 'test-1']:
            with self.subTest(selector):
                input_mock.side_effect = [selector, 'q']
                output = self._run_main([])
                self.assertIn("0: test-1 empty file", output[1])
                # Enough to check that the correct file path was used
                self.assertIn("Processing test-1: empty file", output[2])

    @skip_installation()
    @mock.patch.object(intelmqdump, "input")
    def test_list_and_select_dump_from_custom_location(self, input_mock):
        self.global_config = {"logging_path": self.tmp_log_dir.name}
        self._prepare_empty_dump('/bot-1/test-1')
        self.runtime_config = {
            "bot-1": {
                "parameters": {
                    "logging_path": f"{self.tmp_log_dir.name}/bot-1"
                }
            },
        }

        for selector in ['0', 'test-1']:
            with self.subTest(selector):
                input_mock.side_effect = [selector, 'q']
                output = self._run_main([])
                self.assertIn("0: test-1 empty file", output[1])
                # Enough to check that the correct file path was used
                self.assertIn("Processing test-1: empty file", output[2])

    @skip_installation()
    @mock.patch.object(intelmqdump, "input")
    def test_selecting_dump_warns_when_filename_is_ambiguous(self, input_mock):
        """With different locations used, there could be a case of dumps
        with the same filename. Then, if user tried to select using filename,
        warn and exit.

        Selecting using numbers should be supported"""
        self._prepare_empty_dump('test-1')
        self._prepare_empty_dump('bot-1/test-1')
        self.global_config = {"logging_path": self.tmp_log_dir.name}
        self.runtime_config = {
            "bot-1": {
                "parameters": {
                    "logging_path": f"{self.tmp_log_dir.name}/bot-1"
                }
            },
        }

        with self.subTest("warn on ambiguous filename"):
            input_mock.side_effect = ['test-1']
            output = self._run_main([])
            self.assertIn("0: test-1 empty file", output[1])
            self.assertIn("1: test-1 empty file", output[2])
            self.assertIn("Given filename is not unique, please use number", output[3])

        with self.subTest("allow selecting using number"):
            input_mock.side_effect = ['1', 'q']
            output = self._run_main([])
            self.assertIn("Processing test-1: empty file", output[3])

    @skip_installation()
    @mock.patch.object(intelmqdump, "input", return_value='q')
    def test_get_dump_for_one_bot(self, _):
        # Passing a bot id on the command line restricts dumps to that bot.
        self._prepare_empty_dump("bot/bot-1")
        self.global_config = {"logging_path": self.tmp_log_dir.name}
        self.bot_configs = {"bot-1": {"parameters": {"logging_path": f"{self.tmp_log_dir.name}/bot"}}}

        output = self._run_main(["bot-1"])
        self.assertIn("Processing bot-1: empty file", output[0])


if __name__ == '__main__':  # pragma: no cover
    unittest.main()
certtools/intelmq
intelmq/tests/bin/test_intelmqdump.py
test_intelmqdump.py
py
8,052
python
en
code
856
github-code
13
30177703264
class TreeNode:
    """A binary-search-tree node holding a value and child/parent links."""

    def __init__(self, data):
        self.value = data
        self.left = None
        self.right = None
        self.parent = None

    def __repr__(self):
        return repr(self.value)

    def add_left(self, node):
        """Attach ``node`` as the left child and point it back at this node."""
        self.left = node
        if node is not None:
            # Fixed: the child's parent pointer must reference this node;
            # previously this overwrote self.parent with self.
            node.parent = self

    def add_right(self, node):
        """Attach ``node`` as the right child and point it back at this node."""
        # Fixed: previously assigned to self.left, corrupting the tree.
        self.right = node
        if node is not None:
            node.parent = self


def BST_Insert(root, node):
    """Insert ``node`` into the BST rooted at ``root`` and return the root.

    Values smaller than a node's value go to its left subtree; equal or
    larger values go to the right.
    """
    last_node = None
    current_node = root
    # Walk down until we fall off the tree; last_node ends up being the
    # node whose child slot the new node occupies.
    while current_node is not None:
        last_node = current_node
        if current_node.value > node.value:
            current_node = current_node.left
        else:
            current_node = current_node.right
    if last_node is None:
        # Empty tree: the new node becomes the root.
        root = node
    elif last_node.value > node.value:
        last_node.add_left(node)
    else:
        last_node.add_right(node)
    return root


def BST_Create():
    """Build and return a sample BST rooted at 10."""
    root = TreeNode(10)
    for item in [17, 5, 3, 7, 12, 19, 1, 4]:
        root = BST_Insert(root, TreeNode(item))
    return root


if __name__ == "__main__":
    info = BST_Create()
    print(info)
faysalf/DSA_by_Python
Data-Structure-main/Tree/Binary search Tree.py
Binary search Tree.py
py
1,589
python
en
code
0
github-code
13
39983132556
import requests
import json

# PIOPIY REST endpoint used for call-control actions.
host = "https://piopiy.telecmi.com/v1/call/action"


class Hangup:
    """Issue a hangup action for a live call via the PIOPIY REST API."""

    def __init__(self, appid, secret):
        self.appid = appid
        self.secret = secret

    def hangup(self, uuid):
        """Hang up the call identified by ``uuid``.

        Parameters
        ----------
        uuid : str
            The call UUID (sent as ``cmiuuid``) of the call to terminate.

        Returns
        -------
        str
            Raw response body returned by the API.

        Raises
        ------
        NameError
            If ``uuid`` is not a string. (Kept as NameError for backward
            compatibility with existing callers; TypeError would be the
            conventional choice.)
        """
        # Guard clause: validate input before building the request.
        if not isinstance(uuid, str):
            # Fixed message: this action hangs up a call, it does not make one.
            raise NameError('invalid argument type to hangup call')
        data = {'appid': self.appid, 'secret': self.secret,
                'cmiuuid': uuid, 'action': 'hangup'}
        headers = {'content-type': 'application/json'}
        return requests.post(host, data=json.dumps(data), headers=headers).text
telecmi/piopiy_python
src/piopiy/hangup.py
hangup.py
py
601
python
en
code
2
github-code
13