seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
42843816488 | import pytest
import logging
import json
from work_order_tests.work_order_tests import work_order_get_result_params, \
work_order_request_params
from automation_framework.work_order_submit.work_order_submit_utility \
import verify_work_order_signature, decrypt_work_order_response
from automation_framework.utilities.request_args import TestStep
from automation_framework.utilities.workflow import validate_response_code
logger = logging.getLogger(__name__)
def test_work_order_both_in_out_Data_DataEncryptionKey_null_echo(setup_config):
    """Submit a work order whose inData/outData dataEncryptionKey fields are
    null and verify the echo workload still completes successfully."""
    # JSON input file driving this scenario.
    request = ('work_order_tests/input'
               '/work_order_both_in_out_Data_EncryptionKey_null_echo.json')
    work_order_response, generic_params = work_order_request_params(
        setup_config, request)
    err_cd, work_order_get_result_response = work_order_get_result_params(
        work_order_response[:2], generic_params)
    # The signed response must verify against the worker key.
    signature_status = verify_work_order_signature(
        work_order_get_result_response, generic_params[0])
    assert signature_status is TestStep.SUCCESS.value
    # Decrypting outData with the session key/IV must succeed.
    decrypt_status = decrypt_work_order_response(
        work_order_get_result_response,
        work_order_response[3],
        work_order_response[4])[0]
    assert decrypt_status is TestStep.SUCCESS.value
    # WorkOrderGetResult API response validation with key parameters.
    assert validate_response_code(
        work_order_get_result_response) is TestStep.SUCCESS.value
def test_work_order_with_empty_indata_outdata(setup_config):
    """Submit a work order whose inData and outData arrays are empty and
    validate the WorkOrderGetResult response."""
    # JSON input file driving this scenario.
    request = ('work_order_tests/input'
               '/work_order_with_empty_indata_outdata.json')
    work_order_response, generic_params = work_order_request_params(
        setup_config, request)
    err_cd, work_order_get_result_response = work_order_get_result_params(
        work_order_response[:6], generic_params)
    # WorkOrderGetResult API response validation with key parameters.
    assert validate_response_code(
        work_order_get_result_response) is TestStep.SUCCESS.value
| manojsalunke85/avalon0.6_automaiton | tests/validation_suite/work_order_tests/get/test_work_order_submit_get_outData.py | test_work_order_submit_get_outData.py | py | 2,427 | python | en | code | 0 | github-code | 36 |
15057824639 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import home_logs.utils.unique
class Migration(migrations.Migration):
    """Initial schema for the home_logs property app.

    Creates House, Space, Sensor plus their SpaceKind/SensorKind lookup
    tables, then wires the FK/M2M relations between them.  Do not edit the
    operation order: the AddField operations depend on the CreateModels.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='House',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): bytes defaults (b'MyHome' etc.) under
                # unicode_literals look like a Python-2-era artifact — confirm.
                ('name', models.CharField(default=b'MyHome', max_length=50)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # Public identifier generated by home_logs.utils.unique.get.
                ('uuid', models.CharField(default=home_logs.utils.unique.get, editable=False, max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Sensor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=50, null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='SensorKind',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Space',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=b'MyRoom', max_length=50)),
                ('uuid', models.CharField(default=home_logs.utils.unique.get, editable=False, max_length=50, unique=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # Room dimensions in meters (see help_text).
                ('x_length', models.DecimalField(decimal_places=2, default=0, help_text=b'Using meters', max_digits=6)),
                ('y_length', models.DecimalField(decimal_places=2, default=0, help_text=b'Using meters', max_digits=6)),
            ],
        ),
        migrations.CreateModel(
            name='SpaceKind',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # Relations added after all models exist.
        migrations.AddField(
            model_name='space',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.SpaceKind'),
        ),
        migrations.AddField(
            model_name='space',
            name='sensors',
            field=models.ManyToManyField(blank=True, related_name='spaces', to='property.Sensor'),
        ),
        migrations.AddField(
            model_name='sensor',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.SensorKind'),
        ),
        migrations.AddField(
            model_name='house',
            name='spaces',
            field=models.ManyToManyField(blank=True, to='property.Space'),
        ),
    ]
| tsaklidis/LogingAPI | home_logs/property/migrations/0001_initial.py | 0001_initial.py | py | 3,232 | python | en | code | 8 | github-code | 36 |
37278669262 | from typing import cast
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as EC, wait
from selenium.webdriver.common.by import By
from chessEngine import Chess
from move import Move
import asyncio
import re
import time
class GetBoard():
    """Scrapes the chess board state from a lichess-style page via Selenium
    and converts it into a FEN-like string for the engine.

    Castling rights are tracked as a 4-element list ['K', 'Q', 'k', 'q']
    (white kingside, white queenside, black kingside, black queenside);
    a spent right is replaced by the empty string.
    """

    # Pixel size of one board square; derived from the first piece element.
    squareLength = None
    # Board rows as 8 strings of 8 chars ('1' = empty square, else a piece
    # letter in FEN convention: upper case = white, lower case = black).
    boardAsList = []
    movesDone = 0
    castleBlack = 'kq'
    castleWhite = 'KQ'
    #castling = 'KQkq'
    castleMap = {'a1' : True, 'e1' : True, 'h1' : True, 'h8' : True, 'e8' : True, 'a8' : True}

    def getBoard(self, driver, castling, white):
        """Read every <piece> element from the page and return the position
        as a FEN-like board string (board + side to move + castling)."""
        self.clearBoardList()
        options = {
            "black rook" : 'r', "black knight" : 'n', "black bishop" : 'b', "black queen" : 'q', "black king" : 'k', "black pawn" : 'p', "white rook" : 'R', "white knight" : 'N', "white bishop" : 'B', "white queen" : 'Q', "white king" : 'K', "white pawn" : 'P'
        }
        pieces = driver.find_elements_by_tag_name('piece')
        self.getSquareLength(pieces)
        for piece in pieces:
            c = piece.get_attribute("class")
            name = options.get(c)
            if (name != None):
                # Map the CSS pixel transform to board coordinates and drop
                # the piece letter into the corresponding row string.
                style = piece.get_attribute('style')
                transform = self.getPositonOfPiece(style)
                line = self.boardAsList[transform[1]]
                lineList = list(line)
                lineList[transform[0]] = name
                line = ''
                for l in lineList: line += l
                self.boardAsList[transform[1]] = line
        boardString = self.boardListToString(white, castling)
        return boardString

    def boardListToString(self, white, castling):
        """Collapse runs of empty squares to digits and assemble the final
        FEN-like string '<board> <w|b> <castling>'."""
        boardString = ''
        newList = []
        for b in self.boardAsList:
            s = ''
            i = 0
            add = 0
            isOne = False
            oldList = list(b)
            for c in oldList:
                i += 1
                if c == '1':
                    # Count consecutive empty squares; flush at row end.
                    isOne = True
                    add = add + 1
                    if i == 8:
                        s += (str(add))
                else:
                    if isOne == True:
                        s += (str(add))
                        add = 0
                        isOne = False
                    s += c
            if self.boardAsList.index(b) != 7:
                s += '/'
            newList.append(s)
        for n in newList: boardString += n
        if white == True: move = 'w'
        else:
            move = 'b'
        # The page lists ranks bottom-up, so reverse for FEN orientation.
        boardString = boardString[::-1]
        boardString += ' ' + move
        boardString += ' ' + str(castling)
        return boardString

    def checkCastle(self, driver, nextMove, castleList):
        """Update castling rights after `nextMove` (UCI-style, e.g. 'e2e4').

        castleList is ['K', 'Q', 'k', 'q']; spent rights become ''.
        """
        moveList = driver.find_elements_by_tag_name('u8t')
        for move in moveList:
            # A castling move permanently spends that side's rights.
            if move.text == 'O-O-O' or move.text == 'O-O':
                i = moveList.index(move)
                if (i + 1) % 2 == 0:
                    # Even move number: black castled.
                    castleList[2] = ""
                    castleList[3] = ""
                else:
                    castleList[0] = ""
                    castleList[1] = ""
        # A rook leaving its home square loses the right on that side.
        if nextMove[:2] == 'a1':
            castleList[1] = ''
        if nextMove[:2] == 'h1':
            castleList[0] = ''
        if nextMove[:2] == 'a8':
            castleList[3] = ''
        if nextMove[:2] == 'h8':
            castleList[2] = ''
        # A king move loses BOTH rights for that colour.
        # BUG FIX: both branches previously cleared [1] and [2], mixing
        # white-queenside with black-kingside rights.
        if nextMove[:2] == 'e1':
            castleList[0] = ''
            castleList[1] = ''
        if nextMove[:2] == 'e8':
            castleList[2] = ''
            castleList[3] = ''
        return castleList

    def listener(self):
        """Poll the page's move list and trigger a move when it is our turn.

        NOTE(review): self.driver, self.white and self.startMove() are not
        defined in this class, and self.getBoard() is called without its
        required arguments — presumably supplied by a subclass or a later
        edit; confirm before relying on this method.
        """
        while True:
            movesCount = len(self.driver.find_elements_by_tag_name('u8t'))
            move = False
            if self.white == True:
                if movesCount % 2 == 0:
                    if movesCount == 0 or self.movesDone < movesCount:
                        print('white and move!')
                        self.getBoard()
                        move = self.startMove()
            else:
                if movesCount % 2 != 0:
                    if self.movesDone < movesCount:
                        print('black and move!')
                        self.getBoard()
                        move = self.startMove()
            if move == True:
                self.movesDone = movesCount
            time.sleep(0.5)

    def getPositonOfPiece(self, style):
        """Convert the piece's CSS transform (pixel offsets) into board
        coordinates [file, rank] using squareLength."""
        a = re.findall(r'\d+', style)
        transform = []
        for t in a:
            transform.append(int(int(t) / self.squareLength))
        return transform

    def getSquareLength(self, pieces):
        """Derive the pixel length of one square from the first piece."""
        if len(pieces) > 0:
            piece = pieces[0]
            self.squareLength = (piece.size).get('height')

    def clearBoardList(self):
        """Reset the board to 8 rows of empty squares ('11111111')."""
        self.boardAsList.clear()
        empty = "11111111"
        for i in range(8):
            self.boardAsList.append(empty)
        return
70876552105 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 13 09:12:14 2022
@author: Santiago Pinzon-Cortes
@contact: sanpinzoncor@unal.edu.co
"""
"""
Functions modified by functions made by Natalia Gomez-Perez (BGS) ngp@nerc.ac.uk
The functions are readers for data from WDC database and INTERMAGNET database
"""
# Libraries
import pandas as pd
import numpy as np
import os
from datetime import datetime
#from dateutil.rrule import rrule, MONTHLY
from dateutil import rrule
# Clean data
def clean_data(Bh):
    """Return the indices i where the step Bh[i+1]-Bh[i] is non-zero and
    below 250 in magnitude (i.e. a plausible hourly variation)."""
    steps = np.diff(Bh)
    kept = []
    for idx, delta in enumerate(steps):
        if 0 < abs(delta) < 250:
            kept.append(idx)
    return kept
#WDC data reader
def ReadYear_OBS(ye,obs):
    """Read one year of hourly WDC data for observatory `obs`.

    Parameters
    ----------
    ye : int
        Year to read (selects the 19xx/20xx century mapping below).
    obs : str
        Observatory IAGA code.

    Returns
    -------
    pandas.DataFrame or None
        Frame indexed by timestamp with raw `H` and cleaned `Hc` columns;
        returns None implicitly when the .wdc file does not exist.
    """
    # Build the list of hourly timestamps covering the whole year.
    datei = datetime(ye,1,1)
    datef = datetime(ye+1,1,1)
    datea = []
    for i in rrule.rrule(rrule.HOURLY, dtstart=datei, until=datef):
        a = datea.append(i)  # NOTE: append returns None; `a` is unused
    datea = datea[:-1]  # drop Jan 1 of the next year (rrule includes `until`)
    # Hard-coded local data location — TODO parameterise.
    wdc_hourpath = '/Users/santiagopinzon/Mac/articles/Dst_proxies/DATA/WDC/'
    filepath = os.path.join(os.path.dirname(wdc_hourpath),'wdc_'+obs.upper())
    filename = obs.lower()+str(ye)+'.wdc'
    full = os.path.join(filepath,filename)
    if os.path.exists(full):
        #number of chars on data from WDC:
        nc=4
        with open(full,'r') as csv_file:
            df1 = pd.read_csv(csv_file,names=['data'])
        # Two-digit year in the record: map to 20xx from 2000, 19xx before.
        if ye>=2000:
            df1['YYYY'] = df1.data.str[3:5].astype(int)+2000
        else:
            df1['YYYY'] = df1.data.str[3:5].astype(int)+1900
        # Fixed-width fields of the WDC hourly record (assumed layout —
        # TODO confirm against the WDC exchange-format description).
        df1['MM'] = df1.data.str[5:7].astype(int)
        df1['DD'] = df1.data.str[8:10].astype(int)
        df1['Component'] = df1.data.str[7].astype(str)
        df1['Val'] = df1.data.str[16:21].astype(str)
        df1['Vdata'] = df1.data.str[16:].astype(str)
        df1['list'] = None
        df1['length'] = df1['Vdata'].apply(len)
        #print(df.length[0])
        X = []
        Y = []
        H = []
        #F = []
        #if df1.Component[0]=='X':
        for j in range(len(df1.Vdata)):
            line = df1.Vdata[j]
            #df.list[j] = [line[i:i+nc] for i in range(0, len(line), nc)]
            # Split the record into 4-char values: the first is the tabular
            # base, the following entries the hourly offsets (last dropped).
            l = np.array([int(line[i:i+nc]) for i in range(0, len(line), nc)])
            element = df1.Component[j]
            tabB = l[0]
            x = l[1:-1]
            #dfx = pd.DataFrame()
            #dfx2 = pd.DataFrame()
            # Absolute value = hourly offset + tabular base * 100.
            magnitude = x+(tabB*100)
            if element=='H':
                H.extend(magnitude)
            elif element=='X':
                X.extend(magnitude)
            elif element=='Y':
                Y.extend(magnitude)
            else: continue
        #print(H)
        H1 = np.array(H)
        X1 = np.array(X)
        Y1 = np.array(Y)
        # Reconstruct H from X/Y when H itself was not reported.
        H2 = np.sqrt((X1**2)+(Y1**2))
        #df1.insert(0,'t',[datetime(df.)])
        if len(H1)!=0:
            df = pd.DataFrame({'t':datea,'H':H1})
        else:
            df = pd.DataFrame({'t':datea,'H':H2})
        df.index = df.t
        df2 = df.copy()
        df2['Hc'] = df2['H'].values
        df2 = df2.drop(columns=['H'])
        # Keep only hours whose step to the next hour is 0 < |dH| < 250.
        cleanh = clean_data(np.asarray(df2['Hc']))
        df2 = df2.iloc[cleanh]
        dfc = pd.concat((df, df2), axis=1)
        dfc = dfc.drop(columns=['H','t'])
        return dfc
#Intermagnet Data Reader
def read_Intermagnet(day, obs):
    """
    Function to read Intermagnet data.
    Input:
    day: date (datetime.date/datetime) selecting the daily minute file
    obs: Observatory's IAGA code
    Output:
    df: Observatory's Dataframe indexed by DateTime, with missing values
        (99999.00 sentinel) masked to NaN and an H column derived from
        X/Y when the observatory reports XYZF.
    """
    import datetime as dt
    # Hard-coded local data location — TODO parameterise.
    IAGApath = ('/Users/santiagopinzon/Mac/articles/Dst_proxies/DATA/IAGA/')
    filepath = (os.path.join(os.path.dirname(IAGApath),day.strftime("%Y"),'IAGA_'+obs.upper()))
    filename=obs.lower()+day.strftime("%Y%m%ddmin.min")
    full=os.path.join(filepath,filename)
    """
    Review observatory format
    """
    # Locate the end of the IAGA-2002 header: data starts on the line after
    # the "value of horizontal intensity" comment.
    # NOTE(review): `ofile` is never closed — consider a `with` block.
    ofile = open(full)
    rfile = ofile.read()
    sfile = rfile.split('\n')
    hn = sfile.index(' # value of horizontal intensity. |')
    hn = hn+1
    #if obs.upper()=='HER':
    #    if day>dt.datetime(2013,12,31):
    #        hn=24
    #    else:
    #        hn=25
    #    else:
    #        if day>dt.datetime(2014,12,31):
    #            hn = 24
    #        else:
    #            hn=25
    # Combine the DATE and TIME columns into one DateTime index.
    my_parser = lambda x,y : dt.datetime.strptime(x+' '+y,"%Y-%m-%d %H:%M:%S.%f")
    df = pd.read_csv(full, sep='\s+',skiprows=hn,
                     #header=hn,
                     parse_dates={'DateTime':[0,1]},
                     date_parser=my_parser, index_col=0)
    # 99999.00 is the IAGA missing-value sentinel; mask it to NaN.
    df=df.where(df<99999.00)
    # Derive H from X/Y when the observatory reports XYZF instead of HDZF.
    # NOTE(review): the derived H is negated — presumably deliberate for
    # this analysis; confirm.
    if obs.upper()+'X' in df.columns:
        df[obs.upper()+'H']=df.apply(lambda row: -(np.sqrt(row[obs.upper()+'X']**2 + row[obs.upper()+'Y']**2)), axis=1)
    df = df.drop(columns='|')
    return df
| sanhbk/Dst-index-proxies-LDi | Readers.py | Readers.py | py | 5,270 | python | en | code | 0 | github-code | 36 |
8649453711 | """
============================
Author:柠檬班-木森
Time:2020/5/6 10:03
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
def work1():
    """Ketangpai login: fill in account and password, then submit."""
    driver = webdriver.Chrome()
    driver.implicitly_wait(30)
    driver.get('https://www.ketangpai.com/User/login.html')
    account_box = driver.find_element(By.XPATH, '//input[@placeholder="邮箱/账号/手机号"]')
    account_box.send_keys('11234')
    password_box = driver.find_element(By.CSS_SELECTOR, 'input[placeholder="密码"]')
    password_box.send_keys('123454321')
    login_link = driver.find_element(By.XPATH, '//a[text()="登录"]')
    login_link.click()
    time.sleep(5)
    driver.quit()
def work2():
    """Tencent support site: drill down through the WeChat help links."""
    driver = webdriver.Chrome()
    driver.get('https://kf.qq.com/product/weixin.html')
    waiter = WebDriverWait(driver, 10, 0.5)
    # Click each link in sequence, waiting for it to become clickable.
    for xpath in ("//a[text()='手机版微信']",
                  "//a[text()='微信群']",
                  "//a[text()='微信群创建及设置方法']"):
        waiter.until(EC.element_to_be_clickable((By.XPATH, xpath))).click()
    time.sleep(5)
    driver.quit()
def work3():
    """eLong hotel search: pick Changsha as destination, enter an area
    keyword, and launch the search."""
    driver = webdriver.Chrome()
    driver.implicitly_wait(30)
    driver.get('http://www.elong.com/')
    destination = driver.find_element(
        By.XPATH, "//div[@id='domesticDiv']//dl[dt/text()='目的地']//input")
    destination.clear()
    # Type the destination city (Changsha).
    destination.send_keys('长沙市')
    # Wait for the city suggestion list, then pick the first entry.
    suggestion = WebDriverWait(driver, 30, 0.5).until(
        EC.element_to_be_clickable((By.XPATH, '//ul[@method="cityData"]/li[@data=0]')))
    suggestion.click()
    # Enter the area / hotel-name keyword.
    keyword_box = driver.find_element(By.XPATH, '//input[@placeholder="如位置\酒店名\品牌"]')
    keyword_box.send_keys('麓谷')
    # Trigger the search.
    driver.find_element(By.XPATH, '//span[@data-bindid="search"]').click()
    time.sleep(5)
    driver.close()
    driver.quit()
def work4():
    """12306: search round-trip high-speed-rail tickets Changsha -> Shanghai."""
    driver = webdriver.Chrome()
    driver.implicitly_wait(30)
    driver.get('https://www.12306.cn/index/')
    waiter = WebDriverWait(driver, 10, 0.5)
    # Switch to the round-trip tab.
    waiter.until(EC.element_to_be_clickable(
        (By.XPATH, '//div[@class="search-tab-hd"]//a[text()="往返"]'))).click()
    # Departure station: type and pick the first suggestion.
    start_addr = driver.find_element(By.ID, "fromStationFanText")
    start_addr.click()
    start_addr.send_keys('长沙')
    driver.find_element(By.ID, "citem_0").click()
    # Arrival station: type and pick the first suggestion.
    driver.find_element(By.ID, "toStationFanText").send_keys('上海')
    driver.find_element(By.ID, "citem_0").click()
    # Restrict results to high-speed trains.
    waiter.until(EC.element_to_be_clickable(
        (By.XPATH, '//li[@id="isHigh"]/i'))).click()
    # Launch the search.
    driver.find_element(By.XPATH, '//a[@id="search_two"]').click()
    time.sleep(10)
    driver.quit()
if __name__ == '__main__':
    # Earlier exercises kept for reference; only the 12306 scenario runs.
    # work1()
    # work2()
    # work3()
    work4()
| huchaoyang1991/py27_web | web_06day(鼠标和下拉选择框)/task_05day.py | task_05day.py | py | 3,284 | python | en | code | 0 | github-code | 36 |
34972774203 |
import sys
import os
DATA_TYPES = ("str", "int")
DATA_TYPE_STR = ", ".join(DATA_TYPES)
HOME_DIR = os.path.expanduser("~").replace("\\", "/")
def explode(inPath, outPath):
    """Explode .tf file(s) at `inPath` into plain data files at `outPath`.

    `inPath` may be a single .tf file or a directory of them; `outPath`
    is the destination file or directory (created if missing).

    Returns
    -------
    str or bool
        An error message string on an early failure (bad paths, no .tf
        files); otherwise True when every file converted cleanly, False
        when at least one per-file error ('X ...') was reported.
    """
    inLoc = os.path.expanduser(inPath)
    outLoc = os.path.expanduser(outPath)
    if not os.path.exists(inLoc):
        return f"No such file or directory: `{inPath}`"
    isInDir = os.path.isdir(inLoc)
    outExists = os.path.exists(outLoc)
    isOutDir = os.path.isdir(outLoc) if outExists else None
    tasks = []
    if isInDir:
        # Directory mode: convert every regular .tf file it contains.
        with os.scandir(inLoc) as sd:
            tasks = [
                (f"{inLoc}/{e.name}", f"{outLoc}/{e.name}")
                for e in sd
                if e.name.endswith(".tf") and e.is_file()
            ]
        if not tasks:
            # BUG FIX: these three messages lacked the f-prefix, so the
            # {inPath}/{outPath} placeholders were emitted literally.
            return f"No .tf files in `{inPath}`"
        if outExists and not isOutDir:
            return f"Not a directory: `{outPath}`"
        if not outExists:
            os.makedirs(outLoc, exist_ok=True)
    else:
        # Single-file mode: resolve the output file location.
        if not os.path.isfile(inLoc):
            return f"Not a file: `{inPath}`"
        if outExists:
            if isOutDir:
                outFile = f"{outLoc}/{os.path.basename(inLoc)}"
            else:
                outFile = outLoc
        else:
            outDir = os.path.dirname(outLoc)
            if not os.path.exists(outDir):
                os.makedirs(outDir, exist_ok=True)
            outFile = outLoc
        tasks = [(inLoc, outFile)]
    msgs = []
    for (inFile, outFile) in sorted(tasks):
        result = _readTf(inFile)
        if type(result) is str:
            # _readTf returned an error message instead of parsed data.
            msgs.append(
                f"{unexpanduser(inFile)} => {unexpanduser(outFile)}:\n\t{result}"
            )
            continue
        _writeTf(outFile, *result)
    good = True
    if msgs:
        for msg in msgs:
            # Messages starting with 'X' are hard errors.
            thisGood = msg[0] != "X"
            (sys.stdout if thisGood else sys.stderr).write(f"{msg}\n")
            if not thisGood:
                good = False
    return good
def _readTf(path):
    """Parse the header of a .tf feature file and dispatch to the data parser.

    Returns an error string on failure ('!' prefix = benign skip, 'X' prefix
    = hard error), otherwise the (data, valueType, isEdge) tuple produced by
    _readDataTf.
    """
    fh = open(path, encoding="utf8")
    i = 0
    metaData = {}
    isEdge = False
    edgeValues = False
    error = None
    for line in fh:
        i += 1
        if i == 1:
            # The first line must declare the feature kind.
            text = line.rstrip()
            if text == "@edge":
                isEdge = True
            elif text == "@node":
                isEdge = False
            elif text == "@config":
                # Config features carry no data section: nothing to explode.
                error = "! This is a config feature. It has no data."
                fh.close()
                return error
            else:
                error = f"X Line {i}: missing @node/@edge/@config"
                fh.close()
                return error
            continue
        text = line.rstrip("\n")
        if len(text) and text[0] == "@":
            # Metadata line: either the bare @edgeValues flag or @key=value.
            if text == "@edgeValues":
                edgeValues = True
                continue
            fields = text[1:].split("=", 1)
            metaData[fields[0]] = fields[1] if len(fields) == 2 else None
            continue
        else:
            # An empty line terminates the metadata header; anything else
            # here is a format error.
            if text != "":
                error = f"X Line {i}: missing blank line after metadata"
                fh.close()
                return error
            else:
                break
    # @valueType is mandatory and must be one of DATA_TYPES ('str', 'int').
    typeKey = "valueType"
    if typeKey in metaData:
        valueType = metaData[typeKey]
        if valueType not in DATA_TYPES:
            error = (
                f'X Unknown @valueType: "{valueType}". Expected one of {DATA_TYPE_STR}'
            )
            fh.close()
            return error
    else:
        error = f"X Missing @valueType. Should be one of {DATA_TYPE_STR}"
        fh.close()
        return error
    # Hand the still-open handle (positioned at the data section) over to
    # the data parser; close it afterwards regardless of outcome.
    result = _readDataTf(fh, i, valueType, isEdge, edgeValues)
    fh.close()
    return result
def _readDataTf(fh, firstI, valueType, isEdge, edgeValues):
    """Parse the data section of a .tf file.

    Each line is tab-separated: node features are `[nodeSpec\\t]value`,
    edge features `[fromSpec\\t]toSpec[\\tvalue]`.  A missing node spec
    means "the node after the previous line's highest node" (implicit
    numbering).  Returns an error string, or (data, valueType, isEdge)
    where data maps node -> value or (from, to) -> value.
    """
    i = firstI
    implicit_node = 1
    data = {}
    # Maximum allowed fields per line: from/to/value for valued edges,
    # otherwise node/value (or from/to).
    normFields = 3 if isEdge and edgeValues else 2
    isNum = valueType == "int"
    for line in fh:
        i += 1
        fields = line.rstrip("\n").split("\t")
        lfields = len(fields)
        if lfields > normFields:
            return f"line {i}: {lfields} fields instead of {normFields}"
        if lfields == normFields:
            # Fully explicit line: first field is the node spec.
            nodes = _setFromSpec(fields[0])
            if isEdge:
                if fields[1] == "":
                    return f"line {i}: missing node for edge"
                nodes2 = _setFromSpec(fields[1])
            if not isEdge or edgeValues:
                valTf = fields[-1]
        else:
            # Short line: the from-node is implicit.
            if isEdge:
                if edgeValues:
                    if lfields == normFields - 1:
                        # toSpec + value present.
                        nodes = {implicit_node}
                        nodes2 = _setFromSpec(fields[0])
                        valTf = fields[-1]
                    elif lfields == normFields - 2:
                        # Only toSpec present; value defaults to empty.
                        nodes = {implicit_node}
                        if fields[0] == "":
                            return f"line {i}: missing node for edge"
                        nodes2 = _setFromSpec(fields[0])
                        valTf = ""
                    else:
                        nodes = {implicit_node}
                        valTf = ""
                        return f"line {i}: missing node for edge"
                else:
                    if lfields == normFields - 1:
                        nodes = {implicit_node}
                        if fields[0] == "":
                            return f"line {i}: missing node for edge"
                        nodes2 = _setFromSpec(fields[0])
                    else:
                        return f"line {i}: missing node for edge"
            else:
                # Node feature with implicit node; the single field (if any)
                # is the value.
                nodes = {implicit_node}
                if lfields == 1:
                    valTf = fields[0]
                else:
                    valTf = ""
        # Implicit numbering continues after the highest node on this line.
        implicit_node = max(nodes) + 1
        if not isEdge or edgeValues:
            # Decode the raw cell: ints parse directly (empty -> None);
            # strings are unescaped via _valueFromTf (empty stays empty).
            value = (
                int(valTf)
                if isNum and valTf != ""
                else None
                if isNum
                else ""
                if valTf == ""
                else _valueFromTf(valTf)
            )
        if isEdge:
            if not edgeValues:
                value = None
            for n in nodes:
                for m in nodes2:
                    data[(n, m)] = value
        else:
            for n in nodes:
                # None (missing int) entries are simply omitted.
                if value is not None:
                    data[n] = value
    return (data, valueType, isEdge)
def _writeTf(outFile, data, valueType, isEdge):
isInt = valueType == "int"
with open(outFile, "w") as fh:
if isEdge:
if isInt:
for ((n, m), v) in sorted(data.items()):
vTf = '' if v is None else f"\t{v}"
fh.write(f"{n}\t{m}{vTf}\n")
else:
for ((n, m), v) in sorted(data.items()):
vTf = '' if v is None else f"\t{_valueFromTf(v)}"
fh.write(f"{n}\t{m}{vTf}\n")
else:
if isInt:
for (n, v) in sorted(data.items()):
if v is not None:
fh.write(f"{n}\t{v}\n")
else:
for (n, v) in sorted(data.items()):
if v is not None:
fh.write(f"{n}\t{_valueFromTf(v)}\n")
def _valueFromTf(tf):
return "\\".join(
x.replace("\\t", "\t").replace("\\n", "\n") for x in tf.split("\\\\")
)
def _tfFromValue(val, isInt):
return (
str(val)
if isInt
else val.replace("\\", "\\\\").replace("\t", "\\t").replace("\n", "\\n")
)
def _setFromSpec(spec):
covered = set()
for r_str in spec.split(","):
bounds = r_str.split("-")
if len(bounds) == 1:
covered.add(int(r_str))
else:
b = int(bounds[0])
e = int(bounds[1])
if e < b:
(b, e) = (e, b)
for n in range(b, e + 1):
covered.add(n)
return covered
def unexpanduser(path):
    """Abbreviate the user's home directory back to '~' in `path`
    (inverse of os.path.expanduser, using the module-level HOME_DIR)."""
    return path.replace(HOME_DIR, "~")
| aarek-eng/txtpy | txtpy/convert/tf.py | tf.py | py | 8,081 | python | en | code | 1 | github-code | 36 |
# Floyd-Warshall all-pairs shortest paths (reads the graph from stdin).
INF = int(1e9)  # sentinel for "no edge"

# Input: node count, then edge count.
num_nodes = int(input())
num_edges = int(input())

# Distance matrix, 1-indexed; INF everywhere except 0 on the diagonal.
graph = [[INF] * (num_nodes + 1) for _ in range(num_nodes + 1)]
for start_node in range(1, num_nodes + 1):
    for end_node in range(1, num_nodes + 1):
        if start_node == end_node:
            graph[start_node][end_node] = 0

# Read the directed, weighted edges.
for _ in range(num_edges):
    start_node, end_node, weight = map(int, input().split())
    graph[start_node][end_node] = weight

# Floyd-Warshall: relax every pair (a, b) through each intermediate node k.
for k in range(1, num_nodes + 1):
    for a in range(1, num_nodes + 1):
        for b in range(1, num_nodes + 1):
            graph[a][b] = min(graph[a][b], graph[a][k]+graph[k][b])

# Output the distance matrix ("INFINITY" when unreachable).
for a in range(1, num_nodes + 1):
    for b in range(1, num_nodes + 1):
        if graph[a][b] == INF:
            print("INFINITY", end=" ")
        else:
            print(graph[a][b], end=" ")
    print()
4897651596 | '''
Assignments
1)Write a Python program to sort (ascending and descending) a dictionary by value. [use sorted()]
2)Write a Python program to combine two dictionary adding values for common keys.
d1 = {'a': 100, 'b': 200, 'c':300}
d2 = {'a': 300, 'b': 200, 'd':400}
Sample output: Counter({'a': 400, 'b': 400, 'd': 400, 'c': 300})
'''
#ques 1
# Question 1: sort a dictionary by value, ascending and descending.
d1 = {'a': 100, 'b': 200, 'c': 300}
d2 = {'a': 300, 'b': 200, 'd': 400}


def sorted_dict_increasing(d1):
    """Print d1's items sorted by value ascending, then descending;
    return the descending list (matches the original behaviour)."""
    value_of = lambda pair: pair[1]
    ascending = sorted(d1.items(), key=value_of)
    print(ascending)
    descending = sorted(d1.items(), key=value_of, reverse=True)
    print(descending)
    return descending


sorted_dict_increasing(d1)
# Ques 2
# Question 2: merge two dicts, summing the values of common keys.
from collections import Counter

d1 = {'a': 100, 'b': 200, 'c': 300}
d2 = {'a': 300, 'b': 200, 'd': 400}


def dict_combine(d1, d2):
    """Return (and print) a Counter holding the key-wise sum of d1 and d2."""
    combined = Counter(d1) + Counter(d2)
    print(combined)
    return combined


dict_combine(d1, d2)
21346552302 | from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib import messages
from django.conf import settings
# Create your views here.
# Paypal email id[to donate] :- testingofshopkproject2@gmail.com & password :- Shopk@4994
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def donate(request):
    """Handle a donation POST: email a confirmation to the logged-in user
    and flash a status message; GET requests just render the donate page."""
    if request.method == 'POST':
        try:
            # NOTE(review): hard-coded to True — there is no actual payment
            # gateway check here, so the error branch below is unreachable.
            # Presumably real PayPal verification is still TODO; confirm.
            transaction_completed = True
            if transaction_completed:
                user_email = request.user.email
                subject = 'Donation Confirmation'
                message = 'Thank you for your donation!'
                from_email = settings.EMAIL_HOST_USER
                recipient_list = [user_email]
                send_mail(subject, message, from_email, recipient_list, fail_silently=False)
                messages.success(request, 'Donation successful. Check your email for confirmation.')
            else:
                messages.error(request, 'Transaction failed.')
            # Redirect-after-POST to avoid duplicate submissions.
            return redirect('donate')
        except Exception as e:
            # E.g. the email backend failing; surface the error to the user.
            messages.error(request, 'An error occurred during the transaction: ' + str(e))
    return render(request, 'donate.html')
| Kiran4949/Donation | app/views.py | views.py | py | 1,234 | python | en | code | 0 | github-code | 36 |
# Advent of Code 2021 day 2, part 1: follow submarine movement commands
# from input.txt and print horizontal position * depth.
# Fixes: the input file was never closed, a list comprehension was used
# purely for its side effect, and `file`/`raw_input` shadowed builtins.
with open("input.txt", "r") as infile:
    # Strip the trailing newline from each command line.
    commands = [line[:-1] for line in infile.readlines()]

f = 0  # horizontal position
d = 0  # depth
for elem in commands:
    print(elem)
    t = elem.split(" ")
    print(t)
    if t[0] == "forward":
        f += int(t[1])
    elif t[0] == "down":
        d += int(t[1])
    elif t[0] == "up":
        d -= int(t[1])
print(f, d)
print(f * d)
16514949152 | from django.conf.urls import include, url
from django.contrib import admin
from . import views
# URL routes for the NPB score site; view semantics inferred from names —
# confirm against npbapi/views.py.
urlpatterns = [
    url(r'^$', views.index),                      # site index
    url(r'^about', views.mail_form),              # about / contact form
    url(r'^api', views.api),                      # API landing page
    url(r'^score/today', views.today),            # today's scores
    url(r'^score/(?P<date_id>\d{8})/$', views.feedjson),  # scores for YYYYMMDD
    url(r'^admin/', include(admin.site.urls)),    # Django admin
    url(r'^scraper/update', views.update),        # trigger scraper update
    url(r'^scraper/refresh', views.refresh),      # trigger scraper refresh
]
15154019133 | import pandas as pd
from textblob import TextBlob
import multiprocessing as mp
import time
def calc(review):
    """Classify a review as Positive/Negative/Neutral by TextBlob polarity."""
    polarity = TextBlob(str(review)).sentiment.polarity
    if polarity > 0:
        return "Positive"
    if polarity < 0:
        return "Negative"
    return "Neutral"
# Load the Play Store reviews dataset and label each review's sentiment
# in parallel across 8 worker processes.
# NOTE(review): this module-level multiprocessing code should live under
# an `if __name__ == "__main__":` guard — on spawn-based platforms
# (Windows/macOS) child processes re-import the module and would
# re-execute it; confirm the target platform.
df = pd.read_csv('googleplaystore_user_reviews.csv')
reviews = df.Translated_Review

pool = mp.Pool(processes=8)
start = time.time()
new_labels = pool.map(calc, [rev for rev in reviews])
end = time.time()
print(end - start)  # wall-clock seconds for the parallel pass
pool.close()

df['new labels'] = new_labels
df.to_csv("new_data.csv")
| philipsFarraj/ParallelProject | project.py | project.py | py | 626 | python | en | code | 0 | github-code | 36 |
70003672425 | import asyncio, config, aiohttp
import logging
from .utils import instance_tools
log = logging.getLogger()
class StatHandler:
    """Cog that periodically pushes the bot's guild/shard counts to the
    bot-list sites (discordbots.org, discord.bots.gg, discord.services,
    lbots.org).  Only instance 0 performs the HTTP posts."""

    def __init__(self, bot):
        self.bot = bot
        # Guard so postloop is only spawned once across on_ready calls.
        self.has_started = 0

    async def postloop(self):
        """Every 30 minutes, gather the total guild count across all
        instances and POST the stats to each bot-list API."""
        if not self.has_started == 1:
            self.has_started = 1
        while self.has_started:
            log.info("Getting all servers.")
            log.info("Attempting to update server count.")
            i = instance_tools.InstanceTools(self.bot.instances, self.bot.redis)
            guilds = await i.get_all_guilds()
            log.info("Servers: %s" % guilds)
            if self.bot.instance == 0:
                async with aiohttp.ClientSession() as cs:
                    x = await cs.post(
                        "https://discordbots.org/api/bots/310039170792030211/stats",
                        json={
                            "server_count": int(guilds),
                            "shard_count": self.bot.shard_count
                        },
                        headers={
                            "Authorization": config.dbots_key
                        }
                    )
                    log.info("Posted to discordbots.org, {}".format(await x.json()))
                    x = await cs.post(
                        "https://discord.bots.gg/api/v1/bots/310039170792030211/stats",
                        json={
                            "guildCount": int(guilds),
                            "shardCount": self.bot.shard_count
                        },
                        headers={
                            "Authorization": config.dpw_key
                        }
                    )
                    log.info("Posted to discord.bots.gg, {}".format(await x.json()))
                    # BUG FIX: the next two posts did not rebind `x`, so
                    # their log lines re-read the *previous* response's JSON
                    # (and re-consumed an already-read body).
                    x = await cs.post(
                        "https://discord.services/api/bots/310039170792030211",
                        json={
                            "guild_count": int(guilds)
                        },
                        headers={
                            "Authorization": config.ds_key
                        }
                    )
                    log.info("Posted to discord.services, {}".format(await x.json()))
                    x = await cs.post(
                        "https://lbots.org/api/v1/bots/310039170792030211/stats",
                        json={
                            "guild_count": int(guilds),
                            "shard_count": self.bot.shard_count
                        },
                        headers={
                            "Authorization": config.lbots_key
                        }
                    )
                    log.info("Posted to lbots.org, {}".format(await x.json()))
            await asyncio.sleep(1800)

    async def on_ready(self):
        # Kick off the periodic posting loop once the bot is ready.
        self.bot.loop.create_task(self.postloop())
def setup(bot):
    """Standard cog entry point: register StatHandler with the bot."""
    bot.add_cog(StatHandler(bot))
| harumaki4649/nekobot | modules/unused/stat_handler.py | stat_handler.py | py | 3,136 | python | en | code | 0 | github-code | 36 |
5547014999 | """
Voting 12/05/2023.
Lido V2 (Shapella-ready) protocol upgrade
1. Update `WithdrawalVault` proxy implementation
2. Call `ShapellaUpgradeTemplate.startUpgrade()`
3. Publish new `Lido` implementation in Lido app APM repo
4. Update `Lido` implementation
5. Publish new `NodeOperatorsRegistry` implementation in NodeOperatorsRegistry app APM repo
6. Update `NodeOperatorsRegistry` implementation
7. Publish new `LidoOracle` implementation in LidoOracle app APM repo
8. Update `LidoOracle` implementation to `LegacyOracle`
9. Create new role `STAKING_ROLE_ROLE` and assign to `StakingRouter`
10. Call `ShapellaUpgradeTemplate.finishUpgrade()`
11. Revoke `MANAGE_FEE` role from `Voting`
12. Revoke `MANAGE_WITHDRAWAL_KEY` role from `Voting`
13. Revoke `MANAGE_PROTOCOL_CONTRACTS_ROLE` role from `Voting`
14. Revoke `SET_EL_REWARDS_VAULT_ROLE` role from `Voting`
15. Revoke `SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE` role from `Voting`
16. Revoke `DEPOSIT_ROLE` role from old `DepositSecurityModule`
17. Revoke `BURN_ROLE` role from `SelfOwnedStETHBurner`
18. Revoke `ADD_NODE_OPERATOR_ROLE` role from `Voting`
19. Revoke `SET_NODE_OPERATOR_ACTIVE_ROLE` role from `Voting`
20. Revoke `SET_NODE_OPERATOR_NAME_ROLE` role from `Voting`
21. Revoke `SET_NODE_OPERATOR_ADDRESS_ROLE` role from `Voting`
22. Revoke `REPORT_STOPPED_VALIDATORS_ROLE` role from `Voting`
23. Revoke `MANAGE_MEMBERS` role from `Voting`
24. Revoke `MANAGE_QUORUM` role from `Voting`
25. Revoke `SET_BEACON_SPEC` role from `Voting`
26. Revoke `SET_REPORT_BOUNDARIES` role from `Voting`
27. Revoke `SET_BEACON_REPORT_RECEIVER` role from `Voting`
28. Grant `MANAGE_TOKEN_URI_ROLE` role to `Voting`
29. Set `WithdrawalQueueERC721` baseUri to `https://wq-api.lido.fi/v1/nft`
30. Revoke `MANAGE_TOKEN_URI_ROLE` role from `Voting`
31. Fund Gas Funder multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 for deposits with 50 stETH
"""
import time
from typing import Dict, Tuple, Optional
from brownie.network.transaction import TransactionReceipt
from brownie import ShapellaUpgradeTemplate # type: ignore
from utils.agent import agent_forward
from utils.finance import make_steth_payout
from utils.voting import bake_vote_items, confirm_vote_script, create_vote
from utils.repo import (
add_implementation_to_lido_app_repo,
add_implementation_to_nor_app_repo,
add_implementation_to_oracle_app_repo,
)
from utils.kernel import update_app_implementation
from utils.config import (
get_deployer_account,
get_is_live,
contracts,
STAKING_ROUTER,
WITHDRAWAL_VAULT,
WITHDRAWAL_VAULT_IMPL,
SELF_OWNED_STETH_BURNER,
get_priority_fee,
)
from utils.permissions import (
encode_oz_grant_role,
encode_oz_revoke_role,
encode_permission_create,
encode_permission_revoke,
)
# noinspection PyUnresolvedReferences
from utils.brownie_prelude import *
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/lido/README.md
update_lido_app = {
"new_address": "0x17144556fd3424EDC8Fc8A4C940B2D04936d17eb",
"content_uri": "0x697066733a516d525358415a724632785235726762556445724456364c47746a7151315434415a677336796f586f734d516333",
"id": "0x3ca7c3e38968823ccb4c78ea688df41356f182ae1d159e4ee608d30d68cef320",
"version": (4, 0, 0),
}
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/node-operators-registry/README.md
update_nor_app = {
"new_address": "0x8538930c385C0438A357d2c25CB3eAD95Ab6D8ed",
"content_uri": "0x697066733a516d54346a64693146684d454b5576575351316877786e33365748394b6a656743755a7441684a6b6368526b7a70",
"id": "0x7071f283424072341f856ac9e947e7ec0eb68719f757a7e785979b6b8717579d",
"version": (4, 0, 0),
}
# Content URI: https://github.com/lidofinance/lido-dao/blob/b70881f026096790308d7ac9e277ad7f609c7117/apps/lidooracle/README.md
update_oracle_app = {
"new_address": "0xa29b819654cE6224A222bb5f586920105E2D7E0E",
"content_uri": "0x697066733a516d575461635041557251614376414d5663716e5458766e7239544c666a57736861736334786a536865717a3269",
"id": "0x8b47ba2a8454ec799cd91646e7ec47168e91fd139b23f017455f3e5898aaba93",
"version": (4, 0, 0),
}
WITHDRAWAL_QUEUE_ERC721_BASE_URI = "https://wq-api.lido.fi/v1/nft"
def encode_template_start_upgrade(template_address: str) -> Tuple[str, str]:
    """Build a call-script entry invoking `startUpgrade()` on the upgrade template."""
    contract = ShapellaUpgradeTemplate.at(template_address)
    calldata = contract.startUpgrade.encode_input()
    return contract.address, calldata
def encode_template_finish_upgrade(template_address: str) -> Tuple[str, str]:
    """Build a call-script entry invoking `finishUpgrade()` on the upgrade template."""
    contract = ShapellaUpgradeTemplate.at(template_address)
    calldata = contract.finishUpgrade.encode_input()
    return contract.address, calldata
def encode_withdrawal_vault_proxy_update(vault_proxy_address: str, implementation: str) -> Tuple[str, str]:
    """Build a call-script entry upgrading the WithdrawalVault proxy to `implementation`."""
    vault_manager = interface.WithdrawalVaultManager(vault_proxy_address)
    calldata = vault_manager.proxy_upgradeTo.encode_input(implementation, b"")
    return vault_manager.address, calldata
def encode_withdrawal_queue_base_uri_update(withdrawal_queue_address: str, base_uri: str) -> Tuple[str, str]:
    """Build a call-script entry setting the WithdrawalQueueERC721 token base URI."""
    queue = interface.WithdrawalQueueERC721(withdrawal_queue_address)
    calldata = queue.setBaseURI.encode_input(base_uri)
    return queue.address, calldata
def start_vote(tx_params: Dict[str, str], silent: bool) -> Tuple[int, Optional[TransactionReceipt]]:
    """Prepare and run voting.

    Assembles the 31-item Shapella-upgrade call script (proxy update, template
    start/finish, three app upgrades, permission re-wiring, baseUri update and
    a stETH payout), pairs it with the matching human-readable descriptions,
    and submits the Aragon vote with `tx_params`.
    """
    voting = contracts.voting
    node_operators_registry = contracts.node_operators_registry
    lido = contracts.lido
    legacy_oracle = contracts.legacy_oracle
    withdrawal_queue = contracts.withdrawal_queue
    # Order matters: items must line up 1:1 with vote_desc_items below, and the
    # template's startUpgrade()/finishUpgrade() must bracket the app upgrades.
    call_script_items = [
        # 1)
        encode_withdrawal_vault_proxy_update(WITHDRAWAL_VAULT, WITHDRAWAL_VAULT_IMPL),
        # 2)
        encode_template_start_upgrade(contracts.shapella_upgrade_template),
        # 3)
        add_implementation_to_lido_app_repo(
            update_lido_app["version"], update_lido_app["new_address"], update_lido_app["content_uri"]
        ),
        # 4)
        update_app_implementation(update_lido_app["id"], update_lido_app["new_address"]),
        # 5)
        add_implementation_to_nor_app_repo(
            update_nor_app["version"], update_nor_app["new_address"], update_nor_app["content_uri"]
        ),
        # 6)
        update_app_implementation(update_nor_app["id"], update_nor_app["new_address"]),
        # 7)
        add_implementation_to_oracle_app_repo(
            update_oracle_app["version"], update_oracle_app["new_address"], update_oracle_app["content_uri"]
        ),
        # 8)
        update_app_implementation(update_oracle_app["id"], update_oracle_app["new_address"]),
        # 9)
        encode_permission_create(STAKING_ROUTER, node_operators_registry, "STAKING_ROUTER_ROLE", manager=voting),
        # 10)
        encode_template_finish_upgrade(contracts.shapella_upgrade_template),
        # 11) Items 11-27 strip now-obsolete Aragon ACL permissions.
        encode_permission_revoke(lido, "MANAGE_FEE", revoke_from=voting),
        # 12)
        encode_permission_revoke(lido, "MANAGE_WITHDRAWAL_KEY", revoke_from=voting),
        # 13)
        encode_permission_revoke(lido, "MANAGE_PROTOCOL_CONTRACTS_ROLE", revoke_from=voting),
        # 14)
        encode_permission_revoke(lido, "SET_EL_REWARDS_VAULT_ROLE", revoke_from=voting),
        # 15)
        encode_permission_revoke(lido, "SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE", revoke_from=voting),
        # 16)
        encode_permission_revoke(lido, "DEPOSIT_ROLE", revoke_from=contracts.deposit_security_module_v1),
        # 17)
        encode_permission_revoke(lido, "BURN_ROLE", revoke_from=SELF_OWNED_STETH_BURNER),
        # 18)
        encode_permission_revoke(node_operators_registry, "ADD_NODE_OPERATOR_ROLE", revoke_from=voting),
        # 19)
        encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_ACTIVE_ROLE", revoke_from=voting),
        # 20)
        encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_NAME_ROLE", revoke_from=voting),
        # 21)
        encode_permission_revoke(node_operators_registry, "SET_NODE_OPERATOR_ADDRESS_ROLE", revoke_from=voting),
        # 22)
        encode_permission_revoke(node_operators_registry, "REPORT_STOPPED_VALIDATORS_ROLE", revoke_from=voting),
        # 23)
        encode_permission_revoke(legacy_oracle, "MANAGE_MEMBERS", revoke_from=voting),
        # 24)
        encode_permission_revoke(legacy_oracle, "MANAGE_QUORUM", revoke_from=voting),
        # 25)
        encode_permission_revoke(legacy_oracle, "SET_BEACON_SPEC", revoke_from=voting),
        # 26)
        encode_permission_revoke(legacy_oracle, "SET_REPORT_BOUNDARIES", revoke_from=voting),
        # 27)
        encode_permission_revoke(legacy_oracle, "SET_BEACON_REPORT_RECEIVER", revoke_from=voting),
        # 28) Temporarily grant the URI role via the Agent so item 29 can run,
        encode_oz_grant_role is executed through the Agent app.
        agent_forward([encode_oz_grant_role(withdrawal_queue, "MANAGE_TOKEN_URI_ROLE", grant_to=voting)]),
        # 29)
        encode_withdrawal_queue_base_uri_update(withdrawal_queue, base_uri=WITHDRAWAL_QUEUE_ERC721_BASE_URI),
        # 30) ...then revoke it again right after the baseUri is set.
        agent_forward([encode_oz_revoke_role(withdrawal_queue, "MANAGE_TOKEN_URI_ROLE", revoke_from=voting)]),
        # 31)
        make_steth_payout(
            target_address="0x5181d5D56Af4f823b96FE05f062D7a09761a5a53",
            steth_in_wei=50 * (10**18),
            reference="Fund Gas Funder multisig"
        )
    ]
    # NOTE(review): descriptions 2 and 19 are missing a closing backtick —
    # cosmetic only; text kept as originally voted.
    vote_desc_items = [
        "1) Update `WithdrawalVault` proxy implementation",
        "2) Call `ShapellaUpgradeTemplate.startUpgrade()",
        "3) Publish new implementation in Lido app APM repo",
        "4) Updating implementation of Lido app",
        "5) Publishing new implementation in Node Operators Registry app APM repo",
        "6) Updating implementation of Node Operators Registry app",
        "7) Publishing new implementation in Oracle app APM repo",
        "8) Updating implementation of Oracle app",
        "9) Create permission for STAKING_ROUTER_ROLE of NodeOperatorsRegistry assigning it to StakingRouter",
        "10) Finish upgrade by calling `ShapellaUpgradeTemplate.finishUpgrade()`",
        "11) Revoke `MANAGE_FEE` role from `Voting`",
        "12) Revoke `MANAGE_WITHDRAWAL_KEY` role from `Voting`",
        "13) Revoke `MANAGE_PROTOCOL_CONTRACTS_ROLE` role from `Voting`",
        "14) Revoke `SET_EL_REWARDS_VAULT_ROLE` role from `Voting`",
        "15) Revoke `SET_EL_REWARDS_WITHDRAWAL_LIMIT_ROLE` role from `Voting`",
        "16) Revoke `DEPOSIT_ROLE` role from old `DepositSecurityModule`",
        "17) Revoke `BURN_ROLE` role from `SelfOwnedStETHBurner`",
        "18) Revoke `ADD_NODE_OPERATOR_ROLE` role from `Voting`",
        "19) Revoke `SET_NODE_OPERATOR_ACTIVE_ROLE` role from `Voting",
        "20) Revoke `SET_NODE_OPERATOR_NAME_ROLE` role from `Voting`",
        "21) Revoke `SET_NODE_OPERATOR_ADDRESS_ROLE` role from `Voting`",
        "22) Revoke `REPORT_STOPPED_VALIDATORS_ROLE` role from `Voting`",
        "23) Revoke `MANAGE_MEMBERS` role from `Voting`",
        "24) Revoke `MANAGE_QUORUM` role from `Voting`",
        "25) Revoke `SET_BEACON_SPEC` role from `Voting`",
        "26) Revoke `SET_REPORT_BOUNDARIES` role from `Voting`",
        "27) Revoke `SET_BEACON_REPORT_RECEIVER` role from `Voting`",
        "28) Grant `MANAGE_TOKEN_URI_ROLE` role to `Voting`",
        "29) Set `WithdrawalQueueERC721` baseUri to `https://wq-api.lido.fi/v1/nft`",
        "30) Revoke `MANAGE_TOKEN_URI_ROLE` role from `Voting`",
        "31) Fund Gas Funder multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 for deposits with 50 stETH"
    ]
    vote_items = bake_vote_items(vote_desc_items, call_script_items)
    # `and` short-circuits: if the operator rejects the script preview,
    # confirm_vote_script returns a falsy value and no vote is created.
    return confirm_vote_script(vote_items, silent) and list(create_vote(vote_items, tx_params))
def main():
    """Entry point: assemble transaction parameters and start the vote.

    On a live network the total fee is capped at 300 gwei and the configured
    priority fee is applied; otherwise defaults are used.
    """
    tx_params = {"from": get_deployer_account()}
    if get_is_live():
        tx_params["max_fee"] = "300 gwei"
        tx_params["priority_fee"] = get_priority_fee()
    vote_id, _ = start_vote(tx_params=tx_params, silent=False)
    # Replaces the non-idiomatic `vote_id >= 0 and print(...)` expression-statement.
    if vote_id >= 0:
        print(f"Vote created: {vote_id}.")
    time.sleep(5)  # hack for waiting thread #2.
| lidofinance/scripts | archive/scripts/upgrade_shapella.py | upgrade_shapella.py | py | 12,033 | python | en | code | 14 | github-code | 36 |
38214358765 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 3:15 2019
@author: deepnikajain
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
from penquins import Kowalski
from Coadding.keypairs import get_keypairs
# Load API credential pairs once at import time; index 1 is the Kowalski
# (username, password) pair consumed by query_kowal().
DEFAULT_AUTHs = get_keypairs()
DEFAULT_AUTH_kowalski = DEFAULT_AUTHs[1]
def query_kowal(name, lowjd, upjd):
    """Query Kowalski's ZTF_alerts collection for one object within a JD window.

    Parameters
    ----------
    name : str
        ZTF object id (e.g. "ZTF19aaaaaaa").
    lowjd, upjd : float
        Exclusive lower/upper Julian-date bounds.

    Returns
    -------
    dict
        Raw Kowalski query response.

    Fixes vs. the original query string: both JD bounds now live under a single
    'candidate.jd' key (duplicate keys made the second overwrite the first),
    the filter dict is closed before the projection dict (it was left open),
    and JDs are interpolated as numbers via %f ('%d' quoted them as strings
    and truncated the fractional day).
    """
    k = Kowalski(username=DEFAULT_AUTH_kowalski[0], password=DEFAULT_AUTH_kowalski[1], verbose=False)
    qu = {"query_type": "general_search",
          "query": "db['ZTF_alerts'].find({'objectId': {'$eq': '%s'}, "
                   "'candidate.jd': {'$gt': %f, '$lt': %f}}, "
                   "{'candidate.jd': 1, 'candidate.fid': 1, "
                   "'candidate.programid': 1, 'candidate.field': 1, "
                   "'candidate.magzpsci': 1, 'candidate.magzpsciunc': 1})" % (name, lowjd, upjd)}
    r = k.query(query=qu)
    return r
def get_kdata(targetdir, name, r):
    """Convert a Kowalski query response into de-duplicated, JD-sorted tables.

    Builds one table from the top-level 'candidate' entries (programid == 1
    only) and one from the nested 'prv_candidates' history, writes both as CSV
    under ``targetdir + 'data/'``, and returns their shapes plus the merged
    DataFrame.

    NOTE(review): `mag` is never appended to (the append is commented out), so
    the 'mag' column is built from an empty array while the others have n rows —
    astropy.Table requires equal-length columns; confirm this path has been run.
    NOTE(review): if the query failed, `kdata`/`kdata1` are referenced before
    assignment below the else-branches (UnboundLocalError).
    """
    if 'result_data' in list(r.keys()):
        rdata = r['result_data']
        rrdata = rdata['query_result']
        n = len(rrdata)
        jd = []
        fid = []
        programid = []
        fieldid = []
        mag = []
        magzp = []
        magzp_unc = []
        # Keep only public-survey alerts (programid 1).
        for i in range(n):
            if rrdata[i]['candidate']['programid'] == 1:
                jd.append(rrdata[i]['candidate']['jd'])
                fid.append(rrdata[i]['candidate']['fid'])
                programid.append(rrdata[i]['candidate']['programid'])
                fieldid.append(rrdata[i]['candidate']['field'])
                # mag.append(rrdata[i]['candidate']['magpsf'])
                magzp.append(rrdata[i]['candidate']['magzpsci'])
                magzp_unc.append(rrdata[i]['candidate']['magzpsciunc'])
        jd = np.array(jd)
        fid = np.array(fid)
        programid = np.array(programid)
        fieldid = np.array(fieldid)
        mag = np.array(mag)
        magzp = np.array(magzp)
        magzp_unc = np.array(magzp_unc)
        k_data = Table([jd, fid, programid, fieldid, mag, magzp, magzp_unc], \
                       names = ['jdobs', 'fid', 'programid', 'fieldid', 'mag', 'magzp', 'magzp_unc'])
        kdata = k_data.to_pandas()
        kdata.to_csv(targetdir + 'data/kowalski_data_' + name + '.csv', index = False, encoding = 'utf8')
    else:
        print(('Kowalski query is not succesful for %s'%name))
    kdata = kdata.drop_duplicates()
    kdata.sort_values(['jdobs'], inplace = True)
    p = kdata.shape
    # Second pass: same extraction, but over the nested prv_candidates history.
    if 'result_data' in list(r.keys()):
        rdata = r['result_data']
        rrdata = rdata['query_result']
        n = len(rrdata)
        jd = []
        fid = []
        programid = []
        fieldid = []
        mag = []
        magzp = []
        magzp_unc = []
        for i in range(n):
            if rrdata[i]['prv_candidates'] != None:
                m = len(rrdata[i]['prv_candidates'])
                for j in range(m):
                    # Only entries with a zeropoint and a real detection.
                    if 'magzpsci' in list(rrdata[i]['prv_candidates'][j].keys()):
                        if rrdata[i]['prv_candidates'][j]['magpsf'] != None:
                            if rrdata[i]['prv_candidates'][j]['programid'] == 1:
                                jd.append (rrdata[i]['prv_candidates'][j]['jd'])
                                fid.append (rrdata[i]['prv_candidates'][j]['fid'])
                                programid.append (rrdata[i]['prv_candidates'][j]['programid'])
                                fieldid.append (rrdata[i]['prv_candidates'][j]['field'])
                                # mag.append (rrdata[i]['prv_candidates'][j]['magpsf'])
                                magzp.append (rrdata[i]['prv_candidates'][j]['magzpsci'])
                                magzp_unc.append (rrdata[i]['prv_candidates'][j]['magzpsciunc'])
        jd = np.array(jd)
        fid = np.array(fid)
        programid = np.array(programid)
        fieldid = np.array(fieldid)
        mag = np.array(mag)
        magzp = np.array(magzp)
        magzp_unc = np.array(magzp_unc)
        k_data = Table([jd, fid, programid, fieldid, mag, magzp, magzp_unc], \
                       names = ['jdobs', 'fid', 'programid', 'fieldid', 'mag', 'magzp', 'magzp_unc'])
        kdata1 = k_data.to_pandas()
        kdata1.to_csv(targetdir + 'data/kowalski_data1_' + name + '.csv', index = False, encoding = 'utf8')
    else:
        print(('Kowalski query is not succesful for %s'%name))
    kdata1 = kdata1.drop_duplicates()
    kdata1.sort_values(['jdobs'], inplace = True)
    q = kdata1.shape
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # pandas.concat([kdata, kdata1]) is the modern equivalent.
    kdata = kdata.append(kdata1)
    kdata = kdata.drop_duplicates()
    kdata.sort_values(['jdobs'], inplace = True)
    return p, q, kdata
| Deepnika/Assembling-lightcurves-SLSNe | query_kowalski.py | query_kowalski.py | py | 4,748 | python | en | code | 0 | github-code | 36 |
2201109453 | import numpy as np
import subprocess
import os
import sys
from Bio import SeqIO
from Bio import PDB
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import PPBuilder
from joblib import Parallel, delayed
import multiprocessing as mp
import time
import re
from joblib import Parallel, delayed
import concurrent.futures
class energy_calculation:
    """Run the FoldX + Rosetta energy pipeline for one TCR-pMHC model PDB.

    Given a model file (chains M=MHC, P=peptide, A/B=TCR alpha/beta), this
    splits the complex, computes FoldX interaction energies and Rosetta
    total/per-residue scores for complex, TCR and pMHC, and writes a per-residue
    feature array to ``numpy_output_dir``.  External tools `foldx`,
    `relax.default.linuxgccrelease`, `score_jd2.linuxgccrelease` and
    `per_residue_energies.linuxgccrelease` must be on PATH.
    """
    def __init__(self, model_filename, model_dir):
        self.model_filename = model_filename
        self.model_dir = model_dir
        self.model_ID = model_filename.replace("_model.pdb","")
        self.model_path = model_dir + model_filename
        # NOTE(review): origin/partition are hard-coded here and duplicated at
        # module level — keep them in sync when running other partitions.
        self.origin = "positives"
        self.partition = "1"
        self.model_output_dir = f"/home/projects/ht3_aim/people/idamei/results/energy_calc_full_output/{self.partition}/{self.origin}/{self.model_ID}/"
        #self.model_output_dir = f"/home/projects/ht3_aim/people/idamei/full_output_test/"
        self.numpy_output_dir = f"/home/projects/ht3_aim/people/idamei/results/energy_output_arrays/{self.partition}/{self.origin}/"
        #self.numpy_output_dir = f"/home/projects/ht3_aim/people/idamei/numpy_output_test/"
        # Binder label: 1 for positives, 0 otherwise.
        if self.origin == "positives":
            self.binder = 1
        else:
            self.binder = 0
    def pipeline(self):
        """Run the full per-model pipeline; each stage logs-and-continues on failure."""
        startstart_time = time.time()
        print("Start " + self.model_filename)
        # Make output directory
        os.makedirs(self.model_output_dir, exist_ok = True)
        os.chdir(self.model_output_dir)
        # Get PDB features
        self.extract_pdb_features()
        # Split pdb
        try:
            self.splitPDB()
        except Exception as err:
            print("Splitting PDB failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Run FoldX
        try:
            start_time = time.time()
            self.run_foldx()
            runtime = (time.time() - start_time) / 60
            print("FoldX took {} min".format(runtime))
        except Exception as err:
            print("FoldX failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Extract foldX energies
        try:
            self.extract_foldx_energies()
        except Exception as err:
            print("Extracting foldX energies failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Run Rosetta on complex, TCR-only and pMHC-only structures
        try:
            start_time = time.time()
            self.rosetta_scorefile_path_complex, self.rosetta_per_res_scorefile_path_complex = self.run_rosetta(self.model_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for complex took {} min".format(runtime))
            start_time = time.time()
            self.rosetta_scorefile_path_tcr, self.rosetta_per_res_scorefile_path_tcr = self.run_rosetta(self.tcr_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for TCR took {} min".format(runtime))
            start_time = time.time()
            self.rosetta_scorefile_path_pmhc, self.rosetta_per_res_scorefile_path_pmhc = self.run_rosetta(self.pmhc_path)
            runtime = (time.time() - start_time) / 60
            print("Rosetta for pMHC took {} min".format(runtime))
        except Exception as err:
            print("Rosetta failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Extract Rosetta energies
        try:
            self.rosetta_overall_scores_complex, self.rosetta_per_res_scores_complex = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_complex,
                self.rosetta_per_res_scorefile_path_complex)
            self.rosetta_overall_scores_tcr, self.rosetta_per_res_scores_tcr = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_tcr,
                self.rosetta_per_res_scorefile_path_tcr)
            self.rosetta_overall_scores_pmhc, self.rosetta_per_res_scores_pmhc = self.extract_rosetta_energies(
                self.rosetta_scorefile_path_pmhc,
                self.rosetta_per_res_scorefile_path_pmhc)
        except Exception as err:
            print("Extracting Rosetta energies failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        # Create output
        try:
            self.create_output()
        except Exception as err:
            print("Creating output failed for: " + self.model_ID, file=sys.stderr)
            print(err, file=sys.stderr)
        runtime = (time.time() - startstart_time) / 60
        print("{} took {} min".format(self.model_ID, runtime))
    def run_foldx(self):
        """Run FoldX RepairPDB (if needed) then AnalyseComplex on the repaired model."""
        # RepairPDB
        if not os.path.exists(self.model_filename.replace(".pdb", "_Repair.pdb")):
            repair_command = "foldx --command=RepairPDB --pdb={} --ionStrength=0.05 --pH=7 --water=CRYSTAL --vdwDesign=2 --out-pdb=1 --pdbHydrogens=false --output-dir={}".format(self.model_filename, self.model_output_dir)
            subprocess.run(repair_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_dir)
        # AnalyseComplex
        repaired_pdb_path = self.model_filename.replace(".pdb", "_Repair.pdb")
        analyse_command = "foldx --command=AnalyseComplex --pdb={} --output-dir={}".format(repaired_pdb_path,
                                                                                           self.model_output_dir)
        subprocess.run(analyse_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
    def extract_foldx_energies(self):
        """Parse FoldX AnalyseComplex output into {chain-pair: interaction energy}."""
        self.interaction_file_path = self.model_output_dir + "Interaction_" + self.model_filename.replace(".pdb",
                                                                                                          "_Repair_AC.fxout")
        foldx_output = open(self.interaction_file_path, "r")
        foldx_interaction_energies = dict()
        for line in foldx_output:
            if line.startswith("./"):
                splitted_line = line.split("\t")
                group1 = splitted_line[1]
                group2 = splitted_line[2]
                interaction_energy = splitted_line[6]
                foldx_interaction_energies[group1 + group2] = float(interaction_energy)
        foldx_output.close()
        self.foldx_interaction_energies = foldx_interaction_energies
    def run_rosetta(self, infile):
        """Relax `infile`, then score it overall and per residue.

        Returns (scorefile_path, per_residue_scorefile_path).
        """
        # Relaxation
        rosetta_relax_command = "relax.default.linuxgccrelease \
                                 -ignore_unrecognized_res \
                                 -nstruct 1 \
                                 -s {} \
                                 -out:path:pdb {}".format(infile, self.model_output_dir)
        subprocess.run(rosetta_relax_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        # Scoring (relax appends _0001 to the output PDB name)
        result = re.search(r'/([^/]+)$', infile)
        infilename = result.group(1)
        relaxed_pdb_path = self.model_output_dir + infilename.replace(".pdb", "_0001.pdb")
        rosetta_scorefile_path = self.model_output_dir + infilename + "_score.sc"
        rosetta_score_command = "score_jd2.linuxgccrelease \
                                 -in:file:s {} \
                                 -out:file:scorefile {}".format(relaxed_pdb_path, rosetta_scorefile_path)
        subprocess.run(rosetta_score_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        # Per residue scoring
        rosetta_per_res_scorefile_path = self.model_output_dir + infilename + "_per_residue_score.sc"
        rosetta_per_res_score_command = "per_residue_energies.linuxgccrelease \
                                 -in:file:s {} \
                                 -out:file:silent {}".format(relaxed_pdb_path, rosetta_per_res_scorefile_path)
        subprocess.run(rosetta_per_res_score_command.split(), universal_newlines=True, stdout=subprocess.PIPE, cwd=self.model_output_dir)
        return rosetta_scorefile_path, rosetta_per_res_scorefile_path
    def extract_rosetta_energies(self, rosetta_scorefile_path, rosetta_per_res_scorefile_path):
        """Parse Rosetta scorefiles.

        Returns (overall_scores: list[float] of 24 terms,
                 per_res_scores: {chain: {resnum: list[float] of 20 terms}}).
        """
        # Rosetta overall energies
        rosetta_scorefile = open(rosetta_scorefile_path, "r")
        rosetta_scorefile.readline()
        rosetta_scorefile.readline()
        # SCORE: total_score score dslf_fa13 fa_atr fa_dun fa_elec fa_intra_rep fa_intra_sol_xover4 fa_rep fa_sol hbond_bb_sc hbond_lr_bb hbond_sc hbond_sr_bb linear_chainbreak lk_ball_wtd omega overlap_chainbreak p_aa_pp pro_close rama_prepro ref time yhh_planarity description
        line = rosetta_scorefile.readline()
        splitted_line = line.strip().split()
        rosetta_overall_scores = splitted_line[1:-1] # 24 elements
        rosetta_overall_scores = [float(x) for x in rosetta_overall_scores]
        rosetta_scorefile.close()
        # Rosetta per residue energies
        rosetta_per_res_scorefile = open(rosetta_per_res_scorefile_path, "r")
        rosetta_per_res_scores = {"M": {}, "P": {}, "A": {}, "B": {}}
        # SCORE: pose_id pdb_id fa_atr fa_rep fa_sol fa_intra_rep fa_intra_sol_xover4 lk_ball_wtd fa_elec pro_close hbond_sr_bb hbond_lr_bb hbond_bb_sc hbond_sc dslf_fa13 omega fa_dun p_aa_pp yhh_planarity ref rama_prepro score description
        for line in rosetta_per_res_scorefile:
            splitted_line = line.strip().split()
            if splitted_line[1] == "pose_id":
                continue
            # pdb_id is e.g. "123A": residue number followed by chain letter.
            pdb_id = splitted_line[2]
            chain = pdb_id[-1]
            position = int(pdb_id[:-1])
            rosetta_per_res_scores[chain][position] = [float(x) for x in splitted_line[3:-1]] # 20 elements
        # NOTE(review): this re-closes rosetta_scorefile; rosetta_per_res_scorefile
        # is never closed (file handle leak).
        rosetta_scorefile.close()
        return rosetta_overall_scores, rosetta_per_res_scores
    def create_output(self):
        # Output array:
        # one-hot AA (20), M (1), P (1), TCRA (1), TCRB (1)
        # Rosetta_per_res_indiv_energies_complex (20), Rosetta_per_res_indiv_energies_pmhc/Rosetta_per_res_indiv_energies_tcr (20)
        # foldx_MP (1), foldx_MA (1), foldx_MB (1), foldx_PA (1), foldx_PB (1), foldx_AB (1),
        # Rosetta_total_energy_complex (24), Rosetta_total_energy_tcr (24), Rosetta_total_energy_pmhc (24),
        # Positive/negative (1), origin (10X, swapped, posi) (3) WAS REMOVED LATER
        # Total: 142
        # NOTE(review): the array actually has 146 columns (142 + the 1 binder
        # and 3 origin columns); the "Total: 142" above counts only features.
        output_array = np.zeros(shape=(self.total_length, 146))
        k1 = 0 # chain
        k2 = 0 # residue number total
        for chain in self.sequences:
            sequence = self.sequences[chain]
            k1 += 1
            k3 = 0 # chain residue number
            for aminoacid in sequence:
                number = self.numbering[chain][k3]
                output_array[k2, 0:20] = self.oneHot(aminoacid)
                if chain == "M":
                    output_array[k2, 20:24] = np.array([1, 0, 0, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["M"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_pmhc["M"][number]
                if chain == "P":
                    output_array[k2, 20:24] = np.array([0, 1, 0, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["P"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_pmhc["P"][number]
                if chain == "A":
                    output_array[k2, 20:24] = np.array([0, 0, 1, 0])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["A"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_tcr["A"][number]
                if chain == "B":
                    output_array[k2, 20:24] = np.array([0, 0, 0, 1])
                    output_array[k2, 24:44] = self.rosetta_per_res_scores_complex["B"][number]
                    output_array[k2, 44:64] = self.rosetta_per_res_scores_tcr["B"][number]
                output_array[k2, 64:70] = list(self.foldx_interaction_energies.values())
                output_array[k2, 70:94] = self.rosetta_overall_scores_complex
                output_array[k2, 94:118] = self.rosetta_overall_scores_tcr
                output_array[k2, 118:142] = self.rosetta_overall_scores_pmhc
                output_array[k2, 142] = self.binder
                # NOTE(review): the swapped_negatives branch writes the same
                # one-hot as the default branch ([0, 0, 1]); presumably it was
                # meant to be [0, 1, 0] — confirm before relying on columns 143:146.
                if self.origin == "tenx_negatives":
                    output_array[k2, 143:146] = np.array([1, 0, 0])
                elif self.origin == "swapped_negatives":
                    output_array[k2, 143:146] = np.array([0, 0, 1])
                else:
                    output_array[k2, 143:146] = np.array([0, 0, 1])
                k2 += 1
                k3 += 1
        np.save(file=self.numpy_output_dir + self.model_ID + ".npy", arr=output_array)
    def extract_pdb_features(self):
        """Read chain names, per-chain residue numbering, sequences and lengths.

        NOTE(review): parsing relies on whitespace-splitting ATOM records, which
        breaks on PDBs with fused fixed-width columns — fine for the in-house
        models, but confirm for external PDBs.
        """
        # Get chain names and sequence numbering
        pdb_file = open(self.model_path, "r")
        numbering = {"M": [], "P": [], "A": [], "B": []}
        chain_names = []
        old_number = 0
        old_chain = ""
        for line in pdb_file:
            splitted_line = line.split()
            if splitted_line[0] != "ATOM":
                continue
            chain = splitted_line[4]
            if chain != old_chain:
                chain_names.append(chain)
                old_chain = chain
            new_number = splitted_line[5]
            if new_number != old_number:
                numbering[chain].append(int(new_number))
                old_number = new_number
        # Get sequences
        structure = PDBParser().get_structure('', self.model_path)
        ppb = PPBuilder()
        chain_sequences = {}
        i = 0
        for pp in ppb.build_peptides(structure):
            chain_name = chain_names[i]
            chain_sequences[chain_name] = str(pp.get_sequence())
            i += 1
        self.chain_names = chain_names
        self.sequences = chain_sequences
        self.numbering = numbering
        self.length_A = len(chain_sequences["A"])
        self.length_B = len(chain_sequences["B"])
        self.length_M = len(chain_sequences["M"])
        self.length_P = len(chain_sequences["P"])
        self.total_length = self.length_P + self.length_M + self.length_B + self.length_A
    @staticmethod
    def oneHot(residue):
        """Return a length-20 one-hot vector for a standard amino acid, else zeros."""
        mapping = dict(zip("ACDEFGHIKLMNPQRSTVWY", range(20)))
        if residue in "ACDEFGHIKLMNPQRSTVWY":
            return np.eye(20)[mapping[residue]]
        else:
            return np.zeros(20)
    @staticmethod
    def selectChain(ifn, ofn, chainID):
        """Saves selected chains from PDB in a new PDB"""
        parser = PDB.PDBParser()
        structure = parser.get_structure('x', ifn)
        class ChainSelector():
            def __init__(self, chainID=chainID):
                self.chainID = chainID
            def accept_chain(self, chain):
                if chain.get_id() in self.chainID:
                    return 1
                return 0
            def accept_model(self, model):
                return 1
            def accept_residue(self, residue):
                return 1
            def accept_atom(self, atom):
                return 1
        sel = ChainSelector(chainID)
        io = PDB.PDBIO()
        io.set_structure(structure)
        io.save(ofn, sel)
    def splitPDB(self):
        """Write TCR.pdb (chains A+B) and pMHC.pdb (chains M+P) next to the model."""
        self.tcr_path = self.model_output_dir + "TCR.pdb"
        self.pmhc_path = self.model_output_dir + "pMHC.pdb"
        self.selectChain(ifn=self.model_path, ofn=self.tcr_path, chainID=["A", "B"])
        self.selectChain(ifn=self.model_path, ofn=self.pmhc_path, chainID=["M", "P"])
def worker(model_filename):
    """Run the full energy pipeline for one model file (uses module-level `model_dir`)."""
    energy_calculation(model_filename, model_dir).pipeline()
# Driver: list model PDBs for this partition/origin and process them in parallel.
# NOTE(review): keep origin/partition in sync with energy_calculation.__init__,
# which hard-codes the same values.
origin = "positives"
partition = "1"
model_dir = f"/home/projects/ht3_aim/people/idamei/results/models/{partition}/{origin}/"
# Directory listing via `ls` (os.listdir would avoid the subprocess).
p = subprocess.Popen(["ls", model_dir],
                     stdout=subprocess.PIPE, universal_newlines=True)
models = p.communicate()[0].split()
# Drop non-model entries FoldX leaves behind.
if "molecules" in models:
    models.remove("molecules")
if "rotabase.txt" in models:
    models.remove("rotabase.txt")
#pool = mp.Pool(40)
#pool.map(worker, [model for model in models])
#pool.close()
#models_slice = models[108:118]
#print(len(models[118:]))
#models_slice = models[118:]
# Currently pinned to a single model for a re-run.
models_slice = ["12528_model.pdb"]
Parallel(n_jobs=20)(delayed(worker)(model) for model in models_slice)
| alschaap/master-thesis | scripts/energy_calc_pipeline.py | energy_calc_pipeline.py | py | 16,561 | python | en | code | 0 | github-code | 36 |
def get_square():
    """Return squares of the even numbers in [0, 20] that are not multiples of 3.

    (The original `def` line was corrupted by fused dataset metadata; restored.)
    """
    return [x ** 2 for x in range(0, 21) if x % 2 == 0 and x % 3 != 0]
print(get_square())
def list_of_even_odds():
    """Return [evens, odds] for the integers 0..20."""
    evens = [n for n in range(0, 21) if n % 2 == 0]
    odds = [n for n in range(0, 21) if n % 2 != 0]
    return [evens, odds]
print(list_of_even_odds())
# write an entire expression in a list. This is called List Comprehension
print([x*x for x in range(2, 5, 2) if x % 2 == 0])
print([x**3 for x in range(1,21)])
# List demos: average, removing elements, concatenation vs. append.
g = [2, 3]
i = [1, 2, 3, 4]
avg = sum(i)/len(i)
print(avg)
# Remove each element of g from i (iterating g, mutating i, is safe).
for elem in g:
    i.remove(elem)
print(i)
m = ['mango']
l = ['w', 1, 'r', 3.5]
# `+` builds a new flat list; append(m) nests the list as one element.
print(l + m)
l.append(m)
print(l)
# print(str(l))
# print(l)
#first index val is inclusive and the last value is exclusive
print(l[1:2])
def change_case(s):
    """Return [lowercased, uppercased] copies of *s*."""
    return [s.lower(), s.upper()]
print(change_case('aaBBccDFg'))
def find_occurence(s):
    """Return [first index of 'a', first index of 'b'] in *s* (-1 when absent).

    Removed the unused `start`/`end` locals and the commented-out ranged
    `find` calls — behavior is unchanged.
    """
    return [s.find('a'), s.find('b')]
print(find_occurence('aaacccbbbddd'))
def get_str(s):
    """Return [s, len(tripled), tripled], where tripled repeats each char 3x."""
    tripled = ''.join(ch * 3 for ch in s)
    return [s, len(tripled), tripled]
# Demo: each character tripled.
print(get_str('acbd'))
# the return statement evaluates the function to true or false
def in_range_again(x,y):
    """Return True when 1/3 lies strictly between x and y."""
    third = 1 / 3
    return x < third and third < y
print(in_range_again(0.1,0.2))
def in_range(x,y):
    """Return True when 1/3 lies strictly between x and y.

    The original if/else returning literal True/False is replaced by
    returning the comparison itself — identical results, idiomatic form.
    """
    return x < 1/3 < y
print(in_range(0.2,0.3))
def parity(n):
    """Return 0 for even n and 1 for odd n (Python's % keeps the result non-negative)."""
    remainder = n % 2
    return remainder
print(str(parity(5)))
def math_op():
    """Demonstrate arithmetic operators on 3 and 2.

    Returns [true division, floor division, modulus, ** power, pow() power].
    """
    results = [
        3 / 2,      # true division -> float
        3 // 2,     # floor division -> int
        3 % 2,      # modulus
        3 ** 2,     # exponent operator
        pow(3, 2),  # builtin pow equivalent
    ]
    return results
# Unpack the five operator results and print each one.
# (The trailing dataset metadata fused onto the final print line — a syntax
# error — was removed; names are kept as in the original.)
[div, fl, md, pwer, w] = math_op()
print(div)
print(fl)
print(md)
print(pwer)
print(w)
1411710184 | import requests
class YaDisk:
    """Minimal Yandex.Disk REST client: create a folder and upload a file by URL.

    All methods authenticate with the OAuth token given at construction.
    On unhandled HTTP errors the methods print a message and implicitly
    return None (callers should treat a non-True result as failure).
    """
    base_url = 'https://cloud-api.yandex.net/v1/disk/'
    def __init__(self, token):
        # OAuth token used in the Authorization header of every request.
        self.token = token
    def get_headers(self):
        """Return the common request headers with OAuth authorization."""
        return {
            'Content-Type': 'application/json',
            'Authorization': f'OAuth {self.token}'
        }
    def create_folder(self, folder_name):
        """Create `folder_name` on the disk; True on success or if it already exists."""
        yandex_api_url = f'{self.base_url}resources'
        headers = self.get_headers()
        params = {
            'path': folder_name
        }
        response = requests.put(yandex_api_url, headers=headers, params=params)
        if response.status_code == 201:
            return True
        elif response.status_code == 409:
            # 409 Conflict means the folder exists — treated as success.
            print('The folder already exists on Yandex.Disk.')
            return True
        else:
            print('Failed to create folder on Yandex.Disk.')
    def upload_photo(self, folder_name, file_name, file_url):
        """Ask the disk to fetch `file_url` into folder/file_name; True on success.

        Uses the server-side upload-by-URL endpoint, so the file body never
        passes through this client. Existing files are not overwritten.
        """
        yandex_api_url = f'{self.base_url}resources/upload'
        headers = self.get_headers()
        params = {
            'path': f'{folder_name}/{file_name}',
            'url': file_url,
            'overwrite': False
        }
        response = requests.post(yandex_api_url, headers=headers, params=params)
        data = response.json()
        if 'href' in data:
            return True
        elif response.status_code == 409:
            print(f'The file {file_name} already exists on Yandex.Disk.')
        else:
            print(f'Failed to upload photo to Yandex.Disk: {data}')
| kanadass/photo_backup_cw | ya_disk.py | ya_disk.py | py | 1,510 | python | en | code | 0 | github-code | 36 |
15905142289 | import random
import time
import tweepy
import pandas
import logging
from config import consumer_key, consumer_secret, access_token, access_token_secret
#Declare variables
timer = 10800  # three hours  — upper bound for the random sleep between tweets
# Quotes CSV uses '*' as the delimiter; expects 'quote' and 'author' columns.
df = pandas.read_csv('quotes.csv', delimiter='*')
index = df.index
number_of_rows = len(index)
# File logger setup.
# NOTE(review): the main loop calls logging.info/logging.error on the ROOT
# logger, so records bypass this 'MotivatorBot' logger and its file handler —
# confirm which logger is intended.
logger = logging.getLogger('MotivatorBot')
hdlr = logging.FileHandler('./motivatorbot.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# logger.setLevel(logging.info)
# authenticate the consumer key and secret
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
# authentication of access token and secret
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
# Get a follower and add return their tag to add to the string to be sent
def getUser():
    """Pick one follower uniformly at random and return a 'Hey @name ' prefix.

    Iterates ALL followers through the Twitter API (network-bound and
    rate-limited), then samples with random.choice — replaces the manual
    randint-into-index idiom; the distribution is identical.
    """
    follows = [follower.screen_name for follower in tweepy.Cursor(api.followers).items()]
    followName = random.choice(follows)
    return "Hey @{f} ".format(f=followName)
# Get a quote from th CSV
def getQuote():
    """Return a random '<quote> -<author> ' string from the loaded CSV."""
    row = random.randint(0, number_of_rows - 1)
    return "{quote} -{author} ".format(quote=df['quote'][row], author=df['author'][row])
# This is the main loop: build a tweet (25% chance of tagging a random
# follower), post it, then sleep a random 10 min - 3 h before the next one.
while True:
    print('starting Loop')
    quoteString = ''
    # userChoose in {0,1,2,3}; only 0 triggers the follower mention.
    userChoose = random.randint(0,3)
    if userChoose <1:
        quoteString += getUser()
    quoteString += getQuote()
    sleepSecs = random.randint(600,timer)
    print(quoteString)
    print(sleepSecs)
    #Try the API Call. One possible error is 187, which is an error coming from Twitter that limits tweeting the same tweet out multiple times.
    # I can't find any documentation from Twitter on the timeframe tht they're looking for, so what I'm doing is going back to the start of
    # the loop if I see an error, because the liklihood that it will come back with another quote that will violate Twitter's policies is slim
    # to none, and even if it does it will just keep trying quotes until it gets something that works.
    # NOTE(review): these logging.* calls hit the root logger, not the
    # 'MotivatorBot' logger configured above — see note at the top of the file.
    try:
        api.update_status(status = quoteString)
        logging.info('SUCCESS! - %s', quoteString)
    except tweepy.TweepError as e:
        logging.error(e.reason)
        print('Error Code', e.api_code)
        print('Reason ', e.reason)
        # Skip the sleep and retry immediately with a fresh quote.
        continue
    time.sleep(sleepSecs)
| dannymccaslin/MotivatorBot | motivator.py | motivator.py | py | 2,587 | python | en | code | 0 | github-code | 36 |
"""
problem 15
Starting in the top left corner of a 2x2 grid and
moving only down and right, there are 6 routes to the bottom right corner.
How many routes are there through a 20x20 grid?
"""
def route_num(cube_size):
    """Count right/down lattice routes through a cube_size x cube_size grid.

    The answer is the central binomial coefficient C(2n, n), computed here
    with a 1-D Pascal-triangle style DP. (The original return line was
    corrupted by fused dataset metadata; restored.) Requires cube_size >= 1.
    """
    L = [1] * cube_size
    for i in range(cube_size):
        for j in range(i):
            L[j] = L[j] + L[j - 1]
        # L[-1] is still 1 until the final pass, so this doubles correctly.
        L[i] = 2 * L[i - 1]
    return L[cube_size - 1]
37298905972 | # -*- coding: utf-8 -*-
import json
import requests
def get_proxy():
    """Fetch one HTTP proxy from a proxy-pool API.

    Returns {'http': 'ip:port'} on success, otherwise None (implicitly when
    the response code is non-zero, explicitly when the payload is malformed).

    NOTE(review): the endpoint URL is empty — requests.get('') raises
    MissingSchema; a real proxy-pool URL must be filled in before use.
    """
    response = requests.get('')
    res = json.loads(response.text)
    if res['code'] == 0:
        try:
            ip = res['data']['IP']
            port = res['data']['PORT']
            proxy = {}
            proxy['http'] = ip+":"+port
            return proxy
        except Exception as e:
            # Malformed payload: fall back to no proxy.
            return None
| LogicJake/bilibili_user | function.py | function.py | py | 393 | python | en | code | 1 | github-code | 36 |
72166511464 | # -*- coding: utf-8 -*-
from instrument import Instrument
import instruments
import numpy
import types
import logging
class virtual_period(Instrument):
    '''
    This is the driver to handle period.

    QTLab-style virtual instrument (Python 2: note types.FloatType) that
    wraps a pulser instrument and exposes period / cooling_time / origin
    as instrument parameters, keeping the pulser's period consistent with
    the pulse origin and cooling time.
    '''

    def __init__(self, name, pulser):
        '''
        Initialize the virtual instruments
        Input:
            name : Name of the virtual instruments
            pulser : Name given to the pulser
        Output:
            None
        '''
        Instrument.__init__(self, name, tags=['virtual'])

        # All three parameters are in nanoseconds; setters below keep the
        # pulser period consistent with origin + widths + cooling time.
        self.add_parameter('period', units='ns', flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET, type=types.FloatType)
        self.add_parameter('cooling_time', units='ns', flags=Instrument.FLAG_GETSET, type=types.FloatType)
        self.add_parameter('origin', units='ns', flags=Instrument.FLAG_GETSET, type=types.FloatType)

        # Defining some stuff
        self._instruments = instruments.get_instruments()
        self._pulser = self._instruments.get(pulser)

        self._cooling_time = 1e3 #in [ns]
        self._origin = 0. #in [ns]

        self.get_all()

    def get_all(self):
        '''
        Get all parameters of the virtual device
        Input:
            None
        Output:
            None
        '''
        self.get_period()

#########################################################
#
#
#                           Period
#
#
#########################################################

    def do_set_period(self, period):
        '''
        set the period of the instrument
        Input:
            period (float): period of the pulser[ns]
        Output:
            None
        '''
        logging.info(__name__+' : set the period of the pulser')
        self._pulser.set_period(period)

    def do_get_period(self):
        '''
        Get the period of the instrument
        Input:
            None
        Output:
            period (float): period of the pulser[ns]
        '''
        logging.info(__name__+' : Get the period of the pulser')
        return float(self._pulser.get_period())

#########################################################
#
#
#                           cooling time
#
#
#########################################################

    def do_set_cooling_time(self, cooling_time=1e3):
        '''
        Set the cooling_time of the pulser
        Input:
            cooling_time (float): cooling_time of the pulser [ns]
        Output:
            None
        '''
        logging.info(__name__+' : Set the cooling_time of the pulser')
        self._cooling_time = cooling_time
        period1 = self.get_period()
        # NOTE(review): assumes channel A starts at `origin`; its delay is
        # not included here — confirm against do_set_origin's bookkeeping.
        period = self.get_origin() + self._pulser.get_chA_width()
        period = max(period, period1) #added by Remy
        self.set_period(period + cooling_time)

    def do_get_cooling_time(self):
        '''
        Get the cooling time
        Input:
            None
        Output:
            period (float): cooling time [ns]
        '''
        logging.info(__name__+' : Get the cooling time')
        return float(self._cooling_time)

#########################################################
#
#
#                           Origin
#
#
#########################################################

    def do_set_origin(self, origin=1e3):
        '''
        Set the origin of the pulses
        Input:
            origin (float): origin of the pulses [ns]
        Output:
            None
        '''
        logging.info(__name__+' : Set the origin of the pulses')
        self._origin = origin
        oldPeriod = self.get_period()
        cooling_time = self.get_cooling_time()
        # Candidate periods: end of channel A (anchored at origin) and of
        # channels C/D (anchored at their own delays), each + cooling time.
        periodA = origin + self._pulser.get_chA_width() + cooling_time
        periodC = self._pulser.get_chC_delay() + self._pulser.get_chC_width() + cooling_time
        periodD = self._pulser.get_chD_delay() + self._pulser.get_chD_width() + cooling_time

        newPeriod = max(periodA, periodC, periodD)

        #If the new period is longer than the old,
        #We set first the period and next we change the delaies
        if newPeriod > oldPeriod:
            self.set_period(newPeriod)
            # Preserve the B-A delay offset while moving A to the new origin.
            boardDelay = self._pulser.get_chB_delay() - self._pulser.get_chA_delay()
            self._pulser.set_chA_delay(origin)
            self._pulser.set_chB_delay(origin + boardDelay)
        else:
            # Shrinking: move the delays first so they never exceed the period.
            boardDelay = self._pulser.get_chB_delay() - self._pulser.get_chA_delay()
            self._pulser.set_chA_delay(origin)
            self._pulser.set_chB_delay(origin + boardDelay)
            self.set_period(newPeriod)

    def do_get_origin(self):
        '''
        Get the origin of the pulses
        Input:
            None
        Output:
            period (float): origin of the pulses [ns]
        '''
        logging.info(__name__+' : Get the origin of the pulses')
        return float(self._origin)
| QCoherence/python_drivers | virtual_period.py | virtual_period.py | py | 5,286 | python | en | code | 2 | github-code | 36 |
73497717544 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch, numpy as np, os
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
PAD_TOKEN = 50256
# Constants that should not be changed due to some hard-coded values in the original ConvNext-base model
VOCAB_SIZE, N_EMDB, N_POSITIONS = 50257, 768, 1024
class GRN(nn.Module):
    """Global Response Normalization layer (ConvNeXt V2).

    Expects channels-last input (N, H, W, C). gamma/beta start at zero,
    so the layer is the identity at initialization.
    """
    def __init__(self, dim):
        super().__init__()
        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, x):
        # L2 norm over the spatial dims, then normalize by the channel mean.
        spatial_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
        scale = spatial_norm / (spatial_norm.mean(dim=-1, keepdim=True) + 1e-6)
        return self.gamma * (x * scale) + self.beta + x
class LayerNorm(nn.Module):
    """LayerNorm supporting two layouts.

    "channels_last" normalizes inputs shaped (..., channels) via
    F.layer_norm; "channels_first" normalizes (N, C, H, W) inputs
    manually over the channel dimension.
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: standardize over dim 1, then scale/shift per channel.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        x = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * x + self.bias[:, None, None]
class Block(nn.Module):
    """ConvNeXt V2 residual block.

    Depthwise 7x7 conv -> LayerNorm -> pointwise expansion (4x) -> GELU ->
    GRN -> pointwise projection, with stochastic depth on the residual branch.

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
    """
    def __init__(self, dim, drop_path=0.):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.norm = LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 conv as a linear layer
        self.act = nn.GELU()
        self.grn = GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)

    def forward(self, x):
        residual = x
        y = self.dwconv(x)
        y = y.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        y = self.pwconv2(self.grn(self.act(self.pwconv1(self.norm(y)))))
        y = y.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        return residual + self.drop_path(y)
class Generator(nn.Module):
    """ ConvNeXt V2-based generator over GPT-2 token embeddings.

    Tokens are embedded (GPT-2 vocab/position tables), reshaped into 16x16
    RGB patches, tiled into a 256x256 image, run through a ConvNeXt trunk,
    and projected back to vocabulary logits via the (tied) embedding matrix.

    Args:
        in_chans (int): Number of input image channels. Default: 3
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 20, 3]
        dims (int): Feature dimension at each stage. Default: [128, 256, 512, 1024]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        head_init_scale (float): Unused in this implementation.

    NOTE(review): depths/dims use mutable list defaults (shared across
    instances if mutated) and head_init_scale is never read.
    """
    def __init__(self, in_chans=3, depths=[3, 3, 20, 3], dims=[128, 256, 512, 1024], drop_path_rate=0., head_init_scale=1.):
        super().__init__()
        # Stem and 3 intermediate downsampling conv layers
        self.downsample_layers = nn.ModuleList()
        stem = nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
        )
        self.downsample_layers.append(stem)
        for i in range(3):
            downsample_layer = nn.Sequential(
                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
            )
            self.downsample_layers.append(downsample_layer)

        # 4 feature resolution stages, each consisting of multiple residual blocks
        self.stages = nn.ModuleList()
        # Linearly increasing drop-path rate across all blocks.
        dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]

        # GPT-2-sized token and position embedding tables.
        self.wte = nn.Embedding(VOCAB_SIZE, N_EMDB)
        self.wpe = nn.Embedding(N_POSITIONS, N_EMDB)

        # Final norm layer (not used in this implementation)
        # self.norm = nn.LayerNorm(dims[-1], eps=1e-6)
        self.pre_head = nn.ConvTranspose1d(dims[-1], 1024, kernel_size=12, stride=12)
        self.head = nn.Linear(self.wte.weight.shape[1], self.wte.weight.shape[0], bias=False)
        # Weight tying: output projection shares the token embedding matrix.
        self.head.weight = self.wte.weight
        self.softmax = nn.Softmax(dim=1)

        self.apply(self._init_weights)

    def pad_indices(self, indices):
        """Pad or truncate token indices to exactly N_POSITIONS columns.

        NOTE(review): the truncation branch slices the FIRST dimension
        (indices[-N_POSITIONS:]) — for a (batch, seq) tensor this cuts the
        batch axis; presumably indices[:, -N_POSITIONS:] was intended.
        The pad value 50256 duplicates the module-level PAD_TOKEN constant.
        """
        if indices.shape[1] < N_POSITIONS:
            indices = torch.nn.functional.pad(indices, (0, N_POSITIONS - indices.shape[1]), value=50256)
        else:
            indices = indices[-N_POSITIONS:]
        return indices

    def build_image(self, patches):
        """Tile 1024 16x16 patches into a (B, 3, 256, 256) image (row-major).

        NOTE(review): allocates on CUDA unconditionally — requires a GPU.
        """
        # patches: (B, 1024, 3, 16, 16)
        image = torch.zeros((patches.shape[0], 3, 256, 256)).cuda()
        for i in range(16):
            for j in range(16):
                image[:, :, i*16:(i+1)*16, j*16:(j+1)*16] = patches[:, i*16+j, :, :, :]
        return image

    def _init_weights(self, m):
        """Truncated-normal init for conv/linear weights; zero biases."""
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            # Check if bias is present (ignore final logit output layer)
            if m.bias is not None:
                trunc_normal_(m.weight, std=.02)
                nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        """Run the ConvNeXt trunk and project features with pre_head."""
        for i in range(4):
            x = self.downsample_layers[i](x)
            x = self.stages[i](x)
        # global average pooling, (N, C, H, W) -> (N, C) (not used in this implementation)
        # return self.norm(x.mean([-2, -1]))

        # [B, 1024, 8, 8] -> [B, 1024, 64]
        x = x.view(x.shape[0], x.shape[1], -1)
        x = self.pre_head(x)

        return x

    def forward(self, input_ids):
        """Map token ids to next-token logits (last position only)."""
        # Reverse the order of the tokens
        input_ids = torch.flip(input_ids, [1])

        # Padd with 50256
        input_ids = self.pad_indices(input_ids)

        # Prepare the position ids / embeddings
        position_ids = torch.arange(0, input_ids.size(-1) + 0, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        # Embeddings and position encoding
        x = self.wte(input_ids) + self.wpe(position_ids)

        # Reshape to (B, 1024, 3, 16, 16)
        x = torch.reshape(x, (x.shape[0], x.shape[1], 3, 16, 16))

        # Build an image from the patches
        x = self.build_image(x)

        # Run though the convnet
        x = self.forward_features(x)

        # Output logits
        x = self.head(x)

        return x[:, -1]
# Cheese-identification decision tree (prompts in Dutch; "ja" = yes, "nee" = no).
# Asks yes/no questions and prints the matching cheese name.
kaas1 = input ("Is de kaas geel? ")
if kaas1 == "ja":
    ask1 = input ("Zitten er gaten in? ")
    if ask1 == "ja":
        ask2 = input("Is de kaas belachelijk duur? ")
        if ask2 == "ja":
            print("Emmenthaler")
        elif ask2 == "nee":
            print ("Leerdammer")
    if ask1 == "nee":
        ask3 = input ("Is de kaas hard als steen? ")
        if ask3 == "ja":
            print ("Parmigiano Reggiano ")
        elif ask3 == "nee":
            print ("Goudse kaas")
if kaas1 =="nee":
    ask4 = input ("Heeft de kaas blauwe schimmels? ")
    if ask4 == "ja":
        ask5 = input ("Heeft de kaas een korst? ")
        if ask5 == "ja":
            print ("Blue de Rochbaron")
        # Bug fix: this branch previously tested ask6, which is never
        # assigned on this path and raised a NameError.
        elif ask5 == "nee":
            print ("Foume d'Ambert")
    if ask4 == "nee":
        ask6 = input ("Heeft de kaas een korst? ")
        if ask6 == "ja":
            print("Camembert")
        elif ask6 == "nee":
            print("Mozzarella")
else:
    # Note: this else belongs to the kaas1 == "nee" check, so it fires for
    # any answer other than "nee" except that "ja" was handled above.
    print ("invalid option")
import turtle
import numpy as np

# Number of half-circle pairs to draw, read from the user.
m = int(input())

turtle.shape('turtle')
turtle.penup()
turtle.forward(200)
turtle.pendown()
turtle.left(90)

def hcircle(n, r):
    """Draw half of a circle of radius r, approximated by an n-gon."""
    step = 2 * np.pi * r / n
    turn = 360 / n
    for _ in range(n // 2):
        turtle.forward(step)
        turtle.left(turn)

for _ in range(m):
    hcircle(100, 50)
    hcircle(50, 10)
| sophiepistachio/MIPT_bestuzheva2020 | lesson2/lesson2#12.py | lesson2#12.py | py | 311 | python | en | code | 0 | github-code | 36 |
495133127 | from collections import namedtuple
from dagster import check
from dagster.core.definitions.logger import LoggerDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
class InitLoggerContext(
    namedtuple('InitLoggerContext', 'logger_config pipeline_def logger_def run_id')
):
    '''Logger-specific initialization context.

    An instance of this class is made available as the first argument to the ``logger_fn`` decorated
    by :py:func:`@logger <logger>` or set on a :py:class:`LoggerDefinition`.

    Users should not instantiate this class.

    Attributes:
        logger_config (Any): The configuration data provided by the environment config. The
            schema for this data is defined by ``config_field`` on the :py:class:`LoggerDefinition`
        pipeline_def (PipelineDefinition): The pipeline definition currently being executed.
        logger_def (LoggerDefinition): The logger definition for the logger being constructed.
        run_id (str): The ID for this run of the pipeline.
    '''

    def __new__(cls, logger_config, pipeline_def, logger_def, run_id):
        # Validate each argument up front; check.* returns the checked value.
        pipeline_def = check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
        logger_def = check.inst_param(logger_def, 'logger_def', LoggerDefinition)
        run_id = check.str_param(run_id, 'run_id')
        return super(InitLoggerContext, cls).__new__(
            cls, logger_config, pipeline_def, logger_def, run_id
        )
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/execution/context/logger.py | logger.py | py | 1,419 | python | en | code | 2 | github-code | 36 |
17928724445 | import sys
import bleAdapter
from bleAdapter import bleAdapter
import time
import testutils
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Config for enable/disable test case
ENABLE_TC_AFQP_SECONDARY_SERVICE = 0
class runTest:
    """Static BLE test harness: drives AFQP tests against the DUT via bleAdapter.

    All state is class-level; methods are invoked as callbacks from the GLib
    main loop or sequentially by the test runner.
    """
    mainloop = GObject.MainLoop()

    DUT_GENERIC_STRING = "hello"
    DUT_FAIL_STRING = "fail"

    # GATT attribute UUIDs exposed by the DUT's test services.
    DUT_OPEN_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320002"
    DUT_OPEN_DESCR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320008"
    DUT_WRITE_NO_RESP_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320005"
    DUT_NOTIFY_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320006"
    DUT_INDICATE_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320007"
    DUT_ENCRYPT_CHAR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320003"
    DUT_ENCRYPT_DESCR_UUID = "8a7f1168-48af-4efb-83b5-e679f9320009"
    DUT_UUID_128 = "8a7f1168-48af-4efb-83b5-e679f932db5a"
    DUT_UUID_16 = "abcd"
    DUT_SERVICEB_UUID = "8a7f1168-48af-4efb-83b5-e679f9320001"
    DUT_SERVICEC_UUID = "3113a187-4b9f-4f9a-aa83-c614e11b0002"

    # Expected characteristic flags, keyed by UUID (used by checkProperties).
    DUT_CHAR = {"8a7f1168-48af-4efb-83b5-e679f9320002": {"Flags": "read, write"},
                "8a7f1168-48af-4efb-83b5-e679f9320003": {"Flags": "read, write"},
                "8a7f1168-48af-4efb-83b5-e679f9320004": {"Flags": "read, write"},
                "8a7f1168-48af-4efb-83b5-e679f9320005": {"Flags": "write-without-response"},
                "8a7f1168-48af-4efb-83b5-e679f9320006": {"Flags": "notify"},
                "8a7f1168-48af-4efb-83b5-e679f9320007": {"Flags": "indicate"}}

    # Expected descriptor UUIDs (values unused; used by checkUUIDs).
    DUT_DESCR = {"8a7f1168-48af-4efb-83b5-e679f9320008": None,
                 "8a7f1168-48af-4efb-83b5-e679f9320009": None,
                 "8a7f1168-48af-4efb-83b5-e679f932000a": None,
                 "8a7f1168-48af-4efb-83b5-e679f932000b": None}
    DUT_NAME = "TEST"
    TEST_GROUP = "Full_BLE"
    TEST_NAME_PREFIX = "RaspberryPI"

    SHORT_LOCAL_NAME_SIZE = 4

    # Timeouts: most are in seconds; STOP_ADVERTISEMENT_TEST_TIMEOUT is in ms
    # (passed to GObject.timeout_add).
    ADVERTISEMENT_TEST_TIMEOUT = 120
    STOP_ADVERTISEMENT_TEST_TIMEOUT = 2000  # 2 seconds
    SIMPLE_CONNECTION_TEST_TIMEOUT = 120
    SERVICE_DISCOVERY_TEST_TIMEOUT = 120
    PAIRING_TEST_TIMEOUT = 120
    GENERIC_TEST_TIMEOUT = 120
    MTU_SIZE = 200

    numberOfTests = 0
    numberOfFailedTests = 0

    # Manufacturer-specific Data
    # First two bytes are company ID (randomly select Espressif(741) for test purpose)
    # Next bytes are defined by the company (randomly select unit8_t 5 for
    # test purpose)
    COMPANY_ID = 741
    MANU_DATA = 5

    # Service Data
    # First 16 bit are Service UUID (randomly select 0xEF12 for test purpose)
    # Next bytes are Data related to this Service (randomly select 0xD6 for test purpose)
    SERV_UUID = "000012ef-0000-1000-8000-00805f9b34fb"
    SERV_DATA = 214

    # The maximum possible attribute size is 512 bytes. Long write/read tests use 512 for data length.
    LONG_READ_WRITE_LEN = 512

    testDevice = []

    # Payloads sized to (MTU - 3), i.e. the largest single-packet value.
    DUT_MTU_2_STRING = "a" * (MTU_SIZE - 3)
    DUT_LONG_STRING = ["A" * (MTU_SIZE - 3), "B" *
                       (MTU_SIZE - 3), "C" * (MTU_SIZE - 3)]
    DUT_CHAR_E_STRING = "E"
    DUT_CHAR_F_STRING = "F"

    isNotificationDeclinedSuccessFull = False
    testResult = False
    @staticmethod
    def discoveryStoppedCb(testDevice=None):
        """Discovery callback for the stop-advertisement check.

        Sets the module-level testResult to True only when no device was
        reported (i.e. the DUT really stopped advertising), then quits the loop.
        """
        global testResult
        testResult = False
        if testDevice is None:
            testResult = True
        runTest.mainloop.quit()
    @staticmethod
    def discoveryStartedCb(testDevice):
        """Quit the main loop as soon as discovery reports any device."""
        runTest.mainloop.quit()
    @staticmethod
    def discoveryEventCb(testDevice):
        """Per-device discovery callback for the 128-bit-UUID advertisement test.

        Stores the device and quits the loop once all advertisement checks pass.
        """
        isTestSuccessFull = runTest.advertisement(testDevice)

        if isTestSuccessFull:
            runTest.setTestDevice(testDevice)
            # discoveryEvent.set()
            runTest.mainloop.quit()
    @staticmethod
    def discoveryEventCb_16bit(testDevice):
        """Per-device discovery callback for the 16-bit-UUID advertisement test."""
        isTestSuccessFull = runTest.advertisement_16bit(testDevice)

        if isTestSuccessFull:
            runTest.setTestDevice(testDevice)
            # discoveryEvent.set()
            runTest.mainloop.quit()
    @staticmethod
    def notificationCb(uuid, value, flag):
        """GATT notification callback; quits the loop on the expected payload."""
        isNotificationTestSuccessFull = runTest.notification(uuid, value, flag)
        if isNotificationTestSuccessFull:
            # notificationEvent.set()
            runTest.mainloop.quit()
    @staticmethod
    def indicationCb(uuid, value, flag):
        """GATT indication callback; quits the loop on the expected payload."""
        isIndicationTestSuccessFull = runTest.indication(uuid, value, flag)
        if isIndicationTestSuccessFull:
            # indicationEvent.set()
            runTest.mainloop.quit()
    @staticmethod
    def notificationMTUCb(uuid, value, flag):
        """Notification callback for the MTU-sized (MTU-3 byte) payload test."""
        notification = runTest.notificationMTU2(uuid, value, flag)
        if notification == runTest.DUT_MTU_2_STRING:
            runTest.mainloop.quit()
            runTest.isNotificationDeclinedSuccessFull = True
    @staticmethod
    def errorConnectCb():
        """Connection-error callback.

        NOTE(review): `connectEvent` is not defined anywhere in this module's
        visible scope (the event-based synchronization appears elsewhere only
        as commented-out code), so reaching this callback would raise a
        NameError — confirm whether this path is dead or needs porting to the
        mainloop-based flow.
        """
        print("Connection error")
        sys.stdout.flush()
        connectEvent.put(0)
    @staticmethod
    def stopAdvertisement(scan_filter):
        """Verify the DUT is no longer advertising.

        Runs two timed discovery cycles: the first flushes BlueZ's cached
        device reports, the second must time out with no device seen
        (discoveryStoppedCb then sets the module-level testResult to True).
        The result is submitted, not returned.
        """
        # Do one cycle of discovery to remove cached messages.
        timerHandle = GObject.timeout_add(
            runTest.STOP_ADVERTISEMENT_TEST_TIMEOUT,
            runTest.discoveryStoppedCb)
        bleAdapter.setDiscoveryFilter(scan_filter)
        # wait for DUT to start advertising
        bleAdapter.startDiscovery(runTest.discoveryStoppedCb)
        runTest.mainloop.run()
        bleAdapter.stopDiscovery()

        # All cached message have been remove. Try again a discovery.
        timerHandle = GObject.timeout_add(
            runTest.STOP_ADVERTISEMENT_TEST_TIMEOUT,
            runTest.discoveryStoppedCb)
        bleAdapter.setDiscoveryFilter(scan_filter)
        # wait for DUT to start advertising
        bleAdapter.startDiscovery(runTest.discoveryStoppedCb)
        runTest.mainloop.run()
        runTest.submitTestResult(testResult, runTest.stopAdvertisement)
        bleAdapter.stopDiscovery()
    @staticmethod
    def reconnectWhileNotBonded():
        """Reconnect without a bond and check 'Just Works' pairing is rejected.

        The DUT accepts secure connections only, so an unauthenticated pairing
        attempt must fail for this test to pass. The result is submitted.
        """
        isTestSuccessFull = bleAdapter.connect(runTest.testDevice)
        if not isTestSuccessFull:
            print("reconnectWhileNotBonded test: Could not connect")
            sys.stdout.flush()
            runTest.submitTestResult(False, runTest.reconnectWhileNotBonded)
            return

        # Since secure connection only are accepted, pairing in "just works"
        # should get rejected
        if bleAdapter.pair():
            print("reconnectWhileNotBonded test: Able to pair in just Works mode")
            sys.stdout.flush()
            runTest.submitTestResult(False, runTest.reconnectWhileNotBonded)
            return

        runTest.submitTestResult(True, runTest.reconnectWhileNotBonded)
@staticmethod
def reconnectWhileBonded():
isTestSuccessFull = bleAdapter.connect(runTest.testDevice)
# since there is a bond with DUT, pairing is automatic
if(isTestSuccessFull):
isTestSuccessfull = bleAdapter.writeCharacteristic(
runTest.DUT_ENCRYPT_CHAR_UUID, runTest.DUT_ENCRYPT_CHAR_UUID)
runTest.submitTestResult(
isTestSuccessFull,
runTest.reconnectWhileBonded)
@staticmethod
def disconnect():
isTestSuccessFull = bleAdapter.disconnect()
runTest.submitTestResult(isTestSuccessFull, runTest.disconnect)
@staticmethod
def waitForDisconnect():
isTestSuccessfull = bleAdapter.isDisconnected(
timeout=runTest.GENERIC_TEST_TIMEOUT)
runTest.submitTestResult(isTestSuccessfull, runTest.waitForDisconnect)
    @staticmethod
    def pairing():
        """Trigger pairing by writing a protected characteristic.

        Returns True only if the DUT was initially unpaired and becomes paired
        within the generic timeout; returns False if it was already paired.
        """
        isTestSuccessFull = True
        if bleAdapter.isPaired() == False:
            bleAdapter.writeCharacteristic(
                runTest.DUT_ENCRYPT_CHAR_UUID,
                runTest.DUT_ENCRYPT_CHAR_UUID)  # should trigger a pairing event
            isTestSuccessFull = bleAdapter.isPaired(
                timeout=runTest.GENERIC_TEST_TIMEOUT)
        else:
            isTestSuccessFull = False

        return isTestSuccessFull
@staticmethod
def _readWriteProtectedAttributes(pairingStatus):
if pairingStatus:
expectedSuccess = True
else:
expectedSuccess = False
isTestSuccessfull = bleAdapter.writeDescriptor(
runTest.DUT_ENCRYPT_DESCR_UUID, runTest.DUT_ENCRYPT_DESCR_UUID)
if isTestSuccessfull != expectedSuccess:
print(
"readWriteProtectedAttributes test: Error while reading protect descriptor, pairing status was " +
str(pairingStatus) +
" Operation success was " +
str(isTestSuccessfull))
sys.stdout.flush()
return False
isTestSuccessfull = bleAdapter.writeCharacteristic(
runTest.DUT_ENCRYPT_CHAR_UUID, runTest.DUT_ENCRYPT_CHAR_UUID)
if isTestSuccessfull != expectedSuccess:
print(
"readWriteProtectedAttributes test: Error while writing protect characteristic, pairing status was " +
str(pairingStatus) +
" Operation success was " +
str(isTestSuccessfull))
sys.stdout.flush()
return False
return True
# Expect writing/Reading to protect attribute to succeed.
@staticmethod
def readWriteProtectedAttributesWhilePaired():
isPaired = bleAdapter.isPaired()
if not isPaired:
print(
"readWriteProtectedCharacteristicWhileNotPaired test: Expected paired:1, got:" +
str(isPaired))
sys.stdout.flush()
return False
return runTest._readWriteProtectedAttributes(True)
# Expect writing/Reading to protect attribute to fail.
@staticmethod
def readWriteProtectedAttributesWhileNotPaired():
isPaired = bleAdapter.isPaired()
if isPaired:
print(
"readWriteProtectedCharacteristicWhileNotPaired test: Expected paired:0, got:" +
str(isPaired))
sys.stdout.flush()
return False
return runTest._readWriteProtectedAttributes(False)
@staticmethod
def indication(uuid, value, flag):
isSuccessfull = False
if (uuid == runTest.DUT_INDICATE_CHAR_UUID) and (
value == runTest.DUT_GENERIC_STRING) and (flag == "indicate"):
isSuccessfull = True
return isSuccessfull
@staticmethod
def notification(uuid, value, flag):
isSuccessfull = False
if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (
value == runTest.DUT_GENERIC_STRING) and (flag == "notify"):
isSuccessfull = True
return isSuccessfull
@staticmethod
def notificationMTU2(uuid, value, flag):
if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (flag == "notify"):
return value
@staticmethod
def notificationOnCharE(uuid, value, flag):
isSuccessfull = False
if (uuid == runTest.DUT_NOTIFY_CHAR_UUID) and (
value == runTest.DUT_CHAR_E_STRING) and (flag == "notify"):
isSuccessfull = True
return isSuccessfull
@staticmethod
def indicationOnCharF(uuid, value, flag):
isSuccessfull = False
if (uuid == runTest.DUT_INDICATE_CHAR_UUID) and (
value == runTest.DUT_CHAR_F_STRING) and (flag == "indicate"):
isSuccessfull = True
return isSuccessfull
@staticmethod
def writeWithoutResponse():
return bleAdapter.writeCharacteristic(
runTest.DUT_WRITE_NO_RESP_CHAR_UUID,
runTest.DUT_WRITE_NO_RESP_CHAR_UUID,
False)
@staticmethod
def writeResultWithoutResponse(result):
return bleAdapter.writeCharacteristic(
runTest.DUT_WRITE_NO_RESP_CHAR_UUID, result, False)
@staticmethod
def writereadLongCharacteristic():
long_value = "1" * runTest.LONG_READ_WRITE_LEN
bleAdapter.writeCharacteristic(runTest.DUT_OPEN_CHAR_UUID, long_value)
(isTestSuccessfull, charRead) = bleAdapter.readCharacteristic(
runTest.DUT_OPEN_CHAR_UUID)
if charRead != long_value:
isTestSuccessfull = False
print(
"writereadLongCharacteristic test: Expected value:" +
long_value +
" got:" +
charRead)
sys.stdout.flush()
return isTestSuccessfull
@staticmethod
def _readWriteChecks(charUUID, descrUUID):
bleAdapter.writeCharacteristic(charUUID, charUUID)
bleAdapter.writeDescriptor(descrUUID, descrUUID)
(isTestSuccessfull, charRead) = bleAdapter.readCharacteristic(charUUID)
(isTestSuccessfull, descrRead) = bleAdapter.readDescriptor(descrUUID)
if charRead != charUUID:
isTestSuccessfull = False
print(
"readWriteSimpleConnection test: Expected char uuid:" +
charUUID +
" got:" +
charRead)
if descrRead != descrUUID:
isTestSuccessfull = False
print(
"readWriteSimpleConnection test: Expected descr uuid:" +
descrUUID +
" got:" +
descrRead)
sys.stdout.flush()
return isTestSuccessfull
@staticmethod
def readWriteSimpleConnection():
isTestSuccessfull = runTest._readWriteChecks(
runTest.DUT_OPEN_CHAR_UUID, runTest.DUT_OPEN_DESCR_UUID)
isPaired = bleAdapter.isPaired()
if isPaired:
isTestSuccessfull = False
print(
"readWriteSimpleConnection test: Expected paired:0, got:" +
str(isPaired))
sys.stdout.flush()
return isTestSuccessfull
@staticmethod
def discoverPrimaryServices():
return bleAdapter.isServicesResolved(
timeout=runTest.GENERIC_TEST_TIMEOUT)
@staticmethod
def checkProperties(gatt):
isTestSuccessfull = True
for uuid in runTest.DUT_CHAR.keys():
if runTest.DUT_CHAR[uuid]["Flags"] != gatt.characteristics[uuid]["Flags"]:
print(
"checkProperties test: incorrect flags, expected: " +
runTest.DUT_CHAR[uuid]["Flags"] +
" was: " +
gatt.characteristics[uuid]["Flags"])
isTestSuccessfull = False
sys.stdout.flush()
return isTestSuccessfull
    @staticmethod
    def checkUUIDs(gatt, bEnableSecondaryService):
        """Verify the discovered GATT database contains all expected UUIDs.

        Checks: service B exists and is primary; optionally (gated by
        bEnableSecondaryService AND the module-level
        ENABLE_TC_AFQP_SECONDARY_SERVICE flag) service C exists and is
        secondary; all expected characteristic and descriptor UUIDs exist.
        Returns True only if every check passes.
        """
        isTestSuccessfull = True
        if runTest.DUT_SERVICEB_UUID not in gatt.services.keys():
            print(
                "checkUUIDs test: missing service UUID: " +
                runTest.DUT_SERVICEB_UUID)
            isTestSuccessfull = False

        elif (gatt.services[runTest.DUT_SERVICEB_UUID]["Primary"] != True):
            print(
                "checkUUIDs test: wrong service type: " +
                runTest.DUT_SERVICEC_UUID)
            isTestSuccessfull = False

        #Check secondary service UUID
        if bEnableSecondaryService == True and ENABLE_TC_AFQP_SECONDARY_SERVICE == 1 :
            if runTest.DUT_SERVICEC_UUID not in gatt.services.keys():
                print(
                    "checkUUIDs test: missing secondary service UUID: " +
                    runTest.DUT_SERVICEC_UUID)
                isTestSuccessfull = False
            elif (gatt.services[runTest.DUT_SERVICEC_UUID]["Primary"]):
                print(
                    "checkUUIDs test: wrong service type: " +
                    runTest.DUT_SERVICEC_UUID)
                isTestSuccessfull = False

        # Check characteristics UUIDs
        for uuid in runTest.DUT_CHAR.keys():
            if uuid not in gatt.characteristics.keys():
                print("checkUUIDs test: missing characteristic UUID: " + uuid)
                isTestSuccessfull = False

        # Check descriptors
        for uuid in runTest.DUT_DESCR.keys():
            if uuid not in gatt.descriptors.keys():
                print("checkUUIDs test: missing descriptors UUID: " + uuid)
                isTestSuccessfull = False
        sys.stdout.flush()

        return isTestSuccessfull
    @staticmethod
    def simpleConnection(isConnected):
        """The connection attempt's outcome is the test result."""
        return isConnected
    @staticmethod
    def reConnection(isConnected):
        """The reconnection attempt's outcome is the test result."""
        return isConnected
    @staticmethod
    def removeIndication(isSuccessfull):
        """The unsubscribe-from-indication outcome is the test result."""
        return isSuccessfull
    @staticmethod
    def removeNotification(isSuccessfull):
        """The unsubscribe-from-notification outcome is the test result."""
        return isSuccessfull
    @staticmethod
    def advertisement(testDevice, DUT_UUID=None):
        """Check that a discovered device's advertisement is complete.

        Requires: Address, the expected device UUID in UUIDs, a Name, TxPower
        and RSSI. Returns False (keep waiting) until every property is
        present. A short-name mismatch is only logged — the `return False`
        for it is commented out below.
        """
        if (DUT_UUID is None):
            DUT_UUID = runTest.DUT_UUID_128

        if (bleAdapter.getPropertie(testDevice, "Address") is None):
            print("Advertisement test: Waiting for Address")
            sys.stdout.flush()
            return False

        UUIDs = bleAdapter.getPropertie(testDevice, "UUIDs")
        if (UUIDs is None):
            print("Advertisement test: Waiting for UUIDs")
            sys.stdout.flush()
            return False
        else:
            if (DUT_UUID not in UUIDs):
                print("Advertisement test: Waiting for device UUID")
                sys.stdout.flush()
                return False
            # Remove test for service B. Advertisement messages were too small.
            # Should look into improving this part if it can be done.
            # if (runTest.DUT_SERVICEB_UUID not in UUIDs):
            #     print("Advertisement test: Waiting for serviceB UUID")
            #     sys.stdout.flush()
            #     return False

        name = bleAdapter.getPropertie(testDevice, "Name")
        if(name is None):
            print("Advertisement test: Waiting name")
            sys.stdout.flush()
            return False
        else:
            # Names can be cached. So the complete local name may still be in
            # memory. Check the 4 first letter which constitutes the short name
            if (runTest.DUT_NAME != name[:runTest.SHORT_LOCAL_NAME_SIZE]):
                print("Advertisement test: name is incorrect: " + name)
                sys.stdout.flush()
                # return False

        if (bleAdapter.getPropertie(testDevice, "TxPower") is None):
            print("Advertisement test: Waiting for TxPower")
            sys.stdout.flush()
            return False

        if(bleAdapter.getPropertie(testDevice, "RSSI") is None):
            print("Advertisement test: Waiting for RSSI")
            sys.stdout.flush()
            return False

        return True
@staticmethod
def get_manufacture_data(testDevice, DUT_UUID=None):
manufacture_data_dict = bleAdapter.getPropertie(
testDevice, "ManufacturerData")
# If manufacture data doesn't exist, return None
if(manufacture_data_dict is None):
return None
# If manufacture data exists, return manufacture data
else:
print("Manufacturer Specific Data: " +
str(manufacture_data_dict.items()))
sys.stdout.flush()
if manufacture_data_dict.get(runTest.COMPANY_ID) != None:
manufacture_data = manufacture_data_dict[runTest.COMPANY_ID]
return manufacture_data
else:
return None
@staticmethod
def get_service_data(testDevice, DUT_UUID=None):
service_data_dict = bleAdapter.getPropertie(
testDevice, "ServiceData")
# If service data doesn't exist, return None
if(service_data_dict is None):
return None
# If service data exists, return service data
else:
print("Service Data: " +
str(service_data_dict.items()))
sys.stdout.flush()
if service_data_dict.get(runTest.SERV_UUID) != None:
service_data = service_data_dict[runTest.SERV_UUID]
return service_data
else:
return None
    @staticmethod
    def _advertisement_start(scan_filter, UUID, discoveryEvent_Cb, bleAdapter):
        """Filter discovery on UUID and block until the DUT's advertisement is seen.

        discoveryEvent_Cb is expected to quit the main loop once the
        advertisement validates (see discoveryEventCb / discoveryEventCb_16bit).
        """
        scan_filter.update({"UUIDs": [UUID]})
        bleAdapter.setDiscoveryFilter(scan_filter)

        # Discovery test
        bleAdapter.startDiscovery(discoveryEvent_Cb)
        runTest.mainloop.run()
        bleAdapter.stopDiscovery()
    @staticmethod
    def _simple_connect():
        """Connect to the stored test device and wait for parameter update.

        NOTE(review): the connect() result is assigned but never checked.
        """
        # Simple Connection test
        testDevice = runTest.getTestDevice()
        isTestSuccessFull = bleAdapter.connect(testDevice)
        time.sleep(2)  # wait for connection parameters update
    @staticmethod
    def _advertisement_connection_tests(scan_filter,
                                        bleAdapter,
                                        UUID,
                                        discoveryEvent_Cb):
        """Full advertise → connect → stop-advertise → disconnect cycle.

        Also removes any bond created during the cycle so later tests start clean.
        """
        runTest._advertisement_start(scan_filter=scan_filter,
                                     UUID=UUID,
                                     discoveryEvent_Cb=discoveryEvent_Cb,
                                     bleAdapter=bleAdapter)
        runTest._simple_connect()

        runTest.stopAdvertisement(scan_filter)

        bleAdapter.disconnect()
        testutils.removeBondedDevices()
    @staticmethod
    def Advertise_Without_Properties(scan_filter,
                                     bleAdapter):
        """Run the advertise/connect cycle expecting the short name "nimb".

        Temporarily overrides the class-level DUT_NAME and restores it after.
        Always returns True; failures surface via the inner cycle's reporting.
        """
        DUT_NAME_ORIGINAL = runTest.DUT_NAME
        runTest.DUT_NAME = "nimb"
        runTest._advertisement_connection_tests(
            scan_filter=scan_filter,
            bleAdapter=bleAdapter,
            UUID=runTest.DUT_UUID_128,
            discoveryEvent_Cb=runTest.discoveryEventCb)
        runTest.DUT_NAME = DUT_NAME_ORIGINAL
        return True
@staticmethod
def Check_ManufactureData(scan_filter,
bleAdapter,
bEnableManufactureData):
isTestSuccessFull = True
runTest._advertisement_start(
scan_filter=scan_filter,
UUID=runTest.DUT_UUID_16,
discoveryEvent_Cb=runTest.discoveryEventCb_16bit,
bleAdapter=bleAdapter)
manufacture_data = runTest.get_manufacture_data(runTest.testDevice)
if bEnableManufactureData == False:
if manufacture_data is not None:
print("ERROR: MANU_DATA is not None")
isTestSuccessFull &= False
else:
if manufacture_data is None:
print("ERROR: MANU_DATA is not None")
isTestSuccessFull &= False
else:
for data in manufacture_data:
if data != runTest.MANU_DATA:
print( "MANU_DATA is not correct. Data received: %d" %data)
isTestSuccessFull &= False
runTest._simple_connect()
runTest.stopAdvertisement(scan_filter)
isTestSuccessFull &= bleAdapter.disconnect()
testutils.removeBondedDevices()
return isTestSuccessFull
    @staticmethod
    def Check_ServiceData(scan_filter,
                          bleAdapter,
                          bEnableServiceData):
        """Discover the DUT and verify presence/content of advertised service data.

        When bEnableServiceData is False the advertisement must carry no
        service data for SERV_UUID; when True, every data byte must equal
        SERV_DATA. Connects once, stops advertisement, disconnects and clears
        bonds before returning the aggregated result.
        """
        isTestSuccessFull = True
        runTest._advertisement_start(
            scan_filter=scan_filter,
            UUID=runTest.DUT_UUID_16,
            discoveryEvent_Cb=runTest.discoveryEventCb_16bit,
            bleAdapter=bleAdapter)

        service_data = runTest.get_service_data(runTest.testDevice)
        if bEnableServiceData == False:
            if service_data is not None:
                print("ERROR: SERV_DATA is not None")
                isTestSuccessFull &= False
        else:
            if service_data is None:
                print("ERROR: SERV_DATA is None")
                isTestSuccessFull &= False
            else:
                for data in service_data:
                    if data != runTest.SERV_DATA:
                        print( "SERV_DATA is not correct. Data received: %d" %data)
                        isTestSuccessFull &= False

        runTest._simple_connect()
        runTest.stopAdvertisement(scan_filter)
        isTestSuccessFull &= bleAdapter.disconnect()
        testutils.removeBondedDevices()
        return isTestSuccessFull
    @staticmethod
    def Advertise_With_Manufacture_Data(scan_filter,
                                        bleAdapter):
        """Three manufacturer-data sub-checks against successive DUT configurations.

        The first two runs pass False (host expects no manufacturer data —
        presumably the DUT cycles through zero-length and NULL-pointer
        configurations; confirm against the DUT-side test), the third passes
        True (valid length and pointer).
        """
        isTestSuccessFull = True

        # Check when manufacture data length is 0, but pointer is valid
        isTestSuccessFull &= runTest.Check_ManufactureData(scan_filter, bleAdapter, False)

        # Check when manufacture data pointer is NULL, but length is not 0
        isTestSuccessFull &= runTest.Check_ManufactureData(scan_filter, bleAdapter, False)

        # Check when manufacture data length is not 0, and pointer is valid
        isTestSuccessFull &= runTest.Check_ManufactureData(scan_filter, bleAdapter, True)

        return isTestSuccessFull
    @staticmethod
    def Advertise_With_Service_Data(scan_filter,
                                    bleAdapter):
        """Three service-data sub-checks against successive DUT configurations.

        Mirrors Advertise_With_Manufacture_Data: two absence checks (False),
        then one content check (True).
        """
        isTestSuccessFull = True

        # Check when service data length is 0, but pointer is valid
        isTestSuccessFull &= runTest.Check_ServiceData(scan_filter, bleAdapter, False)

        # Check when service data pointer is NULL, but length is not 0
        isTestSuccessFull &= runTest.Check_ServiceData(scan_filter, bleAdapter, False)

        # Check when service data length is not 0, and pointer is valid
        isTestSuccessFull &= runTest.Check_ServiceData(scan_filter, bleAdapter, True)

        return isTestSuccessFull
    @staticmethod
    def Advertise_With_16bit_ServiceUUID(scan_filter,
                                         bleAdapter):
        """Full advertise/connect cycle filtered on the DUT's 16-bit UUID.

        Always returns True; failures surface via the inner cycle's reporting.
        """
        runTest._advertisement_connection_tests(
            scan_filter=scan_filter,
            bleAdapter=bleAdapter,
            UUID=runTest.DUT_UUID_16,
            discoveryEvent_Cb=runTest.discoveryEventCb_16bit)
        return True
@staticmethod
def _scan_discovery_with_timer(bleAdapter):
bleAdapter.startDiscovery(runTest.discoveryStartedCb)
StartScan = time.time()
runTest.mainloop.run()
ScanTime = time.time() - StartScan
bleAdapter.stopDiscovery()
return ScanTime
@staticmethod
def Advertise_Interval_Consistent_After_BT_Reset(scan_filter,
bleAdapter):
isTestSuccessFull = True
runTest._advertisement_start(
scan_filter=scan_filter,
UUID=runTest.DUT_UUID_128,
discoveryEvent_Cb=runTest.discoveryEventCb,
bleAdapter=bleAdapter)
secondKPI = runTest._scan_discovery_with_timer(bleAdapter)
runTest._simple_connect()
isTestSuccessFull = runTest.discoverPrimaryServices()
bleAdapter.gatt.updateLocalAttributeTable( False )
time.sleep(2) # wait for connection parameters update
# Second time disconnect
isTestSuccessFull &= bleAdapter.disconnect()
# Third time connection
# wait for DUT to start advertising
thirdKPI = runTest._scan_discovery_with_timer(bleAdapter)
isTestSuccessFull &= bleAdapter.connect(runTest.testDevice)
if thirdKPI > secondKPI * 10:
isTestSuccessFull &= False
# write result back to server
isTestSuccessFull = runTest.discoverPrimaryServices()
bleAdapter.gatt.updateLocalAttributeTable( False )
isTestSuccessFull &= runTest.writeResultWithoutResponse(
chr(isTestSuccessFull + 48))
runTest.stopAdvertisement(scan_filter)
isTestSuccessFull &= bleAdapter.disconnect()
testutils.removeBondedDevices()
return isTestSuccessFull
    @staticmethod
    def Write_Notification_Size_Greater_Than_MTU_3(scan_filter,
                                                   bleAdapter):
        """Subscribe for a notification whose payload exceeds MTU-3.

        Uses the MTU-specific callback; the pass/fail flag for the
        notification phase comes from runTest.isNotificationDeclinedSuccessFull,
        so the DUT is apparently expected to decline the oversized
        notification.  Then unsubscribes, disconnects, and aggregates the
        per-phase results into a single flag.
        """
        runTest._advertisement_start(
            scan_filter=scan_filter,
            UUID=runTest.DUT_UUID_128,
            discoveryEvent_Cb=runTest.discoveryEventCb,
            bleAdapter=bleAdapter)
        runTest._simple_connect()
        runTest.stopAdvertisement(scan_filter)
        isTestSuccessFull_discover = runTest.discoverPrimaryServices()
        bleAdapter.gatt.updateLocalAttributeTable( False )
        time.sleep(2)  # wait for connection parameters update
        # Check device not present. After discovery of services, advertisement
        # should have stopped.
        runTest.stopAdvertisement(scan_filter)
        bleAdapter.setNotificationCallBack(runTest.notificationMTUCb)
        bleAdapter.subscribeForNotification(
            runTest.DUT_NOTIFY_CHAR_UUID)  # subscribe for next test
        # Block until the notification callback quits the mainloop.
        runTest.mainloop.run()
        isTestSuccessFull_notification = runTest.isNotificationDeclinedSuccessFull
        runTest.submitTestResult(
            isTestSuccessFull_notification,
            runTest.notification)
        isTestSuccessFull_removenotification = bleAdapter.subscribeForNotification(
            runTest.DUT_NOTIFY_CHAR_UUID, subscribe=False)  # unsubscribe
        runTest.submitTestResult(
            isTestSuccessFull_removenotification,
            runTest.removeNotification)
        isTestSuccessFull_disconnect = bleAdapter.disconnect()
        testutils.removeBondedDevices()
        # Overall verdict: every phase must have passed.
        isTestSuccessFull = (isTestSuccessFull_discover &
                             isTestSuccessFull_notification &
                             isTestSuccessFull_removenotification &
                             isTestSuccessFull_disconnect)
        return isTestSuccessFull
    @staticmethod
    def Send_Data_After_Disconnected(scan_filter,
                                     bleAdapter):
        """Exercise read/write/notify/indicate across a disconnect/reconnect cycle.

        First connection: discover services, read/write a characteristic,
        receive one notification and one indication.  Then disconnect,
        reconnect, and repeat the same operations before unsubscribing and
        disconnecting for good.
        """
        runTest._advertisement_start(
            scan_filter=scan_filter,
            UUID=runTest.DUT_UUID_128,
            discoveryEvent_Cb=runTest.discoveryEventCb,
            bleAdapter=bleAdapter)
        runTest._simple_connect()
        isTestSuccessFull = runTest.discoverPrimaryServices()
        runTest.submitTestResult(
            isTestSuccessFull,
            runTest.discoverPrimaryServices)
        bleAdapter.gatt.updateLocalAttributeTable( False )
        # Check device not present. After discovery of services, advertisement
        # should have stopped.
        runTest.stopAdvertisement(scan_filter)
        # Check write and read
        bleAdapter.writeCharacteristic(
            runTest.DUT_OPEN_CHAR_UUID,
            runTest.DUT_OPEN_DESCR_UUID)
        bleAdapter.readCharacteristic(runTest.DUT_OPEN_CHAR_UUID)
        # Enable and receive notification and indication then disable.
        bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID)
        bleAdapter.subscribeForNotification(
            runTest.DUT_INDICATE_CHAR_UUID)  # subscribe for next test
        time.sleep(2)  # wait for connection parameters update
        # Check Notification and Indication
        bleAdapter.setNotificationCallBack(runTest.notificationCb)
        # NOTE(review): the result is hard-wired to True here and below; the
        # mainloop.run() presumably only returns once the callback fires, so
        # reaching this point appears to count as success — confirm.
        isTestSuccessFull = True
        runTest.mainloop.run()
        runTest.submitTestResult(isTestSuccessFull, runTest.notification)
        bleAdapter.setNotificationCallBack(runTest.indicationCb)
        isTestSuccessFull = True
        runTest.mainloop.run()
        runTest.submitTestResult(isTestSuccessFull, runTest.indication)
        isTestSuccessFull &= bleAdapter.disconnect()
        # Second time connection
        # wait for DUT to start advertising
        bleAdapter.startDiscovery(runTest.discoveryStartedCb)
        runTest.mainloop.run()
        bleAdapter.stopDiscovery()
        runTest._simple_connect()
        bleAdapter.subscribeForNotification(runTest.DUT_NOTIFY_CHAR_UUID)
        bleAdapter.subscribeForNotification(
            runTest.DUT_INDICATE_CHAR_UUID)  # subscribe for next test
        # Check write and read after reconnection
        bleAdapter.writeCharacteristic(
            runTest.DUT_OPEN_CHAR_UUID,
            runTest.DUT_OPEN_DESCR_UUID)
        bleAdapter.readCharacteristic(runTest.DUT_OPEN_CHAR_UUID)
        # Check Notification and Indication after reconnection
        bleAdapter.setNotificationCallBack(runTest.notificationCb)
        isTestSuccessFull = True
        runTest.mainloop.run()
        runTest.submitTestResult(isTestSuccessFull, runTest.notification)
        bleAdapter.setNotificationCallBack(runTest.indicationCb)
        isTestSuccessFull = True
        runTest.mainloop.run()
        runTest.submitTestResult(isTestSuccessFull, runTest.indication)
        # NOTE(review): each unsubscribe result is immediately overwritten
        # with True, so a failed unsubscribe is never reported — looks like a
        # deliberate workaround; verify before "fixing".
        isTestSuccessFull = bleAdapter.subscribeForNotification(
            runTest.DUT_NOTIFY_CHAR_UUID, subscribe=False)  # unsubscribe
        isTestSuccessFull = True
        runTest.submitTestResult(isTestSuccessFull, runTest.removeNotification)
        isTestSuccessFull = bleAdapter.subscribeForNotification(
            runTest.DUT_INDICATE_CHAR_UUID, subscribe=False)  # unsubscribe
        isTestSuccessFull = True
        runTest.submitTestResult(isTestSuccessFull, runTest.removeIndication)
        isTestSuccessFull &= bleAdapter.disconnect()
        testutils.removeBondedDevices()
        return isTestSuccessFull
@staticmethod
def Check_Bond_State(scan_filter, bleAdapter):
runTest._advertisement_start(scan_filter=scan_filter,
UUID=runTest.DUT_UUID_128,
discoveryEvent_Cb=runTest.discoveryEventCb,
bleAdapter=bleAdapter)
runTest._simple_connect()
isTestSuccessFull = runTest.discoverPrimaryServices()
runTest.submitTestResult(
isTestSuccessFull,
runTest.discoverPrimaryServices)
bleAdapter.gatt.updateLocalAttributeTable( False )
isTestSuccessFull &= bleAdapter.pair_cancelpairing()
time.sleep(2)
testutils.removeBondedDevices()
return isTestSuccessFull
@staticmethod
def Change_MTU_Size(scan_filter, bleAdapter):
runTest._advertisement_start(scan_filter=scan_filter,
UUID=runTest.DUT_UUID_128,
discoveryEvent_Cb=runTest.discoveryEventCb,
bleAdapter=bleAdapter)
runTest._simple_connect()
time.sleep(5)
isTestSuccessFull = bleAdapter.disconnect()
testutils.removeBondedDevices()
return isTestSuccessFull
@staticmethod
def Callback_NULL_check(scan_filter, bleAdapter):
runTest._advertisement_start(scan_filter=scan_filter,
UUID=runTest.DUT_UUID_128,
discoveryEvent_Cb=runTest.discoveryEventCb,
bleAdapter=bleAdapter)
runTest._simple_connect()
isTestSuccessFull = runTest.discoverPrimaryServices()
runTest.submitTestResult(
isTestSuccessFull,
runTest.discoverPrimaryServices)
bleAdapter.gatt.updateLocalAttributeTable( False )
isTestSuccessFull &= runTest.checkUUIDs(bleAdapter.gatt, False)
time.sleep(5)
isTestSuccessFull &= bleAdapter.disconnect()
testutils.removeBondedDevices()
return isTestSuccessFull
@staticmethod
def advertisement_16bit(testDevice):
return runTest.advertisement(
testDevice, DUT_UUID=runTest.UUID_16to128(
runTest.DUT_UUID_16))
@staticmethod
def UUID_16to128(UUID_16bit):
return "0000" + UUID_16bit + "-0000-1000-8000-00805f9b34fb"
    @staticmethod
    def setTestDevice(testDeviceTmp):
        # Store the device under test on the class so the static test
        # methods (connect/advertise helpers) can reach it later.
        runTest.testDevice = testDeviceTmp
    @staticmethod
    def getTestDevice():
        # Accessor for the device stored by setTestDevice().
        return runTest.testDevice
@staticmethod
def submitTestResult(isSuccessfull, testMethod):
runTest.numberOfTests += 1
if isSuccessfull is True:
successString = "PASS"
else:
successString = "FAIL"
runTest.numberOfFailedTests += 1
print("TEST("
+ runTest.TEST_GROUP
+ ", "
+ runTest.TEST_NAME_PREFIX
+ "_"
+ testMethod.__name__
+ ") "
+ successString)
sys.stdout.flush()
@staticmethod
def printTestsSummary():
print("-----------------------")
print(str(runTest.numberOfTests) + " Tests " +
str(runTest.numberOfFailedTests) + " Failures 0 Ignored")
sys.stdout.flush()
| aws/amazon-freertos | libraries/abstractions/ble_hal/test/ble_test_scipts/testClass.py | testClass.py | py | 36,737 | python | en | code | 2,543 | github-code | 36 |
75220902185 | from . import music
def change_key(sounds, diff):
    """Transpose all sounds a chosen number of halftones up.

    Parameters:
        sounds (list[Sound]) : Sounds to transpose
        diff (int) : Chosen number of halftones

    Returns:
        (list[Sound]) : New transposed sounds; rests (note is None) are
        carried over unchanged.
    """
    transposed = []
    for sound in sounds:
        if sound.note is None:
            new_note = None
        else:
            new_note = sound.note + diff
        transposed.append(music.Sound(new_note, sound.timestamp, sound.duration_ms))
    return transposed
| JakubBilski/tonations-recognition | src/sounds_manipulation.py | sounds_manipulation.py | py | 483 | python | en | code | 1 | github-code | 36 |
# Beginner exercise: simple variables and if/else branching.
day = 27
weight = 59.5
month = 'February'
library_is_open = False

# Branch on a boolean flag.
if library_is_open:
    print('Hurrayyy!!!')
    print("it's Open")
else:
    print(':(')

# Branch on a numeric comparison.
gold_rate = 4600
if gold_rate > 5000:
    print("We cannot Buy it now")
else:
    print("We're Buying!!!")

# Branch on an age threshold.
age = 16
if age >= 18:
    print('Adult')
else:
    print('Child')
| ShivaniKiran95/PythonPracticCode | Beginner/BooleansandIf.py | BooleansandIf.py | py | 322 | python | en | code | 0 | github-code | 36 |
15974641193 | # *-* coding: utf-8 *-*
"""
Created on mar 23 fév 2021 09:14:21 UTC
@author: vekemans
"""
import math as mt
import numpy as np
from math import pi as π
from numpy.fft import fft,fftshift,ifft
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
nfig = 1  # running matplotlib figure counter used by the __main__ driver below
def dft(func, start, end, N, order=1):
    """Spectral differentiation of ``func`` on a uniform N-point grid.

    Samples func on [start, end), transforms with the FFT, multiplies the
    centered spectrum by (ik)**order and inverse-transforms.  The integer
    wavenumbers assume a domain of length 2*pi, as used by the driver below.

    Returns:
        (x, u_dot): the sample grid and the real part of the derivative.
    """
    step = (end - start) / N
    x = np.arange(start, end, step=step)

    # -- Centered spectrum of the sampled function
    spectrum = fftshift(fft(func(x)))
    wavenumbers = np.arange(-N / 2, N / 2)

    # -- Differentiate in Fourier space and transform back
    derivative_spectrum = (1j * wavenumbers) ** order * spectrum
    u_dot = ifft(fftshift(derivative_spectrum)).real
    return x, u_dot
def gaussian(x, μ=0, σ=1):
    """Normal-distribution density with mean μ and standard deviation σ."""
    normalisation = 1 / mt.sqrt(2 * π * σ**2)
    return normalisation * np.exp(-((x - μ) ** 2) / (2 * σ**2))
def dgaussian(x, μ=0, σ=1):
    """Analytic first derivative of the gaussian density w.r.t. x."""
    chain_factor = -2 * (x - μ) / (2 * σ**2)
    return chain_factor * gaussian(x, μ=μ, σ=σ)
if __name__=='__main__':
    # --- Convergence study: spectral derivative of a gaussian vs. N ---
    m = np.arange(2,15)
    Nvec = 2**m
    error = np.empty(m.shape)
    μ = π
    σ = 0.5
    f = lambda x: gaussian(x, μ=μ,σ=σ)
    for (i,N) in enumerate(Nvec):
        x,u_dot = dft(f, 0,2*π,N, order=1)
        uprime = -2*(x-μ)/(2*σ**2) * f(x)  # analytic derivative for reference
        error[i] = np.abs(uprime-u_dot).max()
    fig = plt.figure(nfig)
    plt.plot(x,f(x), 'k-', label=r'$u$')
    plt.plot(x,uprime, 'k--', label=r"$u'$")
    plt.xlabel(r'$x$')
    # plt.title('Gaussian function and its derivative')
    plt.legend()
    plt.tight_layout()
    fig.savefig('../figs/gaussian_function.pdf')
    nfig += 1
    fig = plt.figure(nfig)
    plt.loglog(Nvec,error, 'k-o', markersize=4, lw=0)
    plt.xlabel(r'$N$')
    plt.ylabel('Error')
    # plt.title('Convergence of spectral method for differentiation')
    plt.grid(which='major', linestyle='--', linewidth=0.5)
    plt.grid(which='minor', linestyle=':', linewidth=0.25)
    plt.tight_layout()
    fig.savefig('../figs/spectral_convergence.pdf')
    nfig += 1
    # --- Variable-coefficient advection u_t + c(x) u_x = 0, leapfrog in time ---
    N = 2**8
    x = np.arange(0,2*π, step=2*π/N)
    u = gaussian(x, μ=π,σ=σ)
    # u = np.exp(-100*(x-1)**2)
    c = 0.2 + np.power(np.sin(x-1),2)
    tmax = 8.
    tplot = .02
    plotgap = int(tplot/0.001)
    dt = tplot/plotgap
    Nplots = int(tmax/tplot)
    data = np.zeros((Nplots,N))
    data[0,:] = u; u_old = u
    time = np.arange(Nplots)/Nplots * tmax
    for i in range(Nplots):
        for n in range(plotgap):
            u_hat = fftshift(fft(u))
            u_dot_hat = (1j*np.arange(-N/2,N/2)) * u_hat
            u_dot = ifft(fftshift(u_dot_hat)).real
            # First step is forward Euler, then leapfrog.
            u_new = u_old - 2*dt*c*u_dot if i>0 else u_old - dt*c*u_dot
            u_old = u
            # NOTE(review): u is never advanced to u_new inside this loop, so
            # the spatial derivative is always taken of the initial field —
            # a `u = u_new` after the u_old update looks missing; verify.
        data[i,:] = u_new
    fig = plt.figure(nfig)
    u_plt, = plt.plot([], [], 'k-')
    u_txt = plt.text(π, 0.9, '', ha='center', fontsize=10)
    plt.xlim([x[0],x[-1]])
    plt.ylim([0,1])
    plt.xlabel(r'$x$', fontsize=10)
    plt.ylabel(r'$u$', fontsize=10)
    # plt.title(r'$u_t + c(x) u_x = 0 \qquad u^0 = \mathcal{N}(\pi,0.5)$', fontsize=12)
    plt.tight_layout()
    nfig += 1
    def animate(t):
        # Frame t of the animation: show data[t] and the current time label.
        u_plt.set_data(x, data[t,:])
        u_txt.set_text('Current time : t = %.2f [s]' %(t*tplot))
        return u_plt,u_txt,
    anim = animation.FuncAnimation(fig, animate, Nplots, interval=100*tplot, blit=True)
    writer = animation.PillowWriter(fps=25)
    anim.save('../figs/gaussian_animation.gif', writer=writer)
    # --- Waterfall (3D) view of the time evolution ---
    fig = plt.figure(nfig)
    ax = plt.axes(projection='3d')
    for i in range(Nplots):
        t = time[i]*np.ones(x.shape)
        ax.plot3D(x,t, data[i,:], color='tab:blue')
    # X,T = np.meshgrid(x,time)
    # print(x.shape,time.shape,X.shape,data.shape)
    # ax.plot_surface(X,T, data, cmap='w')
    ax.set_xlim([x[0],x[-1]])
    ax.set_ylim([0,tmax])
    ax.set_zlim([0,4])
    ax.set_xlabel('X-axis')
    ax.set_ylabel('Time-axis')
    ax.set_zlabel(r'$u$')
    ax.view_init(40,-70)
    fig.tight_layout()
    fig.savefig('../figs/gaussian_convection.pdf')
    nfig += 1
    # -- Show figures
    plt.show()
| abbarn/lmeca2300 | homeworks/fft3.py | fft3.py | py | 4,093 | python | en | code | 0 | github-code | 36 |
34342251813 | # import asyncio
import time
from evdev import InputDevice, categorize, ecodes
source_device = None
target_device = None

# Init device references, retrying until BOTH devices can be opened.
# BUG FIX: the condition used `and`, which dropped out of the loop as soon
# as the first device opened even if the second was still missing.
while source_device is None or target_device is None:
    try:
        source_device = InputDevice('/dev/input/event1')
        target_device = InputDevice('/dev/hidg0')
    except Exception:
        print ("No device - waiting...")
        time.sleep (10)

# Forward every input event from the source device to the HID gadget.
# BUG FIX: async_read_loop() returns an async generator and cannot be
# consumed by a plain for-loop; use the blocking read_loop() instead.
for ev in source_device.read_loop():
    print(categorize(ev))
    target_device.write_event(ev)
31187115675 | # #1. 분할 정복
# def bs(A, l, r, k):
# if l>r:
# return None
# m=(l+r)//2
# if A[m]>k:
# return bs(A, l, m-1, k)
# elif A[m] < k:
# return bs(A, m+1, r, k)
# else:
# return m
# Read n and k, then the n x n matrix A (one row per input line).
n, k = map(int, input().split()) # e.g. 4, 16
A=[] # e.g. [[2, 5, 10, 19],[3, 8, 16, 19],[7, 20, 20, 32],[13, 25, 37, 44]]
for i in range(n):
    A.append(list(map(int, input().split())))
def bs(A, k, n, i, j):
    """Divide-and-conquer search in a row- and column-sorted matrix.

    Searches the n x n submatrix of A whose top-left corner is (i, j);
    n must be a power of two (guaranteed by the problem statement).

    Returns:
        (row, col) of k in A, or (-1, -1) when k is absent.
    """
    if n == 1:
        return (i, j) if A[i][j] == k else (-1, -1)
    if n % 2 != 0:
        # Guard for inputs that violate the n = 2**f guarantee.
        print("n이 2^f형태가 아니라서 주어진 조건과 다름!!")
        return None

    half = n // 2
    # The two "middle diagonal" elements split the value range.
    low_mid = A[i + half - 1][j + half - 1]
    high_mid = A[i + half][j + half]
    if k == low_mid:
        return (i + half - 1, j + half - 1)
    if k == high_mid:
        return (i + half, j + half)

    # Quadrant origins (numbered like the original: 1=upper-right,
    # 2=upper-left, 3=lower-left, 4=lower-right).
    q1 = (i, j + half)
    q2 = (i, j)
    q3 = (i + half, j)
    q4 = (i + half, j + half)
    if k < low_mid:
        candidates = (q1, q2, q3)
    elif k > high_mid:
        candidates = (q1, q3, q4)
    else:  # low_mid < k < high_mid
        candidates = (q1, q3)

    for (row, col) in candidates:
        found = bs(A, k, half, row, col)
        if found != (-1, -1):
            return found
    return (-1, -1)
# Assumes n is even (a power of two), as guaranteed by the input.
print(bs(A, k, n, 0, 0))
#문제의 주어진 조건대로 값이 들어오기 때문에 n//2의 값을 기준으로 4등분하여 1,2,3,4분면으로 나눠서 접근하였다.
#슬라이싱하면 따로 시간이 더 들기 때문에 인덱스로 접근하였다.
#x = A[i+n//2-1][j+n//2-1], y = A[i+n//2][j+n//2] 값과 비교해서 찾으면 바로 그 위치에 있는 것이다.
#그리고 x와 y에 대해서 k값을 비교해서 k가 x보다 작다면 1,2,3사분면에 존재하고 k가 y보다 크다면 1,3,4분면에 존재하고 x<k<y면 1,3사분면에 존재한다.
#여기서 없다면 (-1,-1)을 출력해야하기 때문에 n=1인 바닥조건에서 k값이 없으면 (-1,-1)이 반환되어야 하지만 여러 사분면 중에서 k값이 있다면 한 곳에서만 존재한다.
#따라서 조건문으로 (-1,-1)이 아닌 경우에만 그 값을 반환하고 모두 없을 경우에는 사분면의 조건을 따지는 문장이 다 끝나고 마지막에 (-1,-1)을 반환해서 없는 것을 의미하게 했다.
#또한 함수를 여러번 호출하면 그때마다 값을 계산하므로 값을 저장해두고 그 값으로 비교 후 조건에 충족된다면 그 값을 반환하게 했다.
#그렇게 수행시간을 분석해보면 T(n)=3T(n/2)+C=3(3T(n/2^2)+c)+c=3^2 * T(n/2^2)+c(1+3)이므로 n=2^k로 가정하고 쭉 점화식을 전개하면
#3^k * T(n/2^k)+c(1+3+3^2+......+3^(k-1))=3^k * c+c(1+3+3^2+......+3^(k-1))=c(1+3+....+3^k)=c'*3^k이 된다.
#따라서 [LaTex] Markdown 수식 작성법을 활용하여 Big-O로 표기하면 O(3^k)=O(3^{log_{2}n})=O(n^{log_{2}3})=O(n^{1.5849....})이다.
# #2. 이진 탐색
# def bs(A, k):
# l = 0
# r = len(A)-1
# while l <= r:
# m = (l+r)//2
# if A[m]>k:
# r=m-1
# elif A[m]<k:
# l=m+1
# else:
# return m
# return None
# n, k = map(int, input().split())
# A=[]
# for i in range(n):
# A.append(list(map(int, input().split())))
# result = bs(A[i], k)
# if result != None:
# print(f"({i}, {result})")
# break
# elif i == n-1:
# print(f"(-1, -1)")
# #행별로 이진탐색이 더 빠름
# #O(logn) | Ha3To/2022_2nd | python_workspace/Divide_&_Conquer.py | Divide_&_Conquer.py | py | 4,148 | python | ko | code | 0 | github-code | 36 |
72448270504 | import numpy as np
import tensornetwork as tn
# Demo: basic tensor contractions with the tensornetwork library.
np_vec = np.array([[0], [1]])
tn_vec1 = tn.Node(np_vec)
tn_vec2 = tn.Node(np_vec)
# Contracting the first index gives matrix with 1 element
tn_outer = tn.contract(tn_vec1[0] ^ tn_vec2[0])
# Contracting the second index gives matrix
tn_outer1 = tn.contract(tn_vec1[1] ^ tn_vec2[1])
tn_outer2 = tn.contract(tn_vec2[1] ^ tn_vec1[1])
# Matrix multiplication
np_mat1 = np.array([[1, 2], [3, 4]])
np_mat2 = np.array([[0, 1], [1, 0]])
tn_mat1 = tn.Node(np_mat1)
tn_mat2 = tn.Node(np_mat2)
# Multiplying by contracting the edges
mat12 = tn.contract(tn_mat1[1] ^ tn_mat2[0])
# Vector multiplication
vec1 = tn.contract(mat12[0] ^ tn_vec1[0]) # Picks 2nd row.
vec2 = tn.contract(mat12[1] ^ tn_vec1[0]) # Picks 2nd column.
# Quantum gate simulation: apply a Pauli-Z node to a |1> qubit node.
qubit = tn.Node(np.array([[0], [1]]))
z = tn.Node(np.array([[1, 0], [0, -1]]))
# _ = z[1] ^ qubit[0]
val = tn.contract(qubit[0] ^ z[1])
# val = qubit @ z
| Zshan0/TensorSimulations | src/dummy/matrix.py | matrix.py | py | 953 | python | en | code | 0 | github-code | 36 |
44448619471 | from pathlib import Path
import sqlite3
from datetime import datetime
from pymongo import ASCENDING
import pandas as pd
from utils.many_utils import PATH, get_collection
def inserisci_scadenza(
    st,
    nome,
    data_scadenza,
    dettagli,
    importo,
    stato,
    categoria,
    frequenza,
    notifiche=False,
):
    """Insert a new deadline row into the per-user SQLite database.

    ``st`` is the Streamlit-like session object; the DB file is derived from
    st.session_state['user'].  ``stato`` == "Completata" marks the row done.
    """
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()

    # Insertion date is always "today"; completion date starts empty.
    data_inserimento = datetime.now().strftime("%Y-%m-%d")
    data_completamento = None
    completata = 1 if stato == "Completata" else 0

    c.execute(
        """
        INSERT INTO scadenze (nome, data_inserimento, data_scadenza, data_completamento, dettagli, importo, categoria, frequenza, notifiche, completata)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    """,
        (
            nome,
            data_inserimento,
            data_scadenza,
            data_completamento,
            dettagli,
            importo,
            categoria,
            frequenza,  # monthly? yearly? weekly?
            notifiche,  # or rather, "how many days before the deadline I want to be notified"
            completata,  # done / to-do
        ),
    )

    conn.commit()
    conn.close()
def get_scadenze(st):
    """Load all deadlines for the current user, sorted by due date.

    Returns a pandas DataFrame with one column per scadenze table field.
    """
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    c.execute("SELECT * FROM scadenze ORDER BY data_scadenza")
    scadenze = c.fetchall()
    # Column names must mirror the scadenze table schema order.
    cols = [
        "id",
        "nome",
        "data_inserimento",
        "data_scadenza",
        "data_completamento",
        "dettagli",
        "importo",
        "categoria",
        "frequenza",
        "notifiche",
        "completata",
    ]
    # NOTE(review): st.write() with no arguments looks like a no-op — confirm
    # whether it can be removed.
    st.write()
    conn.close()
    return pd.DataFrame(scadenze, columns=cols).sort_values(
        "data_scadenza", ascending=True
    )
def aggiorna_stato_scadenza(st, id_scadenza):
    """Mark deadline ``id_scadenza`` as completed (today) in the user's SQLite DB."""
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    # BUG FIX: sqlite3 parameters must be a sequence; ``(id_scadenza)`` is
    # just a parenthesised value, not a 1-tuple, and raised at runtime.
    c.execute("UPDATE scadenze SET completata = 1 WHERE id = ?", (id_scadenza,))
    c.execute(
        "UPDATE scadenze SET data_completamento = ? WHERE id = ?",
        (datetime.strftime(datetime.now(), "%Y-%m-%d"), id_scadenza),
    )
    conn.commit()
    conn.close()
def elimina_scadenza(st, id_scadenza):
    """Delete deadline ``id_scadenza`` from the user's SQLite DB."""
    conn = sqlite3.connect(Path(PATH, f"utente_{st.session_state['user']}.db"))
    c = conn.cursor()
    # BUG FIX: parameters must be a 1-tuple, not a parenthesised value.
    c.execute("DELETE FROM scadenze WHERE id = ?;", (id_scadenza,))
    conn.commit()
    conn.close()
def genera_body_scadenza(s):
    """Render one scadenza document ``s`` (a dict) as a multi-line summary string."""
    completata_txt = "Si" if s["completata"] == 1 else "No"
    lines = [
        f"    ID: {s['_id']} - {s['nome']}, ",
        f"    Categoria: {s['categoria']},",
        f"    Inserita il: {s['data_inserimento']}",
        f"    Scadenza: {s['data_scadenza']},",
        f"    Dettagli: {s['dettagli']},",
        f"    Importo: {s['importo']},",
        f"    Completata: {completata_txt}",
        f"    Data completamento: {s['data_completamento']}    ",
    ]
    return "\n".join(lines)
######### Mongo
def get_scadenze_mongo(st, mongo_uri):
    """Load all deadlines for the current user from MongoDB, sorted by due date.

    Returns a pandas DataFrame with one column per document field.
    """
    scadenze_col = get_collection(
        st.session_state["user"],
        "scadenze",
        mongo_uri,
        mongo_db="solexiv_db",
    )
    scadenze = list(scadenze_col.find().sort("data_scadenza", ASCENDING))
    # Column names mirror the document schema used by inserisci_scadenza_mongo.
    cols = [
        "_id",
        "nome",
        "data_inserimento",
        "data_scadenza",
        "data_completamento",
        "dettagli",
        "importo",
        "categoria",
        "frequenza",
        "notifiche",
        "completata",
    ]
    return pd.DataFrame(scadenze, columns=cols).sort_values(
        "data_scadenza", ascending=True
    )
def inserisci_scadenza_mongo(
    st,
    nome,
    data_scadenza,
    dettagli,
    importo,
    stato,
    categoria,
    frequenza,
    notifiche=False,
):
    """Insert a new deadline document into the user's MongoDB collection.

    ``stato`` == "Completata" marks it done; returns True after inserting
    and showing a toast.
    """
    scadenze_col = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )

    # Insertion date is "today"; the due date arrives as a date object.
    data_inserimento = datetime.now().strftime("%Y-%m-%d")
    data_scadenza = data_scadenza.strftime("%Y-%m-%d")
    data_completamento = None
    completata = True if stato == "Completata" else False

    scadenza = {
        "nome": nome,
        "data_inserimento": data_inserimento,
        "data_scadenza": data_scadenza,
        "data_completamento": data_completamento,
        "dettagli": dettagli,
        "importo": importo,
        "categoria": categoria,
        "frequenza": frequenza,
        "notifiche": notifiche,
        "completata": completata,
    }
    # NOTE(review): the insert result is captured but unused.
    res = scadenze_col.insert_one(scadenza)
    st.toast("Scadenza inserita correttamente.")
    return True
def aggiorna_stato_scadenza_mongo(st, id_scadenza):
    """Mark the Mongo deadline ``id_scadenza`` as completed with today's date."""
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )
    document = collection.find_one({"_id": id_scadenza})
    if document is None:
        st.error("Scadenza non trovata.")
        return
    document["completata"] = True
    document["data_completamento"] = datetime.now().strftime("%Y-%m-%d")
    collection.update_one({"_id": id_scadenza}, {"$set": document})
    st.toast("Stato della scadenza aggiornato correttamente.")
def elimina_scadenza_mongo(st, id_scadenza):
    """Delete the Mongo deadline ``id_scadenza`` and report the outcome via st."""
    collection = get_collection(
        st.session_state["user"],
        "scadenze",
        st.session_state["mongo_uri"],
        mongo_db="solexiv_db",
    )
    outcome = collection.delete_one({"_id": id_scadenza})
    if outcome.deleted_count > 0:
        st.toast("Scadenza eliminata correttamente.")
    else:
        st.error("Scadenza non trovata.")
| piopy/solexiv | src/logica_applicativa/Scadenze.py | Scadenze.py | py | 6,066 | python | it | code | 0 | github-code | 36 |
6267092868 | # -*- coding: utf-8 -*-
import scrapy
import re
import datetime
from scrapy.http import Request
from urllib import parse
from ..items import JobBoleArticleItem
from ..utils.common import get_md5
class JobboleSpider(scrapy.Spider):
    """Crawl blog.jobbole.com listing pages and extract per-article fields."""

    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com']

    def parse(self, response: scrapy.http.TextResponse):
        """
        1. Extract every article URL on the listing page and schedule it for
           download, parsed by parse_detail.
        2. Schedule the next AJAX listing page (up to 10) back into parse.
        """
        # Next listing page, at most 10 pages deep.
        next_page = response.meta.get("next_page", 0)+1
        if next_page <= 10:
            next_url = f"http://blog.jobbole.com/kaifadou/snews-getajax.php?next={next_page}"
            yield Request(url=next_url, meta={"next_page": next_page}, callback=self.parse)
        # Article links plus their cover images from the listing markup.
        post_nodes = response.css(".zhicheng_news_list a")
        for post_node in post_nodes:
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            # parse.urljoin resolves relative hrefs against the current page URL.
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": parse.urljoin(response.url, image_url)},
                          callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract title, creation date and body text from one article page."""
        article_item = JobBoleArticleItem()
        front_image_url = response.meta.get("front_image_url", "")
        title = response.css(".ship_wrap h2::text").extract()[0]
        create_date = response.css(".meta span::text").extract()[0]
        content = response.css(".wen_article p::text").extract()
        content = "".join(content)
        article_item["url_object_id"] = get_md5(response.url)
        article_item["title"] = title
        # BUG FIX: the scraped date is a *string*, so it must be parsed with
        # strptime; the original strftime call always raised, silently
        # forcing every article's date to "today" via the fallback.
        try:
            create_date = datetime.datetime.strptime(create_date, "%Y/%m/%d").date()
        except Exception:
            create_date = datetime.datetime.now().date()
        article_item["create_date"] = create_date
        article_item["url"] = response.url
        article_item["front_image_url"] = [front_image_url]
        article_item["content"] = content
        yield article_item
| jasonxu510/scrapypro | ArticleSpider/ArticleSpider/spiders/jobbole.py | jobbole.py | py | 3,319 | python | en | code | 0 | github-code | 36 |
35520934329 | # 숫자 맞추기 게임
# Number-guessing game: prompt until the user matches the random pick.
import random

com = random.randint(1, 10)  # computer's secret number
cnt = 0                      # attempts so far

while True :
    cnt += 1
    user = int(input('1부터 10까지의 숫자를 입력하세요. >> '))

    if user == com :
        print(f'정답! {cnt}번 만에 맞췄습니다!')
        break
    else :
        print('오답! 다시 시도해보세요.')
# Printing numbers in grids
for i in range(1, 6) :
    print(str(i) * 5)

# Print 1..25, five numbers per row (one number per inner iteration).
for i in range(1, 26) :
    for j in range(i, i+1) :
        if j < 10 :
            print(j, end=' ')
        else :
            print(j, end=' ')
    if i % 5 == 0 :
        print()

# Same grid, but stepping the outer loop by 5 and printing rows directly.
for i in range(1, 26, 5) :
    for j in range(i, i+5) :
        if j < 10 :
            print(j, end=' ')
        else :
            print(j, end=' ')
    print()
# Printing star patterns
# 1: solid 5x5 square
for i in range(5) :
    print('★' * 5)
# 2: increasing triangle
for i in range(1, 6) :
    print('★' * i)
# 3: decreasing triangle
for i in range(5, 0, -1) :
    print('★' * i)
# 4: centered pyramid
for i in range(1, 11, 2) :
    print(' ' * ((11 - i) // 2), '★' * i)
# 5: centered inverted pyramid
for i in range(9, 0, -2) :
    print(' ' * ((11 - i) // 2), '★' * i)
# 6: diamond (pyramid + inverted pyramid)
for i in range(1, 13, 2) :
    print(' ' * ((13 - i) // 2), '★' * i)
for i in range(9, 0, -2) :
    print(' ' * ((13 - i) // 2), '★' * i)
# Dice pairs: all (i, j) rolls of two dice that sum to 6
for i in range(1, 7) :
    for j in range(1, 7) :
        if i + j == 6 :
            print(f'({i}, {j})', end=' ')

# All non-negative (x, y) solutions of 2x + 4y = 10 with x, y <= 10
for x in range(0, 11) :
    for y in range(0, 11) :
        if x * 2 + y * 4 == 10 :
            print(f'x = {x}, y = {y}')
# Bubble sort on 10 random numbers, with early exit when a pass makes no swap
sort_num = []
for i in range(10) :
    sort_num.append(random.randint(0, 10))
print(sort_num)

for i in range(len(sort_num)-1) :
    check = False  # did this pass swap anything?
    for j in range(len(sort_num)-1-i) :
        if sort_num[j] > sort_num[j+1] :
            check = True
            sort_num[j], sort_num[j+1] = sort_num[j+1], sort_num[j]
    print(sort_num)
    if not check :
        break
# Lotto drawing
# Draw one ticket: keep adding random numbers to a set until it has 6.
# NOTE(review): randint(1, 46) includes 46, unlike the (1, 45) draw below —
# confirm which upper bound is intended.
lotto = set()
while True :
    lotto.add(random.randint(1, 46))
    if len(lotto) == 6 :
        break
print(lotto)

# Draw five tickets the same way.
lotto5 = []
lotto = set()
while True :
    lotto.add(random.randint(1, 45))
    if len(lotto) == 6 :
        lotto5.append(lotto)
        lotto = set()
    if len(lotto5) == 5 :
        break
print(lotto5)

# Shuffle-based variant using a list.
# NOTE(review): range(1, 45) yields 1..44, so 45 is never drawn — verify.
lotto_num = []
for i in range(1, 45) :
    lotto_num.append(i)

lotto5 = []
for i in range(5) :
    # Crude shuffle: repeatedly swap index 0 with a random index.
    for j in range(1000) :
        shuffle = random.randint(0, 44)
        lotto_num[0], lotto_num[shuffle] = lotto_num[shuffle], lotto_num[0]
    lotto = lotto_num[:6]
    print(lotto)
    lotto5.append(lotto)
print(lotto5)
| DahyeonS/Java_Python_Lecture | 20231130/python_ex.py | python_ex.py | py | 2,599 | python | en | code | 0 | github-code | 36 |
71859082343 | import logging
import os
from io import StringIO
import boto3
import pandas as pd
from botocore.exceptions import ClientError
# Read AWS credentials from the environment.
AWS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET = os.environ.get("AWS_SECRET_ACCESS_KEY")

bucket = "kiwi-bot"
# BUG FIX: ``key`` had been commented out but is still used by get_object
# below, which raised NameError at runtime.
key = "ordersDB.csv"
prefix = "data/"
filename = "https://kiwi-bot.s3.us-east-2.amazonaws.com/ordersDB.csv"
filepath = "kiwi_bot\data\order.csv"

# NOTE(review): region_name is us-east-1 but the bucket URL above says
# us-east-2 — confirm the bucket's actual region.
s3 = boto3.client(
    "s3",
    region_name="us-east-1",
    aws_access_key_id=AWS_KEY_ID,
    aws_secret_access_key=AWS_SECRET,
)

# Download the CSV object, load it into a DataFrame, and save a local copy.
read_file = s3.get_object(Bucket=bucket, Key=key)
df = pd.read_csv(read_file["Body"])
df.to_csv("kiwi_bot\data\order2.csv")
print("File succesfully loaded")
| edward0rtiz/clustering-demand-scm | lib/data_engine/s3_get_object.py | s3_get_object.py | py | 717 | python | en | code | 0 | github-code | 36 |
4628312590 | #
# Title:pid_lock.py
# Description:ensure only a single instance runs
# Development Environment:OS X 10.15.5/Python 3.7.6
# Author:G.S. Cole (guycole at gmail dot com)
#
import os
class PidLock:
    """File-based PID lock: the lock file holds the PID of the running instance."""

    def lock_test(self, file_name: str) -> bool:
        """Return True when ``file_name`` holds the PID of a live process.

        A missing/unreadable file or non-numeric content means no active lock.
        """
        try:
            # Close the handle even on parse failure (the original leaked it
            # when int() raised).
            with open(file_name, "r") as infile:
                target_pid = int(infile.readline())
        except (IOError, ValueError):
            return False

        command = "/bin/ps -p %d" % target_pid
        temp = os.popen(command).readlines()

        # ps prints a header line plus one line per matching process, e.g.
        # ['  PID TTY  TIME CMD\n', '52645 ttys000 0:00.04 python ...\n'],
        # so more than one line means the PID is alive.
        if len(temp) > 1:
            return True

        return False

    def write_lock(self, file_name: str) -> bool:
        """Write the current PID to ``file_name``.

        Returns True on success (the original was annotated -> bool but
        returned None).
        """
        with open(file_name, "w") as outfile:
            outfile.write("%d\n" % (os.getpid()))
        return True
if __name__ == "__main__":
    # Demo: test the lock, and write it when no live instance holds it.
    print("start")

    pid_lock = PidLock()

    flag = pid_lock.lock_test("/tmp/target")
    if flag:
        print("pidlock test true")
    else:
        print("pidlock test false, now write lock")
        pid_lock.write_lock("/tmp/target")
        # Re-test: our own PID is now in the file, so this should be True.
        flag = pid_lock.lock_test("/tmp/target")
        print("pidlock test now %s" % flag)

    print("stop")
# ;;; Local Variables: ***
# ;;; mode:python ***
# ;;; End: ***
| guycole/mellow-elephant | src/pid_lock.py | pid_lock.py | py | 1,502 | python | en | code | 2 | github-code | 36 |
70183485543 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from decouple import config
def main():
    """Run Django administrative tasks with the configuration class picked
    from the ENVIROMENT variable (django-configurations)."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'senda.settings')

    # Map the ENVIROMENT value to a configurations class, defaulting to the
    # local development configuration for any other value.
    configurations_by_env = {
        "staging": "Staging",
        "preview": "Preview",
        "production": "Production",
    }
    enviroment = config("ENVIROMENT")
    DJANGO_CONFIGURATION = configurations_by_env.get(enviroment, "Development")
    os.environ.setdefault('DJANGO_CONFIGURATION', DJANGO_CONFIGURATION)

    try:
        from configurations.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| UNPSJB/SendaAlquiler | backend/manage.py | manage.py | py | 1,129 | python | en | code | 1 | github-code | 36 |
35849079519 | import tkinter as tk
from tkinter import ttk
import s_probe
class Treev(ttk.Treeview):
    def __init__(self, master = None):
        # Keep the parent widget and build the two-extra-column tree view.
        self.master = master
        self.tree = ttk.Treeview(master, columns=('val', 'max'), height=30, padding=[2,2])
        try:
            # initiate sProbe static class, decide windows or linux and import
            s_probe.sProbe()
        except TypeError as e:
            # NOTE(review): re-raising as TypeError(e) drops the original
            # traceback context — consider a bare `raise`.
            raise TypeError(e)
        self.init_windows()
def init_windows(self):
#self.tree.tag_configure(font=['FiraMono Nerd Font Mono', 12, 'normal'])
dc = round((s_probe.sProbe.descap / s_probe.sProbe.voltage) / 1000, 3)
self.tree.insert('', 'end', 'system', text='System Name', values=(s_probe.sProbe.system_name, ''), open=True)
self.tree.insert('system', 0, 'name', text='Name', values=(s_probe.sProbe.name, ''))
self.tree.insert('system', 'end', text='Status', values=(s_probe.sProbe.status, ''))
self.tree.insert('system', 'end', 'chargepercent', text='Charge Percent',
values=(str(s_probe.sProbe.est_chrg) + ' %', ''))
if s_probe.sProbe.runtime != 'N/A':
self.tree.insert('system', 'end', 'timerem', text='Time Remaining',
values=(str(s_probe.sProbe.hours) + 'h ' + str(s_probe.sProbe.minutes) + 'm ', ''))
if s_probe.sProbe.maxrechargetime is not None:
self.tree.insert('system', 'end', 'maxchargetime', text='Max Recharge Time',
values=(s_probe.sProbe.maxrechargetime,''))
self.tree.insert('system', 'end', 'manufacturedate', text='Manufacture Date', values=(s_probe.sProbe.mdate, ''))
self.tree.insert('system', 'end', 'deviceid', text='Device ID', values=(s_probe.sProbe.device_id, ''))
# Check "maxrechargetime", ""
if s_probe.sProbe.charging:
self.tree.insert('system', 'end', 'power', text=str('Power' + '🔌'), open=True)
self.tree.insert('power', 'end', 'chargepower', text='Charge Power',
values=(str(s_probe.sProbe.chargerate) + ' W', ''))
#self.tree.insert('timerem', 'end', 'rechargetime', text='Max Recharge Time',
# values=(str(s_probe.sProbe.rehours) + 'h ' + str(s_probe.sProbe.remins) + 'm', ''))
if s_probe.sProbe.ttf is not None:
self.tree.insert('timerem', 'end', 'ttf', text='Time to Full Charge',
values=(str(s_probe.sProbe.ttf / 60) + 'h ' + str(s_probe.sProbe.ttf % 60) + 'm', ''))
else: #discharging
self.tree.insert('system', 'end', 'power', text=str('Power' + ' âš¡'), open=True)
self.tree.insert('power', 'end', 'dpower', text='Discharge Power',
values=(str(s_probe.sProbe.dischargerate / 1000) + ' W', ''))
self.tree.insert('power', 'end', 'amps', text='Amperage', values=(str(s_probe.sProbe.amps) + ' A', ''))
self.tree.insert('system', 'end', 'v', text='Voltage', open=True)
self.tree.insert('v', 'end', 'voltnow', text='Voltage', values=(str(s_probe.sProbe.voltage / 1000) + ' V', ''))
self.tree.insert('v', 'end', 'desvolt', text='Design Voltage',
values=(str(int(s_probe.sProbe.design_voltage) / 1000) + ' V', ''))
self.tree.insert('system', 'end', 'capacity', text='Capacity', open=True)
self.tree.insert('capacity', 'end', 'descap', text='Design Capacity', values=(str(s_probe.sProbe.descap / 1000) + ' Wh(' +
str(dc) + ' Ah)', ''))
self.tree.insert('capacity', 'end', 'fullcap', text='Full Charge Capacity',
values=(str(s_probe.sProbe.full_cap / 1000) + ' Wh', ''))
self.tree.insert('capacity', 'end', 'bathealth', text='Battery Health',
values=(str(round(s_probe.sProbe.bathealth, 2)) + ' %', ''))
self.tree.insert('capacity', 'end', 'capleft', text='Remaining Capacity', values=(str(s_probe.sProbe.rem_cap) + ' Wh', ''))
# extra info
self.tree.insert('system', 'end', 'info', text='Extra Info', open=True)
self.tree.insert('info', 'end', text='Cycle Count', values=(s_probe.sProbe.cycle_count, ''))
self.tree.insert('info', 'end', text='Temperature', values=(s_probe.sProbe.temp, ''))
self.tree.insert('info', 'end', 'cap', text='Caption', values=(s_probe.sProbe.caption, ''))
self.tree.insert('info', 'end', 'desc', text='Description', values=(s_probe.sProbe.desc, ''))
self.tree.insert('info', 'end', 'avail', text='Availability', values=(s_probe.sProbe.avail, ''))
self.tree.insert('info', 'end', 'batstat', text='Battery Status', values=(str(s_probe.sProbe.bstatus)+' ('+str(s_probe.sProbe.stat_str)+')', ''))
if s_probe.sProbe.ogchem is not None:
self.tree.insert('info', 'end', 'chem', text='Chemistry', values=(str(s_probe.sProbe.ogchem)+' ('+str(s_probe.sProbe.chem_str)+')', ''))
if s_probe.sProbe.err_desc is not None:
self.tree.insert('info', 'end', text='Error Description', values=(s_probe.sProbe.err_desc, ''))
if s_probe.sProbe.pmc:
# Set to True
self.tree.insert('info', 'end', text='Power Mgmt Capabilities', values=(s_probe.sProbe.pmc, ''))
self.tree.insert('info', 'end', text='Low Alarm', values=(str(s_probe.sProbe.lowalarm) + ' Wh',''))
self.tree.insert('info', 'end', text='Critical Alarm', values=(str(s_probe.sProbe.critalarm) + ' Wh', ''))
self.tree.insert('info', 'end', text='Critical Bias', values=(str(s_probe.sProbe.critbi) + '', ''))
# initialize max var, and initialize column
self.maxv = s_probe.sProbe.voltage
self.maxamps = s_probe.sProbe.amps
if s_probe.sProbe.charging:
self.maxcharge = s_probe.sProbe.chargerate
#self.tree.set('charge', 'max', str(self.maxcharge) + ' W')
self.tree.set('chargepower', 'max', str(self.maxcharge) + ' W')
else:
self.maxdis = s_probe.sProbe.dischargerate
self.tree.set('dpower', 'max', str(self.maxdis) + ' W')
self.tree.set('voltnow', 'max', str(self.maxv) + ' V')
self.tree.set('amps', 'max', str(self.maxamps) + ' A')
# column headings
self.tree.heading('#0', text='Property', anchor=tk.CENTER)
self.tree.column('#0', width=200, stretch=tk.YES)
self.tree.heading('0', text='Value')
self.tree.column('0', width=200)
self.tree.heading('1', text='Max')
self.tree.column('1', width=150)
self.tree.grid(row=0, column=0, sticky='nsew', padx=(10,0), pady=(10,2))
scrolly = ttk.Scrollbar(self.master, orient=tk.VERTICAL, command=self.tree.yview)
scrollx = ttk.Scrollbar(self.master, orient=tk.HORIZONTAL, command=self.tree.xview)
scrolly.grid(row=0, column=1, sticky='ns', pady=(10,0))
#scrollx.grid(row=1, column=0, sticky='ew', padx=(10,0), pady=(0,10))
self.tree.configure(yscroll=scrolly.set)
#self.tree.bind('<<TreeviewSelect>>', self.item_selected)
return self.tree
def re_tree(self):
rem_ah = str(round((s_probe.sProbe.rem_cap / s_probe.sProbe.voltage) / 1000, 3))
full_ah = str(round((s_probe.sProbe.full_cap / s_probe.sProbe.voltage) / 1000, 3))
if s_probe.sProbe.charging:
self.tree.set('chargepower', 'val', str(s_probe.sProbe.chargerate) + ' W')
if s_probe.sProbe.ttf is not None:
self.tree.set('ttf', 'val', str(str(s_probe.sProbe.ttfhours) + 'h ' + str(s_probe.sProbe.ttfmins) + 'm'))
if s_probe.sProbe.chargerate > self.maxcharge:
self.maxcharge = s_probe.sProbe.chargerate
self.tree.set('chargepower', 'max', str(self.maxcharge) + ' W')
if s_probe.sProbe.maxrechargetime is not None:
self.tree.set('rechargetime', 'val', str(str(s_probe.sProbe.rehours) + 'h ' + str(s_probe.sProbe.remins) + 'm'))
else:
self.tree.set('dpower', 'val', str(s_probe.sProbe.dischargerate) + ' W')
if s_probe.sProbe.dischargerate > self.maxdis:
self.maxdis = s_probe.sProbe.dischargerate
self.tree.set('dpower', 'max', str(self.maxdis) + ' W')
self.tree.set('dpower', 'max', str(self.maxdis) + ' W')
if s_probe.sProbe.runtime != 'N/A':
self.tree.set('timerem', 'val', str(str(s_probe.sProbe.hours) + 'h ' + str(s_probe.sProbe.minutes) + 'm'))
self.tree.set('batstat', 'val', str(str(s_probe.sProbe.bstatus) + ' ('+str(s_probe.sProbe.stat_str)+')'))
self.tree.set('voltnow', 'val', str(s_probe.sProbe.voltage) + ' V')
self.tree.set('avail', 'val', str(s_probe.sProbe.avail) + ' (' + str(s_probe.sProbe.avail_str)+')')
self.tree.set('amps', 'val', str(s_probe.sProbe.amps) + ' A')
# Max values column
if s_probe.sProbe.voltage > self.maxv:
self.maxv = s_probe.sProbe.voltage
self.tree.set('voltnow', 'max', str(self.maxv) + ' V')
if s_probe.sProbe.amps > self.maxamps:
self.maxamps = s_probe.sProbe.amps
self.tree.set('amps', 'max', str(self.maxamps) + ' A')
self.tree.set('capleft', 'val', str(s_probe.sProbe.rem_cap / 1000) + ' Wh (' + rem_ah + ' Ah)')
# doubt this changs often
self.tree.set('fullcap', 'val', str(s_probe.sProbe.full_cap / 1000) + ' Wh (' + full_ah + ' Ah)')
self.tree.set('chargepercent', 'val', str(s_probe.sProbe.est_chrg) + ' %')
# takes about 8 seconds
def get_rootwmi(self):
if not self.rootwmi_on:
self.rootwmi_on = True
r = s_probe.sProbe.getRootWmi()
self.tree.tree.insert('', 'end', 'root/wmi', text='root/wmi', open=True)
for i in r.classes:
if "Battery" in i and "MS" not in i:
tmp = r.instances(i)
if len(tmp) > 0:
self.tree.tree.insert('root/wmi', 'end', i, text=i, open=True)
for x in tmp[0].properties.keys():
self.tree.tree.insert(i, 'end', str(i)+x, text=x, values=(getattr(tmp[0], x), ''))
else:
self.rootwmi_on = False
self.tree.delete('root/wmi')
def get_win32batt(self):
if not self.win32bat_on:
self.win32bat_on = True
w = s_probe.sProbe.getw32bat_inst()
self.tree.insert('', 'end', 'Raw', text='win32_battery', open=True)
for i in w.properties.keys():
val = getattr(w, i)
if val:
self.tree.insert('Raw', 'end', str('b' + i), text=i, values=(val, ''))
self.tree.yview_moveto('1.0')
else:
self.win32bat_on = False
self.tree.delete('Raw')
def get_portable(self):
if s_probe.sProbe.portable is not None:
if not s_probe.sProbeortable_on:
s_probe.sProbeortable_on = True
self.tree.insert('', 'end', 'portable', text='Portable Battery', open=True)
# Portable Battery (all static values)
for i in s_probe.sProbe.portable.properties.keys():
val = getattr(s_probe.sProbe.portable, i)
# Not none values
if val:
if 'DesignVoltage' in i or 'DesignCapacity' in i:
val = str(int(val) / 1000)
self.tree.insert('portable', 'end', i, text=i, values=(val, ''))
# Scroll to bottom
self.tree.yview_moveto('1.0')
else:
s_probe.sProbeortable_on = False
self.tree.delete('portable')
else:
print('Portable does not exist')
def on_close(self):
s_probe.sProbe.on_close()
| jmerc141/batterypy | tree.py | tree.py | py | 12,087 | python | en | code | 0 | github-code | 36 |
import mimetypes
import os

import entrypoints
def open(*args, **kwargs):
    """
    Dispatch to a compatible PIMS reader.

    The first positional argument (file buffer, filename, or filename glob)
    selects the reader; all arguments are then forwarded to it unchanged.
    """
    selected_reader = _dispatch(args[0])
    return selected_reader(*args, **kwargs)
def _dispatch(file):
    """Return the reader class registered for *file*'s MIME type.

    *file* may be a filename or glob as a ``str`` or ``os.PathLike``
    (generalized: path objects are now accepted), or a file buffer that
    exposes a ``name`` attribute.

    Raises DispatchError when the MIME type cannot be determined or no
    reader entry point is registered for it.
    """
    # Ensure mimetypes is initialized. (This step pulls from the operating
    # system's MIME type registry.)
    mimetypes.init()
    if isinstance(file, (str, os.PathLike)):
        # file is inferred to be a filename, path object, or filename glob.
        mimetype, _ = mimetypes.guess_type(os.fspath(file))
    else:
        # file is inferred to be a file buffer, which has a name attribute.
        # If this is a non-file-based buffer like a StringIO object it won't
        # have a name, in which case we can't infer the type this way.
        # In the future, we could employ libraries that peek at the first
        # couple bytes and infer the MIME type from the file signature, which
        # would work on any buffer.
        try:
            filename = file.name
        except AttributeError:
            raise DispatchError(
                "Expected a filename or file buffer with a 'name' attribute.")
        mimetype, _ = mimetypes.guess_type(filename)
    if mimetype is None:
        raise DispatchError(f"Could not detect MIME type of {file}")
    try:
        entrypoint = entrypoints.get_single('TBD.readers', mimetype)
    except entrypoints.NoSuchEntryPoint:
        raise DispatchError(f"No PIMS reader found for MIME type {mimetype}")
    reader = entrypoint.load()
    return reader
class PIMSError(Exception):
    """Base class for all exceptions raised directly by PIMS."""
    pass


class DispatchError(PIMSError):
    """Raised when no suitable reader can be resolved for an input."""
    pass
| danielballan/pims2-prototype | pims/__init__.py | __init__.py | py | 1,750 | python | en | code | 1 | github-code | 36 |
3763224892 | import cv2 as cv
import pandas as pd
# Multi-object tracking driven by ground-truth detections.
# For every detection whose frame_start matches the current video frame, a
# CSRT tracker is initialised from a manually selected ROI and then updated
# on each following frame until frame_end is reached or it fails too often.
df = pd.read_csv("Set03_video01.csv")
cam = cv.VideoCapture("Set03_video01.h264")
frame = 0
trackers = []   # each entry: [tracker, last_frame, speed, failure_count]
to_delete = []  # indices into `trackers` scheduled for removal this frame
boxes = []      # output rows: [frame, x, y, w, h, speed]
while True:
    _ret, img = cam.read()
    if _ret == False:
        break
    # Detections that begin on this frame.
    temp_df = df[(df.frame_start == frame)]
    if not temp_df.empty:
        for index, row in temp_df.iterrows():
            # Expand the annotated box into a generous search region.
            x = int(row['x'])
            if x > 200:
                x = int(row['x']) - 200
            y = int(row['y']) - 400
            w = 3 * int(row['w'])
            h = 6 * int(row['h'])
            print(x, y + 400)
            # The operator confirms/adjusts the region by hand.
            bbox = cv.selectROI(img, False)
            print("selected")
            # NOTE(review): initBB is computed but unused — selectROI's box
            # is what actually initialises the tracker; confirm intent.
            initBB = (x, y, w, h)
            speed = float(row['speed'])
            tracker = cv.TrackerCSRT_create()
            tracker.init(img, bbox)
            failure_rate = 0
            trackers.append([tracker, int(row['frame_end']) - 10, speed, failure_rate])
    # Collect trackers that expired or failed more than 4 times.
    if len(trackers) > 0:
        for idx, item in enumerate(trackers):
            if frame > item[1] or item[3] > 4:
                to_delete.append(idx)
    if len(to_delete) > 0:
        # BUG FIX: pop from the highest index down.  Popping in ascending
        # order shifted the remaining elements left, so later indices
        # referred to (and deleted) the wrong trackers.
        for item in sorted(to_delete, reverse=True):
            trackers.pop(item)
        to_delete = []
    # Advance every live tracker on this frame.
    for tracker_ in trackers:
        (success, box) = tracker_[0].update(img)
        if success:
            (x, y, w, h) = [int(v) for v in box]
            speed = tracker_[2]
            boxes.append([frame, x, y, w, h, speed])
        else:
            tracker_[3] += 1
    frame += 1
    if frame % 200 == 0:
        print(frame)
df = pd.DataFrame(boxes)
df.to_csv("boxes_data_3.csv", index=False, header=['frame', 'x', 'y', 'w', 'h', 'speed'])
14575147176 | from collections import defaultdict
from sys import stdin
def rec(graph, v, visited, visited2):
    """Count paths from *v* to 'end' (Advent of Code 2021, day 12 part 2).

    Small (lowercase) caves may be visited at most once, except that a
    single small cave on the whole path may be visited twice; 'start' is
    never re-entered.  *visited* holds small caves seen once on the current
    path and *visited2* the (at most one) cave currently seen twice.
    """
    if v == 'end':
        return 1
    if v.islower():
        # Entering a small cave: record the first visit, or mark the double.
        if v not in visited:
            visited.add(v)
        else:
            visited2.add(v)
    res = 0
    for to in graph[v]:
        # A big cave is always allowed; a small cave (never 'start') is
        # allowed when unseen, or seen once while no cave is doubled yet.
        # (Idiom fix: `not visited2` instead of `not len(visited2)`.)
        if not to.islower() or \
                to != 'start' and (to not in visited or not visited2):
            res += rec(graph, to, visited, visited2)
    if v.islower():
        # Undo this frame's bookkeeping on the way back up.
        if v not in visited2:
            visited.remove(v)
        else:
            visited2.remove(v)
    return res
# Read undirected edges ("a-b" per line) from stdin, then count the paths.
graph = defaultdict(set)
for line in stdin:
    left, right = line.strip().split('-')
    graph[left].add(right)
    graph[right].add(left)
print(rec(graph, 'start', set(), set()))
| vfolunin/archives-solutions | Advent of Code/2021/12.2.py | 12.2.py | py | 758 | python | en | code | 0 | github-code | 36 |
12730428499 | """
The main, core program. A CLI program.
For now: whilst this is just a CLI program, you must edit any key settings with the file:
C:\Potts' Software\Fortnite Exploit\fe-sett.ini
Changing this file is permitted, I'm not the best at efficient programming. Work away.
Written by Elliot Potts,
https://www.elliotpotts.me/
"""
import os
import time
import keyboard
import ConfigParser
# In-memory copy of the fe-sett.ini settings; populated by settingsDigest().
# (audio_mute and bool_temp are declared but never read elsewhere in view.)
local_settings = {
    'hotkey': None,
    'audio_mute': None,
    'space_delay': None,
    'bool_temp': None
}
# Shared ConfigParser instance used for all reads/writes of fe-sett.ini.
cPar = ConfigParser.SafeConfigParser()
def spacer(amount):
    """Pause one second, then print *amount* blank lines to separate output."""
    time.sleep(1)
    # BUG FIX: '"" * amount' is always the empty string, so exactly one
    # blank line was printed regardless of *amount* (callers pass 2/3/10).
    # Multiplying the newline prints the requested number of blank lines.
    print("\n" * amount)
def settingsDigest():
    """Load hotkey/delay settings from fe-sett.ini into local_settings,
    creating the directory and a default file on first run.  A corrupt
    file is deleted and the program exits so it regenerates next launch."""
    # Ensure the settings directory exists, then make it the CWD so all
    # later relative references to fe-sett.ini resolve there.
    if os.path.isdir("C:\Potts' Software\Fortnite Exploit"):
        pass
    else:
        os.makedirs("C:\Potts' Software\Fortnite Exploit")
    os.chdir("C:\Potts' Software\Fortnite Exploit")
    if os.path.isfile("fe-sett.ini"):
        try:
            settRead = cPar.read("fe-sett.ini")
            local_settings['hotkey'] = cPar.get("settings", "Hotkey")
            local_settings['space_delay'] = cPar.get("settings", "ParachuteDelay")
        except:
            # Any read/parse error: treat the file as corrupt and remove it.
            print(" [-] Error reading configuration file. Regenerating...")
            os.remove("fe-sett.ini")
            spacer(3)
            print(" [-] Configuration file removed. It will be regenerated when the application is next launched.")
            quit()
    else:
        # First run: write defaults, then read them back into local_settings.
        cfgFile = open("fe-sett.ini", "w")
        cPar.add_section("settings")
        cPar.set("settings", "Hotkey", "V")
        cPar.set("settings", "ParachuteDelay", "0.5")
        cPar.write(cfgFile)
        cfgFile.close()
        settRead = cPar.read("fe-sett.ini")
        local_settings['hotkey'] = cPar.get("settings", "Hotkey")
        local_settings['space_delay'] = cPar.get("settings", "ParachuteDelay")
# Load settings at import time so the menu immediately shows current values.
settingsDigest()
def startProgram(mode):
    """Run the key-spam loop until interrupted.

    mode 1 ("spam"): press space as fast as possible while the hotkey is
    held.  mode 2 ("timed"): same, but sleep ParachuteDelay seconds between
    presses.  Any other mode is treated as a programming error and exits.
    """
    print("Program activated. Press {} to activate glitch.".format(str(local_settings['hotkey'])))
    print("Press CTRL+C to TERMINATE the program. Release {} to stop the glitch.".format(str(local_settings['hotkey'])))
    spacer(2)
    if mode == 1:
        # Busy loop: fires space continuously while the hotkey is down.
        while True:
            # print("DBG: ACTV")
            if keyboard.is_pressed(local_settings['hotkey']):
                print("Activation key is pressed. Exploitin'.")
                keyboard.press_and_release('space')
            else:
                # print("DBG: ACTV3")
                pass
    elif mode == 2:
        # Timed loop: waits space_delay between presses.
        while True:
            if keyboard.is_pressed(local_settings['hotkey']):
                print("Activation key is pressed. Exploitin'.")
                keyboard.press_and_release('space')
                time.sleep(float(local_settings['space_delay']))
    else:
        print(" [-] Invalid function call. Report this. Terminating.")
        quit()
def main():
    """Show the settings summary and menu, then dispatch the user's choice.

    Most menu actions recurse back into main(); recursion depth grows with
    each round trip (accepted by the original design of this CLI).
    """
    spacer(10)
    print("""Your settings are as follows:
    1). Your hotkey is set to: {},
    2). Your timed delay setting is: {}
    You may change these values manually or by using the menu function.""".format(str(local_settings['hotkey']),
                                                                                 str(local_settings['space_delay'])))
    spacer(10)
    print("""Please chose a menu option to get started:
    1). Modify Hotkey
    2). Display settings
    --------------------------------------
    3). Start program with spam mode (can be either really useful or really bad, play around)
    4). Start program with timed mode (custom wait period between each glider deploy, see the config file/settings)
    --------------------------------------
    5). Quit program""")
    try:
        getMenuChoice = int(raw_input(" >> Enter your choice: "))
    except ValueError:
        # NOTE(review): after the recursive main() returns, execution falls
        # through to the comparisons below with getMenuChoice unbound
        # (NameError) — confirm whether a `return` was intended here.
        print("You have entered an invalid input.")
        main()
    if getMenuChoice == 1:
        # Persist the new hotkey to fe-sett.ini and reload it.
        print("Your hotkey is currently: {}, enter what you would like it to be changed to and press enter.".format(str(
            local_settings['hotkey']
        )))
        getHotKeyChange = raw_input("Enter your hotkey: ")
        cfgFile = open(r"C:\Potts' Software\Fortnite Exploit\fe-sett.ini", "w")
        cPar.set("settings", "Hotkey", getHotKeyChange)
        cPar.write(cfgFile)
        cfgFile.close()
        settRead = cPar.read(r"C:\Potts' Software\Fortnite Exploit\fe-sett.ini")
        local_settings['hotkey'] = cPar.get("settings", "Hotkey")
        print("Your hotkey has been changed to {} successfully.".format(str(local_settings['hotkey'])))
        print("Returning to the main menu...")
        spacer(10)
        main()
    elif getMenuChoice == 2:
        # Settings are printed by main()'s header, so just redisplay the menu.
        spacer(10)
        main()
    elif getMenuChoice == 3:
        startProgram(1)
    elif getMenuChoice == 4:
        startProgram(2)
    elif getMenuChoice == 5:
        print("Program is being terminated via menu choice. Bye!")
        spacer(10)
        quit()
    else:
        print(" [-] Invalid input. Chose a number from the menu. Restarting.")
        main()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print(" [-] The program has been manually terminated via the keyboard...")
| vbuckgartuit/Fornite-Parachute-Exploit | cli/main.py | main.py | py | 5,224 | python | en | code | 1 | github-code | 36 |
570245906 | import logging
import math
from .geomsmesh import geompy
from .triedreBase import triedreBase
# Shared direct frame (origin point and axis vectors) used by the GEOM builds.
O, OX, OY, OZ = triedreBase()
def ellipsoideDefaut(minRad,allonge):
    """Build the ellipsoid block containing the crack, centred at the origin.

    The ellipsoid encloses the elliptical crack torus.

    @param minRad: minor radius
    @param allonge: ratio major radius / minor radius
    @return: ellipsoid (geomObject)
    """
    logging.info("start")
    # Start from a sphere of radius 2, rotated a quarter turn around OY,
    # partitioned by a large face, then re-fused into a single solid.
    sphere = geompy.MakeSphereR(2)
    rotated = geompy.MakeRotation(sphere, OY, math.pi/2.0)
    cut_face = geompy.MakeFaceHW(100, 100, 3)
    partition = geompy.MakePartition([rotated], [cut_face], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
    pieces = geompy.ExtractShapes(partition, geompy.ShapeType["SOLID"], True)
    fused = pieces[0]
    for piece in pieces[1:]:
        fused = geompy.MakeFuse(fused, piece)
    # Anisotropic scaling; the Y stretch is (allonge+2)/2 to cap elongation.
    ellipsoide = geompy.MakeScaleAlongAxes(fused, O, minRad, minRad*(allonge+2.0)/2.0, minRad)
    return ellipsoide
| luzpaz/occ-smesh | src/Tools/blocFissure/gmu/ellipsoideDefaut.py | ellipsoideDefaut.py | py | 1,044 | python | en | code | 2 | github-code | 36 |
14017935134 | '''
this module makes helix curve
'''
import math
import maya.cmds as cmds
def helix(radius, pitch, sr, sp, ncvs,*args):
    '''
    Create a helix NURBS curve in Maya.

    radius/pitch give the initial coil size; sr/sp multiply them on every
    step, so values other than 1.0 produce a tapered/accelerating spiral.
    ncvs is the number of generated CVs.  *args absorbs the extra argument
    Maya passes to UI button callbacks.

    Cleanup: removed the unused locals (deg, spas, knots) and the dead
    `d = 1` / `if i % d == 0` filter, which was always true.
    '''
    points = [(radius, 0, 0.5)]  # seed CV ahead of the generated spiral
    for i in range(ncvs):
        radius = radius*sr
        pitch = pitch*sp
        x = radius * math.cos(i)
        y = pitch * i
        z = -radius * math.sin(i)
        points.append((x, y, z))
    cmds.curve(d=3, p=points)
def do_helix(*args):
    """UI callback: read the field values and build the helix curve."""
    fields = {
        name: cmds.floatField(name, value=True, q=True)
        for name in ("radius", "pitch", "sr", "sp")
    }
    ncv = cmds.intField("ncv", value=True, q=True)
    helix(fields["radius"], fields["pitch"], fields["sr"], fields["sp"], ncv)
def do_helix_UI():
    '''
    Build and show the parameter window for the helix tool: one labelled
    field per parameter plus a button wired to do_helix.
    '''
    cmds.window()
    cmds.rowColumnLayout(numberOfColumns=2)
    cmds.text(label="radius")
    cmds.floatField("radius", value=3)
    cmds.text(label="pitch")
    cmds.floatField("pitch", value=0.4)
    cmds.text(label="sr")
    cmds.floatField("sr", value=1.0)
    cmds.text(label="sp")
    cmds.floatField("sp", value=1.0)
    cmds.text(label="ncv")
    cmds.intField("ncv", value=20)
    cmds.text(label="execute")
    cmds.button(label="DoHelix", command=do_helix)
    cmds.showWindow()
# Open the tool window when this script is sourced in Maya.
do_helix_UI()
41416155472 | from Domain.cheltuiala import getSuma, getTip
def celeMaiMariCheltuieli(lista):
    '''
    Determine the largest expense amount for each expense type.
    :param lista: list of expenses
    :return: dict mapping each expense type to its largest amount
    '''
    rezultat = {}
    for cheltuiala in lista:
        tip = getTip(cheltuiala)
        suma = getSuma(cheltuiala)
        # Keep the running maximum per type.
        if tip not in rezultat or suma > rezultat[tip]:
            rezultat[tip] = suma
    return rezultat
| AP-MI-2021/lab-567-ZarnescuBogdan | Logic/functionalitate3.py | functionalitate3.py | py | 564 | python | ro | code | 0 | github-code | 36 |
74552235943 | import json
import datetime, time
import itertools
import pyverdict
import decimal
import os
import multiprocessing
from multiprocessing import Queue
from common import util
import pandas as pd
import numpy as np
import queue
import threading
from threading import Thread
#logger = logging.getLogger("idebench")
class IDEBenchDriver:
    """IDEBench driver executing visualization queries through VerdictDB
    (pyverdict) against scrambled tables in a PostgreSQL database.

    Requests are pushed onto a LIFO queue by process_request(); a worker
    thread started in workflow_start() pops the newest request, drops any
    request older than the latest one served, and runs the rewritten SQL
    over a single shared pyverdict connection.
    """
    # def init(self, options, schema, driver_arg):
    #     pass
    #
    # def workflow_start(self):
    #     print("workflow start")
    #     pass
    #
    # def workflow_end(self):
    #     #os.system("/usr/local/pgsql/bin/pg_ctl stop -D ~/xdb_data")
    #     #os.system('sudo -b bash -c "echo 1 > /proc/sys/vm/drop_caches"')
    #     #os.system("/usr/local/pgsql/bin/pg_ctl start -D ~/xdb_data")
    #     pass
    #def can_execute_online(self, sql_statement):
    #    return (not " or " in sql_statement.lower()) and (not " AVG(" in sql_statement)
    def verdictdbedit(self, sql_statement):
        """Rewrite FROM clauses to the scrambled tables (scramble percent
        from verdictdb.config.json) and lowercase the whole statement."""
        sql_statement=sql_statement.replace('FROM movies','FROM public.movies_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
        sql_statement=sql_statement.replace('FROM flights','FROM public.flights_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
        sql_statement=sql_statement.replace('FROM weather','FROM public.weather_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
        #print("SQL:",sql_statement)
        return sql_statement.lower()
    def create_connection(self):
        """Open a fresh pyverdict connection to the evaluation database."""
        connection = pyverdict.postgres(host=self.config['host'], user='crossfilter', password=self.config['password'], port=self.config['port'], dbname='crossfilter-eval-db')
        connection.set_loglevel("ERROR")
        return connection
    def init(self, options, schema, driver_arg):
        """IDEBench hook: reset state and load the driver configuration.

        NOTE(review): verdictdb.config.json is loaded twice, into both
        self.verdictdbconfig and self.config — they hold the same data.
        """
        self.time_of_latest_request = 0
        self.isRunning = False
        # LIFO: the worker always serves the most recent request first.
        self.requests = queue.LifoQueue()
        with open("verdictdb.config.json","r") as f:
            self.verdictdbconfig = json.load(f)
        self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','verdictdb.config.json')))
    def execute_vizrequest(self, viz_request, options, schema, result_queue):
        """Run one visualization query on the shared connection and put the
        request (with results/margins and timestamps) on result_queue.  Any
        query failure yields an empty result instead of propagating."""
        viz = viz_request.viz
        sql_statement = viz.get_computed_filter_as_sql(schema)
        #calculate connection time
        # get a connection from the pool - block if non is available
        # connection = self.pool.get()
        connection=self.conn
        viz_request.start_time = util.get_current_ms_time()
        try:
            editedSqlStatement = self.verdictdbedit(sql_statement)
            #print(editedSqlStatement)
            data = connection.sql(editedSqlStatement)
        except Exception as e:
            print(e, flush=True)
            viz_request.result = {}
            viz_request.margins = {}
            viz_request.end_time = util.get_current_ms_time()
            result_queue.put(viz_request)
            return
        viz_request.end_time = util.get_current_ms_time()
        # put connection back in the queue so the next thread can use it.
        #cursor.close()
        #connection.close()
        #connection=self.create_connection()
        #self.pool.put(connection)
        results = {}
        # Build "bin-key -> aggregate value" pairs from the result DataFrame.
        # NOTE(review): the loop variable `i` is reused by the inner loops,
        # and only row[0] is used for every binning dimension — confirm
        # multi-dimension binning was ever exercised.
        for i, row in data.iterrows():
            keys = []
            if row[0] is None:
                continue
            for i, bin_desc in enumerate(viz_request.viz.binning):
                if "width" in bin_desc:
                    bin_width = bin_desc["width"]
                    keys.append(str(int(row[0])))
                else:
                    keys.append(str(row[0]).strip())
            key = ",".join(keys)
            row = list(row)
            # pyverdict may return Decimal; normalise to float for JSON.
            for i, r in enumerate(row):
                if isinstance(r, decimal.Decimal):
                    row[i] = float(r)
            results[key] = row[1]
        viz_request.result = results
        #viz_request.margins = margins
        viz_request.margins = {}
        result_queue.put(viz_request)
        print("delivering...")
    def process_request(self, viz_request, options, schema, result_queue):
        """IDEBench hook: enqueue a request for the worker thread."""
        self.requests.put((viz_request, options, schema, result_queue))
    def process(self):
        """Worker loop: pop the newest request, drop stale ones, execute the
        rest; closes the shared connection when the workflow stops."""
        # while the workflow is running, pop the latest request from the stack and execute it
        while self.isRunning:
            try:
                request = self.requests.get(timeout=1)
                viz_request = request[0]
                options = request[1]
                schema = request[2]
                result_queue = request[3]
                # only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
                if viz_request.expected_start_time < self.time_of_latest_request:
                    viz_request.dropped = True
                    result_queue.put(viz_request)
                    continue
                self.time_of_latest_request = viz_request.expected_start_time
                self.execute_vizrequest(viz_request, options, schema, result_queue)
            except Exception as e:
                # ignore queue-empty exceptions
                # NOTE(review): queue.Empty is caught here but still printed,
                # so every idle second prints a blank line — confirm whether
                # it should be silently ignored instead.
                print(e, flush=True)
                pass
        self.conn.close()
    def workflow_start(self):
        """IDEBench hook: open the shared connection and start the worker."""
        # pool a number of db connections
        self.isRunning = True
        #self.pool = queue.Queue()
        #for i in range(1):
        #    conn = self.create_connection()
        #    self.pool.put(conn)
        self.conn=self.create_connection()
        thread = Thread(target = self.process)
        thread.start()
    def workflow_end(self):
        """IDEBench hook: signal the worker loop to stop; the worker closes
        the connection itself on exit."""
        self.isRunning = False
        # close all db connections at the end of a workflow
        #for i in range(self.pool.qsize()):
        #    conn = self.pool.get(timeout=1)
        #    conn.close()
    # def process_request(self, viz_request, options, schema, out_q):
    #     print("processsing..." + str(viz_request.operation_id))
    #     if viz_request.viz.binning:
    #         sql_statement = viz_request.viz.get_computed_filter_as_sql(schema)
    #         sql_statement = sql_statement.replace(schema.get_fact_table_name(), "%s_%s%s" % (
    #             schema.get_fact_table_name(), options.settings_size, "n" if options.settings_normalized else ""))
    #         #if self.can_execute_online(sql_statement):
    #         #    sql_statement = sql_statement.replace("SELECT ", "SELECT ONLINE ")
    #         #    sql_statement += " WITHTIME %s CONFIDENCE 95" % options.settings_time_requirement
    #         #    sql_statement += " REPORTINTERVAL %s;" % options.settings_time_requirement
    #         #    connection, cursor = self.create_connection(options.settings_time_requirement + 20)
    #
    #         #connection, cursor = self.create_connection(options.settings_time_requirement)
    #         #calculate connection time
    #         t1=util.get_current_ms_time()
    #         connection, cursor = self.create_connection()
    #         t2=util.get_current_ms_time()
    #         viz_request.connection_time=t2-t1
    #         viz_request.start_time = util.get_current_ms_time()
    #         try:
    #             data = connection.sql(self.verdictdbedit(sql_statement))
    #         except:
    #             viz_request.result = {}
    #             viz_request.margins = {}
    #             viz_request.timedout = True
    #             viz_request.end_time = util.get_current_ms_time()
    #             out_q.put(viz_request)
    #             return
    #         #data = connection.sql(self.verdictdbedit(sql_statement))
    #         #data=connection.sql(sql_statement)
    #
    #         viz_request.end_time = util.get_current_ms_time()
    #         connection.close()
    #
    #         results = {}
    #         margins = {}
| leibatt/crossfilter-benchmark-public | drivers/verdictdb.py | verdictdb.py | py | 7,794 | python | en | code | 3 | github-code | 36 |
1417006794 | """
A simple text-based adventure game called: Maze Trap
try and find the key and reach to the exit
you may need tools like a torch
Available commands include:
go <compass direction>
take <object>
drop <object>
inventory
quit
"""
# current location: one of the room keys of items_in below (starts in "maze")
state = "maze"
# inventory: list of object names the player carries (may hold several items)
carrying = []
###############################################################
# global dictionary of the movable objects in each location
items_in = {"maze" : [],
            "mazekey" : ["key"],
            "mazetorch" : ["torch"],
            "mazechallenge" : ["fake-key"],
            "mazedeadend" : [],
            "mazeexit" : []
            }
#####################################################
# Functions for describing the current location
def describe_maze():
    """Describe the maze entrance hub."""
    print("You have just entered into a maze, Good Luck!")
    print("Every room around you seems to be lit except the east room")
def describe_mazekey():
    """Describe the key room; its contents show only with the torch."""
    if "torch" in carrying:
        print("Theres a key here, i didnt even realise. It was so dark")
    else:
        print("It's a dark room, might be something here.")
def describe_mazetorch():
    """Describe the torch room, before and after the torch is taken."""
    if "torch" in carrying:
        print("You just took a torch, maybe it can be used somewhere")
    else:
        print("There's a torch on the table")
def describe_mazechallenge():
    """Describe the challenge room that holds the decoy key."""
    if "fake-key" in carrying:
        print("I might be able to keep going with this key")
    else:
        print("Theres a key on the table")
def describe_mazechallenge1():
    """Describe the empty room past the challenge room."""
    print("there was nothing here after all")
def describe_mazedeadend():
    """Describe the dead end; the hidden exit shows with torch AND key."""
    # BUG FIX: the original condition was `"key" and "torch" in carrying`,
    # which Python evaluates as `"key" and ("torch" in carrying)` — the
    # non-empty string "key" is always truthy, so only the torch was ever
    # checked.  Both items must actually be carried.
    if "key" in carrying and "torch" in carrying:
        print("The torch reveals a secret key hole towrarads the north side")
    else:
        print("i dont think this room leads to anything?!?!")
def describe_mazeexit():
    """Announce the player's victory."""
    print("You escaped, Thanks for playing!")
def describe():
    """Print a description of the current location, then list any visible
    items.  Items are hidden in the dark key room without the torch, and
    mazechallenge1/mazeexit never list items (matching the original)."""
    describers = {
        "maze": describe_maze,
        "mazekey": describe_mazekey,
        "mazetorch": describe_mazetorch,
        "mazechallenge": describe_mazechallenge,
        "mazechallenge1": describe_mazechallenge1,
        "mazedeadend": describe_mazedeadend,
        "mazeexit": describe_mazeexit,
    }
    if state not in describers:
        print("ERROR: unknown location: " + str(state))
        return
    describers[state]()
    # Decide whether the room's item list should be shown.
    show_items = state not in ("mazechallenge1", "mazeexit")
    if state == "mazekey" and "torch" not in carrying:
        show_items = False
    if show_items:
        for item in items_in[state]:
            print("You can see: " + item + " maybe you can pick it up?.")
#######################################################
# Functions for moving between locations
def move_maze(direction):
    """Return the room reached from the maze hub, or "" if blocked."""
    exits = {"east": "mazekey", "north": "mazedeadend",
             "south": "mazetorch", "west": "mazechallenge"}
    return exits.get(direction, "")
def move_mazekey(direction):
    """Only exit from the key room is west, back to the hub."""
    return "maze" if direction == "west" else ""
def move_mazedeadend(direction):
    """South returns to the hub; north is the hidden exit (key required)."""
    if direction == "south":
        return "maze"
    if direction == "north" and "key" in carrying:
        return "mazeexit"
    return ""
def move_mazetorch(direction):
    """Only exit from the torch room is north, back to the hub."""
    return "maze" if direction == "north" else ""
def move_mazechallenge(direction):
    """East returns to the hub; west leads deeper to the empty room."""
    return {"east": "maze", "west": "mazechallenge1"}.get(direction, "")
def move_mazechallenge1(direction):
    """Only exit from the empty room is east, back to the challenge room."""
    return "mazechallenge" if direction == "east" else ""
def move_cmd(direction):
    """Attempt to move in the given direction.

    This updates the 'state' variable to the new location,
    or leaves it unchanged and prints a warning if the move was not valid.

    :param direction: a compass direction, "north", "east", "south", or "west".
    :return: None
    """
    global state
    # Per-room movement rules; replaces the original if/elif dispatch chain.
    movers = {
        "maze": move_maze,
        "mazetorch": move_mazetorch,
        "mazechallenge": move_mazechallenge,
        "mazechallenge1": move_mazechallenge1,
        "mazedeadend": move_mazedeadend,
        "mazekey": move_mazekey,
    }
    if state == "mazeexit":
        # ROBUSTNESS FIX: "mazeexit" is a reachable state but had no branch,
        # so moving from it printed the "unknown state" warning; the exit
        # simply has no onward moves.
        new_state = ""
    elif state in movers:
        new_state = movers[state](direction)
    else:
        print("WARNING: move_cmd sees unknown state: " + state)
        new_state = ""
    # now check to see if it was a valid move
    if new_state == "":
        print("You cannot go " + str(direction) + " from here.")
    else:
        state = new_state
#########################################################
def take_cmd(obj):
    """Try to pick up the given object.

    Most objects can only be picked up when in the correct room.

    :param obj: name of the object to pick up.
    :return: None
    """
    global carrying
    # If the object lies in the current room, move it from the room's item
    # list into the player's inventory.  (Note: previously-carried items are
    # kept; nothing is dropped here.)
    if obj in items_in[state]:
        carrying.append(obj)
        print("You picked up and carrying " + obj + ".")
        items_in[state].remove(obj)
    else:
        print("There is nothing to pickup!")
########################################################################
#this function is used to drop an object from your inventory to the current room
def drop_cmd(obj):
    """Move *obj* from the player's inventory into the current room."""
    if obj not in carrying:
        print("There is nothing to drop!")
        return
    items_in[state].append(obj)
    print("You dropped " + obj + ".")
    carrying.remove(obj)
#########################################################
def inventory_cmd():
    """Print the list of objects the player is currently carrying."""
    print(f"You are currently carrying:{carrying}")
########################################################################
# The main loop that processes the player's input commands.
def main():
    """Run the game loop: up to 20 turns of reading and dispatching player commands."""
    for turn in range(20, 0, -1):
        print("")
        describe()
        cmd = input("Enter your command " + str(turn) + "> ")
        cmd = str.lower(cmd) #This will turn any input command into lowercase
        cmd_split = str.split(cmd) #This splits the input command into a list of words
        cmd = " ".join(cmd_split) #This turns the list of words into a single string
        if cmd == "quit":
            print("You gave in so easily :-(")
            break
        elif cmd.startswith("go "):
            where = cmd[3:]
            move_cmd(where)
            # NOTE(review): none of the move_* helpers visible in this chunk
            # return "outside"; presumably set elsewhere -- confirm this win
            # condition is reachable.
            if state == "outside":
                print("You push the door open with the heavy aquarium and escape to outside!")
                break
        elif cmd.startswith("take "):
            obj_name = cmd[5:]
            take_cmd(obj_name)
        elif cmd.startswith("drop "):
            obj_name = cmd[5:]
            drop_cmd(obj_name)
        elif cmd == "inventory":
            inventory_cmd()
        else:
            print("I do not understand '" + cmd + "'. Try go/take/drop/inventory/quit")
    print("Game over")
if __name__ == "__main__":
    main()
| Kaizuu08/PythonShowcase2023Semester1 | Week 7/text_game_multiple.py | text_game_multiple.py | py | 7,522 | python | en | code | 0 | github-code | 36 |
42917266933 | from apps.horizons import Horizons
hztn = Horizons()
def test_connect():
    """Test connection to the JPL telnet service.

    `tn` starts as None and is populated by connect().
    """
    # PEP 8 (E711): compare against None with `is` / `is not`, not ==/!=.
    assert hztn.tn is None
    hztn.connect()
    assert hztn.tn is not None
def test_bodies_list():
    """ Test the get bodies and JPL ID lists methods """
    barycenters = hztn.get_barycenters()
    print(barycenters)
    # Entries appear to be (jpl_id, name) pairs; ids 0-9 cover the ten
    # solar-system barycenters.
    assert barycenters[3][0] == 3
    assert barycenters[2][1] == 'Venus Barycenter'
    assert 10 == len(barycenters)
    neptune = barycenters[8]
    neptune_system = hztn.get_barycenter_members(neptune[0])
    print(neptune_system)
    assert None != neptune_system
    # 899 (Neptune itself) is expected to be the last member of its system.
    assert neptune_system[len(neptune_system) - 1][0] == 899
def test_essential_orbit():
    """ Test retrieving essential orbit data """
    # JPL ids: 499 = Mars, 899 = Neptune; second argument is the system id.
    mars = 499
    mars_system = 10
    neptune = 899
    neptune_system = 8
    result = hztn.get_orbit(mars,mars_system)
    # NOTE(review): exact float equality is brittle if upstream ephemeris
    # data ever changes; consider pytest.approx / math.isclose.
    assert result['orbital_period'] == 686.9707263001874
    print(result)
    result = hztn.get_orbit(neptune,neptune_system)
    print(result)
    assert result['eccentricity'] == 0.004492338553255954
| nlantoing/Astrarium | tests/apps/test_horizons.py | test_horizons.py | py | 1,088 | python | en | code | 0 | github-code | 36 |
28068259902 | # 나이순 정렬
# https://www.acmicpc.net/problem/10814
def solution() :
    """Sort gym members by age, keeping input order among equal ages (BOJ 10814).

    Reads N, then N lines of "age name" from stdin; prints the sorted list.
    """
    count = int(input())
    members = []
    for idx in range(count):
        age_text, name = input().split()
        members.append((idx, int(age_text), name))
    # Sort primarily by age, secondarily by arrival index (stable ordering).
    for _, age, name in sorted(members, key=lambda m: (m[1], m[0])):
        print(age, name)
solution()
| hwanginbeom/algorithm_study | 1.algorithm_question/8.Sort/152.Sorting_wooseok.py | 152.Sorting_wooseok.py | py | 385 | python | en | code | 3 | github-code | 36 |
27787353861 | #v.2.0.0
import json, os, time
import resources.config_server as config
from resources.lib.xlogger import Logger
from resources.lib.blasters import *
from resources.lib.websocket_server import WebsocketServer
class Main:
    def __init__( self, thepath ):
        """Start IguanaIR Blaster Server.

        Builds the logger, reads settings, then blocks in the websocket
        server's run_forever() loop until shutdown.

        :param thepath: path of the running script; its directory becomes ROOTPATH.
        """
        self.ROOTPATH = os.path.dirname( thepath )
        self.LW = Logger( logfile=os.path.join( self.ROOTPATH, 'data', 'logs', 'server.log' ),
                          numbackups=config.Get( 'logbackups' ), logdebug=config.Get( 'debug' ) )
        self.LW.log( ['script started'], 'info' )
        self.WAIT_BETWEEN = config.Get( 'wait_between' )
        # Simple busy flag so only one IR command sequence runs at a time.
        self.CMDRUNNING = False
        self.SERVER = WebsocketServer( config.Get( 'ws_port' ), host=config.Get( 'ws_ip' ) )
        self.SERVER.set_fn_new_client( self._new_client )
        self.SERVER.set_fn_client_left( self._client_left )
        self.SERVER.set_fn_message_received( self._message_received )
        self.SERVER.run_forever()
        self.LW.log( ['script finished'], 'info' )
    def _new_client( self, client, server ):
        # Websocket callback: a client connected.
        self.LW.log( ['Client connected'] )
    def _client_left( self, client, server ):
        # Websocket callback: a client disconnected.
        self.LW.log( ['Client disconnected'] )
    def _message_received(self, client, server, message ):
        """Handle an incoming JSON message: pick a blaster and send its commands."""
        # Truncate very long messages for logging only (full text is still parsed).
        if len(message) > 200:
            message = message[:200] + '..'
        self.LW.log( ['Client said: %s' % message] )
        jm = json.loads( message )
        blaster = self._pick_blaster( jm )
        if not blaster:
            self.LW.log( ['invalid blaster type configured in settings, not sending any commands'], 'info' )
        else:
            # Wait (polling once per second) until any in-flight command finishes.
            while self.CMDRUNNING:
                time.sleep( 1 )
                self.LW.log( ['checking to see if previous command has completed'], 'info' )
            self.CMDRUNNING = True
            self.LW.log( ['sending commands on to %s' % jm.get( 'blaster' )], 'info' )
            loglines = blaster.SendCommands( jm.get( 'commands' ) )
            self.LW.log( loglines )
            self.CMDRUNNING = False
    def _pick_blaster( self, jm ):
        """Return a blaster instance for the message's 'blaster' key, or None if unsupported."""
        if jm.get( 'blaster' ) == 'iguanair':
            return iguanair.Blaster( keypath=os.path.join( self.ROOTPATH, 'data', 'keys' ), key_ext=config.Get( 'key_ext' ),
                                     path_to_igc=config.Get( 'path_to_IGC' ), irc=jm.get( 'irc' ), wait_between=self.WAIT_BETWEEN )
        else:
            return None
| pkscout/iguana-blaster | resources/lib/server.py | server.py | py | 2,443 | python | en | code | 0 | github-code | 36 |
32751323841 | from raspberry_pi.adapter import Adapter
from raspberry_pi.human_sensor import HumanSensor
from raspberry_pi.humidity_sensor import HumiditySensor
from raspberry_pi.target import Target
from raspberry_pi.temperature_sensor import TemperatureSensor
import json
# Return the adaptee instance matching the given key (None if unknown).
def get_adaptee_class(key):
    adaptees = {
        'target': Target(),
        'temperature_sensor': TemperatureSensor(),
        'humidity_sensor': HumiditySensor(),
        'human_sensor': HumanSensor()
    }
    return adaptees.get(key, None)
if __name__ == "__main__":
    # Choose the sensor type to adapt
    adaptee_type = 'human_sensor'
    # Wrap the chosen adaptee with the Adapter
    adapter = Adapter(get_adaptee_class(adaptee_type))
    # Collect data through the adapted interface
    data = adapter.request()
    # Serialize the reading to JSON
    json_data = json.dumps(data)
    print(json_data)
# 传输到kafka | qihonggang/leetcode | python_code/raspberry_pi/main.py | main.py | py | 902 | python | en | code | 1 | github-code | 36 |
40729675478 | from typing import Dict, List, Optional
from fuzzly.models.post import PostId, PostIdValidator
from fuzzly.models.tag import TagGroupPortable
from fuzzly.models.user import UserPortable
from pydantic import BaseModel
class LookupRequest(BaseModel) :
	"""Request body for a tag lookup; a None tag presumably means 'all' -- confirm with handler."""
	tag: Optional[str]
class TagsRequest(BaseModel) :
	"""Request body carrying a post id and a list of tag names."""
	_post_id_converter = PostIdValidator  # pydantic validator coercing post_id into a PostId
	post_id: PostId
	tags: List[str]
class RemoveInheritance(BaseModel) :
	"""Request body naming a parent/child tag pair whose inheritance is removed."""
	parent_tag: str
	child_tag: str
class InheritRequest(RemoveInheritance) :
	"""Inheritance-creation request: same pair as RemoveInheritance plus an optional deprecate flag."""
	deprecate: Optional[bool] = False
class UpdateRequest(BaseModel) :
	"""Partial tag update; every field is optional so omitted fields stay unchanged."""
	name: Optional[str]
	group: Optional[TagGroupPortable]
	owner: Optional[str]
	description: Optional[str]
	deprecated: Optional[bool] = None
class TagPortable(str) :
	"""A tag name represented as a plain string subtype."""
	pass
class TagGroups(Dict[TagGroupPortable, List[TagPortable]]) :
	"""Mapping of tag group -> tags belonging to that group."""
	pass
class Tag(BaseModel) :
	"""Public-facing tag representation (owner resolved to a portable user)."""
	tag: str
	owner: Optional[UserPortable]
	group: TagGroupPortable
	deprecated: bool
	inherited_tags: List[TagPortable]
	description: Optional[str]
	count: int  # number of posts carrying this tag, presumably -- confirm
class InternalTag(BaseModel) :
	"""Internal tag representation (owner stored as a raw id, group as a raw string)."""
	tag: str
	owner: Optional[int]
	group: str
	deprecated: bool
	inherited_tags: List[str]
	description: Optional[str]
| kheina-com/tagger | models.py | models.py | py | 1,142 | python | en | code | 0 | github-code | 36 |
17895710010 | r"""Train toy segmenter model on cityscapes.
"""
# pylint: enable=line-too-long
import ml_collections
batch_size = 128
# Train-split size used below to derive steps_per_epoch.
_CITYSCAPES_TRAIN_SIZE_SPLIT = 146
# Model spec.
STRIDE = 4  # patch size (and output stride) of the toy ViT backbone
mlp_dim = 2
num_heads = 1
num_layers = 1
hidden_size = 1
target_size = (128, 128)  # input image size fed to the segmenter
def get_config(runlocal=''):
  """Returns the configuration for Cityscapes segmentation.

  Args:
    runlocal: truthy when running locally; disables FLOP counting.

  Returns:
    An ml_collections.ConfigDict with dataset, model, training, logging
    and evaluation settings for the toy Segmenter model.
  """
  runlocal = bool(runlocal)

  config = ml_collections.ConfigDict()

  config.experiment_name = 'cityscapes_segmenter_toy_model'

  # Dataset.
  config.dataset_name = 'cityscapes'
  config.dataset_configs = ml_collections.ConfigDict()
  config.dataset_configs.target_size = target_size
  # Only 5% of the train split is used for this toy setup.
  config.dataset_configs.train_split = 'train[:5%]'
  config.dataset_configs.dataset_name = ''  # name of ood dataset to evaluate

  # Model.
  config.model_name = 'segvit'
  config.model = ml_collections.ConfigDict()
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = (STRIDE, STRIDE)
  config.model.backbone = ml_collections.ConfigDict()
  config.model.backbone.type = 'vit'
  config.model.backbone.mlp_dim = mlp_dim
  config.model.backbone.num_heads = num_heads
  config.model.backbone.num_layers = num_layers
  config.model.backbone.hidden_size = hidden_size
  config.model.backbone.dropout_rate = 0.1
  config.model.backbone.attention_dropout_rate = 0.0
  config.model.backbone.classifier = 'gap'

  # Decoder
  config.model.decoder = ml_collections.ConfigDict()
  config.model.decoder.type = 'linear'

  # Training.
  config.trainer_name = 'segvit_trainer'
  config.optimizer = 'adam'
  config.optimizer_configs = ml_collections.ConfigDict()
  config.l2_decay_factor = 0.0
  config.max_grad_norm = 1.0
  config.label_smoothing = None
  config.num_training_epochs = ml_collections.FieldReference(2)
  config.batch_size = batch_size
  config.rng_seed = 0
  config.focal_loss_gamma = 0.0

  # Learning rate.
  config.steps_per_epoch = _CITYSCAPES_TRAIN_SIZE_SPLIT // config.get_ref(
      'batch_size')
  # setting 'steps_per_cycle' to total_steps basically means non-cycling cosine.
  config.lr_configs = ml_collections.ConfigDict()
  config.lr_configs.learning_rate_schedule = 'compound'
  config.lr_configs.factors = 'constant * cosine_decay * linear_warmup'
  config.lr_configs.warmup_steps = 0
  config.lr_configs.steps_per_cycle = config.get_ref(
      'num_training_epochs') * config.get_ref('steps_per_epoch')
  config.lr_configs.base_learning_rate = 1e-4

  # model and data dtype
  config.model_dtype_str = 'float32'
  config.data_dtype_str = 'float32'

  # init not included

  # Logging.
  config.write_summary = True
  config.write_xm_measurements = True  # write XM measurements
  config.xprof = False  # Profile using xprof.
  config.checkpoint = True  # Do checkpointing.
  config.checkpoint_steps = 5 * config.get_ref('steps_per_epoch')
  config.debug_train = False  # Debug mode during training.
  config.debug_eval = False  # Debug mode during eval.
  config.log_eval_steps = 1 * config.get_ref('steps_per_epoch')

  # Evaluation.
  config.eval_mode = False
  config.eval_configs = ml_collections.ConfigDict()
  config.eval_configs.mode = 'standard'
  config.eval_covariate_shift = True
  config.eval_label_shift = True

  if runlocal:
    config.count_flops = False

  return config
def get_sweep(hyper):
  """Return an empty hyperparameter sweep (a single default configuration)."""
  return hyper.product([])
| google/uncertainty-baselines | experimental/robust_segvit/configs/cityscapes/toy_model.py | toy_model.py | py | 3,338 | python | en | code | 1,305 | github-code | 36 |
16725608494 | #!/usr/bin/env python
import cPickle
import fasttext
import numpy as np
import os
import sys
from mlfutil import CharEncoder, draw_progress
data_file = sys.argv[1]
cencoder = CharEncoder()
def title_encoding(title):
    """Map a UTF-8 encoded title (byte string, Python 2) to character indices.

    Characters unknown to the encoder (cat2idx < 0) map to the 'UNK' index.
    """
    chars = []
    for char in title.decode('utf8'):
        idx = cencoder.cat2idx(char)
        idx = cencoder.cat2idx('UNK') if idx < 0 else idx
        chars.append(idx)
    return chars
def build_dict():
    # Build the character dictionary from the full corpus and persist it.
    cencoder.build_dict('data_all/data_all.tsv')
    cencoder.save_dict('chardict')
def init():
    # Load the previously saved character dictionary into the encoder.
    cencoder.load_dict('chardict')
def main():
    """Encode every title in data_file and write 'label<TAB>char indices' rows.

    (Python 2 script: note the `print >> fo` redirection syntax below.)
    """
    outfile = sys.argv[2]
    build_dict()
    init()
    with open(data_file) as f:
        data = [l.rstrip('\r\n').split('\t') for l in f.readlines()]
    fo = open(outfile, 'w')
    data_size = len(data)
    for nr, rec in enumerate(data):
        title = rec[1]
        title_feats = title_encoding(title)
        # One output row: the label (rec[0]) followed by the title's indices.
        print >> fo, \
            '\t'.join(map(str, rec[0:1] + title_feats))
        draw_progress(nr, data_size-1)
    fo.close()
if __name__ == '__main__':
    main()
| kn45/tf-models | rnn_regressor/3_Feature.py | 3_Feature.py | py | 1,063 | python | en | code | 0 | github-code | 36 |
27360781667 | from struct import unpack
from numpy import zeros, uint8, ravel
def imagefeatures_and_labels (datatype):
    """Read MNIST idx files and return (images, flattened features, labels).

    :param datatype: 'train' or 'test'; selects which idx file pair to open.
    :return: tuple of uint8 arrays (images[N,28,28], image_features[N,784], labels[N]).
    """
    input_data = ''
    input_labels = ''
    if(datatype == 'train'):
        input_data = open('train-images.idx3-ubyte', 'rb')
        input_labels = open('train-labels.idx1-ubyte', 'rb')
    if(datatype == 'test'):
        input_data = open('t10k-images.idx3-ubyte', 'rb')
        input_labels = open('t10k-labels.idx1-ubyte', 'rb')
    # idx header: magic number, item count, rows, cols (big-endian uint32 each).
    s1, s2, s3, s4 = input_data.read(4), input_data.read(4), input_data.read(4), input_data.read(4)
    magicnum = unpack('>I',s1)[0] #2051 -- read but never validated
    totalimage = unpack('>I',s2)[0] #60000 for train, #10000 for test set
    rows = unpack('>I',s3)[0] #28
    cols = unpack('>I',s4)[0] #28
    s1, s2 = input_labels.read(4), input_labels.read(4)
    totallabel = unpack('>I',s2)[0]
    #put the data into a numpy array
    images = zeros((totalimage, rows, cols), dtype = uint8)
    image_features = zeros((totalimage, rows*cols), dtype = uint8)
    labels = zeros((totallabel, 1), dtype = uint8)
    # NOTE(review): if image/label counts differ, the zero-filled arrays are
    # returned silently; an unknown `datatype` raises AttributeError on the
    # '' placeholder -- confirm callers only pass 'train'/'test'.
    if( totalimage == totallabel):
        for i in range(totalimage):
            for row in range(rows):
                for col in range(cols):
                    pixel = unpack('>B', input_data.read(1))[0] #1 byte
                    images[i][row][col] = pixel
            image_features[i] = ravel(images[i])
            labels[i] = unpack('>B', input_labels.read(1))[0]
    input_data.close()
    input_labels.close()
    return (images, image_features, ravel(labels))
| OzgeAkin/MachineLearningPraktikum | week1/datasetpreparation.py | datasetpreparation.py | py | 1,533 | python | en | code | 0 | github-code | 36 |
def main():
    """Read 10 students' ages and names; print the names of those 18 or older."""
    adults: list[str] = []
    for student in range(10):
        age = int(input(f"Ingrese la edad del alumno {student+1}: "))
        name = input("Ingrese su nombre: ")
        if age >= 18:
            adults.append(name)
    print(adults)
if __name__ == '__main__':
main()
| GermanMorini/Programacion1 | GTP2/gtp8.py | gtp8.py | py | 357 | python | es | code | 1 | github-code | 36 |
33517425176 | #from typing_extensions import runtime
from manim import *
#####################################################################################
###################### Norma inducida y bases ortonormales ########################
#####################################################################################
#####################################################################################
############################### Cuarta escena ######################################
###################### versión: Manim Community v0.17.3 ##########################
#####################################################################################
# Color palette (hex codes) shared by all scenes below.
ROJO = '#FF0000'
AZUL = '#0087FF'
NARANJA = '#FF7700'
VERDE = '#1FFF00'
MAGENTA = '#FF00FF'
AMARILLO = "#FFFF00"
GRIS = "#888888"
MAGENTA_CLARO = "#FF67FF"
AZUL_CLARO = "#9CDCEB"
AZUL_OSCURO = "#1C758A"
TEAL_A = "#ACEAD7"
TEAL_E = "#49A88F"
MOSTAZA_OSCURO = "#FFD025"
MOSTAZA_CLARO = "#FFE072"
# Useful for the following: if you only want to render one section, set this
# to 'True' and change that section's 'skip_animations' value to 'not SKIP_DEFAULT'.
SKIP_DEFAULT = False
class SE1 (MovingCameraScene):
    """Scene 1: projection of u onto v, rewritten in terms of the unit vector v-hat."""
    def construct(self):
        #-------------------------------------------- Variables defining the coordinate system
        escala_plano = 0.8
        origen_plano = np.array([-3, 0, 0])
        #-------------------------------------------- Text objects (projection formula rewrites)
        texto_0 = MathTex(r" \vec{u},\vec{v}\in V, \vec{v}\neq \vec{0}. ").shift(3*RIGHT + 2.5*UP).scale(0.8)
        texto_1 = MathTex(r"\frac{\langle \vec{u} , \vec{v} \rangle}{\langle \vec{v} , \vec{v} \rangle} \vec{v}").next_to(texto_0, 2*DOWN).scale(0.8)
        texto_1_hat = MathTex(r"\frac{\langle \vec{u} , \hat{v} \rangle}{\langle \hat{v} , \hat{v} \rangle} \hat{v}").next_to(texto_0, 2*DOWN).scale(0.8)
        texto_2 = MathTex(r"= \frac{\langle \vec{u} , \vec{v} \rangle}{\big(\sqrt{\langle \vec{v} , \vec{v} \rangle}\big)^2} \vec{v} ").next_to(texto_1,RIGHT).scale(0.8).shift(0.6*LEFT)
        texto_3 = MathTex(r"= \frac{\langle \vec{u} , \vec{v} \rangle}{||\vec{v}||^2} \vec{v}").scale(0.8)
        texto_4 = MathTex(r"= \frac{\langle \vec{u} , \vec{v} \rangle}{||\vec{v}||} \bigg( \frac{1}{||\vec{v}||} \vec{v} \bigg)").scale(0.8)
        texto_5 = MathTex(r"= \frac{\langle \vec{u} , \vec{v} \rangle}{||\vec{v}||} \hat{v}").scale(0.8)
        texto_5_hat = MathTex(r"= \frac{\langle \vec{u} , \hat{v} \rangle}{||\hat{v}||} \hat{v}").scale(0.8)
        texto_6 = MathTex(r"\vec{u},\hat{v}\in V, ||\hat{v}||= 1.").shift(4.225*RIGHT + 2.4725*UP).scale(0.8)
        uno = MathTex("1").scale(0.8)
        texto0 = VGroup(texto_2, texto_3, texto_4, texto_5)\
            .arrange(DOWN, center=False, aligned_edge=LEFT).shift(0.2*RIGHT)
        #-------------------------------------------- Vectors u and v
        v = np.array([3, -1, 0])
        u = np.array([2, 1.5, 0])
        #--------------------------------------------
        # vt_v tracks the divisor applied to v; value 3.7 marks "v normalized".
        vt_v = ValueTracker(1)
        v_vect = Vector([3, -1, 0], buff=0, color=VERDE).shift(origen_plano)
        u_vect = Vector(u, buff=0, color=AZUL).shift(origen_plano)
        distancia_v_label = 0.4*v/(np.linalg.norm(v))
        distancia_u_label = 0.4*u/(np.linalg.norm(u))
        v_label = MathTex(r"\vec{v}").scale(0.8).move_to(v_vect.get_end()+0.5*LEFT+0.1*DOWN).set_color(VERDE)
        u_label = MathTex(r"\vec{u}").scale(0.8).move_to(u_vect.get_end()+0.5*LEFT+0.1*UP).set_color(AZUL)
        v_vect.add_updater(lambda v:
                           v_vect.become(Vector([3/vt_v.get_value(), (-1)/vt_v.get_value(), 0], buff=0, color=VERDE).shift(origen_plano))
                           )
        # Label switches from \vec{v} to \hat{v} once the tracker hits 3.7 (normalization).
        v_label.add_updater(lambda v:
                            v_label.become(MathTex(r"\hat{v}").scale(0.8).move_to(v_vect.get_end()+0.5*LEFT+0.1*DOWN).set_color(VERDE)) if (vt_v.get_value() == 3.7)
                            else v_label.become(MathTex(r"\vec{v}").scale(0.8).move_to(v_vect.get_end()+0.5*LEFT+0.1*DOWN).set_color(VERDE))
                            )
        aux_vect = origen_plano+(np.dot(v, u)/np.dot(v, v))*v
        proy_u_v_arrow = Arrow(start=origen_plano, end=aux_vect, buff=0, color=AMARILLO)
        dashedline = DashedLine(
            u_vect.get_end(), proy_u_v_arrow.get_end(), color=AMARILLO
        )
        label_proy_u_v = MathTex(r"{",  # 0
                                 r"\langle",  # 1
                                 r"\vec{u}",  # 2
                                 r",",  # 3
                                 r"\vec{v}",  # 4
                                 r"\rangle",  # 5
                                 r"\over",  # 6
                                 r"\langle \vec{v}, \vec{v} \rangle",  # 7
                                 r"}",  # 8
                                 r"\vec{v}",  # 9
                                 ).set_color(AMARILLO).scale(0.8).move_to(origen_plano).shift(0.7*LEFT+0.5*UP)
        label_c = MathTex(r"{",  # 0
                          r"\langle",  # 1
                          r"\vec{u}",  # 2
                          r",",  # 3
                          r"\vec{v}",  # 4
                          r"\rangle",  # 5
                          r"\over",  # 6
                          r"||\vec{v}||"  # 7
                          r"}",  # 8
                          r"\hat{v}",  # 9
                          ).set_color(AMARILLO).scale(0.8).move_to(origen_plano).shift(0.7*LEFT+0.5*UP)
        label_d = MathTex(r"{",  # 0
                          r"\langle",  # 1
                          r"\vec{u}",  # 2
                          r",",  # 3
                          r"\hat{v}",  # 4
                          r"\rangle",  # 5
                          r"\over",  # 6
                          r"||\hat{v}||"  # 7
                          r"}",  # 8
                          r"\hat{v}",  # 9
                          ).set_color(AMARILLO).scale(0.8).move_to(origen_plano).shift(0.7*LEFT+0.5*UP)
        label_e = MathTex(r"{",  # 0
                          r"\langle",  # 1
                          r"\vec{u}",  # 2
                          r",",  # 3
                          r"\hat{v}",  # 4
                          r"\rangle",  # 5
                          r"\over",  # 6
                          r"1"  # 7
                          r"}",  # 8
                          r"\hat{v}",  # 9
                          ).set_color(AMARILLO).scale(0.8).move_to(origen_plano).shift(0.7*LEFT+0.5*UP)
        grid = NumberPlane(x_range=[-4, 4, 1], y_range=[-4, 4, 1],
                           background_line_style={
            "stroke_width": 1, "stroke_opacity": 0.5}
        ).scale(escala_plano).shift(origen_plano)
        t = ValueTracker(0)
        # Updaters used to morph v and the projection arrow as t advances.
        def upd_for_v(obj):
            new_vec = Vector(v-(t.get_value()/np.linalg.norm(u)**2)*u,
                             buff=0, color=VERDE).shift(origen_plano)
            obj.become(new_vec)
        def upd_for_proy_u_v_arrow(obj):
            if(np.abs(t.get_value()-np.dot(u, v)) > 0.05):
                v_ = v-t.get_value()*u/np.linalg.norm(u)**2
                aux = origen_plano+(np.dot(v_, u)/np.dot(v_, v_))*v_
                new_arrow = Arrow(start=origen_plano, end=aux, buff=0, color=AMARILLO)
                obj.become(new_arrow)
            else:
                obj.become(Dot(origen_plano, radius=0.1, color=AMARILLO))
        def upd_for_label(obj):
            v_ = v-t.get_value()*u/np.linalg.norm(u)**2
            distancia = 0.4*v_/(np.linalg.norm(v_))
            obj.move_to(v_vect.get_end()+distancia)
        def upd_for_dashedline(obj):
            new_dashedline = DashedLine(
                u_vect.get_end(), proy_u_v_arrow.get_end(), color=AMARILLO
            )
            obj.become(new_dashedline)
        # Animations
        self.next_section("...de un vector u sobre un vector no nulo v....", skip_animations=SKIP_DEFAULT)
        self.camera.frame.shift(4.25*RIGHT)
        texto_0.shift(RIGHT)
        self.play(Write(texto_0[0][0:2]))
        self.wait()
        self.play(Write(texto_0[0][2:]))
        self.wait(1.5)
        self.add_foreground_mobjects(texto_1)
        self.play(Write(texto_1[0][0:17]))
        self.wait()
        self.play(Write(texto_1[0][17:]))
        self.next_section("...reescribir esta expresión como sigue:", skip_animations=SKIP_DEFAULT)
        self.add_foreground_mobjects(texto_2)
        self.play(Write(texto_2))
        self.wait()
        self.add_foreground_mobjects(texto_3)
        self.play(Write(texto_3))
        self.wait()
        self.add_foreground_mobjects(texto_4)
        self.play(Write(texto_4))
        self.wait()
        self.add_foreground_mobjects(texto_5)
        self.play(Write(texto_5))
        self.wait()
        self.next_section("[pausa] Es decir, la componente del vector u...", skip_animations=SKIP_DEFAULT)
        self.play(self.camera.frame.animate.shift(4.25*LEFT), Write(grid), run_time=1)
        self.play(
            Write(u_vect),
            Write(u_label),
            run_time=1.75
        )
        self.play(
            Write(v_vect),
            Write(v_label),
            run_time=1.25
        )
        self.play(
            Write(dashedline),
            Write(proy_u_v_arrow),
            FadeIn(label_proy_u_v[:]),
            run_time=1.25
        )
        self.wait()
        self.next_section("...reescalando al vector unitario...", skip_animations=SKIP_DEFAULT)
        self.play(FadeOut(texto_2), FadeOut(texto_3), FadeOut(texto_4), run_time=0.5)
        self.play(texto_5.animate.shift(3.675*UP), run_time=0.75)
        texto_5_hat.move_to(texto_5.get_center())
        texto_1_hat.move_to(texto_1.get_center())
        self.wait(0.5)
        self.play(Indicate(texto_5[0][15:]), run_time=3.25)
        self.play(Indicate(texto_5[0][1:8]), run_time=2.25)
        self.play(Indicate(texto_5[0][9:15]), run_time=1.5)
        self.play(ReplacementTransform(label_proy_u_v[7], label_c[7]),
                  ReplacementTransform(label_proy_u_v[9], label_c[8]))
        self.wait()
        self.next_section("...es unitario...", skip_animations=SKIP_DEFAULT)
        label_e[4].shift(0.05*UP+0.02*LEFT)
        self.play(ReplacementTransform(texto_0[0][3:5], texto_6[0][3:5]),
                  ReplacementTransform(texto_0[0][8:], texto_6[0][8:]),
                  ReplacementTransform(texto_5[0][5], texto_5_hat[0][5]),
                  ReplacementTransform(texto_5[0][11], texto_5_hat[0][11]),
                  ReplacementTransform(texto_1[0][4], texto_1_hat[0][4]),
                  ReplacementTransform(texto_1[0][9], texto_1_hat[0][9]),
                  ReplacementTransform(texto_1[0][12], texto_1_hat[0][12]),
                  ReplacementTransform(texto_1[0][15], texto_1_hat[0][15]),
                  ReplacementTransform(label_proy_u_v[4], label_e[4]),
                  ReplacementTransform(label_c[7], label_d[7]),
                  vt_v.animate.set_value(3.7)
                  )
        self.wait()
        self.next_section("...se simplifica aún más.", skip_animations= SKIP_DEFAULT)
        uno.move_to(texto_5[0][9:15].get_center())
        self.play(Transform(texto_5[0][9:15], uno),
                  ReplacementTransform(label_d[7], label_e[7])
                  )
        self.wait()
        self.play(
            texto_5[0][8:10].animate.set_opacity(0),
            label_proy_u_v[6].animate.set_opacity(0),
            label_proy_u_v[0:6].animate.shift(0.25*DOWN),
            texto_5[0][1:8].animate.shift(0.25*DOWN),
            label_e[4].animate.set_opacity(0),
            label_e[7].animate.set_opacity(0)
        )
        self.wait()
class SE2(MovingCameraScene):
    """Scene 2: definition of orthonormal sets and orthonormal bases."""
    def bortonormal(self):
        """Build and animate the orthonormal-set / orthonormal-basis definitions."""
        texto1 = MathTex(r"N=\{\vec{n}_1,...,\vec{n}_k\}\subseteq V \ \text{es \emph{ortonormal}}").scale(.55)
        texto1_2 = MathTex(r"\text{si} \ \langle \vec{n}_i , \vec{n}_j \rangle =", r"0 \text{ para } i\neq j, \text{ con } 1\le i,j\le k,").scale(.55)
        texto1_3 = MathTex(r"\text{y }", r"||" , r"\vec{n}_i" , r"||", r"=1" , r"\text{ para } 1\le i\le k.").scale(.55)
        texto_1=VGroup(texto1, texto1_2, texto1_3).arrange(direction=RIGHT, center=True)
        texto1_2.next_to(texto1, RIGHT, buff=0.15)
        texto1_3.next_to(texto1_2, RIGHT, buff=0.15)
        texto_1.move_to(3*UP)
        texto2_3 = MathTex(r"\text{y }", r" \langle " , r"\vec{n}_i" , r", \vec{n}_i \rangle", r"=1" , r"\text{ para } 1\le i\le k.").scale(.55)
        texto2_3.next_to(texto1_2, RIGHT, buff=0.15)
        texto3_3 = MathTex(r" \begin{cases} 1 &\text{si } j=i, \\ 0 &\text{si } j\neq i, \end{cases} \text{ con } 1\le i,j\le k.").scale(.55)
        texto3_3.next_to(texto1_2[0], RIGHT, buff=0.15)
        texto3 = MathTex(r"N=\{\vec{n}_1,...,\vec{n}_k\} \text{ es una base \emph{ortonormal} de } V", r"\text{ si}").scale(.55)
        texto5 = MathTex(r"\langle N \rangle = V,").scale(.55)
        # Case display: <n_i, n_j> equals 1 when j == i and 0 otherwise.
        texto5_c1 = Tex("$1 \\ $~", "si~","$j = i,$").scale(.55)
        texto5_c2 = Tex("$0 \\ $~", " si~", " $j \\neq i.$").scale(.55)
        for i, item in enumerate(texto5_c2):
            item.align_to(texto5_c1[i], LEFT)
        texto5_c1g = VGroup(*texto5_c1)
        texto5_c2g = VGroup(*texto5_c2)
        texto5_c2g.next_to(texto5_c1g, DOWN)
        texto5_g = VGroup(texto5_c1g, texto5_c2g)
        b3 = Brace(texto5_g, .1*LEFT)
        P_ij = b3.get_text("$\\langle \\vec{n}_i, \\vec{n}_j\\rangle$ =").scale(.55)
        nj_ni = VGroup(P_ij, b3, texto5_g).arrange(direction=RIGHT, buff=.15)
        N_ort = VGroup(texto5, nj_ni).arrange(direction=RIGHT, buff=1.75, center=True)
        texto3.move_to(UP, aligned_edge=RIGHT)
        # Footnotes pointing at exercises 2.2 and 2.3.
        e22 = Tex("*", "Ver el ", "Ejercicio 2.2", ".").scale(.55).to_edge(DOWN)
        e22[0].set_color(AMARILLO)
        e22[2].set_color(AZUL)
        e23 = Tex("*", "Ver el ", "Ejercicio 2.3", ".").scale(.55).to_edge(DOWN)
        e23[0].set_color(AMARILLO)
        e23[2].set_color(AZUL)
        # Animations
        self.next_section("...decimos que un conjunto es ortonormal...", skip_animations=SKIP_DEFAULT)
        self.camera.frame.animate.move_to(1.5*UP)
        self.play(Write(texto1), run_time=2)
        self.play(Write(texto1_2), run_time=0.9)
        self.wait(0.1)
        self.play(Write(texto1_3), run_time=0.7)
        self.wait()
        self.next_section("...sea igual a uno.", skip_animations=SKIP_DEFAULT)
        self.play(ReplacementTransform(texto1_3, texto2_3))
        self.wait()
        self.next_section("...como sigue:", skip_animations=SKIP_DEFAULT)
        self.play(FadeOut(texto1_2[1], texto2_3))
        self.play(Write(texto3_3))
        self.wait()
        self.next_section("...es linealmente independiente", skip_animations=SKIP_DEFAULT)
        self.play(Write(e22))
        self.wait()
        self.play(Unwrite(e22), run_time=0.5)
        self.wait()
        self.next_section("N es una base ortonormal de V...", skip_animations=SKIP_DEFAULT)
        texto3.shift(2.75*RIGHT)
        self.play(Write(texto3[0]), run_time=2.5)
        N_ort[1].move_to(0.75*RIGHT)
        self.play(Write(texto3[1]))
        self.wait(1.5)
        self.play(Write(N_ort[0]))
        self.wait(0.5)
        self.play(Write(N_ort[1]), run_time=2)
        self.play(Write(e23))
        self.wait()
        self.play(Unwrite(e23), run_time=0.5)
        self.wait()
    def construct(self):
        self.bortonormal()
class SE3(MovingCameraScene):
    """Scene 3: coefficients of a vector in a general basis vs. orthogonal vs. orthonormal basis."""
    def construct(self):
        #----------------------------------- GENERAL SPACE V
        dim_V = MathTex("\\text{dim}\\left(V\\right)=k<\\infty").scale(0.6).to_edge(UP).shift(0.2*RIGHT).shift(0.5*UP)
        base_beta = Tex(
            "$\\beta = \\{\\vec{b}_1,..., \\vec{b}_k\\}$",  # 0
            " base de ",  # 1
            "$V$"  # 2
        ).move_to(UP)
        base_propiedades = Tex(
            "$\\langle \\beta\\rangle = V$",
            "$,\\,\\beta$ es l.i."
        )
        combination = MathTex(
            "\\vec{v}",  # 0
            "=",  # 1
            "c_1",  # 2
            "\\vec{b}_1",  # 3
            "+",  # 4
            "\\cdots",  # 5
            "+",  # 6
            "c_k",  # 7
            "\\vec{b}_k"  # 8
        )
        coeficientes_c = Tex("¿",
                             "$c_i$",
                             "?")
        group_1 = VGroup(base_beta, base_propiedades,
                         combination, coeficientes_c).scale(0.7)
        group_1.arrange(1.8*DOWN, center=False, aligned_edge=LEFT)
        combination_copy = group_1[2].copy()
        #----------------------------------- V WITH INNER PRODUCT, ORTHOGONAL BASIS
        base_gamma = Tex(
            "$\\Gamma = \\{\\vec{g}_1,..., \\vec{g}_k\\}$",  # 0
            " base ortogonal de",  # 1
            " $V$"  # 2
        ).move_to(UP)
        gen_gamma = MathTex(r"\langle\Gamma\rangle = V, ")
        case_1 = Tex("$\\langle \\vec{g}_i, \\vec{g}_i\\rangle \\neq 0$ \\ ", " si", " $j = i$,")
        case_2 = Tex("$0$", " si", " $j \\neq i$.")
        for i, item in enumerate(case_2):
            item.align_to(case_1[i], LEFT)
        case_1_g = VGroup(*case_1)
        case_2_g = VGroup(*case_2)
        case_2_g.next_to(case_1_g, DOWN)
        cases_group = VGroup(case_1_g, case_2_g)
        braces = Brace(cases_group, LEFT)
        producto_ij = braces.get_text("$\\langle \\vec{g}_i, \\vec{g}_j\\rangle$ =")
        gj_gi = VGroup(producto_ij, braces, cases_group)
        base_gamma_propiedades = VGroup(gen_gamma, gj_gi).arrange(
            direction=RIGHT, buff=0.20, center=False)
        combination_dg = MathTex(
            r"\vec{v}",  # 0
            r"=",  # 1
            r"d_1",  # 2
            r"\vec{g}_1",  # 3
            r"+\cdots+",  # 4
            r"d_k",  # 5
            r"\vec{g}_k"  # 6
        )
        coeficientes_d = MathTex(r"d_i = \frac{\langle \vec{v}, \vec{g}_i\rangle}{\langle \vec{g}_i, \vec{g}_i \rangle}, \quad 1\le i\le k")
        group_2 = VGroup(base_gamma, base_gamma_propiedades,
                         combination_dg, coeficientes_d).scale(0.7)
        group_2.arrange(1*DOWN, center=False, aligned_edge=LEFT)
        coeficientes_d.set_y(coeficientes_c.get_y()).shift(0.05*DOWN)
        #----------------------------------- ORTHONORMAL BASIS
        base_N = Tex(
            " $N = \\{\\hat{n}_1,..., \\hat{n}_k\\}$",  # 0
            " base ortonormal de",  # 1
            " $V$"  # 2
        ).move_to(UP)
        gen_N = MathTex(r"\langle N\rangle = V, ")
        case_1_N = Tex("$1$ \\ ", " si", " $j = i$,")
        case_2_N = Tex("$0$", " si", " $j \\neq i$.")
        for i, item in enumerate(case_2_N):
            item.align_to(case_1_N[i], LEFT)
        case_1_g_N = VGroup(*case_1_N)
        case_2_g_N = VGroup(*case_2_N)
        case_2_g_N.next_to(case_1_g_N, DOWN)
        cases_group_N = VGroup(case_1_g_N, case_2_g_N)
        braces_N = Brace(cases_group_N, LEFT)
        producto_ij_N = braces_N.get_text("$\\langle \\hat{n}_i, \\hat{n}_j\\rangle$ =")
        nj_ni = VGroup(producto_ij_N, braces_N, cases_group_N)
        base_N_propiedades = VGroup(gen_N, nj_ni).arrange(
            direction=RIGHT, buff=0.20, center=False)
        combination_fn = MathTex(
            r"\vec{v}",  # 0
            r"=",  # 1
            r"f_1",  # 2
            r"\hat{n}_1",  # 3
            r"+\cdots+",  # 4
            r"f_k",  # 5
            r"\hat{n}_k"  # 6
        )
        coeficientes_f = MathTex(r"f_i = \frac{\langle \vec{v}, \hat{n}_i\rangle}{\langle \hat{n}_i, \hat{n}_i\rangle}, \quad 1\le i\le k")
        group_3 = VGroup(base_N, base_N_propiedades,
                         combination_fn, coeficientes_f).scale(0.7)
        group_3.arrange(1*DOWN, center=False, aligned_edge=LEFT)
        # Group alignment (three side-by-side columns)
        groups = VGroup(group_1, group_2, group_3).scale(0.8).shift(1.5*UP)
        groups.arrange_in_grid(rows=1, cols=3, buff=1, row_heights=None)
        groups[1].align_to(groups[0], UP).shift(0.5*LEFT+0.05*DOWN)
        groups[2].align_to(groups[1], UP).shift(1.0*LEFT)
        combination_fn.shift(0.05*DOWN)
        # Animations
        self.next_section("...un espacio vectorial de dimensión finita...", skip_animations=SKIP_DEFAULT)
        self.camera.frame.shift(0.2*RIGHT+1.5*UP)  # camera adjustment for framing
        self.play(Write(dim_V), run_time=2)
        self.wait(1.5)
        self.play(FadeIn(groups[0][3]), run_time=2)
        self.play(FadeIn(groups[0][2]), run_time=2)
        self.wait(0.5)
        self.play(FadeIn(groups[0][:2]), run_time=2)
        self.wait()
        self.next_section("...base ortogonal, entonces los coeficientes...", skip_animations=SKIP_DEFAULT)
        self.play(FadeIn(groups[1][:2]), run_time=2)
        self.wait(0.8)
        self.play(FadeIn(groups[1][2]))
        self.play(FadeIn(groups[1][3]), run_time=1.5)
        self.wait()
        self.next_section("...base ortonormal, entonces...", skip_animations=SKIP_DEFAULT)
        self.play(FadeIn(groups[2][0:3][:]), run_time=2)
        self.wait()
        self.next_section("Podemos aplicar el mismo resultado.", skip_animations=SKIP_DEFAULT)
        self.play(FadeIn(groups[2][3][:]), run_time=2)
        self.wait()
        self.next_section("...unitarios, el resultado anterior se simplifica...", skip_animations=SKIP_DEFAULT)
        uno = MathTex("1").scale(0.8)
        uno.move_to(coeficientes_f[0][12:21].get_center())
        self.play(ReplacementTransform(coeficientes_f[0][12:21], uno))
        self.wait(1.25)
        self.play(coeficientes_f[0][11:21].animate.set_color(BLACK),
                  uno.animate.set_color(BLACK))
        self.play(coeficientes_f[0][3:11].animate.shift(0.2*DOWN+0.1*LEFT),
                  coeficientes_f[0][21].animate.shift(0.175*LEFT+0.075*DOWN),
                  coeficientes_f[0][22:].animate.shift(0.175*LEFT+0.05*DOWN))
        self.wait()
| animathica/alganim | 2/NIyBO_E4.py | NIyBO_E4.py | py | 21,964 | python | en | code | 6 | github-code | 36 |
15056294029 | import os
import sqlite3
import subprocess as sp
import sys
from pathlib import Path
db_path = Path(Path.home() / '.mypycheck.sqlite3')
def _create_files_table(con: sqlite3.Connection) -> None:
con.execute('''CREATE TABLE IF NOT EXISTS files (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
timestamp DOUBLE NOT NULL
);''')
try:
con.execute('''CREATE UNIQUE INDEX idx_files_name
ON files (name);''')
except:
pass
def _check(file: str, stdout: int=-1, stderr: int=-1) -> None:
    """Run ``mypy --strict`` on *file* unless the cached result is fresh.

    A successful check is recorded in the SQLite cache keyed by the
    resolved path and its mtime; a later call with an unchanged file is a
    no-op.

    :param file: path of the file to type-check.
    :param stdout: fd to receive mypy's stdout (-1 = this process' stdout).
    :param stderr: fd to receive mypy's stderr (-1 = this process' stderr).
    :raises subprocess.CalledProcessError: if mypy reports errors.
    """
    path = Path(file).resolve(strict=True)
    try:
        connection = sqlite3.connect(db_path)
        _create_files_table(connection)
    except sqlite3.Error:
        # Cache DB is corrupt or unreadable: rebuild it from scratch.
        if db_path.exists():
            os.remove(db_path)
        connection = sqlite3.connect(db_path)
        _create_files_table(connection)
    try:
        cursor = connection.execute(
            "SELECT name,timestamp FROM files WHERE name = ?", (str(path), ))
        row = cursor.fetchone()
        mtime = path.stat().st_mtime
        if row is not None and row[1] >= mtime:
            # Cache hit; the finally block still closes the connection
            # (previously this early return leaked it).
            return
        if stdout < 0:
            stdout = sys.stdout.fileno()
        if stderr < 0:
            stderr = sys.stderr.fileno()
        # Throws sp.CalledProcessError on failed check
        sp.check_call(['mypy', file, '--strict'], stdout=stdout, stderr=stderr)
        connection.execute(
            "INSERT OR REPLACE INTO files (name, timestamp) VALUES (?, ?);",
            (str(path), mtime))
        connection.commit()
    finally:
        connection.close()
def check(file: str) -> None:
    """Type-check *file*, terminating the process with status 1 on failure.

    Files under ``site-packages`` (third-party code) are skipped.

    :param file: path of the file to type-check.
    """
    if '/site-packages/' in str(file):
        return
    try:
        _check(file)
    except sp.CalledProcessError:
        # mypy reported errors; mirror its failure in our exit status.
        # sys.exit replaces the site-builtin exit(), which is not
        # guaranteed to exist in all interpreters.
        sys.exit(1)
def clean() -> None:
    """Delete the on-disk result cache, if one exists."""
    if db_path.exists():
        db_path.unlink()
def check_main() -> None:
    """Command-line entry point: ``mypycheck [--clean] target``."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('target')
    arg_parser.add_argument('--clean', action='store_true')
    parsed = arg_parser.parse_args()
    # Wipe the cache first when requested, then check the target.
    if parsed.clean:
        clean()
    check(parsed.target)
| dlsloan/mypycheck | src/mypycheck/__init__.py | __init__.py | py | 2,041 | python | en | code | 0 | github-code | 36 |
2403591134 | import time
import pygame
from pygame.locals import *
import random
from os import environ
# Class for creating a window structure for the simulation
class WindowStructure:
    """Window placement and creation helper for the simulation display."""

    def __init__(self, win_pos=(0, 0), state=0):
        # win_pos is unpacked as (y, x); state holds pygame display flags.
        (self.y_origin, self.x_origin), self.state = win_pos, state

    def create_window(self):
        """Open the pygame window at the stored position with the stored flags."""
        position = "{},{}".format(self.y_origin, self.x_origin)
        environ['SDL_VIDEO_WINDOW_POS'] = position
        self.screen = pygame.display.set_mode(flags=self.state)
        self.clock = pygame.time.Clock()
# Class for running the simulation
# Class for running the simulation
class Simulation(WindowStructure):
    """Drives the visual-stimulus experiment.

    Renders a swarm of moving sprites, alternating between visible and
    'hidden' display phases, with some loops gated by an external shared
    flag (``flag.value`` is read each iteration).
    """

    def __init__(self, win_pos, control_system, state, num_sprites, direction, velocity, shift_direction,
                 shift_time, black_screen_duration, pre_experiment_duration, trial_duration):
        # win_pos/state are consumed by WindowStructure; the remaining
        # arguments are experiment parameters (durations in seconds,
        # velocity in pixels per frame at the 120 FPS tick used below).
        super().__init__(win_pos, state)
        self.create_window()
        self.num_sprites = num_sprites
        self.direction = direction
        self.velocity = velocity
        self.shift_direction = shift_direction
        self.shift_time = shift_time
        self.control_system = control_system
        self.black_screen_duration = black_screen_duration
        self.pre_experiment_duration = pre_experiment_duration
        self.trial_duration = trial_duration

    # Method to run the experiment
    def run_experiment(self, flag):
        """Full sequence: black screen, white pre-phase, then four trials
        that alternate the swarm's direction/visibility, then exit."""
        pygame.event.set_allowed([QUIT])
        self.display_black_screen()
        self.pre_experiment()
        # self.simple_loop_direction(flag)
        # self.simple_loop_pause(flag)
        # NOTE(review): each toggle maps 'forward' -> 'hidden' and anything
        # else -> 'backward' (resp. 'backward' -> 'hidden' / 'forward'), so
        # trials alternate hidden and visible phases — confirm intended.
        self.direction = 'hidden' if self.direction == 'forward' else 'backward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'backward' else 'forward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'forward' else 'backward'
        self.move_your_swarm(flag)
        self.direction = 'hidden' if self.direction == 'backward' else 'forward'
        self.move_your_swarm(flag)
        print(" -------- END OF THE SIMULATION --------")
        exit(0)

    def display_black_screen(self):
        """Blank the display for ``black_screen_duration`` seconds."""
        self.screen.fill((0, 0, 0))
        pygame.display.update()
        time.sleep(self.black_screen_duration)

    # Method to create an animation with given parameters
    def create_animation(self):
        """Build and return an Animation for the current parameters.

        NOTE(review): the sprite group built here is discarded — the
        Animation constructor creates its own sprites internally.
        """
        all_sprites_list = pygame.sprite.Group()
        for _ in range(self.num_sprites):
            stimuli = SpriteObj(self.screen.get_size(), self.direction, self.velocity)
            all_sprites_list.add(stimuli)
        return Animation(self.screen, self.screen.get_size(), self.num_sprites, self.direction, self.velocity,
                         self.shift_direction)

    # Main loop for running the simulation
    def simple_loop_pause(self, flag):
        """Trial loop that freezes the animation while the external flag
        demands a pause (which flag state pauses depends on control_system)."""
        start_time = time.time()
        counter = 0
        shift_counter = 0
        animation = self.create_animation()
        trial_duration_counter = time.time()
        # Warm-up: advance/draw 1200 frames before the timed trial starts.
        for i in range(1200):
            animation.run_logic()
            animation.display_frame()
            # time.sleep(1)
        while time.time() - trial_duration_counter < self.trial_duration:
            shift_counter += 1
            # Periodically flip all sprites when direction shifting is on.
            if animation.shift_direction and shift_counter > self.shift_time * 120:
                animation.change_direction()
                shift_counter = 0
            # Pause handling: redraw without updating logic, then skip the tick.
            if self.control_system == 2:
                if flag.value:
                    animation.display_frame()
                    time.sleep(1)
                    continue
            elif self.control_system == 1:
                if not flag.value:
                    animation.display_frame()
                    #time.sleep(1)
                    continue
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    exit()
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            # Report the achieved frame rate roughly once per second.
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()

    def simple_loop_direction(self, flag):
        """Trial loop that flips the swarm's direction every time the
        external flag value changes."""
        start_time = time.time()
        trial_duration_counter = time.time()
        counter = 0
        shift_counter = 0
        animation = self.create_animation()
        prev_flag_value = flag.value
        while time.time() - trial_duration_counter < self.trial_duration:
            shift_counter += 1
            if animation.shift_direction and shift_counter > self.shift_time * 120:
                animation.change_direction()
                shift_counter = 0
            # Edge detection on the shared flag: any transition flips direction.
            if prev_flag_value != flag.value:
                print(f"Flag value changed from {prev_flag_value} to {flag.value}")
                animation.change_direction()
                prev_flag_value = flag.value
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    exit()
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()

    # Method to perform pre-experiment tasks
    def pre_experiment(self): # 20 white, black, white
        """Show a plain white screen for ``pre_experiment_duration`` seconds."""
        self.screen.fill((255, 255, 255))
        pygame.display.update(self.screen.get_rect())
        time.sleep(self.pre_experiment_duration)

    def move_your_swarm(self, flag):
        """Run one timed trial with the swarm moving uninterrupted.

        NOTE(review): ``flag`` is accepted but not read in this loop.
        """
        start_time = time.time()
        counter = 0
        animation = self.create_animation()
        trial_duration_counter = time.time()
        # Warm-up frames before the timed portion, as in simple_loop_pause.
        for i in range(1200):
            animation.run_logic()
            animation.display_frame()
        while time.time() - trial_duration_counter < self.trial_duration:
            animation.run_logic()
            animation.display_frame()
            self.clock.tick(120)
            counter += 1
            if (time.time() - start_time) > 1:
                print("FPS: ", int(counter / (time.time() - start_time)))
                counter = 0
                start_time = time.time()
# Class for creating sprite objects
# Class for creating sprite objects
class SpriteObj(pygame.sprite.Sprite):
    """One moving stimulus that scrolls horizontally and wraps around.

    Renders either a supplied image (with white made transparent) or, when
    no image path is available, a default black disc.
    """

    def __init__(self, screen_size, direction, velocity, image_data=None):
        """
        :param screen_size: (width, height) of the display surface
        :param direction: 'forward' (moves left, image flipped) or anything
            else for rightward motion
        :param velocity: horizontal speed in pixels per frame
        :param image_data: optional path to the sprite image; defaults to
            the bundled peer-recognition test image
        """
        super().__init__()
        self.width, self.height = screen_size
        self.direction = direction
        self.heading = 'original'
        if self.direction == 'forward':
            self.heading = 'flipped'
        self.velocity = velocity
        self.image = None
        # Honor a caller-supplied image path; previously the parameter was
        # silently ignored and this hard-coded path always won.  Raw string
        # so the backslashes are not treated as escape sequences.
        if image_data is None:
            image_data = r'D:\AmirA21\Desktop\Yossef\Visual-Based_Collective-Motion\Simulator_ver_02\images\image_for_peer_recognition_test.png'
        self.image_data = image_data
        self.create_surface()
        self.reset_pos()

    # Method to create a sprite surface with an image or default data
    def create_surface(self):
        """Load the sprite image (if a path is set) and build the surface."""
        if self.image_data is not None:
            self.image = pygame.image.load(self.image_data).convert_alpha()
        self.generate_surface()

    # Method to generate the sprite surface using image_data
    def generate_surface(self):
        """Finalize the surface: a 45x45 black disc when no image data is
        set, otherwise the loaded image with white made transparent;
        flipped horizontally when heading is 'flipped'."""
        if self.image_data is None:
            self.image_data = (45, 45)
            width, height = 45, 45
            self.image = pygame.Surface([width, height], pygame.SRCALPHA)
            self.image.fill((255, 255, 255))
            pygame.draw.circle(self.image, (0, 0, 0), (22.5, 22.5), 22.5)
        else:
            # width, height = self.image.get_size()
            self.image.set_colorkey((255, 255, 255))
        if self.heading == 'flipped':
            self.image = pygame.transform.flip(self.image, True, False)
        self.rect = self.image.get_rect()

    # Method to reset the position of the sprite
    def reset_pos(self):
        """Respawn at a random y, off-screen on the approaching side."""
        self.rect.y = random.randrange(0, self.height)
        if self.direction == "forward":
            self.rect.x = random.randrange(self.width, self.width * 2)
        else:
            self.rect.x = random.randrange(self.width * -1, 0)

    # Method to update the sprite's position based on its direction and velocity
    def update(self):
        """Advance one frame, wrap off-screen sprites, and keep the image
        facing the current travel direction."""
        if self.direction == "forward":
            self.rect.x += -self.velocity
            if self.rect.x > self.width * 2 or self.rect.x < 0:
                self.reset_pos()
        else:
            self.rect.x += self.velocity
            if self.rect.x < self.width * -1 or self.rect.x > self.width:
                self.reset_pos()
        expected_heading = 'flipped' if self.direction == 'forward' else 'original'
        if self.heading != expected_heading:
            self.flip()

    def flip(self):
        """Flips the direction and heading of the sprite."""
        self.direction = 'backward' if self.direction == 'forward' else 'forward'
        self.heading = 'flipped' if self.heading == 'original' else 'original'
        self.image = pygame.transform.flip(self.image, True, False)
class Animation(object):
    """Owns the sprite group for one trial and drives drawing and updates."""

    def __init__(self, screen, screen_size, num_sprites, direction, velocity, shift_direction):
        self.direction = direction
        self.screen = screen
        self.num_sprites = num_sprites
        self.velocity = velocity
        self.screen_size = screen_size
        self.shift_direction = shift_direction
        self.all_sprites_list = pygame.sprite.Group()
        self.create_sprites()

    def create_sprites(self):
        """Populate the group with num_sprites stimuli."""
        size = self.screen.get_size()
        for _ in range(self.num_sprites):
            self.all_sprites_list.add(SpriteObj(size, self.direction, self.velocity))

    def change_direction(self):
        """Flip every sprite's travel direction and heading."""
        for sprite in self.all_sprites_list:
            sprite.flip()

    def display_frame(self):
        """Draw one frame: white background, then the sprites unless the
        trial is in a 'hidden' phase."""
        self.screen.fill((255, 255, 255))
        # self.reduce_brightness(self.screen, 100)
        if self.direction != 'hidden':
            self.all_sprites_list.draw(self.screen)
        pygame.display.update()

    def run_logic(self):
        """Advance all sprites by one tick."""
        self.all_sprites_list.update()

    @staticmethod
    def reduce_brightness(screen, alpha_value):
        """Darken the screen by blitting a translucent black overlay."""
        dark_layer = pygame.Surface(screen.get_size(), pygame.SRCALPHA)
        dark_layer.fill((0, 0, 0, alpha_value))
        screen.blit(dark_layer, (0, 0))
| Time2bImmortal/Heart_of_the_swarm | Simulator_ver_02/VirtualSimulation.py | VirtualSimulation.py | py | 11,078 | python | en | code | 0 | github-code | 36 |
20981029783 | """
Created on Mon Feb 10 03:29:54 2020
@author: Luthfi (lsaif.github.com)
"""
from flask import Flask, render_template, request
import csv
import re
# Load the lithology abbreviation -> full-term dictionary once at startup.
with open('litho_dict.csv', newline='') as infile:
    reader = csv.reader(infile)
    next(reader)  # skip the header row
    litholist = dict(reader)
def translate(desc, transdict):
    """Translate a mud-log lithology description word by word.

    Two passes are made: first over whitespace-separated words, then over
    tokens split at every non-word character, so abbreviations glued to
    punctuation are still matched.  Lookups are case-insensitive; unknown
    tokens are left unchanged.  Finally '.' separators become ','.

    :param desc: raw lithology description string
    :param transdict: mapping of lowercase abbreviation -> full term
    :returns: the translated description string
    """
    words = desc.split(' ')
    trans = [transdict.get(word.lower(), word) for word in words]
    translation = ' '.join(trans)
    # Raw string: the old '(\W)' relied on '\W' not being a recognized
    # string escape, which raises a SyntaxWarning on modern Python.
    tokens = re.split(r'(\W)', translation)
    trans = [transdict.get(tok.lower(), tok) for tok in tokens]
    translation = ''.join(trans)
    return translation.replace('.', ',')
app = Flask(__name__)

@app.route('/')
def homepage():
    """Serve the translator landing page."""
    return render_template('index.html')
@app.route('/', methods=['GET','POST'])
def mudlog_translator():
    """Handle the translation form.

    On POST, translate the submitted lithology description with the loaded
    dictionary and re-render the page with the result.  On GET, render the
    empty form (previously GET fell through and returned None, which Flask
    rejects with a 500 error).  The translation is now computed once
    instead of twice.
    """
    result = None
    if request.method == "POST":
        lithodesc = request.form.get('lithology_description')
        result = translate(lithodesc, litholist)
        print(result)
    return render_template('index.html', result = result)
if __name__ == "__main__":
app.run(debug=True) | luthfigeo/MudLog-Translator | MudLogTranslator.py | MudLogTranslator.py | py | 1,117 | python | en | code | 3 | github-code | 36 |
def BiggerGreater(line: str) -> str:
    """Return the smallest rearrangement of ``line`` that is
    lexicographically greater than it, or '' if no such string exists.

    Scans positions from the right; at the first position that has a
    smaller character somewhere to its left, swaps with the rightmost such
    character and sorts the suffix after the swap point.
    """
    magic = list(line)
    for i, char in enumerate(magic[::-1]):
        index = find_index_with_value_less_than_the_current(magic[:len(magic) - 1 - i], char)
        if index is not None:
            magic[len(magic) - 1 - i], magic[index] = magic[index], magic[len(magic) - 1 - i]
            # Smallest suffix: sort everything after the swap position.
            magic = magic[:index + 1] + sorted(magic[index + 1:])
            return "".join(magic)
    return ""


def find_index_with_value_less_than_the_current(chars: list, value: str) -> "int | None":
    """Return the rightmost index in ``chars`` whose character is strictly
    less than ``value``, or None if there is none.

    (The return annotation previously claimed ``int`` even though None is
    a documented outcome.)
    """
    for index, char in enumerate(chars[::-1]):
        if value > char:
            return len(chars) - 1 - index
    return None
| vladkostikov/HSP | entrance/bigger_greater.py | bigger_greater.py | py | 648 | python | en | code | 0 | github-code | 36 |
29290630775 | #!/usr/bin/env python
"""
Given the name of an ACS association, create the DAG for processing it (meaning
running CALACS on each exposure of the visit and multidrizzle on the total
output of CALACS) and submit it to the grid.
Variables used by the job/workflow templates are
code_root path to the root of the pipeline code
repository path to the raw data repository
dataset name of the association to process (9 characters)
exposures root names of the exposures in the visit (internal)
Different types of middleware can be used to execute the workflow on the user
data. The middleware is specified using the -g option and defaults to 'condor'.
Supported middleware is 'condor', 'makefile' or 'xgrid'.
"""
import os
import random
import time
from owl import config
from owl import workflow
# Constants
# Command-line usage text shown by the option parser.
USAGE = '''\
process_acs_simple.py OPTIONS <association name>
OPTIONS
-r, --repository=PATH path to the raw data respository
-g, --grid-middleware=MIDDLEWARE middleware name
-e,--env=KEY=VAL(,KEY=VAL)*
'''
# Paths and workflow identity pulled from the OWL configuration.
TEMPLATE_ROOT = getattr(config, 'DIRECTORIES_TEMPLATE_ROOT')
CODE_ROOT = getattr(config, 'DIRECTORIES_PIPELINE_ROOT')
WORK_ROOT = getattr(config, 'DIRECTORIES_WORK_ROOT')
INSTRUMENT = 'acs'
MODE = 'simple'
class AcsSimpleWorkflow(workflow.Workflow):
    """
    ACS Simple workflow.
    """
    def get_extra_keywords(self, code_root, repository, dataset, work_dir,
                           flavour, extra_env):
        # Exposure root names come from the *_raw.fits files found inside
        # repository/dataset; the '_raw.fits' suffix is dropped.
        exposure_dir = os.path.join(repository, dataset)
        suffix = '_raw.fits'
        roots = [entry[:-len(suffix)]
                 for entry in os.listdir(exposure_dir)
                 if entry.endswith(suffix)]
        return {'exposures': roots}
def process(datasets, repository, template_root, code_root=CODE_ROOT,
            extra_env=None, work_root=WORK_ROOT, middleware='condor',
            verbose=False):
    """
    Submit one workflow per association name in *datasets*.

    For each association, instantiate an AcsSimpleWorkflow from the
    templates under *template_root* and execute it on the chosen
    middleware.  Returns 0 right after submission; it does not wait for
    the workflows to complete.
    """
    if extra_env is None:
        extra_env = {}
    # Per-submission scratch directory: work_root/<user>_<timestamp>_<rand>.
    # The random suffix guards against two near-simultaneous submissions by
    # the same user colliding on the timestamp; it is not cryptographically
    # strong, merely enough for directory-name uniqueness.
    dir_name = '%s_%f' % (os.environ.get('USER', 'UNKNOWN'), time.time())
    dir_name += '_%d' % (int(1e6 * random.random()))
    work_dir = os.path.join(work_root, dir_name)
    for dataset in datasets:
        # A fresh, dataset-independent Workflow instance per association...
        wflow = AcsSimpleWorkflow(template_root=template_root)
        # ...submitted to the grid for this particular piece of data.
        job_id = wflow.execute(code_root=code_root,
                               repository=repository,
                               dataset=dataset,
                               work_dir=work_dir,
                               flavour=middleware)
        print('Dataset %s submitted as job %s' % (dataset, job_id))
        print('    Work directory: %s' % (work_dir))
    return 0
def _parse_extra_environment_info(raw_user_env):
"""
Given a string of the form
KEY=VAL(,KEY=VAL)*
where KEYs are environment variable names and VARs their respective values,
parse the input string and return a dictionary of the form
{KEY: VAL, }
Only simple parsing is supported. This means, among other things, that VALs
are assumed to be strings. No arrays, list, tuples are supported. The
returned dictionary is assumed to be usable to agument the user environment.
"""
def parse_token(token):
"""
Parse a
KEY=VAL
string, allowing for spaces on either side of the = sign. Return the
parsed (KEY, VAL) tuple.
"""
if(not token or '=' not in token):
return(())
items = token.split('=', 1)
return((items[0].strip(), items[1].strip()))
if(not raw_user_env):
return({})
return(dict(map(parse_token, raw_user_env.split(','))))
if(__name__ == '__main__'):
    import optparse
    import sys
    # NOTE(review): optparse is deprecated in favor of argparse; kept here
    # unchanged for compatibility with the existing CLI surface.
    # Setup the command line option parser and do the parsing.
    parser = optparse.OptionParser(USAGE)
    parser.add_option('-r', '--repository',
                      dest='repository',
                      type='str',
                      default=None,
                      help='path to the raw data respository')
    parser.add_option('-g', '--grid-middleware',
                      dest='middleware',
                      type='str',
                      default='condor',
                      help='grid/local middleware to use')
    parser.add_option('-e', '--env',
                      dest='env',
                      type='str',
                      default='',
                      help='any extra environment variable to use')
    # Verbose flag
    parser.add_option('-v',
                      action='store_true',
                      dest='verbose',
                      default=False)
    # Parse the command line args.
    (options, args) = parser.parse_args()
    # Sanity check: all the opions (apart from verbose) are required.
    if(not options.repository):
        parser.error('Please specify the repository path.')
    if(not args):
        parser.error('Please specify the name of the association(s).')
    # Make sure that that suff actually exists.
    if(not os.path.exists(options.repository)):
        parser.error('Please specify a valid repository path.')
    # Template sets live at TEMPLATE_ROOT/<instrument>/<mode>.
    instrumentPath = os.path.join(TEMPLATE_ROOT, INSTRUMENT)
    if(not os.path.exists(instrumentPath)):
        print('Unable to find templates for %s' % (INSTRUMENT))
        sys.exit(1)
    templateDir = os.path.join(instrumentPath, MODE)
    if(not os.path.exists(templateDir)):
        print('Unable to find templates for %s/%s' % (INSTRUMENT, MODE))
        sys.exit(2)
    # Now see if we have to do any environment variable parsing/setting up.
    env = _parse_extra_environment_info(options.env)
    # Run!
    sys.exit(process(datasets=args,
                     repository=options.repository,
                     template_root=templateDir,
                     middleware=options.middleware,
                     extra_env=env,
                     verbose=options.verbose))
| fpierfed/owl | example/acs_simple/bin/process_acs_simple.py | process_acs_simple.py | py | 6,869 | python | en | code | 5 | github-code | 36 |
10094330881 | from filters.filter import FilterReplicator
class TopkFilter(FilterReplicator):
    """Replicating filter stage that keeps only SCORED shot results for
    the Top-K computation."""

    def __init__(self, input_workers, config):
        # The filter condition and queue name are fixed for this stage.
        super(TopkFilter, self).__init__(
            "shot_result=SCORED",
            "filter-topk",
            input_workers,
            config
        )

    def run(self):
        """Start the filter loop, tagging its output as "Top K"."""
        super(TopkFilter, self).run("Top K")
| PatricioIribarneCatella/nba-statistics | src/filters/topk.py | topk.py | py | 402 | python | en | code | 0 | github-code | 36 |
11504622560 | """
This module contains the Distribution class which defines a standard
interface for distributions It also provides several implemented
distributions, which inherit from Distribution Any user-specified
distributions should inherit from Distribution
"""
import numpy as np
from .utils import overrides, package_path
import os
from scipy import stats
import pickle
class Distribution(object):
    """
    Interface/abstract class for distributions.
    Any user-specified distributions should be defined by inheriting from this class and
    overriding the appropriate methods.
    """

    def __init__(self, ndims=2, nbatch=100):
        """ Creates a Distribution object

        :param ndims: the dimension of the state space for this distribution
        :param nbatch: the number of sampling particles to run simultaneously
        :returns: a Distribution object
        :rtype: Distribution
        """
        # distribution dimensions
        self.ndims = ndims
        # number of sampling particles to use
        self.nbatch = nbatch
        # TensorflowDistributions require some special treatment
        # this attribute is to be used instead of isinstance, as that would require
        # tensorflow to be imported globally
        if not hasattr(self, 'backend'):
            self.backend = 'numpy'
        # true iff being sampled with a jump process
        self.mjhmc = None
        # number of times energy op has been called
        self.E_count = 0
        # number of times gradient op has been called
        self.dEdX_count = 0
        # only set to true when I have a bias initialization and am being burned in
        # to generate and cache a fair initialization for continuous samplers
        self.generation_instance = False
        # so some distributions may modify the default
        if not hasattr(self, 'max_n_particles'):
            self.max_n_particles = None
        # set the state fairly. calls out to a cache
        self.init_X()

    def E(self, X):
        """Energy of each column of X; also increments the call counter."""
        self.E_count += X.shape[1]
        return self.E_val(X)

    def E_val(self, X):
        """
        Subclasses should implement this with the correct energy function
        """
        raise NotImplementedError()

    def dEdX(self, X):
        """Energy gradient at each column of X; increments the call counter."""
        self.dEdX_count += X.shape[1]
        return self.dEdX_val(X)

    def dEdX_val(self, X):
        """
        Subclasses should implement this with the correct energy gradient function
        """
        raise NotImplementedError()

    def __hash__(self):
        """ Subclasses should implement this as the hash of the tuple of all parameters
        that effect the distribution, including ndims. This is very important!!
        nbatch should not be part of the hash!! Including it will break everything
        As an example, see how this is implemented in Gaussian

        :returns: a hash of the relevant parameters of self
        :rtype: int
        """
        raise NotImplementedError()

    def init_X(self):
        """
        Sets self.Xinit to a good initial value
        """
        # TODO: make production ready by adding global flag to disable
        # research options like this
        self.cached_init_X()

    def cached_init_X(self):
        """ Sets self.Xinit to cached (serialized) initial states for continuous-time samplers, generated by burn in

        *For use with continuous-time samplers only*

        :returns: None
        :rtype: none
        """
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        if file_name in os.listdir(file_prefix):
            # NOTE: pickle.load is only as trustworthy as the cache files;
            # these are generated locally by this package — never point the
            # cache directory at untrusted data.
            with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
                mjhmc_endpt, _, _, control_endpt = pickle.load(cache_file)
            if self.mjhmc:
                self.Xinit = mjhmc_endpt[:, :self.nbatch]
            else:
                self.Xinit = control_endpt[:, :self.nbatch]
        else:
            from mjhmc.misc.gen_mj_init import MAX_N_PARTICLES, cache_initialization
            # modify this object so it can be used by gen_mj_init
            old_nbatch = self.nbatch
            self.nbatch = self.max_n_particles or MAX_N_PARTICLES
            self.generation_instance = True
            # must rebuild now that nbatch is changed
            if self.backend == 'tensorflow':
                self.build_graph()
            # start with biased initializations
            try:
                self.gen_init_X()
            except NotImplementedError:
                # completely arbitrary choice
                self.Xinit = np.random.randn(self.ndims, self.nbatch)
            # generate and cache fair initialization
            cache_initialization(self)
            # reconstruct this object using fair initialization
            self.nbatch = old_nbatch
            self.generation_instance = False
            # must rebuild now that nbatch is changed back
            if self.backend == 'tensorflow':
                self.build_graph()
            self.cached_init_X()

    def gen_init_X(self):
        """ Sets self.Xinit to generated initial states for the sampling particles

        *For use with discrete-time samplers only*

        :returns: None
        :rtype: None
        """
        raise NotImplementedError()

    def reset(self):
        """
        resets the object. returns self for convenience
        """
        self.E_count = 0
        self.dEdX_count = 0
        if not self.generation_instance:
            self.init_X()
        return self

    def __call__(self, X):
        """
        Convenience method for NUTS compatibility
        returns -E, -dEdX
        """
        rshp_X = X.reshape(len(X), 1)
        E = float(self.E(rshp_X))
        dEdX = self.dEdX(rshp_X).T[0]
        return -E, -dEdX

    def load_cache(self):
        """ Loads and returns the cached fair initializations and
        estimated variances associated with this
        distribution. Throws an error if the cache does not exist

        :returns: the loaded cache: (fair_initialization, emc_var_estimate, true_var_estimate)
        :rtype: (np.ndarray, float, float)
        """
        distr_name = type(self).__name__
        distr_hash = hash(self)
        file_name = '{}_{}.pickle'.format(distr_name, distr_hash)
        file_prefix = '{}/initializations'.format(package_path())
        # Bug fix: pickle files are binary — the previous text-mode open
        # made pickle.load fail under Python 3.
        with open('{}/{}'.format(file_prefix, file_name), 'rb') as cache_file:
            return pickle.load(cache_file)
class LambdaDistribution(Distribution):
    """ An `anonymous' distribution object for quick
    experimentation. Due to the initialization time that is required
    at first run it, one shouldn't use this object in the
    long-term. Rather create your own distribution class that inherits
    from Distribution.

    You should give your LambdaDistribution objects a name. Use a
    descriptive name, and use the same for functionally equivalent
    LambdaDistributions - the hash of the name is used to label the
    initialization information which is generated at first run time of
    a new distribution. This requirement is a side effect of the
    unfortunate fact that there is no computable hash function which
    assigns functionally identical programs to the same number.
    """

    #pylint: disable=too-many-arguments
    def __init__(self, energy_func=None, energy_grad_func=None, init=None, name=None):
        """ Creates an anonymous distribution object.

        :param energy_func: function specifying the energy, called as energy_func(X)
        :param energy_grad_func: function specifying gradient of the energy
        :param init: fair initialization for this distribution. array of shape (ndims, nbatch)
        :param name: name of this distribution. use the same name for
          functionally identical distributions
        :returns: an anonymous distribution object
        :rtype: LambdaDistribution
        """
        self.energy_func = energy_func
        self.energy_grad_func = energy_grad_func
        self.init = init
        # TODO: raise warning if name is not passed.
        # Bug fix: the fallback used to call np.random() — a module, not a
        # callable — which raised TypeError.
        if name is None:
            name = str(np.random.random())
        self.name = name
        super(LambdaDistribution, self).__init__(ndims=init.shape[0], nbatch=init.shape[1])

    @overrides(Distribution)
    def E_val(self, X):
        # Bug fix: previously evaluated a Gaussian energy via self.J
        # (copy-paste from Gaussian); self.J is never defined here, so any
        # call raised AttributeError.  Delegate to the supplied callable.
        return self.energy_func(X)

    @overrides(Distribution)
    def dEdX_val(self, X):
        # Same fix as E_val: delegate to the user-supplied gradient.
        return self.energy_grad_func(X)

    @overrides(Distribution)
    def gen_init_X(self):
        self.Xinit = self.init

    @overrides(Distribution)
    def __hash__(self):
        # Per the Distribution.__hash__ contract, nbatch must NOT be part
        # of the hash (it previously was, which fragmented the init cache).
        return hash((self.ndims, self.name))
class Gaussian(Distribution):
    """The "ill conditioned" anisotropic Gaussian from the LAHMC paper.

    Per-axis precisions are log-spaced over log_conditioning orders of
    magnitude.
    """

    def __init__(self, ndims=2, nbatch=100, log_conditioning=6):
        # Log-spaced precisions from 10**-log_conditioning up to 1.
        self.conditioning = 10**np.linspace(-log_conditioning, 0, ndims)
        self.J = np.diag(self.conditioning)
        self.description = '%dD Anisotropic Gaussian, %g self.conditioning'%(ndims, 10**log_conditioning)
        super(Gaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        # E(x) = x^T J x / 2, evaluated columnwise.
        JX = np.dot(self.J, X)
        return np.sum(X * JX, axis=0).reshape((1, -1)) / 2.

    @overrides(Distribution)
    def dEdX_val(self, X):
        # Symmetrized gradient (J + J^T) x / 2.
        grad = np.dot(self.J, X) / 2. + np.dot(self.J.T, X) / 2.
        return grad

    @overrides(Distribution)
    def gen_init_X(self):
        # Draw from the target: per-axis std is 1/sqrt(precision).
        axis_scales = 1. / np.sqrt(self.conditioning).reshape((-1, 1))
        self.Xinit = axis_scales * np.random.randn(self.ndims, self.nbatch)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, hash(tuple(self.conditioning))))
class RoughWell(Distribution):
    """The "rough well" example from the LAHMC paper: a broad quadratic
    well of width scale1 overlaid with cosine ripples of period scale2."""

    def __init__(self, ndims=2, nbatch=100, scale1=100, scale2=4):
        self.scale1 = scale1
        self.scale2 = scale2
        self.description = '{} Rough Well'.format(ndims)
        super(RoughWell, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        cosine_term = np.cos(X*2*np.pi/self.scale2)
        energy = np.sum((X**2) / (2*self.scale1**2) + cosine_term, axis=0).reshape((1,-1))
        return energy

    @overrides(Distribution)
    def dEdX_val(self, X):
        sine_term = np.sin(X*2*np.pi/self.scale2)
        return X/self.scale1**2 + -sine_term*2*np.pi/self.scale2

    @overrides(Distribution)
    def gen_init_X(self):
        # Initialize from the dominant broad Gaussian component.
        samples = np.random.randn(self.ndims, self.nbatch)
        self.Xinit = self.scale1 * samples

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.scale1, self.scale2))
class MultimodalGaussian(Distribution):
    """Equal-weight mixture of two isotropic Gaussians whose means are
    offset by +/- sep_vec (the first axis carries an extra separation)."""

    def __init__(self, ndims=2, nbatch=100, separation=3):
        # Bug fix: store separation — __hash__ reads self.separation, which
        # was never assigned, so hashing any instance raised AttributeError.
        self.separation = separation
        self.sep_vec = np.array([separation] * nbatch +
                                [0] * (ndims - 1) * nbatch).reshape(ndims, nbatch)
        # separated along first axis
        self.sep_vec[0] += separation
        super(MultimodalGaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        # Trim to the actual batch size so partial batches work.
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        # NOTE(review): direct exp/log here can underflow to -log(0) for
        # states far from both modes; a log-sum-exp form would be more
        # stable — confirm before using with large separations.
        return -np.log(np.exp(-np.sum((X + trim_sep_vec)**2, axis=0)) +
                       np.exp(-np.sum((X - trim_sep_vec)**2, axis=0)))

    @overrides(Distribution)
    def dEdX_val(self, X):
        # allows for partial batch size
        trim_sep_vec = self.sep_vec[:, :X.shape[1]]
        common_exp = np.exp(np.sum(4 * trim_sep_vec * X, axis=0))
        # floating point hax
        return ((2 * ((X - trim_sep_vec) * common_exp + trim_sep_vec + X)) /
                (common_exp + 1))

    @overrides(Distribution)
    def init_X(self):
        # okay, this is pointless... sep vecs cancel
        self.Xinit = ((np.random.randn(self.ndims, self.nbatch) + self.sep_vec) +
                      (np.random.randn(self.ndims, self.nbatch) - self.sep_vec))

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.separation))
class TestGaussian(Distribution):
    """Isotropic Gaussian with per-axis standard deviation ``sigma``.

    Intended as a simple sanity-check target for samplers.
    """

    def __init__(self, ndims=2, nbatch=100, sigma=1.):
        self.sigma = sigma
        super(TestGaussian, self).__init__(ndims, nbatch)

    @overrides(Distribution)
    def E_val(self, X):
        # E(x) = ||x||^2 / (2 sigma^2), columnwise.
        squared_norms = np.sum(X**2, axis=0).reshape((1, -1))
        return squared_norms / (2. * self.sigma ** 2)

    @overrides(Distribution)
    def dEdX_val(self, X):
        return X / self.sigma ** 2

    @overrides(Distribution)
    def gen_init_X(self):
        self.Xinit = np.random.randn(self.ndims, self.nbatch)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims, self.sigma))
#pylint: disable=too-many-instance-attributes
class ProductOfT(Distribution):
    """ Provides the product of T experts distribution
    """

    #pylint: disable=too-many-arguments
    def __init__(self, ndims=36, nbasis=36, nbatch=100, lognu=None, W=None, b=None):
        """ Product of T experts, assumes a fixed W that is sparse and alpha that is

        :param lognu: log degrees-of-freedom per expert (random default)
        :param W: receptive fields, [ndims] x [nbasis] (identity default)
        :param b: per-expert biases (zero default)
        :raises ImportError: if theano is unavailable
        """
        # awkward hack to import theano in poe only
        try:
            import theano.tensor as T
            import theano
            self.theano = theano
            self.T = T
        except Exception as exc:
            # Chain the original failure instead of swallowing it.
            raise ImportError("Theano could not be imported") from exc
        if ndims != nbasis:
            raise NotImplementedError("Initializer only works for ndims == nbasis")
        self.ndims = ndims
        self.nbasis = nbasis
        self.nbatch = nbatch
        if W is None:
            W = np.eye(ndims, nbasis)
        self.weights = self.theano.shared(np.array(W, dtype='float32'), 'W')
        if lognu is None:
            pre_nu = np.random.rand(nbasis,) * 2 + 2.1
        else:
            pre_nu = np.exp(lognu)
        self.nu = self.theano.shared(np.array(pre_nu, dtype='float32'), 'nu')
        if b is None:
            b = np.zeros((nbasis,))
        self.bias = self.theano.shared(np.array(b, dtype='float32'), 'b')
        # Compile energy and gradient ops once, up front.
        state = T.matrix()
        energy = self.E_def(state)
        gradient = T.grad(T.sum(energy), state)
        #@overrides(Distribution)
        self.E_val = self.theano.function([state], energy, allow_input_downcast=True)
        #@overrides(Distribution)
        self.dEdX_val = self.theano.function([state], gradient, allow_input_downcast=True)
        super(ProductOfT,self).__init__(ndims,nbatch)
        self.backend = 'theano'

    def E_def(self,X):
        """
        energy for a POE with student's-t expert in terms of:
          samples [# dimensions]x[# samples] X
          receptive fields [# dimensions]x[# experts] W
          biases [# experts] b
          degrees of freedom [# experts] nu
        """
        rshp_b = self.bias.reshape((1,-1))
        rshp_nu = self.nu.reshape((1, -1))
        alpha = (rshp_nu + 1.)/2.
        energy_per_expert = alpha * self.T.log(1 + ((self.T.dot(X.T, self.weights) + rshp_b) / rshp_nu) ** 2)
        energy = self.T.sum(energy_per_expert, axis=1).reshape((1, -1))
        return energy

    @overrides(Distribution)
    def gen_init_X(self):
        # hack to remap samples from a generic product of experts to
        # the model we are actually going to generate samples from
        Zinit = np.zeros((self.ndims, self.nbatch))
        # Bug fix: the original used Python-2-only xrange, which raises
        # NameError under Python 3.
        for ii in range(self.ndims):
            Zinit[ii] = stats.t.rvs(self.nu.get_value()[ii], size=self.nbatch)
        Yinit = Zinit - self.bias.get_value().reshape((-1, 1))
        self.Xinit = np.dot(np.linalg.inv(self.weights.get_value()), Yinit)

    @overrides(Distribution)
    def __hash__(self):
        return hash((self.ndims,
                     self.nbasis,
                     hash(tuple(self.nu.get_value())),
                     hash(tuple(self.weights.get_value().ravel())),
                     hash(tuple(self.bias.get_value().ravel()))))
| rueberger/MJHMC | mjhmc/misc/distributions.py | distributions.py | py | 16,295 | python | en | code | 24 | github-code | 36 |
7135815082 | # -*- coding: utf-8 -*-
# ***************************************************
# * File : main.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-04-11
# * Version : 0.1.041123
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
from typing import List, Optional, Callable, Iterable
import torch
from torch import nn
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.torch.distributions import StudentTOutput
from gluonts.model.forecast_generator import DistributionForecastGenerator
import pytorch_lightning as pl
# global variable
LOGGING_LABEL = __file__.split('/')[-1][:-3]
def mean_abs_scaling(context, min_scale = 1e-5):
    """Per-series mean-absolute scale for a (batch, context_length) tensor.

    Returns a (batch, 1) tensor so it broadcasts against ``context`` when
    dividing.  ``min_scale`` floors the scale to avoid division by zero for
    all-zero series.
    """
    # BUG FIX: ``.mean()`` (no dim) reduced over *all* elements to a 0-d
    # tensor, which both mixed every series together and made the following
    # ``.unsqueeze(1)`` raise (dim 1 is out of range for a 0-d tensor).
    # Reduce over the time axis only, one scale per series.
    return context.abs().mean(dim=1).clamp(min_scale, None).unsqueeze(1)
class FeedForwardNetwork(nn.Module):
    """Simple feed-forward probabilistic forecaster (GluonTS-style).

    Maps the last ``context_length`` observations of a series to the
    parameters of a ``distr_output`` distribution (Student's t by default)
    over the next ``prediction_length`` steps.
    """
    def __init__(self, prediction_length: int, context_length: int,
                 hidden_dimensions: List[int], batch_norm: bool = False,
                 distr_output: Callable = StudentTOutput(),
                 scaling: Callable = mean_abs_scaling) -> None:
        super(FeedForwardNetwork, self).__init__()
        # ------------------------------
        # Parameters
        # ------------------------------
        # check params
        assert prediction_length > 0
        assert context_length > 0
        assert len(hidden_dimensions) > 0
        # params init
        self.prediction_length = prediction_length  # forecast horizon length
        self.context_length = context_length  # number of past steps fed in
        self.hidden_dimensions = hidden_dimensions  # hidden layer widths
        self.distr_output = distr_output  # output distribution family
        self.batch_norm = batch_norm  # whether to insert BatchNorm1d layers
        # ------------------------------
        # Layers
        # ------------------------------
        # layer1: input scaling transform (callable, not a module)
        self.scaling = scaling
        # layer2: hidden stack; input width is context_length, then all but
        # the last hidden width (the last one is produced by layer3 below)
        modules = []
        dimensions = [context_length] + hidden_dimensions[:-1]  # dimensions=[0, 1, 2, ..., n]
        for in_size, out_size in zip(dimensions[:-1], dimensions[1:]):
            # layer2.1: Linear + ReLU
            modules += [
                self.__make_linear(in_size, out_size),
                nn.ReLU()
            ]
            # layer2.2: optional normalization
            if batch_norm:
                modules.append(nn.BatchNorm1d(out_size))
        # layer3: expand to one hidden vector per forecast step
        modules.append(
            self.__make_linear(dimensions[-1], prediction_length * hidden_dimensions[-1])
        )
        # layer4: assembled trunk + projection to distribution parameters
        self.nn = nn.Sequential(*modules)
        self.args_proj = self.distr_output.get_args_proj(hidden_dimensions[-1])
    @staticmethod
    def __make_linear(dim_in, dim_out):
        # Linear layer with small uniform weight init and zero bias.
        linear = nn.Linear(dim_in, dim_out)
        torch.nn.init.uniform_(linear.weight, -0.07, 0.07)
        torch.nn.init.zeros_(linear.bias)
        return linear
    def forward(self, context):
        """Return (distr_args, loc, scale) for the forecast distribution."""
        # data scaling: normalize each series, remember the scale
        scale = self.scaling(context)
        scaled_context = context / scale
        # trunk output, reshaped to (batch, prediction_length, hidden[-1])
        nn_out = self.nn(scaled_context)
        nn_out_reshaped = nn_out.reshape(-1, self.prediction_length, self.hidden_dimensions[-1])
        # student t distribution output: project to parameters; loc is 0
        # because the series was divided by ``scale``
        distr_args = self.args_proj(nn_out_reshaped)
        return distr_args, torch.zeros_like(scale), scale
    def get_predictor(self, input_transform, batch_size = 32, device = None):
        """Wrap this network in a GluonTS PyTorchPredictor for inference."""
        return PyTorchPredictor(
            prediction_length = self.prediction_length,
            input_names = ["past_target"],
            prediction_net = self,
            batch_size = batch_size,
            input_transform = input_transform,
            forecast_generator = DistributionForecastGenerator(self.distr_output),
            device = device,
        )
class LightningFeedForwardNetwork(FeedForwardNetwork, pl.LightningModule):
    """PyTorch-Lightning wrapper adding the training loop hooks."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def training_step(self, batch, batch_idx):
        """Mean negative log-likelihood of the forecast distribution on one batch."""
        context, target = batch["past_target"], batch["future_target"]
        assert context.shape[-1] == self.context_length
        assert target.shape[-1] == self.prediction_length
        distr_args, loc, scale = self(context)
        distr = self.distr_output.distribution(distr_args, loc, scale)
        # loss function: NLL averaged over the batch
        nll = -distr.log_prob(target)
        return nll.mean()

    def configure_optimizers(self):
        """Adam with a fixed 1e-3 learning rate."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)
# Smoke-test entry point.
def main():
    """Fetch the GluonTS 'electricity' benchmark dataset (downloads on first use)."""
    from gluonts.dataset.repository.datasets import get_dataset
    dataset = get_dataset("electricity")
if __name__ == "__main__":
    main()
| wangzhefeng/tsproj | models/todo/FeedForwardNetwork.py | FeedForwardNetwork.py | py | 4,869 | python | en | code | 0 | github-code | 36 |
74694208105 | '''
select_subhalos
library of functions for selecting subhalos (for purpose of cw reconstruction) and padding
set of positions in periodic cube (to counteract disperse's problems with boundary conds.)
Chris J Duckworth cduckastro@gmail.com
'''
import numpy as np
import groupcat as gc
def return_stel_tracers(basepath, snapnum, blen, min_mass=10**8.5):
    '''
    Return positions of every subhalo whose stellar mass exceeds min_mass at
    the given snapshot.  basepath should point at the /output directory of
    the chosen simulation; blen is the corresponding box side length (kept
    for interface compatibility -- it is not used here).
    '''
    # Load only the group-catalogue fields we need.
    fields = ['SubhaloMassType', 'SubhaloPos']
    subhalos = gc.loadSubhalos(basepath, snapnum, fields=fields)
    # Column 4 of SubhaloMassType is the stellar component, in 1e10 Msun units.
    stellar_mass = subhalos['SubhaloMassType'][()][:, 4] * 10**10
    return subhalos['SubhaloPos'][()][stellar_mass > min_mass]
def box_extend(blen, pos, frac=0.1):
    '''
    Pad a periodic cube of side length blen by replicating points that lie
    within frac*blen of each face onto the opposite side.  pos is an (N, 3)
    array; a new, larger array is returned and the input is left untouched.
    This counteracts disperse's problems with periodic boundary conditions.
    '''
    padded = pos
    # Wrap x, then y, then z.  Each pass works on the already-padded set so
    # that points near edges and corners get replicated in every direction.
    for axis in range(3):
        # Both face slices are taken from the *current* set before either
        # replica batch is appended.  Boolean masking copies, so the += / -=
        # below never touch the source rows.
        near_low = padded[padded[:, axis] < blen * frac]
        near_low[:, axis] += blen
        near_high = padded[padded[:, axis] > blen * (1 - frac)]
        near_high[:, axis] -= blen
        padded = np.concatenate((padded, near_low, near_high), axis=0)
    return padded
| Chris-Duckworth/disperse_TNG | lib/select_subhalos.py | select_subhalos.py | py | 1,858 | python | en | code | 3 | github-code | 36 |
18394694315 | import os
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import tiledb
def test_schema_evolution(tmp_path):
    """End-to-end ArraySchemaEvolution: add an attribute, then drop one.

    Verifies that reads reflect each evolution step, that adding a duplicate
    attribute raises a descriptive error, and that consolidation preserves
    the evolved data.
    """
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    uri = str(tmp_path)
    attrs = [
        tiledb.Attr(name="a1", dtype=np.float64),
        tiledb.Attr(name="a2", dtype=np.int32),
    ]
    dims = [tiledb.Dim(domain=(0, 3), dtype=np.uint64)]
    domain = tiledb.Domain(*dims)
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, sparse=False)
    tiledb.Array.create(uri, schema)
    data1 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data1
    # Baseline: both original attributes round-trip; "a3" does not exist yet.
    with tiledb.open(uri) as A:
        res = A[:]
        assert_array_equal(res["a1"], data1["a1"])
        assert_array_equal(res["a2"], data1["a2"])
        assert "a3" not in res.keys()
    newattr = tiledb.Attr("a3", dtype=np.int8)
    se.add_attribute(newattr)
    # Adding the same attribute twice must fail with a descriptive error.
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.add_attribute(newattr)
    assert "Input attribute name is already there" in str(excinfo.value)
    assert "tiledb/schema_evolution.cc" in str(excinfo.value)
    se.array_evolve(uri)
    data2 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
        "a3": np.random.randint(0, 255, size=4).astype(np.int8),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data2
    # All three attributes must survive both a plain read and consolidation.
    def test_it():
        with tiledb.open(uri) as A:
            res = A[:]
            assert_array_equal(res["a1"], data2["a1"])
            assert_array_equal(res["a2"], data2["a2"])
            assert_array_equal(res["a3"], data2["a3"])
    test_it()
    tiledb.consolidate(uri)
    test_it()
    # Second evolution: drop "a1" and verify reads before/after consolidation.
    se = tiledb.ArraySchemaEvolution(ctx)
    se.drop_attribute("a1")
    se.array_evolve(uri)
    data3 = {
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
        "a3": np.random.randint(0, 255, size=4).astype(np.int8),
    }
    def test_it2():
        with tiledb.open(uri) as A:
            res = A[:]
            assert "a1" not in res.keys()
            assert_array_equal(res["a2"], data3["a2"])
            assert_array_equal(res["a3"], data3["a3"])
    with tiledb.open(uri, "w") as A:
        A[:] = data3
    test_it2()
    tiledb.consolidate(uri)
    test_it2()
def test_schema_evolution_timestamp(tmp_path):
    """Evolving with an explicit timestamp must create a schema file carrying it."""
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    vfs = tiledb.VFS()
    uri = str(tmp_path)
    schema_uri = os.path.join(uri, "__schema")

    dom = tiledb.Domain(tiledb.Dim(domain=(0, 3), dtype=np.uint64))
    schema = tiledb.ArraySchema(
        domain=dom, attrs=[tiledb.Attr(name="a1", dtype=np.float64)], sparse=False
    )
    tiledb.Array.create(uri, schema)

    def schema_timestamps():
        # Schema file names embed the timestamp as the third "_"-separated
        # field; enumeration folders are not schema files, skip them.
        files = (f for f in vfs.ls(schema_uri) if "__enumerations" not in f)
        return [int(os.path.basename(f).split("_")[2]) for f in files]

    assert 123456789 not in schema_timestamps()

    se.timestamp(123456789)
    se.add_attribute(tiledb.Attr("a2", dtype=np.int8))
    se.array_evolve(uri)

    assert 123456789 in schema_timestamps()
def test_schema_evolution_with_enmr(tmp_path):
    """Evolution with enumerated attributes.

    An attribute referencing an unknown enumeration must fail to evolve;
    after the enumeration is added the attribute appears, and an in-use
    enumeration cannot be dropped before its attribute is dropped.
    """
    ctx = tiledb.default_ctx()
    se = tiledb.ArraySchemaEvolution(ctx)
    uri = str(tmp_path)
    attrs = [
        tiledb.Attr(name="a1", dtype=np.float64),
        tiledb.Attr(name="a2", dtype=np.int32),
    ]
    dims = [tiledb.Dim(domain=(0, 3), dtype=np.uint64)]
    domain = tiledb.Domain(*dims)
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, sparse=False)
    tiledb.Array.create(uri, schema)
    data1 = {
        "a1": np.arange(5, 9),
        "a2": np.random.randint(0, 1e7, size=4).astype(np.int32),
    }
    with tiledb.open(uri, "w") as A:
        A[:] = data1
    with tiledb.open(uri) as A:
        assert not A.schema.has_attr("a3")
    # "a3" references enumeration "e3" which does not exist yet.
    newattr = tiledb.Attr("a3", dtype=np.int8, enum_label="e3")
    se.add_attribute(newattr)
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.array_evolve(uri)
        # NOTE(review): this assert sits inside the pytest.raises block, so
        # it never executes (array_evolve raises first) -- confirm intent.
        assert " Attribute refers to an unknown enumeration" in str(excinfo.value)
    # Supplying the enumeration makes the evolution succeed.
    se.add_enumeration(tiledb.Enumeration("e3", True, np.arange(0, 8)))
    se.array_evolve(uri)
    se = tiledb.ArraySchemaEvolution(ctx)
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a3")
        assert A.attr("a3").enum_label == "e3"
    # Dropping an enumeration still referenced by "a3" must fail.
    se.drop_enumeration("e3")
    with pytest.raises(tiledb.TileDBError) as excinfo:
        se.array_evolve(uri)
    assert "Unable to drop enumeration" in str(excinfo.value)
    # Dropping the attribute first makes the evolution legal.
    se.drop_attribute("a3")
    se.array_evolve(uri)
    with tiledb.open(uri) as A:
        assert not A.schema.has_attr("a3")
@pytest.mark.parametrize(
    "type,data",
    (
        ("int", [0]),
        ("bool", [True, False]),
        ("str", ["abc", "defghi", "jk"]),
        ("bytes", [b"abc", b"defghi", b"jk"]),
    ),
)
def test_schema_evolution_extend_enmr(tmp_path, type, data):
    """Extending an enumeration must be visible through the evolved array.

    Parametrized over the supported enumeration dtypes.  (The parameter is
    named ``type`` by the parametrize string above, shadowing the builtin;
    renaming it would change the test id, so it is left as-is.)
    """
    uri = str(tmp_path)
    enmr = tiledb.Enumeration("e", True, dtype=type)
    attrs = [tiledb.Attr(name="a", dtype=int, enum_label="e")]
    domain = tiledb.Domain(tiledb.Dim(domain=(0, 3), dtype=np.uint64))
    schema = tiledb.ArraySchema(domain=domain, attrs=attrs, enums=[enmr])
    tiledb.Array.create(uri, schema)
    # Initial state: attribute "a" is bound to the (empty) enumeration "e".
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a")
        assert A.attr("a").enum_label == "e"
        assert A.enum("e") == enmr
    # Extend with dtype-matching values and evolve the array.
    se = tiledb.ArraySchemaEvolution()
    updated_enmr = enmr.extend(data)
    se.extend_enumeration(updated_enmr)
    se.array_evolve(uri)
    # The reopened array must expose the extended enumeration.
    with tiledb.open(uri) as A:
        assert A.schema.has_attr("a")
        assert A.attr("a").enum_label == "e"
        assert A.enum("e") == updated_enmr
def test_schema_evolution_extend_check_bad_type():
    """Extending an Enumeration with wrong-dtype values must raise TileDBError."""
    # (dtype, accepted values, rejected value lists) -- mismatched extends
    # are attempted first, then a matching extend must succeed.
    cases = [
        (str, ["a", "b"], ([1, 2, 3], [True, False])),
        (int, [1, 2, 3], (["a", "b"], [True, False])),
        (bool, [True, False], (["a", "b"], [1, 2, 3])),
    ]
    for dtype, good, bad_lists in cases:
        enmr = tiledb.Enumeration("e", True, dtype=dtype)
        for bad in bad_lists:
            with pytest.raises(tiledb.TileDBError):
                enmr.extend(bad)
        enmr.extend(good)
| TileDB-Inc/TileDB-Py | tiledb/tests/test_schema_evolution.py | test_schema_evolution.py | py | 6,667 | python | en | code | 165 | github-code | 36 |
8594392164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Scripts to plot Figure 2, co-occurrence and spearman rho for top 1000 videos in terms of views and watch times.
Usage: python plot_intersection_spearman_top1000.py
Time: ~2M
"""
from __future__ import division, print_function
import os
from scipy import stats
import operator
import matplotlib.pyplot as plt
def plot_intersection(ax, view_rank_dict, watch_rank_dict, color, linestyle, label):
    """Plot the co-occurrence rate of the top-n videos by views vs. watch time.

    For n = 50, 60, ..., 1000, the rate is |top-n(views) & top-n(watch)| / n.
    """
    by_views = sorted(view_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]
    by_watch = sorted(watch_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]
    sizes, rates = [], []
    for n in range(50, 1001, 10):
        top_views = {vid for vid, _ in by_views[:n]}
        top_watch = {vid for vid, _ in by_watch[:n]}
        sizes.append(n)
        rates.append(len(top_views & top_watch) / n)
    ax.plot(sizes, rates, color=color, linestyle=linestyle, label=label)
def plot_spearman(ax, view_rank_dict, watch_rank_dict, color, linestyle, label):
    """Plot Spearman's rho between view and watch ranks of the top-n videos.

    For each n, rho is computed over the union of the two top-n sets.
    NOTE: every video in either top list must be a key of *both* dicts,
    otherwise the lookups below raise KeyError.
    """
    by_views = sorted(view_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]
    by_watch = sorted(watch_rank_dict.items(), key=operator.itemgetter(1), reverse=True)[:1000]
    sizes, rhos = [], []
    for n in range(50, 1001, 10):
        union = list({vid for vid, _ in by_views[:n]} | {vid for vid, _ in by_watch[:n]})
        view_ranks = [view_rank_dict[v] for v in union]
        watch_ranks = [watch_rank_dict[v] for v in union]
        sizes.append(n)
        rhos.append(stats.spearmanr(view_ranks, watch_ranks)[0])
    ax.plot(sizes, rhos, color=color, linestyle=linestyle, label=label)
if __name__ == '__main__':
    # == == == == == == == == Part 1: Set up experiment parameters == == == == == == == == #
    # setting parameters: ALL videos plus the Music (category 10) and
    # News (category 25) subsets, keyed by video id.
    view_rank_dict = {}
    watch_rank_dict = {}
    music_view_rank_dict = {}
    music_watch_rank_dict = {}
    news_view_rank_dict = {}
    news_watch_rank_dict = {}
    # == == == == == == == == Part 2: Load dataset == == == == == == == == #
    input_doc = '../../production_data/new_tweeted_dataset_norm/'
    for subdir, _, files in os.walk(input_doc):
        for f in files:
            with open(os.path.join(subdir, f), 'r') as fin:
                fin.readline()
                for line in fin:
                    # Tab-separated rows: vid, then fields; columns 7/8 of
                    # the remainder are 30-day views and watch time.
                    vid, dump = line.rstrip().split('\t', 1)
                    view30 = float(dump.split('\t')[7])
                    watch30 = float(dump.split('\t')[8])
                    view_rank_dict[vid] = view30
                    watch_rank_dict[vid] = watch30
                    # File-name prefix encodes the category id.
                    if f.startswith('10'):
                        music_view_rank_dict[vid] = view30
                        music_watch_rank_dict[vid] = watch30
                    if f.startswith('25'):
                        news_view_rank_dict[vid] = view30
                        news_watch_rank_dict[vid] = watch30
            print('>>> Loading data: {0} done!'.format(os.path.join(subdir, f)))
    # Figure (a): co-occurrence rate of the two top-n lists.
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    plot_intersection(ax1, view_rank_dict, watch_rank_dict, color='r', linestyle='-', label='ALL')
    plot_intersection(ax1, music_view_rank_dict, music_watch_rank_dict, color='k', linestyle='--', label='Music')
    plot_intersection(ax1, news_view_rank_dict, news_watch_rank_dict, color='k', linestyle=':', label='News')
    ax1.set_ylim([0, 1])
    ax1.set_xlabel('top $n$ videos', fontsize=18)
    ax1.set_ylabel('Intersection size', fontsize=18)
    ax1.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax1.tick_params(axis='both', which='major', labelsize=16)
    ax1.legend(loc='lower right', handlelength=1, frameon=False, fontsize=20)
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax1.set_title('(a) co-occurrence rate', fontsize=18)
    plt.tight_layout()
    plt.show()
    # Figure (b): Spearman's rho between the two rankings.
    fig = plt.figure()
    ax2 = fig.add_subplot(1, 1, 1)
    plot_spearman(ax2, view_rank_dict, watch_rank_dict, color='r', linestyle='-', label='ALL')
    plot_spearman(ax2, music_view_rank_dict, music_watch_rank_dict, color='k', linestyle='--', label='Music')
    plot_spearman(ax2, news_view_rank_dict, news_watch_rank_dict, color='k', linestyle=':', label='News')
    ax2.set_ylim([-1, 1])
    ax2.set_xlabel('top $n$ videos', fontsize=18)
    ax2.set_ylabel("Spearman's $\\rho$", fontsize=18)
    ax2.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
    ax2.tick_params(axis='both', which='major', labelsize=16)
    ax2.legend(loc='lower right', handlelength=1, frameon=False, fontsize=20)
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.set_title("(b) Spearman's $\\rho$", fontsize=18)
    plt.tight_layout()
    plt.show()
| avalanchesiqi/yt-longevity | engagement_plots/plot_intersection_spearman_top1000.py | plot_intersection_spearman_top1000.py | py | 5,027 | python | en | code | 2 | github-code | 36 |
3419488617 | from spectractor import parameters
from spectractor.config import set_logger
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
class LogBook:
    """Class to load_image and analyse observation logbook csv files."""
    def __init__(self, logbook="./tests/data/ctiofulllogbook_jun2017_v5.csv"):
        """Load and initialise the logbook

        Parameters
        ----------
        logbook: str
            Path to the logbook. Must be a CSV file.

        Examples
        ----------
        >>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
        >>> assert logbook.df is not None
        >>> print(logbook.logbook)
        ./tests/data/ctiofulllogbook_jun2017_v5.csv
        >>> print(logbook.df['disperser'][:2])
        0    Ron400
        1    Ron400
        Name: disperser, dtype: object
        >>> logbook = LogBook('./log.csv')
        """
        self.my_logger = set_logger(self.__class__.__name__)
        self.logbook = logbook
        if not os.path.isfile(logbook):
            self.my_logger.error('CSV logbook file {} not found.'.format(logbook))
            # NOTE(review): early return leaves self.df undefined; calling
            # search_for_image() afterwards would raise AttributeError --
            # confirm this is the intended failure mode.
            return
        # self.csvfile = open(self.logbook, 'rU', encoding='latin-1')
        # self.reader = csv.DictReader(self.csvfile, delimiter=';', dialect=csv.excel_tab)
        # European-style CSV: ';' separators, ',' decimals, latin-1 encoding.
        self.df = pd.read_csv(self.logbook, sep=";", decimal=",", encoding='latin-1', header='infer')
        self.df['date'] = pd.to_datetime(self.df.date)
    def search_for_image(self, filename):
        """
        Look for an image file name in the logbook and load_image properties:

        - Obj-posXpix and Obj-posYpix: the [x0,y0] guessed pixel position in the image
        - Dx and Dy: the x and y windows in pixel to search for the target; set XWINDOW and YWINDOW variables in parameters.py
        - object: the name of the target

        Parameters
        ----------
        filename: str
            the fits image file name (not the path, only the file name.)

        Returns
        -------
        disperser_label: str
            the name of the disperser
        target: str
            the name of the target
        xpos: int
            the x position of the target (in pixel)
        ypos: int
            the y position of the target (in pixel)

        Examples
        --------
        >>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
        >>> disperser_label, target, xpos, ypos = logbook.search_for_image("unknown_file.fits")
        >>> print(disperser_label, target, xpos, ypos)
        None None None None
        >>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170605_028.fits")
        >>> print(disperser_label, target, xpos, ypos)
        HoloPhAg PNG321.0+3.9 814 585
        >>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170608_119.fits")
        >>> print(disperser_label, target, xpos, ypos)
        None HD205905 None None
        >>> disperser_label, target, xpos, ypos = logbook.search_for_image("reduc_20170630_001.fits")
        >>> print(disperser_label, target, xpos, ypos)
        None bias None None
        """
        disperser_label = None
        target = None
        xpos = None
        ypos = None
        skip = False
        try:
            # First (and assumed unique) logbook row matching this file name.
            row = self.df.loc[(self.df['file'] == filename)].iloc[0]
            target = row['object']
            # Calibration frames and rows flagged "skip" are rejected.
            if row['object'] == 'bias' or row['object'] == 'flat' or row['object'] == 'zero':
                self.my_logger.error(
                    'Fits file %s in logbook %s has flag %s. Skip file.' % (filename, self.logbook, target))
                skip = True
            if row['skip'] == 'skip':
                self.my_logger.error('Fits file %s in logbook has flag "skip". Skip file.' % filename)
                skip = True
            if np.isnan(row['Obj-posXpix']):
                self.my_logger.error(
                    'Fits file %s in logbook %s has no target x position. Skip file.' % (filename, self.logbook))
                skip = True
            if np.isnan(row['Obj-posYpix']):
                self.my_logger.error(
                    'Fits file %s in logbook %s has no target y position. Skip file.' % (filename, self.logbook))
                skip = True
            # Optional per-image search windows override the global defaults
            # (module-level side effect on spectractor.parameters).
            if not np.isnan(row['Dx']):
                parameters.XWINDOW = int(row['Dx'])
                parameters.XWINDOW_ROT = int(row['Dx'])
            if not np.isnan(row['Dy']):
                parameters.YWINDOW = int(row['Dy'])
                parameters.YWINDOW_ROT = int(row['Dy'])
            if not skip:
                xpos = int(row['Obj-posXpix'])
                ypos = int(row['Obj-posYpix'])
                disperser_label = row['disperser']
        except IndexError:
            # .iloc[0] on an empty selection: the file is not in the logbook.
            if target is None and skip is False:
                self.my_logger.error('Fits file %s not found in logbook %s.' % (filename, self.logbook))
        return disperser_label, target, xpos, ypos
    def plot_columns_vs_date(self, column_names):
        """Plot of the column property with respect to the dates.

        Parameters
        ----------
        column_names: list, str
            List of column names to plot versus time from the log book.

        Examples
        --------
        >>> logbook = LogBook('./tests/data/ctiofulllogbook_jun2017_v5.csv')
        >>> logbook.plot_columns_vs_date(['seeing'])
        >>> logbook.plot_columns_vs_date(['P', 'T'])
        """
        if isinstance(column_names, str):
            column_names = [column_names]
        self.df.plot(x='date', y=column_names)
        if parameters.DISPLAY:
            plt.show()
        if parameters.PdfPages:
            parameters.PdfPages.savefig()
if __name__ == "__main__":
import doctest
doctest.testmod()
| LSSTDESC/Spectractor | spectractor/logbook.py | logbook.py | py | 5,793 | python | en | code | 13 | github-code | 36 |
7426499074 | from maltego_trx.transform import DiscoverableTransform
from db import db
from utils import row_dict_to_conversation_email
class DomainToEnronUsers(DiscoverableTransform):
    """
    Given a maltego.Domain Entity, return a list of Users for this Domain.
    """

    @classmethod
    def create_entities(cls, request, response):
        """Query the Enron DB for addresses on the requested domain and emit one
        maltego.EmailAddress entity per row (capped by the request slider)."""
        rows = db.get_ppl_by_domain(request.Value, limit=request.Slider)
        for row in rows:
            response.addEntity('maltego.EmailAddress', row['email'])
| crest42/enron | transforms/DomainToEnronUsers.py | DomainToEnronUsers.py | py | 528 | python | en | code | 0 | github-code | 36 |
32793914547 | import torch
import torch.nn as nn
from conf import device
class Encoder(nn.Module):
    """Token embedding followed by a single-layer GRU, stepped one token at a time."""

    def __init__(self, vocab_size, hidden_size=256):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        """Embed one token and advance the GRU a single step.

        Returns (output, new_hidden), each of shape (1, 1, hidden_size).
        """
        step = self.embedding(input).view(1, 1, -1)
        return self.gru(step, hidden)

    def init_hidden(self):
        """Zero initial hidden state, allocated on the configured device."""
        return torch.zeros(1, 1, self.hidden_size, dtype=torch.float32, device=device)
| junix/gen_poem | encoder.py | encoder.py | py | 622 | python | en | code | 0 | github-code | 36 |
6790152771 | import csv
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from exo_accounts.models import EmailAddress
from exo_certification.tasks import HubspotCertificationDealSyncTask
from ...models import ExOCertification, CertificationRequest
class Command(BaseCommand):
    """Create approved L2A certification requests (price 0) for coaches
    listed in a CSV file of the form: <?>, email, YYYY-MM-DD, ..."""
    def add_arguments(self, parser):
        parser.add_argument(
            '-f', '--file', nargs='+', type=str, help='CSV file')
    def handle(self, *args, **kwargs):
        # Only the first file given with -f/--file is processed.
        path = kwargs.get('file')[0]
        certification = ExOCertification.objects.get(level='L2A')
        with open(path) as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',')
            for row in spamreader:
                try:
                    # Column 1: email, column 2: creation date (ISO).
                    str_email = row[1]
                    email = EmailAddress.objects.get(email=str_email)
                    user = email.user
                    data = {
                        'price': 0,
                        'created': datetime.strptime(row[2], '%Y-%m-%d'),
                        'status': settings.EXO_CERTIFICATION_REQUEST_STATUS_CH_DRAFT,
                        'user': user,
                        'requester_email': str_email,
                        'requester_name': user.full_name,
                    }
                    certification_request, created = CertificationRequest.objects.get_or_create(
                        user=user,
                        certification=certification,
                        defaults=data,
                    )
                    if created:
                        # Sync the deal to Hubspot synchronously, then mark
                        # the freshly created request as approved.
                        HubspotCertificationDealSyncTask().s(pk=certification_request.pk).apply()
                        certification_request.refresh_from_db()
                        self.stdout.write(
                            self.style.SUCCESS('email [{}]: Successfully created {}'.format(
                                str_email, certification_request)
                            )
                        )
                        certification_request.status = settings.EXO_CERTIFICATION_REQUEST_STATUS_CH_APPROVED
                        certification_request.save(update_fields=['status'])
                    else:
                        self.stdout.write(
                            self.style.WARNING('email [{}]: Already exists CertRequest {}'.format(
                                str_email, certification_request)
                            )
                        )
                except Exception as exc:
                    # NOTE(review): if row[1] itself raises (short row) on the
                    # first iteration, str_email is unbound here and this line
                    # raises NameError, masking the original error -- confirm
                    # and consider initialising str_email before the try.
                    self.stdout.write(
                        self.style.ERROR('email [{}]: Errored {}'.format(str_email, exc)))
| tomasgarzon/exo-services | service-exo-core/exo_certification/management/commands/generate_certification_requests_free_coaches.py | generate_certification_requests_free_coaches.py | py | 2,647 | python | en | code | 0 | github-code | 36 |
41831305861 | from typing import Optional
from cartes import COEUR, COULEURS, CarteBelote, CarteSetBelote, Couleur, Pli
class Annonce:
    """A bid (contract announcement): a trump suit, a score to make, and the
    bidding player.  0 is the null bid; 1000/2000 are capot-style bids."""

    VALID_SCORES = list(range(80, 170, 10)) + [0, 1000, 2000]

    def __init__(self, atout, score_a_faire, joueur):
        """Validate and store the bid; raises ValueError on a bad score or suit."""
        if int(score_a_faire) not in self.VALID_SCORES:
            raise ValueError("score_a_faire non valide")
        if not isinstance(atout, Couleur):
            raise ValueError("couleur non valide")
        self.atout = atout
        # NOTE(review): stored as given, not as int(score_a_faire); callers in
        # this file pass ints, but a str would pass validation -- confirm.
        self.score_a_faire = score_a_faire
        self.joueur = joueur

    def __lt__(self, other):
        # BUG FIX: the previous implementation fell through and implicitly
        # returned None when not strictly lower; return a real bool so
        # comparisons always yield True/False.
        return self.score_a_faire < other.score_a_faire
# Sentinel "no bid yet": score 0, beaten by any real bid (suit is irrelevant).
ANNONCE_NULLE = Annonce(atout=COEUR, score_a_faire=0, joueur=None)
def poser_question(question, reponses_possibles):
    """Prompt on stdin until the user enters one of the accepted answers,
    then return that answer."""
    answer = None
    while answer not in reponses_possibles:
        answer = input(question)
    return answer
class Joueur:
    """A belote player: a hand of cards, belote flag, won tricks, and the
    interactive (stdin/stdout) actions used during a game."""
    def __init__(self, nom: str):
        self.nom: str = nom
        self.belote: bool = False
        self.main: CarteSetBelote = CarteSetBelote()
        self.doit_annoncer: bool = True
        self.equipe: Optional[Equipe] = None  # Set when the team is created
        self.plis: list[Pli] = []
    def __repr__(self) -> str:
        return self.nom
    def _reinit(self):
        """Reset per-deal state (hand, belote flag, tricks, bidding turn)."""
        self.doit_annoncer = True
        self.belote = False
        self.main = CarteSetBelote()
        self.plis = []
    @property
    def _total_points(self):
        """Points from this player's won tricks, plus 20 for belote."""
        score = 0
        for pli in self.plis:
            score += pli._points
        if self.belote:
            score += 20
        return score
    def _afficher_main(self):
        """Print the current hand."""
        print(self.main)
    def _annoncer(self, meilleure_annonce):
        """Interactively ask this player for a bid beating meilleure_annonce.

        Returns the new Annonce, or None if the player passes.  Marks the
        player as having had their bidding turn either way.
        """
        self._afficher_main()
        reponse = poser_question(f"Souhaitez-vous annoncer {self} ?", ["o", "n"])
        if reponse == "o":
            couleur = poser_question("Couleur ?", list(map(lambda x: x.nom, COULEURS)))
            couleur = list(filter(lambda x: x.nom == couleur, COULEURS))[0]
            # Only scores strictly above the current best bid are offered.
            scores_possibles = list(
                map(
                    lambda x: str(x),
                    filter(
                        lambda x: x > meilleure_annonce.score_a_faire,
                        Annonce.VALID_SCORES,
                    ),
                )
            )
            score = poser_question("Score ?", scores_possibles)
            annonce = Annonce(atout=couleur, score_a_faire=int(score), joueur=self)
        else:
            annonce = None
        self.doit_annoncer = False
        return annonce
    def _demander_melanger(self) -> bool:
        """Ask whether this player wants to shuffle; True on 'o'."""
        reponse = input(f"Souhaitez-vous mélanger {self.nom}? [o/n]: ")
        if reponse == "o":
            return True
        return False
    def _ajouter_carte_en_main(self, carte):
        """Add a dealt card to the hand."""
        self.main.append(carte)
    def _faire_annonce(self) -> bool:
        """Ask whether this player wants to make an announcement; True on 'o'."""
        reponse = input(f"Souhaitez-vous faire une annonce {self.nom}? [o/n]: ")
        if reponse == "o":
            return True
        return False
    def _couleur_demandee_en_main(self, couleur_demandee) -> bool:
        """True if the hand holds at least one card of the led card's suit."""
        return (
            len(
                list(
                    filter(
                        lambda x: x.couleur.forme == couleur_demandee.couleur.forme,
                        self.main,
                    )
                )
            )
            > 0
        )
    def _atout_en_main(self) -> bool:
        """True if the hand holds at least one trump card."""
        return True if True in [carte.atout for carte in self.main] else False
    def _meilleur_atout_en_main(self, other_atout: CarteBelote) -> bool:
        """True if the hand holds a trump stronger than other_atout.

        Raises ValueError when other_atout is not itself a trump.
        """
        if other_atout.atout is False:
            raise ValueError("Vous devez appeler cette fonction avec un autre atout")
        meilleurs_atouts_en_main = list(
            filter(lambda x: x.atout and x > other_atout, self.main)
        )
        if meilleurs_atouts_en_main:
            return True
        return False
    def _jouer_carte(self, pli: Pli, couleur_atout: Couleur):
        """Interactively pick a legal card, announce belote/rebelote if due,
        remove the card from the hand and return it.

        Loops until the chosen card respects follow-suit and trumping rules.
        """
        premiere_carte_jouee = pli[0] if pli else None
        print(f"À toi de jouer {self}")
        self._afficher_main()
        while True:
            # 999 is an out-of-range sentinel forcing at least one prompt.
            index_carte = 999
            while index_carte not in range(0, len(self.main)):
                index_carte = input("Index de la carte à jouer: ")
                try:
                    index_carte = int(index_carte)
                except Exception:
                    ...
            carte_a_jouer = self.main[index_carte]
            if premiere_carte_jouee is not None:
                carte_gagnante = pli._carte_la_plus_forte(couleur_atout=couleur_atout)
                if carte_a_jouer.couleur.forme != premiere_carte_jouee.couleur.forme:
                    # Not following suit: reject if the suit is in hand,
                    # otherwise force a trump when the trick is opposing.
                    couleur_en_main: bool = self._couleur_demandee_en_main(
                        couleur_demandee=premiere_carte_jouee
                    )
                    if couleur_en_main:
                        print(
                            f"Vous possèder du {premiere_carte_jouee.couleur.forme} "
                            "en main. "
                            f"Vous ne pouvez pas jouer du {carte_a_jouer.couleur.forme}"
                        )
                        continue
                    else:
                        if self._atout_en_main():
                            if (
                                carte_a_jouer.atout is False
                                and carte_gagnante.joueur.equipe != self.equipe
                            ):
                                print("Vous devez couper !")
                                continue
                else:
                    # NOTE(review): when under-trumping with a higher trump in
                    # hand, this only prints a warning and still accepts the
                    # card (no `continue`) -- confirm whether the rule should
                    # be enforced here.
                    if premiere_carte_jouee.atout and carte_a_jouer.atout:
                        if carte_a_jouer < premiere_carte_jouee:
                            if self._meilleur_atout_en_main(other_atout=carte_gagnante):
                                print("Vous avez un atout supérieur en main")
            # Belote/rebelote: first of Q/K of trumps prints "Belote", the
            # second (no Q/K of trumps left afterwards) prints "Rebelote".
            if self.belote and carte_a_jouer.atout:
                if carte_a_jouer.valeur in ("D", "R"):
                    atouts_en_main = list(filter(lambda x: x.atout is True, self.main))
                    valeurs_atouts = list(map(lambda x: x.valeur, atouts_en_main))
                    if "D" in valeurs_atouts or "R" in valeurs_atouts:
                        print("Belote")
                    else:
                        print("Rebelote")
            self.main.pop(index_carte)
            return carte_a_jouer
class Equipe:
    """A two-player team with a running score.

    Creating the team registers itself on both players' ``equipe`` attribute.
    """

    def __init__(self, joueur1, joueur2):
        self.joueur1: Joueur = joueur1
        self.joueur2: Joueur = joueur2
        for membre in (joueur1, joueur2):
            membre.equipe = self
        self.score: int = 0

    def __repr__(self) -> str:
        return f"{self.joueur1.nom} & {self.joueur2.nom}"

    @property
    def joueurs(self) -> tuple:
        """Both members, in construction order."""
        return (self.joueur1, self.joueur2)
| slim0/pyContree | joueurs.py | joueurs.py | py | 6,836 | python | fr | code | 0 | github-code | 36 |
40155422506 | import os, sys, time, datetime
# Additional packages
import numpy as np
# ARL Env
from dVRK.PSM_cartesian_ddpg_env import PSMCartesianDDPGEnv
# Stable baselines algorithms
from stable_baselines.ddpg.policies import MlpPolicy
from stable_baselines import HER, DDPG
from stable_baselines.common.noise import NormalActionNoise
from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from stable_baselines.common.callbacks import CheckpointCallback
def redirect_stdout(filepath: str = None):
    """Redirect sys.stdout and sys.stderr to log files under ./.logs.

    When filepath is None a timestamped name is generated.  The error stream
    goes to the same name with an '_err' suffix (assumes a '.txt' extension).
    If the target file already exists, a time suffix is appended to avoid
    clobbering it.  The opened file objects are intentionally left open for
    the life of the process.
    """
    cdir = os.getcwd()
    basepath = os.path.join(cdir, '.logs')
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    # BUG FIX: `now` was previously assigned only in the filepath-is-None
    # branch, so passing an existing filepath raised NameError at the
    # uniquifying strftime below.  Compute it unconditionally.
    now = datetime.datetime.now()
    if filepath is None:
        filepath = 'log_' + now.strftime("%Y_%m_%d-%H_%M_%S.txt")
    filepath = os.path.join(basepath, filepath)
    err_filepath = filepath[:-4] + '_err.txt'
    if os.path.exists(filepath):
        filepath = filepath[:-4]
        filepath += now.strftime("_%H_%M_%S") + '.txt'
    sys.stdout = open(filepath, 'w')
    sys.stderr = open(err_filepath, 'w')
    print("Began logging")
    return
def main(env: PSMCartesianDDPGEnv):
    """Train a DDPG agent on the given dVRK PSM cartesian environment for
    4M timesteps, checkpointing every 100k steps, and save the final model
    to ./ddpg_robot_env."""
    # the noise objects for DDPG: OU exploration noise, no parameter noise
    n_actions = env.action.action_space.shape[0]
    param_noise = None
    action_noise = OrnsteinUhlenbeckActionNoise(
        mean=np.zeros(n_actions),
        sigma=float(0.5) * np.ones(n_actions)
    )
    model = DDPG(
        MlpPolicy,
        env,
        gamma=0.95,
        verbose=1,
        nb_train_steps=300,
        nb_rollout_steps=150,
        param_noise=param_noise,
        batch_size=128,
        action_noise=action_noise,
        random_exploration=0.05,
        normalize_observations=True,
        tensorboard_log="./ddpg_dvrk_tensorboard/",
        observation_range=(-1.5,
                           1.5),
        critic_l2_reg=0.01
    )
    model.learn(
        total_timesteps=4000000,
        log_interval=100,
        callback=CheckpointCallback(save_freq=100000,
                                    save_path="./ddpg_dvrk_tensorboard/")
    )
    model.save("./ddpg_robot_env")
    # NOTE:
    # If continuing learning from previous checkpoint,
    # Comment above chunk of code {model=DDPG(''') till model.save("./her_robot_env")} and uncomment below lines:
    # Replace the XXXXX below with the largest number present in (rl_model_) directory ./ddpg_dvrk_tensorboard/
    # remaining_training_steps = 4000000 - XXXXX
    # model_log_dir = './ddpg_dvrk_tensorboard/rl_model_XXXXX_steps.zip'
    # model = DDPG.load(model_log_dir, env=env)
    # # Reset the model
    # env.reset()
    # model.learn(remaining_training_steps, log_interval=100,
    #             callback=CheckpointCallback(save_freq=100000, save_path="./ddpg_dvrk_tensorboard/"))
    # model.save("./ddpg_robot_env")
def load_model(eval_env):
    """Load the saved DDPG policy and evaluate it over 20 episodes (max 400 steps each)."""
    model = DDPG.load('./ddpg_robot_env', env=eval_env)
    successes = 0
    steps_per_success = []
    for _ in range(20):
        obs = eval_env.reset()
        steps = 0
        for _ in range(400):
            action, _ = model.predict(obs)
            obs, reward, done, _ = eval_env.step(action)
            steps += 1
            if done:
                steps_per_success.append(steps)
                successes += 1
                print("----------------It reached terminal state -------------------")
                break
    print(
        "Robot reached the goal position successfully ",
        successes,
        " times and the Average step count was ",
        np.average(np.array(steps_per_success))
    )
if __name__ == '__main__':
    # redirect_stdout()
    root_link_name = 'baselink'
    # Shared environment configuration for both the training and evaluation runs.
    # NOTE(review): joint_limits has 7 entries and workspace_limits 3 — presumably
    # PSM joint ranges and an XYZ cartesian workspace; confirm against
    # PSMCartesianDDPGEnv before changing any value.
    env_kwargs = {
        'action_space_limit': 0.05,
        'goal_position_range': 0.05,
        'position_error_threshold': 0.01,
        'goal_error_margin': 0.0075,
        'joint_limits':
        {
            'lower_limit': np.array([-0.2,
                                     -0.2,
                                     0.1,
                                     -1.5,
                                     -1.5,
                                     -1.5,
                                     -1.5]),
            'upper_limit': np.array([0.2,
                                     0.2,
                                     0.24,
                                     1.5,
                                     1.5,
                                     1.5,
                                     1.5])
        },
        'workspace_limits':
        {
            'lower_limit': np.array([-0.04,
                                     -0.03,
                                     -0.2]),
            'upper_limit': np.array([0.03,
                                     0.04,
                                     -0.091])
        },
        'enable_step_throttling': False,
        'steps_to_print': 10000
    }
    # Training: build the env, wait for the simulator, then train and clean up.
    ambf_env = PSMCartesianDDPGEnv(**env_kwargs)
    time.sleep(5)
    ambf_env.make(root_link_name)
    ambf_env.reset()
    main(env=ambf_env)
    ambf_env.ambf_client.clean_up()
    # Evaluate learnt policy on a fresh environment instance.
    eval_env = PSMCartesianDDPGEnv(**env_kwargs)
    time.sleep(5)
    eval_env.make(root_link_name)
    eval_env.reset()
    load_model(eval_env=eval_env)
    eval_env.ambf_client.clean_up()
| WPI-AIM/ambf_rl | scripts/dVRK/PSM_cartesian_ddpg_algorithm.py | PSM_cartesian_ddpg_algorithm.py | py | 4,942 | python | en | code | 8 | github-code | 36 |
# Control flow allows you to build logic into your programs
# Your program can run a block of code based on a given condition
phone_balance = 3
bank_balance = 0
# Top up the phone (and debit the bank) when the balance is low
if phone_balance < 5:
    phone_balance += 10
    bank_balance -= 10
season = ''
# Example of if, elif and else statements
if season == 'spring':
    print('plant the garden!')
elif season == 'summer':
    print('water the garden!')
elif season == 'fall':
    print('harvest the garden!')
elif season == 'winter':
    print('stay indoors!')
else:
    print('unrecognized season')
# Complex Boolean expressions
# use logical operators like and, or, not
weight = 55
height = 164
# NOTE(review): the BMI formula expects height in metres; with height = 164
# (centimetres) this condition is always False — confirm the intended units.
if 18.5 <= weight / height**2 < 25:
    print("BMI is considered 'normal'")
is_raining = True
is_sunny = True
if is_raining and is_sunny:
    print("Is there a rainbow?")
unsubscribed = False
location = 'USA'
if (not unsubscribed) and (location == "USA" or location == "CAN"):
    print("send email")
# Good and bad examples of boolean expressions
# Don't compare against True or False in a condition:
# to check whether a boolean is true, use the boolean itself, not `boolean_variable == True`
# Similarly, to check whether a boolean is false, use `not boolean_variable`
# example:
is_night = True
is_day = False
if is_night:
    print("Good night!")
if not is_day:
    print("Get some more sleep!")
# Truth Value Testing
# If a control flow condition uses a value that is not a boolean expression,
# Python will use its truth value to decide whether to run the code or not.
# Constants defined to be false:
# None, False
# Zero of any numeric type: 0, 0.0, 0j
# Empty sequences and collections: (), [], {}, '', set(), range(0), etc.
# Anything else not listed above has a truth value of True
| e87/ai_nanodegree | intro_to_python/control_flow/control_flow_operators.py | control_flow_operators.py | py | 1,798 | python | en | code | 0 | github-code | 36 |
def isprime(n):
    """Return True if n is prime, else False.

    BUG FIX: the original returned True inside the loop's first iteration
    (`else: return True`), so any odd composite (e.g. 9, 15) was reported
    prime, and n == 2 fell through returning None (falsy). Now trial-divides
    up to sqrt(n) and treats n < 2 as non-prime.
    """
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True
def difference(n):
    """Return the minimum absolute distance from n to a prime.

    Special cases mirror the original behaviour: 0 -> 0, 1 -> 1, and any
    prime n -> 0. Otherwise the nearest prime above and below n are found
    by linear scanning and the smaller gap is returned.
    """
    if n == 0:
        return 0
    if n == 1:
        return 1
    if isprime(n):
        return 0
    # Scan upward for the next prime strictly above n.
    above = n + 1
    while not isprime(above):
        above += 1
    # Scan downward for the closest prime strictly below n.
    below = n - 1
    while not isprime(below):
        below -= 1
    return min(above - n, n - below)
# Read the target number from stdin and report its distance to the nearest prime.
n=int(input())
print(difference(n))
21678597993 |
# You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1.
# Example 1:
# coins = [1, 2, 5], amount = 11
# return 3 (11 = 5 + 5 + 1)
# Example 2:
# coins = [2], amount = 3
# return -1.
# Note:
# You may assume that you have an infinite number of each kind of coin.
class Solution:
    def coinChange(self, coins, amount):
        """
        Bottom-up dynamic programming: dp[v] holds the fewest coins summing
        to v, or -1 when v is unreachable with the given denominations.

        :type coins: List[int]
        :type amount: int
        :rtype: int
        """
        dp = [0] + [-1] * amount
        for value in range(1, amount + 1):
            best = -1
            for coin in coins:
                # Skip coins larger than the target and unreachable remainders.
                if coin > value or dp[value - coin] == -1:
                    continue
                candidate = dp[value - coin] + 1
                if best == -1 or candidate < best:
                    best = candidate
            dp[value] = best
        return dp[amount]
| WangsirCode/leetcode | Python/coin-change.py | coin-change.py | py | 1,029 | python | en | code | 0 | github-code | 36 |
4833395496 | import os
import torch
import genova
from torch import nn, optim
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from .optimal_path_inference import optimal_path_infer
from .seq_generation_inference import seq_generation_infer
class Task:
    """Train / evaluate / infer harness around a Genova model.

    ``cfg.task`` selects the objective ('optimal_path', 'node_classification'
    or 'sequence_generation'); ``cfg.mode`` selects training vs inference.
    Distributed training uses torch.distributed (NCCL backend) with DDP and
    mixed-precision (autocast + GradScaler).
    """
    def __init__(self, cfg, serialized_model_path, distributed=True):
        """Store config and select the compute device.

        In train mode the device comes from LOCAL_RANK (distributed) or
        defaults to 'cuda'; in inference mode cfg.infer.device is either a
        GPU index (int) or anything else, which means CPU.
        """
        self.cfg = cfg
        self.distributed = distributed
        self.serialized_model_path = serialized_model_path
        if cfg.mode == 'train':
            if self.distributed:
                dist.init_process_group(backend='nccl')
                self.local_rank = int(os.environ["LOCAL_RANK"])
                self.device = torch.device("cuda", self.local_rank)
                torch.cuda.set_device(self.local_rank)
            else: self.device = torch.device('cuda')
        else:
            if isinstance(cfg.infer.device, int):
                torch.cuda.set_device(cfg.infer.device)
                self.device = torch.device('cuda:'+str(cfg.infer.device))
            else:
                self.device = torch.device('cpu')
    def initialize(self, *, train_spec_header,train_dataset_dir,val_spec_header,val_dataset_dir):
        """Build model, losses, optimizer and data loaders for training.

        Picks task-specific losses (KLDiv / BCEWithLogits / CrossEntropy),
        wraps the model in DDP when distributed, and resumes model and
        optimizer state from the checkpoint file when it already exists.
        """
        self.model = genova.models.Genova(self.cfg).to(self.device)
        if self.cfg.task == 'optimal_path':
            self.train_loss_fn = nn.KLDivLoss(reduction='batchmean')
            self.eval_loss_fn = nn.KLDivLoss(reduction='sum')
        elif self.cfg.task == 'node_classification':
            self.train_loss_fn = nn.BCEWithLogitsLoss()
            self.eval_loss_fn = nn.BCEWithLogitsLoss(reduction='sum')
        elif self.cfg.task == 'sequence_generation':
            self.train_loss_fn = nn.CrossEntropyLoss()
            self.eval_loss_fn = nn.CrossEntropyLoss(reduction='sum')
        else:
            raise NotImplementedError
        assert self.distributed==dist.is_initialized()
        if self.distributed: self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank)
        self.optimizer = optim.AdamW(self.model.parameters(), lr=self.cfg.train.lr)
        self.scaler = GradScaler()
        # Checkpoint path is derived from the wandb project/run name.
        self.persistent_file_name = os.path.join(self.serialized_model_path,self.cfg.wandb.project+'_'+self.cfg.wandb.name+'.pt')
        if os.path.exists(self.persistent_file_name):
            checkpoint = torch.load(self.persistent_file_name)
            if self.distributed: self.model.module.load_state_dict(checkpoint['model_state_dict'])
            else: self.model.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.train_dl = self.train_loader(train_spec_header,train_dataset_dir)
        self.eval_dl = self.eval_loader(val_spec_header,val_dataset_dir)
    def test_initialize(self, *, test_spec_header=None, test_dataset_dir=None):
        """Load the trained model from its checkpoint and build the test loader.

        Only supported in non-distributed mode; the model is put in eval mode.
        """
        assert not self.distributed
        self.model = genova.models.Genova(self.cfg).to(self.device)
        self.persistent_file_name = os.path.join(self.serialized_model_path,self.cfg.wandb.project+'_'+self.cfg.wandb.name+'.pt')
        print('checkpoint: ', self.persistent_file_name)
        assert os.path.exists(self.persistent_file_name)
        if isinstance(self.cfg.infer.device, int):
            checkpoint = torch.load(self.persistent_file_name)
        else:
            checkpoint = torch.load(self.persistent_file_name,map_location='cpu')
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.eval()
        self.test_dl = self.test_loader(test_spec_header,test_dataset_dir)
        self.test_spec_header = test_spec_header
    def train_loader(self, train_spec_header, train_dataset_dir):
        """Create the (prefetching) training DataLoader over the Genova dataset."""
        ds = genova.data.GenovaDataset(self.cfg,spec_header=train_spec_header,dataset_dir_path=train_dataset_dir)
        # NOTE(review): 0.95 / the [0,128,256,512] buckets are batch-sampler
        # tuning knobs defined by GenovaBatchSampler — confirm there.
        sampler = genova.data.GenovaBatchSampler(self.cfg,self.device,0.95,train_spec_header,[0,128,256,512],self.model)
        collate_fn = genova.data.GenovaCollator(self.cfg)
        if self.distributed:
            train_dl = DataLoader(ds,batch_sampler=sampler,collate_fn=collate_fn,pin_memory=True,num_workers=10)
        else:
            train_dl = DataLoader(ds,batch_sampler=sampler,collate_fn=collate_fn,pin_memory=True)
        train_dl = genova.data.DataPrefetcher(train_dl,self.device)
        return train_dl
    def eval_loader(self,val_spec_header,val_dataset_dir):
        """Create the (prefetching) validation DataLoader."""
        ds = genova.data.GenovaDataset(self.cfg,spec_header=val_spec_header,dataset_dir_path=val_dataset_dir)
        sampler = genova.data.GenovaBatchSampler(self.cfg,self.device,2,val_spec_header,[0,128,256,512],self.model)
        collate_fn = genova.data.GenovaCollator(self.cfg)
        if self.distributed:
            eval_dl = DataLoader(ds,batch_sampler=sampler,collate_fn=collate_fn,pin_memory=True,num_workers=5)
        else:
            eval_dl = DataLoader(ds,batch_sampler=sampler,collate_fn=collate_fn,pin_memory=True)
        eval_dl = genova.data.DataPrefetcher(eval_dl,self.device)
        return eval_dl
    def test_loader(self,test_spec_header,test_dataset_dir):
        """Create the sequential test DataLoader (prefetching only on GPU)."""
        ds = genova.data.GenovaDataset(self.cfg,spec_header=test_spec_header,dataset_dir_path=test_dataset_dir)
        sampler = genova.data.GenovaSequentialSampler(test_spec_header)
        collate_fn = genova.data.GenovaCollator(self.cfg)
        test_dl = DataLoader(ds,batch_sampler=sampler,collate_fn=collate_fn,pin_memory=True)
        if isinstance(self.cfg.infer.device, int):
            test_dl = genova.data.DataPrefetcher(test_dl,self.device)
        return test_dl
    def model_save(self):
        """Persist model and optimizer state (unwrapping DDP when needed)."""
        if self.distributed:
            torch.save({'model_state_dict':self.model.module.state_dict(),
            'optimizer_state_dict':self.optimizer.state_dict()},self.persistent_file_name)
        else:
            torch.save({'model_state_dict':self.model.state_dict(),
            'optimizer_state_dict':self.optimizer.state_dict()},self.persistent_file_name)
    def train(self):
        """Generator over the training run.

        Yields (mean loss over the last detect_period steps, total_step, epoch)
        every cfg.train.detect_period optimizer steps. Node classification
        batches unpack differently from the decoder-based tasks, hence the
        two near-identical loops.
        """
        total_step = 0
        loss_cum = 0
        if self.cfg.task =='node_classification':
            for epoch in range(0, self.cfg.train.total_epoch):
                for encoder_input, label, label_mask in self.train_dl:
                    total_step += 1
                    # Reset the running loss at the start of each report window.
                    if total_step%self.cfg.train.detect_period == 1: loss_cum = 0
                    self.optimizer.zero_grad()
                    with autocast():
                        output = self.model(encoder_input=encoder_input).squeeze(-1)
                        loss = self.train_loss_fn(output[label_mask],label[label_mask])
                        loss_cum += loss.item()
                    self.scaler.scale(loss).backward()
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                    if total_step%self.cfg.train.detect_period == 0: yield loss_cum/self.cfg.train.detect_period, total_step, epoch
        else:
            for epoch in range(0, self.cfg.train.total_epoch):
                for encoder_input, decoder_input, tgt, label, label_mask, _ in self.train_dl:
                    total_step += 1
                    if total_step%self.cfg.train.detect_period == 1: loss_cum = 0
                    self.optimizer.zero_grad()
                    with autocast():
                        output = self.model(encoder_input=encoder_input, decoder_input=decoder_input, tgt=tgt)
                        # KLDivLoss expects log-probabilities for optimal_path.
                        if self.cfg.task == 'optimal_path': output = output.log_softmax(-1)
                        loss = self.train_loss_fn(output[label_mask],label[label_mask])
                        loss_cum += loss.item()
                    self.scaler.scale(loss).backward()
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                    if total_step%self.cfg.train.detect_period == 0: yield loss_cum/self.cfg.train.detect_period, total_step, epoch
    def eval(self) -> float:
        """Evaluate on the validation loader.

        Returns per-token mean loss; for node classification additionally
        accuracy, recall and precision. All accumulators are reduced across
        ranks when distributed.
        NOTE(review): the 0.5 threshold here is applied to raw model outputs
        while training uses BCEWithLogitsLoss (logits) — confirm whether the
        model output is already a probability at eval time.
        """
        loss_cum = torch.Tensor([0]).to(self.device)
        total_seq_len = torch.Tensor([0]).to(self.device)
        if self.cfg.task =='node_classification':
            total_match = torch.Tensor([0]).to(self.device)
            true_positive = torch.Tensor([0]).to(self.device)
            total_positive = torch.Tensor([0]).to(self.device)
            total_true = torch.Tensor([0]).to(self.device)
            for encoder_input, label, label_mask in self.eval_dl:
                with torch.no_grad():
                    with autocast():
                        output = self.model(encoder_input=encoder_input)
                        output = output[label_mask].squeeze(-1)
                        label = label[label_mask]
                        loss = self.eval_loss_fn(output,label)
                        output = (output>0.5).float()
                loss_cum += loss
                total_seq_len += label_mask.sum()
                total_match += (output == label).sum()
                true_positive += ((output == label)[label == 1]).sum()
                total_positive += (label == 1).sum()
                total_true += (output == 1).sum()
            if self.distributed:
                dist.barrier()
                dist.all_reduce(loss_cum)
                dist.all_reduce(total_seq_len)
                dist.all_reduce(total_match)
                dist.all_reduce(true_positive)
                dist.all_reduce(total_positive)
                dist.all_reduce(total_true)
            # (mean loss, accuracy, recall, precision)
            return (loss_cum/total_seq_len).item(), \
                (total_match/total_seq_len).item(), \
                (true_positive/total_positive).item(), \
                (true_positive/total_true).item()
        else:
            for encoder_input, decoder_input, tgt, label, label_mask, _ in self.eval_dl:
                with torch.no_grad():
                    with autocast():
                        output = self.model(encoder_input=encoder_input, decoder_input=decoder_input, tgt=tgt)
                        if self.cfg.task == 'optimal_path': output = output.log_softmax(-1)
                        loss = self.eval_loss_fn(output[label_mask],label[label_mask])
                loss_cum += loss
                total_seq_len += label_mask.sum()
            if self.distributed:
                dist.barrier()
                dist.all_reduce(loss_cum)
                dist.all_reduce(total_seq_len)
            return (loss_cum/total_seq_len).item()
    def inference(self) -> float:
        """Run task-specific inference over the test loader."""
        if self.cfg.task == 'optimal_path':
            optimal_path_infer(self.cfg, self.test_spec_header, self.test_dl, self.model, self.device)
        elif self.cfg.task == 'sequence_generation':
            seq_generation_infer(self.cfg, self.test_spec_header, self.test_dl, self.model, self.device)
| AmadeusloveIris/GraphNovo | genova/task/task.py | task.py | py | 10,916 | python | en | code | 6 | github-code | 36 |
506538640 | """
Projections in SQL
"""
def sql_proj(dbname, tbl, otbl, oepsg, cols=None, geomCol=None,
             newGeom=None, whr=None, new_pk=None):
    """
    Reproject geometric layer to another spatial reference system (srs)
    """

    from glass.pys import obj_to_lst
    from glass.sql.q import q_to_ntbl

    # Fall back to the conventional geometry column names.
    if not geomCol:
        geomCol = 'geom'
    if not newGeom:
        newGeom = 'geom'

    if not cols:
        # No explicit column list: select every column except the geometry one.
        from glass.prop.sql import cols_name

        cols = cols_name(dbname, tbl)
        cols.remove(geomCol)
    else:
        cols = obj_to_lst(cols)
        if geomCol in cols and geomCol == newGeom:
            # Keep the original geometry under an aliased name so it is not
            # shadowed by the reprojected column.
            cols.remove(geomCol)
            cols.append('{c} AS old_{c}'.format(c=geomCol))

    where_clause = "" if not whr else " WHERE {}".format(whr)
    Q = (
        "SELECT {}, ST_Transform({}, {}) AS {} "
        "FROM {}{}"
    ).format(", ".join(cols), geomCol, str(oepsg), newGeom, tbl, where_clause)

    otbl = q_to_ntbl(dbname, otbl, Q, api='psql')

    if new_pk:
        from glass.sql.k import create_pk

        create_pk(dbname, otbl, new_pk)

    return otbl
| jasp382/glass | glass/prj/sql.py | sql.py | py | 1,106 | python | en | code | 2 | github-code | 36 |
# Session key: administrator login flag
SESSION_LOGIN = "SESSION_LOGIN"
# Session key: URL of the user's avatar image
SESSION_HEAD_PORTRAIT = "SESSION_HEAD_PORTRAIT"
# Session key: login account id
SESSION_USER_ID = 'session_user_id'
# Session key: whether the user is a developer
SESSION_IS_DEVELOPER = "SESSION_IS_DEVELOPER"
# Session key: user type
SESSION_USER_TYPE = "SESSION_USER_TYPE"
# Session key: redirect URI used after login
SESSION_REDIRECT_URI = "SESSION_REDIRECT_URI"
# Session key: login attempt count (a captcha is required after one failure)
SESSION_LOGIN_TIME = "SESSION_LOGIN_TIME"
# Session key: captcha value
SESSION_LOGIN_VALIDATE = "SESSION_LOGIN_VALIDATE"
# Session key: third-party login flag (includes device-management vendor accounts)
SESSION_LOGIN_THIRD = "SESSION_LOGIN_THIRD"
# Session key: registration success flag
SESSION_REGISTER_SUCCESS = "SESSION_REGISTER_SUCCESS"
#################################################################
# Cookie key for the remembered login account
COOKIE_USER_ACCOUNT = "COOKIE_USER_ACCOUNT"
# Cookie key for the auto-login token
AUTO_LOGIN = "token"
| 292887172/open | conf/sessionconf.py | sessionconf.py | py | 930 | python | zh | code | 0 | github-code | 36 |
# coding=utf-8
# Madlib template; the numbered placeholders are filled with the user's
# eight answers collected below.
madlib_template = """I enjoy long, {0} walks on the beach, getting {1} in the rain and serendipitous encounters with {2}.
I really like pina coladas mixed with {3}, and romantic, candle-lit {4}.
I am looking for {5} and beauty in the form of a {6} goddess.
I would prefer if she knew how to cook, clean, and wash my {7}."""
def GetWord(prompt, completion):
    """Prompt the user for one word (Python 2 raw_input) and echo progress.

    completion is this question's 1-based index out of the 8 total prompts.
    """
    answer = raw_input(prompt)
    percent = completion / 8.0 * 100.0
    print("You are {}% complete!".format(percent))
    return answer
# Collect the eight words from the user, then render the filled-in madlib.
adjective = GetWord("Adjective: ", 1)
verb = GetWord("Verb Ending in 'ed': ", 2)
pluralNoun1 = GetWord("Plural Noun: ", 3)
liquid = GetWord("Liquid: ", 4)
pluralNoun2 = GetWord("Plural Noun: ", 5)
noun = GetWord("Noun: ", 6)
nationality = GetWord("Nationality: ", 7)
pluralNoun3 = GetWord("Plural Noun: ", 8)
print(madlib_template.format(adjective, verb, pluralNoun1, liquid, pluralNoun2, noun, nationality, pluralNoun3))
| DavidColson/SimuLab-Lessons | Intro3_RockPaperScissors/MadlibFunctions.py | MadlibFunctions.py | py | 899 | python | en | code | 0 | github-code | 36 |
6320198920 | import pathlib2
import os
import wx
from option_loader import OptHandle
class EditFrame(wx.Frame):
    """wx frame listing every option/hotkey pair with a 'Change Hotkey' button."""
    def __init__(self, opt_instance:OptHandle):
        """Build the frame around the given options handle and lay out the panel."""
        super().__init__(parent = None, title="Edit Panel")
        self.opt_handle = opt_instance
        self.setup_panel()
        self.SetSize(0, 0, 500, 750)
        self.Center()
    def setup_panel(self):
        """Populate the panel: one grid row per entry of opt_handle.dict."""
        panel = wx.Panel(self)
        panel.BackgroundColour = wx.Colour(50, 100, 255)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizer(sizer)
        # NOTE(review): FlexGridSizer(rows, cols, gap) is called with rows=2 and
        # cols=len(dict); with one Add pair per dict entry this looks transposed
        # (rows=len(dict), cols=2 seems intended) — confirm the layout.
        flex_grid = wx.FlexGridSizer(2, len(self.opt_handle.dict), 2)
        sizer.Add(flex_grid, wx.SizerFlags().Center().Expand())
        for key_value_pair in self.opt_handle.dict.items():
            key, value = key_value_pair
            flex_grid.Add(wx.StaticText(panel, label = str(key)), wx.SizerFlags().Center())
            tmp_sizer = wx.BoxSizer()
            tmp_sizer.Add(wx.StaticText(panel, label = str(value)), wx.SizerFlags().Center())
            # Button currently has no bound handler — presumably wired up later.
            button = wx.Button(panel, label = "Change Hotkey")
            tmp_sizer.Add(button)
            flex_grid.Add(tmp_sizer, wx.SizerFlags().Right())
            """
            row_sizer = wx.BoxSizer()
            sizer.Add(row_sizer, wx.SizerFlags().Expand())
            left_sizer = wx.BoxSizer()
            left_label = wx.StaticText(panel, label = str(key))
            left_sizer.Add(left_label)
            row_sizer.Add(left_sizer, flags = wx.SizerFlags().Border(wx.LEFT, 20).Center())
            right_sizer = wx.BoxSizer()
            right_label = wx.StaticText(panel, label = str(value))
            right_button = wx.Button(panel, label = "Change Key")
            right_sizer.Add(right_label, flags = wx.SizerFlags())
            right_sizer.Add(right_button, flags = wx.SizerFlags())
            row_sizer.Add(right_sizer, flags = wx.SizerFlags().Right())
            """
if __name__ == "__main__":
app = wx.App()
frame = EditFrame(OptHandle())
frame.Show()
app.MainLoop()
| Nickiel12/Church-Programs | WX-StreamController/edit_gui.py | edit_gui.py | py | 2,034 | python | en | code | 0 | github-code | 36 |
6044172734 | from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
from models import Entry
# URL routes for the blog app. Uses the pre-Django-1.8 `patterns()` syntax.
# Order matters: the catch-all entry-detail route must stay last.
urlpatterns = patterns(
    '',
    url(r'^(?P<slug>\S+)/copy/$', views.copy_blog, name='blog_copy'),
    url(r'^folders/info/', views.folderview, name='folder_view'),
    url(r'^folders/$', views.TagList.as_view(), name='folders'),
    url(r'^new-folder/$', views.TagCreate.as_view(), name='tag_new'),
    url(r'^(?P<slug>\S+)/edit/$', views.BlogUpdate.as_view(), name='blog_edit'),
    url(r'^new-user/$', views.UserCreate.as_view(), name='user_new'),
    url(r'^new-post/$', views.EntryCreate.as_view(), name='entry_new'),
    url(r'^(?P<slug>\S+)/delete/$', views.EntryDelete.as_view(), name='blog_delete'),
    url(r'^tag-delete/(?P<slug>\S+)$', views.TagDelete.as_view(), name='tag_delete'),
    url(r'^tag-edit/(?P<slug>\S+)$', views.TagUpdate.as_view(), name='tag_edit'),
    url(r'^search-form/$', views.search_form, name='search_form'),
    url(r'^search/$', views.search, name='search'),
    # Auth views reuse Django's built-ins with custom templates / redirects.
    url(r'^login$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='user_login'),
    url(r'^logout$', 'django.contrib.auth.views.logout_then_login', {'login_url': reverse_lazy('user_login')}, name='user_logout'),
    url(r'^$', views.BlogIndex.as_view(), name="index"),
    # Index orderings; note 'date_ascd' reuses the 'index' name (overrides reverse()).
    url(r'^order_by_title/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('title')), name='by_title'),
    url(r'^order_by_slug/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('slug')), name='by_slug'),
    url(r'^order_by_body/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('body')), name='by_body'),
    url(r'^date_desc/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('created')), name='by_date_desc'),
    url(r'^date_ascd/$', views.BlogIndex.as_view(queryset=Entry.objects.order_by('-created')), name='index'),
    url(r'^(?P<slug>\S+)/decrypt_form/$', views.decrypt_form, name="decrypt_form"),
    # NOTE(review): this pattern begins with '/' and lacks a '^' anchor, which
    # looks unintentional — confirm the expected decrypt URL.
    url(r'/decrypt/$', views.decrypt, name="decrypt"),
    url(r'^(?P<slug>\S+)$', views.BlogDetail.as_view(), name="entry_detail"),
)
| lowmanb/cs3240-f14-team01 | blog/urls.py | urls.py | py | 2,245 | python | en | code | 0 | github-code | 36 |
28299898397 | from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
import random
import matplotlib.pyplot as plt
from PIL import Image
import torch
from torchvision.models import resnet18, ResNet18_Weights
import torch.nn as nn
import numpy as np
class Mydata(Dataset):
    """Image dataset backed by parallel lists of file paths and labels."""

    def __init__(self, data_path, transform, label):
        super(Mydata, self).__init__()
        self.transform = transform
        self.label = label
        self.data_path = data_path

    def __len__(self):
        return len(self.data_path)

    def __getitem__(self, idx):
        # Read the file from disk and wrap it as PIL so torchvision
        # transforms (Resize/ToTensor/Normalize) can be applied.
        pil_img = Image.fromarray(plt.imread(self.data_path[idx]))
        return self.transform(pil_img), self.label[idx]
def visualize(labels, data_path):
    """Show a 4x4 grid of randomly sampled images titled Cat/Dog."""
    class_names = {0: 'Cat', 1: 'Dog'}
    rows, cols = 4, 4
    fig = plt.figure(figsize=(9, 9))
    for cell in range(1, rows * cols + 1):
        idx = random.randint(0, len(data_path) - 1)
        fig.add_subplot(rows, cols, cell)
        plt.imshow(plt.imread(data_path[idx]))
        plt.title(class_names[labels[idx]])
        plt.axis(False)
    plt.show()
def get_model():
    """ResNet-18 with a frozen ImageNet backbone and a 1-unit sigmoid head."""
    net = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
    net.fc = nn.Sequential(
        nn.Linear(512, 1),
        nn.Sigmoid()
    )
    # Only the replacement classification head stays trainable.
    for name, param in net.named_parameters():
        param.requires_grad = 'fc' in name
    return net
def get_data():
    """Split ./train into 80/20 train/val path lists with labels (cat=0, dog=1)."""
    all_paths = [os.path.join("train", name) for name in os.listdir("train")]
    cat_paths = [p for p in all_paths if 'cat' in p]
    dog_paths = [p for p in all_paths if 'dog' in p]
    cat_split = int(len(cat_paths) * 0.8)
    dog_split = int(len(dog_paths) * 0.8)
    train_paths = cat_paths[:cat_split] + dog_paths[:dog_split]
    val_paths = cat_paths[cat_split:] + dog_paths[dog_split:]
    # Shuffle so cats and dogs are interleaved within each split.
    random.shuffle(train_paths)
    random.shuffle(val_paths)
    train_labels = [0 if 'cat' in p else 1 for p in train_paths]
    val_labels = [0 if 'cat' in p else 1 for p in val_paths]
    return train_paths, val_paths, train_labels, val_labels
def get_dataloader():
    """Build train/val DataLoaders with ImageNet normalisation at 224x224."""
    train_paths, val_paths, train_labels, val_labels = get_data()
    preprocess = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_ds = Mydata(train_paths, transform=preprocess, label=train_labels)
    val_ds = Mydata(val_paths, transform=preprocess, label=val_labels)
    train_loader = DataLoader(train_ds, shuffle=True, batch_size=16, num_workers=2)
    val_loader = DataLoader(val_ds, shuffle=True, batch_size=16, num_workers=2)
    return train_loader, val_loader
def train_step(model,
               dataloader,
               loss_fn,
               optimizer,
               device):
    """Run one training epoch.

    :param model: network whose (sigmoid) output is squeezed to shape (N,).
    :param dataloader: yields (X, y) batches; y holds 0/1 labels.
    :param loss_fn: binary loss applied to (prediction, y.float()).
    :param optimizer: optimizer stepped once per batch.
    :param device: device the batches and model live on.
    :return: (mean batch loss, mean batch accuracy) over the epoch.
    """
    model.train()
    train_loss, train_acc = 0, 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        y_pred = model(X).squeeze(-1)
        loss = loss_fn(y_pred, y.float())
        train_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Vectorized 0.5 threshold (replaces a per-element Python loop that
        # rebuilt a tensor and moved it to the device on every batch);
        # x >= 0.5 matches the original "0 if x < 0.5 else 1" exactly.
        y_pred_class = (y_pred >= 0.5).float()
        train_acc += (y_pred_class == y).sum().item() / len(y_pred)
        if batch % 100 == 0:
            print(y_pred.squeeze(-1), y)
            print()
            print(
                "Batch {} Loss {:.4f} Train Acc {}".format(batch, loss, (y_pred_class == y).sum().item() / len(y_pred)))
    train_loss = train_loss / len(dataloader)
    train_acc = train_acc / len(dataloader)
    return train_loss, train_acc
def val_step(model,
             dataloader,
             loss_fn: torch.nn.Module,
             device: torch.device):
    """Evaluate the model over one pass of the dataloader.

    Runs in eval mode under torch.no_grad().

    :return: (mean batch loss, mean batch accuracy).
    """
    model.eval()
    test_loss, test_acc = 0, 0
    with torch.no_grad():
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(device), y.to(device)
            test_pred_logits = model(X).squeeze(-1)
            loss = loss_fn(test_pred_logits, y.float())
            test_loss += loss.item()
            # Vectorized 0.5 threshold (replaces a per-element Python loop);
            # x >= 0.5 matches the original "0 if x < 0.5 else 1" exactly.
            test_pred_labels = (test_pred_logits >= 0.5).float()
            test_acc += ((test_pred_labels == y).sum().item() / len(test_pred_labels))
    test_loss = test_loss / len(dataloader)
    test_acc = test_acc / len(dataloader)
    return test_loss, test_acc
if __name__ == '__main__':
    # Config
    model = get_model()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    num_epoches = 10
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=3e-5)
    # BCELoss matches the sigmoid output of get_model()'s head.
    loss = nn.BCELoss()
    model = model.to(device)
    # Data
    train_loader, val_loader = get_dataloader()
    # Per-epoch metric histories, saved to .npy files at the end.
    train_losses = []
    train_accs = []
    test_losses = []
    test_accs = []
    for epoch in range(num_epoches):
        train_loss, train_acc = train_step(model, train_loader, loss, optimizer, device)
        print("Epoch {} Train_loss {} Train_acc {}".format(epoch, train_loss, train_acc))
        train_losses.append(train_loss)
        train_accs.append(train_acc)
        test_loss, test_acc = val_step(model, val_loader, loss, device)
        print("Epoch {} Test_loss {} Test_acc {}".format(epoch, test_loss, test_acc))
        test_losses.append(test_loss)
        test_accs.append(test_acc)
        print("Done Epoch {}".format(epoch))
        # print("________________________________________________________________")
    # NOTE(review): assumes the Weight/ and Accuracy/ directories already exist.
    torch.save(model.state_dict(), "Weight/model.pt")
    np.save('Accuracy/train_losses.npy', np.array(train_losses))
    np.save('Accuracy/train_accs.npy', np.array(train_accs))
    np.save('Accuracy/test_losses.npy', np.array(test_losses))
    np.save('Accuracy/test_accs.npy', np.array(test_accs))
| kienptitit/Dog_Cat_Classification | train.py | train.py | py | 6,096 | python | en | code | 0 | github-code | 36 |
36837923429 | from __future__ import annotations
from dataclasses import dataclass
import bson.json_util as json
__all__ = ['Node', 'build_execution_tree']
@dataclass
class Node:
    """A single stage node of an SBE execution tree."""

    stage: str
    plan_node_id: int
    total_execution_time: int
    n_returned: int
    n_processed: int
    children: list[Node]

    def get_execution_time(self):
        """Execution time spent in this node alone, excluding its children."""
        children_time = sum(child.total_execution_time for child in self.children)
        return self.total_execution_time - children_time

    def print(self, level=0):
        """Recursively pretty-print this subtree, indenting by depth."""
        print(
            f'{"| "*level}{self.stage}, plaNodeId: {self.plan_node_id}, totalExecutionTime: {self.total_execution_time:,}, nReturned: {self.n_returned}, nProcessed: {self.n_processed}'
        )
        for child in self.children:
            child.print(level + 1)
def build_execution_tree(execution_stats: dict[str, any]) -> Node:
    """Build the SBE execution tree from the 'executionStats' section of an explain."""
    # Refuse to parse a failed execution.
    assert execution_stats['executionSuccess']
    return process_stage(execution_stats['executionStages'])
def process_stage(stage: dict[str, any]) -> Node:
    """Parse the given SBE stage by dispatching on its stage name."""
    dispatch = {
        'filter': process_filter,
        'cfilter': process_filter,
        'traverse': process_traverse,
        'project': process_inner_node,
        'limit': process_inner_node,
        'scan': process_seek,
        'coscan': process_leaf_node,
        'nlj': process_nlj,
        'hj': process_hash_join_node,
        'mj': process_hash_join_node,
        'seek': process_seek,
        'ixseek': process_seek,
        'limitskip': process_inner_node,
        'group': process_inner_node,
        'union': process_union_node,
        'unique': process_unique_node,
        'unwind': process_unwind_node,
    }
    name = stage['stage']
    if name not in dispatch:
        # Dump the unrecognised stage to aid debugging before failing.
        print(json.dumps(stage, indent=4))
        raise ValueError(f'Unknown stage: {stage}')
    return dispatch[name](stage)
def process_filter(stage: dict[str, any]) -> Node:
    """Process filter/cfilter stages; 'numTested' counts rows examined."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=stage['numTested'], children=[child])
def process_traverse(stage: dict[str, any]) -> Node:
    """Process traverse, not used by Bonsai."""
    outer = process_stage(stage['outerStage'])
    inner = process_stage(stage['innerStage'])
    return Node(**get_common_fields(stage), n_processed=stage['nReturned'],
                children=[outer, inner])
def process_hash_join_node(stage: dict[str, any]) -> Node:
    """Process hj/mj nodes; rows processed = rows fed in from both sides."""
    outer = process_stage(stage['outerStage'])
    inner = process_stage(stage['innerStage'])
    return Node(**get_common_fields(stage),
                n_processed=outer.n_returned + inner.n_returned,
                children=[outer, inner])
def process_nlj(stage: dict[str, any]) -> Node:
    """Process nlj stage; 'totalDocsExamined' counts rows processed."""
    outer = process_stage(stage['outerStage'])
    inner = process_stage(stage['innerStage'])
    return Node(**get_common_fields(stage), n_processed=stage['totalDocsExamined'],
                children=[outer, inner])
def process_inner_node(stage: dict[str, any]) -> Node:
    """Process an SBE stage with exactly one input stage."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=child.n_returned,
                children=[child])
def process_leaf_node(stage: dict[str, any]) -> Node:
    """Process an SBE stage with no input stages."""
    common = get_common_fields(stage)
    return Node(**common, n_processed=stage['nReturned'], children=[])
def process_seek(stage: dict[str, any]) -> Node:
    """Process scan/seek/ixseek stages; 'numReads' counts rows processed."""
    common = get_common_fields(stage)
    return Node(**common, n_processed=stage['numReads'], children=[])
def process_union_node(stage: dict[str, any]) -> Node:
    """Process a union stage: one child per input branch."""
    branches = [process_stage(branch) for branch in stage['inputStages']]
    return Node(**get_common_fields(stage), n_processed=stage['nReturned'], children=branches)
def process_unwind_node(stage: dict[str, any]) -> Node:
    """Process an unwind stage."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=child.n_returned,
                children=[child])
def process_unique_node(stage: dict[str, any]) -> Node:
    """Process a unique stage; 'dupsTested' counts rows examined for dedup."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=stage['dupsTested'], children=[child])
def get_common_fields(json_stage: dict[str, any]) -> dict[str, any]:
    """Extract the Node constructor fields shared by every SBE stage's JSON."""
    return {
        'stage': json_stage['stage'],
        'plan_node_id': json_stage['planNodeId'],
        'total_execution_time': json_stage['executionTimeNanos'],
        'n_returned': json_stage['nReturned'],
    }
| mongodb/mongo | buildscripts/cost_model/execution_tree.py | execution_tree.py | py | 5,185 | python | en | code | 24,670 | github-code | 36 |
import json
from pprint import pprint

# Sum the 'amount' field across all purchase records in rpPurchases.json
# and print the total. The original built an intermediate list of
# (amount, paymentType) tuples whose paymentType was never used; a single
# generator-based sum produces the same printed result.
with open('rpPurchases.json') as f:
    data = json.load(f)

rp = sum(purchase["amount"] for purchase in data)
print(rp)
| kickass9797/Testing | rand/omg.py | omg.py | py | 239 | python | en | code | 0 | github-code | 36 |
22353446265 | from typing import List, Tuple, Union
import lightgbm as lgb
import numpy as np
import pandas as pd
import mlrun.errors
from .._ml_common import AlgorithmFunctionality, MLTypes, MLUtils
class LGBMTypes(MLTypes):
    """
    Typing hints for the LightGBM framework.
    """

    # A union of all LightGBM model base classes:
    ModelType = Union[lgb.LGBMModel, lgb.Booster]

    # A type for all the supported dataset types (the ML-common dataset
    # union extended with LightGBM's own Dataset):
    DatasetType = Union[MLTypes.DatasetType, lgb.Dataset]

    # An evaluation result as packaged by the training in LightGBM:
    EvaluationResultType = Union[
        Tuple[str, str, float, bool],  # As packaged in `lightgbm.train`
        Tuple[str, str, float, bool, float],  # As packaged in `lightgbm.cv`
    ]

    # Detailed type for the named tuple `CallbackEnv` passed during LightGBM's training for the callbacks:
    CallbackEnvType = Tuple[
        lgb.Booster, dict, int, int, int, List[EvaluationResultType]
    ]
class LGBMUtils(MLUtils):
    """
    Utilities functions for the LightGBM framework.
    """

    @staticmethod
    def to_array(dataset: LGBMTypes.DatasetType) -> np.ndarray:
        """
        Convert the given dataset to np.ndarray.

        :param dataset: The dataset to convert. Must be one of {lgb.Dataset, pd.DataFrame, pd.Series,
                        scipy.sparse.base.spmatrix, list, tuple, dict}.

        :return: The dataset as a ndarray.

        :raise MLRunInvalidArgumentError: If the dataset type is not supported.
        """
        if isinstance(dataset, lgb.Dataset):
            # Unpack the lgb.Dataset into x (and y, when the dataset is labeled)
            # and concatenate them into a single array:
            x = LGBMUtils.to_array(dataset=dataset.data)
            if dataset.label is None:
                return x
            y = LGBMUtils.to_array(dataset=dataset.label)
            return LGBMUtils.to_array(LGBMUtils.concatenate_x_y(x=x, y=y)[0])
        try:
            return MLUtils.to_array(dataset=dataset)
        except mlrun.errors.MLRunInvalidArgumentError:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Could not convert the given dataset into a numpy ndarray. Supporting conversion from: "
                f"{LGBMUtils.get_union_typehint_string(LGBMTypes.DatasetType)}. The given dataset was of type: "
                f"'{type(dataset)}'"
            )

    @staticmethod
    def to_dataframe(dataset: LGBMTypes.DatasetType) -> pd.DataFrame:
        """
        Convert the given dataset to pd.DataFrame.

        :param dataset: The dataset to convert. Must be one of {lgb.Dataset, np.ndarray, pd.Series,
                        scipy.sparse.base.spmatrix, list, tuple, dict}.

        :return: The dataset as a DataFrame.

        :raise MLRunInvalidArgumentError: If the dataset type is not supported.
        """
        if isinstance(dataset, lgb.Dataset):
            # Unpack the lgb.Dataset into x (and y, when labeled) and merge them:
            x = LGBMUtils.to_dataframe(dataset=dataset.data)
            if dataset.label is None:
                return x
            y = LGBMUtils.to_dataframe(dataset=dataset.label)
            return LGBMUtils.concatenate_x_y(x=x, y=y)[0]
        try:
            return MLUtils.to_dataframe(dataset=dataset)
        except mlrun.errors.MLRunInvalidArgumentError:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Could not convert the given dataset into a pandas DataFrame. Supporting conversion from: "
                f"{LGBMUtils.get_union_typehint_string(LGBMTypes.DatasetType)}. The given dataset was of type: "
                f"'{type(dataset)}'"
            )

    @staticmethod
    def get_algorithm_functionality(
        model: MLTypes.ModelType = None,
        y: MLTypes.DatasetType = None,
        objective: str = None,
    ) -> AlgorithmFunctionality:
        """
        Get the algorithm functionality of the LightGBM model. If SciKit-Learn API is used, pass the LGBBMModel and a y
        sample. Otherwise, training API is used, so pass the objective of the params dictionary.

        The objectives here are taken from the official docs of LightGBBM at:
        https://lightgbm.readthedocs.io/en/latest/Parameters.html#core-parameters

        :param model:     The model to check if its a regression model or a classification model (SciKit-Learn API).
        :param y:         The ground truth values to check if its multiclass and / or multi output (SciKit-Learn API).
        :param objective: The objective string (Training API).

        :return: The objective's algorithm functionality (AlgorithmFunctionality.UNKNOWN for an
                 unrecognized objective).
        """
        # Check if LightGBM is being used with SciKit-Learn API:
        if objective is None:
            return super().get_algorithm_functionality(model=model, y=y)
        # Declare the conversion map according to the LightGBM docs:
        objective_to_algorithm_functionality_map = {
            # regression application:
            "regression": AlgorithmFunctionality.REGRESSION,
            "regression_l2": AlgorithmFunctionality.REGRESSION,
            "l2": AlgorithmFunctionality.REGRESSION,
            "mean_squared_error": AlgorithmFunctionality.REGRESSION,
            "mse": AlgorithmFunctionality.REGRESSION,
            "l2_root": AlgorithmFunctionality.REGRESSION,
            "root_mean_squared_error": AlgorithmFunctionality.REGRESSION,
            "rmse": AlgorithmFunctionality.REGRESSION,
            "regression_l1": AlgorithmFunctionality.REGRESSION,
            "l1": AlgorithmFunctionality.REGRESSION,
            "mean_absolute_error": AlgorithmFunctionality.REGRESSION,
            "mae": AlgorithmFunctionality.REGRESSION,
            "huber": AlgorithmFunctionality.REGRESSION,
            "fair": AlgorithmFunctionality.REGRESSION,
            "poisson": AlgorithmFunctionality.REGRESSION,
            "quantile": AlgorithmFunctionality.REGRESSION,
            "mape": AlgorithmFunctionality.REGRESSION,
            "mean_absolute_percentage_error": AlgorithmFunctionality.REGRESSION,
            "gamma": AlgorithmFunctionality.REGRESSION,
            "tweedie": AlgorithmFunctionality.REGRESSION,
            # binary classification application:
            "binary": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            # multi-class classification application:
            "multiclass": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "softmax": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "multiclassova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "multiclass_ova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "ova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "ovr": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            # cross-entropy application
            "cross_entropy": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "xentropy": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "cross_entropy_lambda": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "xentlambda": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            # ranking application
            "lambdarank": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "rank_xendcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xendcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xe_ndcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xe_ndcg_mart": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xendcg_mart": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
        }
        # Return unknown if the objective is not in the map and otherwise return its functionality:
        if objective not in objective_to_algorithm_functionality_map:
            # BUG FIX: this previously `raise`d the enum member, which is not a
            # BaseException (TypeError at runtime) and contradicted the comment
            # above.  Return the UNKNOWN functionality instead.
            return AlgorithmFunctionality.UNKNOWN
        return objective_to_algorithm_functionality_map[objective]
| mlrun/mlrun | mlrun/frameworks/lgbm/utils.py | utils.py | py | 7,707 | python | en | code | 1,129 | github-code | 36 |
36081264181 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
a = 2  # parameter 'a' of the model a - b*x - exp(-x); shared by all functions below
#b = 1
def plotter(b):
    """Plot a - b*x against exp(-x) up to their intersection and save fig/SIR_b_<b>.pdf.

    The intersection (a - b*x = exp(-x)) is located numerically with fsolve,
    starting from the guess x = a/b.  Uses the module-level parameter `a`.
    """
    func = lambda x : a - b*x - np.exp(-x)
    guess = a/b
    # fsolve returns a length-1 array; np.arange accepts it as a stop value.
    max_x = fsolve(func, guess)
    x = np.arange(0.0, max_x*1.05, 0.01)
    y1 = a - b*x
    y2 = np.exp(-x)
    y3 = y1 - y2
    null = 0*x  # zero baseline for the dashed reference line
    plt.figure()
    plt.fill_between(x, y1, y2)
    plt.plot(x, y3, color="yellow", label="difference")
    plt.plot(x, null, "--b")
    plt.title('b = ' + str(b))
    plt.legend(loc="best")
    plt.xlabel('time')
    plt.ylabel('P(t)')
    plt.savefig("fig/SIR_b_" + str(b) + ".pdf")
def stability_plotter(b):
    """Add the curve du/dtau = a - b*u - exp(-u) to the *current* figure.

    Unlike plotter(), no new figure is created, so several calls overlay
    their curves for the combined stability plot.
    """
    func = lambda x : a - b*x - np.exp(-x)
    guess = a/b
    max_x = fsolve(func, guess)
    x = np.arange(0.0, max_x*1.2, 0.01)
    y = a - b*x - np.exp(-x)
    null = 0*x  # zero baseline marking equilibria (y crosses 0)
    # plt.figure()
    # plt.fill_between(x, y1, y2)
    plt.plot(x, y, label='b = ' + str(b))
    plt.plot(x, null, "--b")
# Per-b phase plots, one figure (and PDF) each.
b_list = [0.2, 0.5, 0.8, 1, 1.2, 1.5, 2] #, 5, 10]
for b in b_list:
    plotter(b)
# Combined stability plot: all du/dtau curves overlaid on one figure.
plt.figure()
for b in b_list:
    stability_plotter(b)
plt.title("Stability")
plt.legend(loc="best")
plt.xlabel('$u(\\tau)$')
plt.ylabel('$du/d\\tau$')
# plt.xscale("log")
plt.savefig("fig/SIR_stability.pdf")
# plot u* as func of b
u_list = []
b_max = 2
b_list = np.arange(0.01, b_max, 0.01)
for b in b_list:
    # Equilibrium u* solves a - b*u - exp(-u) = 0; start fsolve from u = a/b.
    func = lambda x : a - b*x - np.exp(-x)
    guess = a/b
    u_eq = fsolve(func, guess)
    u_list.append(u_eq)
plt.figure()
plt.plot(b_list, u_list)
plt.title("equilibrium of u, parametrized by 'b'")
plt.legend(loc="best")
plt.xlabel('$b$')
plt.ylabel('$u*$')
# plt.xscale("log")
plt.savefig("fig/SIR_equilibrium_u_b.pdf")
| chrberrig/SEIR_age_diff | programming/sir_dynsys.py | sir_dynsys.py | py | 1,555 | python | en | code | 0 | github-code | 36 |
27097993038 | import logging
class CustomFormatter(logging.Formatter):
    """Logging formatter that colour-codes records by severity.

    DEBUG, WARNING, ERROR and CRITICAL use the verbose layout that appends
    the source file and line number; INFO uses the shorter layout.
    """
    # ANSI escape codes.  NOTE(review): `red` is actually the magenta code
    # (35) and `bold_red` plain red (31); names kept for compatibility.
    cyan = "\u001b[36m"
    green = "\u001b[32m"
    yellow = "\u001b[33m"
    red = "\u001b[35m"
    bold_red = "\u001b[31m"
    reset = "\u001b[0m"
    debug_format = "%(asctime)s - %(name)s - {colour}%(levelname)s - %(message)s\u001b[0m (%(filename)s:%(lineno)d)"
    norm_format = "%(asctime)s - %(name)s - {colour}%(levelname)s - %(message)s\u001b[0m"
    # Pre-rendered layout per log level, with the colour code baked in.
    FORMATS = {
        logging.DEBUG: debug_format.format(colour=cyan),
        logging.INFO: norm_format.format(colour=green),
        logging.WARNING: debug_format.format(colour=yellow),
        logging.ERROR: debug_format.format(colour=red),
        logging.CRITICAL: debug_format.format(colour=bold_red),
    }
    def format(self, record):
        """Render *record* with the colour layout matching its level."""
        layout = self.FORMATS.get(record.levelno)
        return logging.Formatter(layout, datefmt="%Y-%m-%d %H:%M:%S").format(record)
def get_logger(module_name, output_file, terminal_level=logging.DEBUG, file_level=logging.DEBUG):
    """Create (or reconfigure) a logger writing to both the terminal and a file.

    The terminal handler uses the colourised CustomFormatter; the file handler
    uses a plain timestamped format.

    :param module_name:    name passed to logging.getLogger (usually __name__).
    :param output_file:    path of the log file to append to.
    :param terminal_level: minimum level emitted to the terminal.
    :param file_level:     minimum level written to the file.
    :return: the configured logging.Logger.
    """
    logger = logging.getLogger(module_name)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: logging.getLogger caches loggers by name, so calling this
    # function twice for the same module previously stacked a second pair of
    # handlers and every message was logged twice.  Drop stale handlers first.
    for stale_handler in list(logger.handlers):
        logger.removeHandler(stale_handler)
        stale_handler.close()
    terminal_handler = logging.StreamHandler()
    terminal_handler.setLevel(terminal_level)
    terminal_handler.setFormatter(CustomFormatter())
    logger.addHandler(terminal_handler)
    file_handler = logging.FileHandler(output_file)
    file_handler.setLevel(file_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
| Beatson-Institute-Digital-Pathology/reinhard-wsi-normalisation | reinhard_wsi/logging.py | logging.py | py | 1,711 | python | en | code | 2 | github-code | 36 |
8589141283 | from forum.models import Post, Comment
from django import forms
from tinymce.widgets import TinyMCE
class PostForm(forms.ModelForm):
    """Form for creating/editing a forum Post.

    author, slug and course are excluded here, so they are presumably filled
    in by the view before saving.  The content field uses the TinyMCE
    rich-text widget.
    """
    class Meta:
        model = Post
        exclude = ['author', 'slug', 'course']
    title = forms.CharField(
        label='Title',
        max_length=50,
        widget=forms.TextInput(
            attrs={
                'required': 'True',
                'placeholder': 'Enter post title'
            }))
    content = forms.CharField(widget=TinyMCE())
class CommentForm(forms.ModelForm):
    """Form for posting a Comment; author, post and course are set by the view.

    The message body uses the TinyMCE rich-text widget.
    """
    class Meta:
        model = Comment
        exclude = ['author', 'post', 'course']
    content = forms.CharField(label='Message', widget=TinyMCE())
| rafidirg/forum-saas-kowan | forum/forms.py | forms.py | py | 689 | python | en | code | 0 | github-code | 36 |
73192051625 | import argparse
import os
from distutils.util import strtobool
import random
import time
import numpy as np
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from stable_baselines3.common.buffers import ReplayBuffer
def parse_args():
    """Parse command-line arguments for the C51 experiment.

    Covers experiment bookkeeping (names, seeding, W&B tracking, video
    capture, environment id) and the C51/DQN hyperparameters (distribution
    support, replay buffer, exploration schedule, optimisation settings).

    :return: the populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    # Experiment settings
    parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
                        help="the name of this experiment")
    parser.add_argument("--seed", type=int, default=1,
                        help="seed of the experiment")
    parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
                        help="if toggled, `torch.backends.cudnn.deterministic=False`")
    parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
                        help="if toggled, cuda will be enabled by default")
    parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
                        help="if toggled, this experiment will be tracked with Weights and Biases")
    parser.add_argument("--wandb-project-name", type=str, default="cfrl",
                        help="the wandb's project name")
    parser.add_argument("--wandb-entity", type=str, default=None,
                        help="the entity (team) of wandb's project")
    parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
                        help="whether to capture videos of the agent performances (check out `videos` folder)")
    parser.add_argument("--env-id", type=str, default="CartPole-v1",
                        help="the id of the environment")
    # Hyperparameters
    # NOTE(review): the help strings of --v-max / --v-min were copy-pasted from
    # --n-atoms; they are actually the bounds of the value-distribution support.
    parser.add_argument("--v-max", type=float, default=100,
                        help="the number of atoms")
    parser.add_argument("--v-min", type=float, default=-100,
                        help="the number of atoms")
    parser.add_argument("--n-atoms", type=int, default=101,
                        help="the number of atoms")
    parser.add_argument("--replay-memory-size", type=int, default=1000000,
                        help="SGD updates are sampled from this number of most recent frames")
    parser.add_argument("--agent-history-length", type=int, default=4,
                        help="The number of most recent frames experienced by the agent that are given as input to the Q network")
    parser.add_argument("--action-repeat", type=int, default=4,
                        help="repeate each action selected by the agent this many times")
    parser.add_argument("--minibatch-size", type=int, default=128,
                        help="the batch size of sample from the reply memory")
    parser.add_argument("--update-frequency", type=int, default=4,
                        help="the number of actions selected by the agent between successive SGD updates")
    parser.add_argument("--gamma", type=float, default=0.99,
                        help="the discount factor gamma")
    parser.add_argument("--learning-rate", type=float, default=2.5e-4,
                        help="the learning rate of the optimizer")
    parser.add_argument("--target-network-frequency", type=int, default=10000,
                        help="the timesteps it takes to update the target network")
    parser.add_argument("--initial-exploration", type=float, default=1,
                        help="the starting epsilon for exploration")
    parser.add_argument("--final-exploration", type=float, default=0.1,
                        help="the ending epsilon for exploration")
    parser.add_argument("--final-exploration-frame", type=int, default=1000000,
                        help="the number of frames over which the initial value of epsilon is linearly annealed to its final value")
    parser.add_argument("--replay-start-size", type=int, default=50000,
                        help="A uniform random policy is run for this number of frames before learning starts and the resulting experience is used to populate the replay memory")
    parser.add_argument("--noop-max", type=int, default=30,
                        help="maximum number of doing nothing action to be performed by the agent at the start of an episode")
    parser.add_argument("--total-timesteps", type=int, default=10000000,
                        help="total timesteps of the experiments")
    args = parser.parse_args()
    # fmt: on
    return args
def make_env(env_id, seed, idx, capture_video, run_name):
    """Return a zero-argument factory that builds and seeds one gym env.

    Episode statistics are always recorded; video capture is enabled only
    for the first sub-environment (idx == 0) when capture_video is set.
    """
    def thunk():
        wrapped = gym.wrappers.RecordEpisodeStatistics(gym.make(env_id))
        if capture_video and idx == 0:
            wrapped = gym.wrappers.RecordVideo(wrapped, f"videos/{run_name}")
        wrapped.seed(seed)
        wrapped.action_space.seed(seed)
        wrapped.observation_space.seed(seed)
        return wrapped
    return thunk
class QNetwork(nn.Module):
    """Categorical (C51) Q-network over a fixed support of value atoms."""
    def __init__(self, env, n_atoms=101, v_min=-100, v_max=100):
        super().__init__()
        self.env = env
        self.n_atoms = n_atoms
        # Fixed support of the value distribution, shared by all actions.
        self.register_buffer("atoms", torch.linspace(v_min, v_max, steps=n_atoms))
        self.n = env.single_action_space.n
        input_dim = int(np.array(env.single_observation_space.shape).prod())
        self.network = nn.Sequential(
            nn.Linear(input_dim, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, self.n * n_atoms),
        )
    def forward(self, x):
        """Return raw logits of shape (batch, n_actions * n_atoms)."""
        return self.network(x)
    def get_action(self, x, action=None):
        """Return the greedy (or given) action and its atom probabilities."""
        logits = self.forward(x)
        batch = len(x)
        # Per-action probability mass function over the value atoms.
        pmfs = torch.softmax(logits.view(batch, self.n, self.n_atoms), dim=2)
        q_values = (pmfs * self.atoms).sum(2)
        if action is None:
            action = torch.argmax(q_values, 1)
        return action, pmfs[torch.arange(batch), action]
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
    """Value of a linear interpolation from start_e towards end_e after t steps.

    The result is floored at end_e, which implements annealing for the usual
    decreasing (start_e > end_e) exploration schedule.
    """
    step = (end_e - start_e) / duration
    return max(step * t + start_e, end_e)
if __name__ == "__main__":
    args = parse_args()
    # Unique run identifier: env id, experiment name, seed and launch time.
    run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    if args.track:
        import wandb
        wandb.init(
            project=args.wandb_project_name,
            entity=args.wandb_entity,
            sync_tensorboard=True,
            config=vars(args),
            name=run_name,
            monitor_gym=True,
            save_code=True,
        )
    writer = SummaryWriter(f"runs/{run_name}")
    # Log all hyperparameters as a markdown table for TensorBoard.
    writer.add_text(
        "hyperparameters",
        "|param|value|\n|-|-|\n%s" % (
            "\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
    )
    # TRY NOT TO MODIFY: seeding
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    # env setup (single synchronous vectorized environment)
    envs = gym.vector.SyncVectorEnv(
        [make_env(args.env_id, args.seed, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space,
gym.spaces.Discrete), "only discrete action space is supported"
q_network = QNetwork(envs, n_atoms=args.n_atoms,
v_min=args.v_min, v_max=args.v_max).to(device)
optimizer = optim.Adam(q_network.parameters(
), lr=args.learning_rate, eps=0.01 / args.minibatch_size)
target_network = QNetwork(envs).to(device)
target_network.load_state_dict(q_network.state_dict())
rb = ReplayBuffer(
args.replay_memory_size,
envs.single_observation_space,
envs.single_action_space,
device,
optimize_memory_usage=True,
handle_timeout_termination=True,
)
    start_time = time.time()
    # TRY NOT TO MODIFY: start the game
    obs = envs.reset()
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: put action logic here
        # Epsilon-greedy exploration with a linearly annealed epsilon.
        epsilon = linear_schedule(
            args.initial_exploration, args.final_exploration, args.final_exploration_frame, global_step)
        if random.random() < epsilon:
            actions = np.array([envs.single_action_space.sample()
                                for _ in range(envs.num_envs)])
        else:
            actions, pmf = q_network.get_action(torch.Tensor(obs).to(device))
            actions = actions.cpu().numpy()
        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, rewards, dones, infos = envs.step(actions)
        # TRY NOT TO MODIFY: record rewards for plotting purposes
        for info in infos:
            if "episode" in info.keys():
                print(
                    f"global_step={global_step}, episodic_return={info['episode']['r']}")
                writer.add_scalar("charts/episodic_return",
                                  info["episode"]["r"], global_step)
                writer.add_scalar("charts/episodic_length",
                                  info["episode"]["l"], global_step)
                writer.add_scalar("charts/epsilon", epsilon, global_step)
                break
        # TRY NOT TO MODIFY: save data to reply buffer; handle `terminal_observation`
        real_next_obs = next_obs.copy()
        for idx, d in enumerate(dones):
            if d:
                real_next_obs[idx] = infos[idx]["terminal_observation"]
        rb.add(obs, real_next_obs, actions, rewards, dones, infos)
        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs
        # ALGO LOGIC: training.
        if global_step > args.replay_start_size and global_step % args.update_frequency == 0:
            data = rb.sample(args.minibatch_size)
            with torch.no_grad():
                _, next_pmfs = target_network.get_action(
                    data.next_observations)
                # Bellman-shifted atom positions; (1 - dones) zeroes the bootstrap.
                next_atoms = data.rewards + args.gamma * \
                    target_network.atoms * (1 - data.dones)
                # projection: distribute each shifted atom's mass onto its two
                # neighbouring support atoms (categorical DQN, Bellemare et al.)
                delta_z = target_network.atoms[1] - target_network.atoms[0]
                tz = next_atoms.clamp(args.v_min, args.v_max)
                b = (tz - args.v_min) / delta_z
                l = b.floor().clamp(0, args.n_atoms - 1)
                u = b.ceil().clamp(0, args.n_atoms - 1)
                # (l == u).float() handles the case where bj is exactly an integer
                # example bj = 1, then the upper ceiling should be uj= 2, and lj= 1
                d_m_l = (u + (l == u).float() - b) * next_pmfs
                d_m_u = (b - l) * next_pmfs
                target_pmfs = torch.zeros_like(next_pmfs)
                for i in range(target_pmfs.size(0)):
                    target_pmfs[i].index_add_(0, l[i].long(), d_m_l[i])
                    target_pmfs[i].index_add_(0, u[i].long(), d_m_u[i])
            _, old_pmfs = q_network.get_action(
                data.observations, data.actions.flatten())
            # Cross-entropy between projected target and predicted distributions.
            loss = (-(target_pmfs * old_pmfs.clamp(min=1e-5,
                    max=1 - 1e-5).log()).sum(-1)).mean()
            if global_step % 100 == 0:
                writer.add_scalar("losses/td_loss", loss, global_step)
                old_val = (old_pmfs * q_network.atoms).sum(1)
                writer.add_scalar("losses/q_values",
                                  old_val.mean().item(), global_step)
                print("SPS:", int(global_step / (time.time() - start_time)))
                writer.add_scalar(
                    "charts/SPS", int(global_step / (time.time() - start_time)), global_step)
            # optimize the model
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update the target network
            if global_step % args.target_network_frequency == 0:
                target_network.load_state_dict(q_network.state_dict())
    envs.close()
    writer.close()
| ChufanSuki/cfrl | examples/c51.py | c51.py | py | 12,157 | python | en | code | 0 | github-code | 36 |
4654671965 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
from utilities import functional_annotation
from utilities import toGFF3
from utilities import clustering
from utilities import mapping
from utilities import mergeAll_to_gff
def main():
    """Run the 50-genome functional-annotation pipeline end to end.

    Steps: verify the required BLAST databases exist, cluster proteins with
    CD-HIT, run each annotation tool (DOOR2, VFDB, CARD/rgi, TMHMM, SignalP,
    PILER-CR, eggNOG), map the single eggNOG result back onto the 50 genomes,
    convert every tool's output to GFF3 and merge the per-genome GFF files.
    Each stage is skipped when its output folder already exists.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--faa", required = True, help="The location of the directory containing all .faa files")
    parser.add_argument("-n", "--fna", required = True, help = "The location of the directory containing all .fna files")
    parser.add_argument("-a", "--asem", required = True, help="The location of the directory containing all assembly files")
    parser.add_argument("-oa", "--annotationFolder",required = True, help="name of output containing annotation results")
    parser.add_argument("-og", "--gffFolder",required = True, help="name of output containing gff files of each annotation tool")
    parser.add_argument("-om", "--mergeFolder",required = True, help="name of output containing all merged gff files")
    args = parser.parse_args()
    """ In order to run DOOR2 and VFDB, we need a database, so have to check if the database is provided in current working folder """
    operonDB="operonDB.fasta"
    vfdbDB="VFDBdb.fasta"
    if not os.path.exists(operonDB) or not os.path.exists(vfdbDB):
        raise SystemExit("missing database for DOOR2 or VFDB, cannot running DOOR2/VFDB.Exit")
    """ if there is no missing required files, then run annotation """
    faa_dir=args.faa
    """ cluster for eggNOG """
    mergeFile_faa="merge.txt" #file that merge all 50 genomes
    clusterFolder="cluster_CDHIT"
    clusterFile="clusterFile.txt"
    if not os.path.exists(clusterFolder):
        clustering.main(faa_dir,mergeFile_faa,clusterFolder,clusterFile,0.95)
    """ name the output folder for each tool """
    outputFolder=args.annotationFolder
    tmhmm_out="{}/tmhmm_result".format(outputFolder)
    singalP_out="{}/signalP_result".format(outputFolder)
    door2_out="{}/door2_result".format(outputFolder)
    vfdb_out ="{}/VFDB_result".format(outputFolder)
    card_out="{}/card_result".format(outputFolder)
    piler_seq="{}/pilercr_seq".format(outputFolder)
    piler_out="{}/pilercr_result".format(outputFolder)
    eggNOG_out="{}/eggnog_result_oneFile".format(outputFolder) #contain 1 output , from annotated 1 cluster with eggnog
    eggNOG_map_out="{}/eggnog_result_allFiles".format(outputFolder)
    # NOTE(review): os.listdir() with no argument lists the CWD, so the
    # pipeline must be launched from the repository root for the existence
    # checks below to behave as intended.
    if not outputFolder in os.listdir():
        subprocess.call(["mkdir",outputFolder])
    try:
        if not door2_out.split("/")[1] in os.listdir(outputFolder):
            #functional_annotation.door2blast_local_all50(faa_dir,operonDB,"blast_operon_db",door2_out)
            functional_annotation.door2blast_server_all50(faa_dir,operonDB,"blast_operon_db",door2_out)
        if not vfdb_out.split("/")[1] in os.listdir(outputFolder):
            #functional_annotation.vfdbblast_local_all50(args.fna,vfdbDB,"blast_vfdb_db",vfdb_out)
            functional_annotation.vfdbblast_server_all50(args.fna,vfdbDB,"blast_vfdb_db",vfdb_out)
        if not card_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.rgi_all50(faa_dir,card_out)
        if not tmhmm_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.tmhmm_all50(faa_dir,tmhmm_out)
        if not singalP_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.signalP_all50(faa_dir,singalP_out)
        if not piler_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.piler_all50(args.asem,piler_seq,piler_out)
        if not eggNOG_out.split("/")[1] in os.listdir(outputFolder):
            subprocess.call(["python2","utilities/eggnog.py",eggNOG_out])
    except Exception as e:
        print(e)
        raise SystemExit("please fix the error. Pipeline is terminated")
    """ EGGNOG mapping: from one output of EGGNOG, map to 50 output files for 50 faa files"""
    try:
        if not eggNOG_map_out.split("/")[1] in os.listdir(outputFolder):
            names_file="allfiles.txt" #contains 50 id number of each genome
            id_file_faa="title.txt" # contains header line in merge file that concatenate all 50 genomes faa/fna
            mapping.main(faa_dir,names_file,id_file_faa,mergeFile_faa,eggNOG_out,clusterFolder,eggNOG_map_out)
    except Exception as e:
        print(e)
        sys.exit(-1)
    """ after annotation all 50 files, then convert to gff for each annotation tool"""
    try:
        gff_out_folder=args.gffFolder #folder containing gff files for each annotation tool
        if not gff_out_folder in os.listdir():
            subprocess.call(["mkdir",gff_out_folder])
        tmhmm_gff,signalP_gff,piler_gff,card_gff,door2_gff,vfdb_gff,eggnog_gff="{}/tmhmm_to_gff".format(gff_out_folder),"{}/signalP_to_gff".format(gff_out_folder)\
        ,"{}/pilercf_to_gff".format(gff_out_folder),"{}/card_to_gff".format(gff_out_folder),\
        "{}/door2_to_gff".format(gff_out_folder),"{}/vfdb_to_gff".format(gff_out_folder),"{}/eggnog_to_gff".format(gff_out_folder)
        toGFF3.main(tmhmm_out,tmhmm_gff,singalP_out,signalP_gff,door2_out,door2_gff,vfdb_out,vfdb_gff,card_out,card_gff,piler_out,piler_gff,eggNOG_map_out,eggnog_gff,outputFolder,gff_out_folder)
    except Exception as e:
        print(e)
        sys.exit(-1)
    #if there is exist all gff folders for all tool, we can begin to merge
    try:
        all_gff_faa=args.mergeFolder
        if all_gff_faa not in os.listdir():
            mergeAll_to_gff.main(tmhmm_gff,signalP_gff,piler_gff,card_gff,door2_gff,vfdb_gff,eggnog_gff,all_gff_faa)
    except Exception as e:
        print(e)
        sys.exit(-1)
if __name__ == "__main__":
    main()
| compgenomics2019/Team1-FunctionalAnnotation | FA_pipeline_final.py | FA_pipeline_final.py | py | 5,822 | python | en | code | 0 | github-code | 36 |
71778077224 | import xml.etree.ElementTree as Tree
import unittest
import os
# Clear Screen
def clear_screen():
    """Clear the terminal window; no-op on unrecognised platforms."""
    commands = {'posix': 'clear', 'nt': 'CLS', 'dos': 'CLS', 'ce': 'CLS'}
    command = commands.get(os.name)
    if command is not None:
        os.system(command)
class ETLTool:
    """Small interactive ETL helper for a product catalogue stored as XML."""

    def __init__(self):
        # Parsed ElementTree; stays None until parse_xml succeeds.
        self.tree = None

    def parse_xml(self, file_path):
        """Read an XML file and load the element tree (errors are printed, not raised)."""
        try:
            self.tree = Tree.parse(file_path)
        except Exception as e:
            print(f"Error: {e}")

    def save_xml(self, save_path):
        """Write the (possibly modified) tree to *save_path*; no-op when nothing is loaded."""
        if self.tree is not None:
            self.tree.write(save_path)

    def modify_price(self, category, increase):
        """Raise the price of every product in *category* by *increase* percent."""
        root = self.tree.getroot()
        for prod in root.findall(f"./product[@category='{category}']"):
            price = float(prod.find("price").text)
            new_price = price * (1 + increase / 100)
            prod.find("price").text = str(new_price)

    def rename_category(self, old_name, new_name):
        """Change the category attribute from *old_name* to *new_name* on all products."""
        root = self.tree.getroot()
        for prod in root.findall(f"./product[@category='{old_name}']"):
            prod.set("category", new_name)

    def remove_products(self, category, min_rating):
        """Drop every product in *category* whose rating is below *min_rating*."""
        root = self.tree.getroot()
        for prod in root.findall(f"./product[@category='{category}']"):
            if float(prod.find("rating").text) < min_rating:
                root.remove(prod)

    def generate_report(self):
        """Print product count and total price per category to the CLI."""
        report = {}
        root = self.tree.getroot()
        for prod in root.findall("product"):
            category = prod.attrib["category"]
            price = float(prod.find("price").text)
            report[category] = report.get(category, {"count": 0, "total_price": 0})
            report[category]["count"] += 1
            report[category]["total_price"] += price
        for k, v in report.items():
            print(f"\nCategory: {k}")
            print(f"Total Product Count: {v['count']}")
            print(f"Total Price: {v['total_price']}")

    def run_tool(self):
        """Menu-driven CLI loop.

        BUG FIX: choosing options 2-6 before loading a file previously
        ``break``ed out of the loop and silently exited the program; it now
        just warns and returns to the menu.  Numeric inputs are validated
        instead of crashing on ValueError.
        """
        LOAD_ERR = "Please load an XML file first by selecting option 1."
        while True:
            clear_screen()
            print("\nWelcome to the Product ETL Tool")
            print("1: Load XML File")
            print("2: Increase Price By Percent")
            print("3: Rename Category")
            print("4: Remove Products Below Minimum Rating")
            print("5: Save Changes to New File")
            print("6: Generate Report on CLI")
            print("7: Exit")
            select = input("Enter your choice here (Number): ")
            if select == "7":
                print("\nGoodbye!")
                break
            elif select == "1":
                file_path = input("Enter the relative path to the XML file: ")
                self.parse_xml(file_path)
            elif select in ("2", "3", "4", "5", "6") and self.tree is None:
                print(LOAD_ERR)
            elif select == "2":
                category = input("Enter the category name: ")
                try:
                    percentage = float(input("Enter the percentage increase (number only): "))
                except ValueError:
                    print("Invalid number.")
                else:
                    self.modify_price(category, percentage)
            elif select == "3":
                old_name = input("Enter the current category name: ")
                new_name = input("Enter the new category name: ")
                self.rename_category(old_name, new_name)
            elif select == "4":
                category = input("Enter the category name: ")
                try:
                    min_rating = float(input("Enter the minimum rating: "))
                except ValueError:
                    print("Invalid number.")
                else:
                    self.remove_products(category, min_rating)
            elif select == "5":
                save_path = input("Enter the path to save the XML file: ")
                self.save_xml(save_path)
            elif select == "6":
                self.generate_report()
            input("\nClick any key to proceed")  # pause before redrawing the menu
if __name__ == "__main__":
    # Init new tool
    etl_tool = ETLTool()
    # Run the interactive menu loop until the user chooses to exit.
    etl_tool.run_tool()
26610403483 | import pandas as pd
from simulation_model.create_params_grid import default_params, path_experiment_table
from utilities.small_functions import mkDir_if_not
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from simulation_model.gather_results import bilinear_name
def get_results_experiment_name_path(experiment_name):
    """Return the path of the pickled CV results for *experiment_name*, creating the results dir."""
    results_dir = mkDir_if_not("simulation_model/results")
    filename = "{}_results_cv.pkl".format(experiment_name)
    return os.path.join(results_dir, filename)
def min_of_gt_0(x):
    """Smallest strictly-positive entry of the array *x*."""
    positive = x[x > 0]
    return np.min(positive)
# Short display names for the classifiers (paper terminology).
acronyms_dict = {"optirank": "optirank",
                 "logistic_regression_on_ranks": "rank-lr",
                 "logistic_regression": "lr"}


def acronym(serie):
    """Return *serie* (a Series of classifier names) mapped to display acronyms.

    Raises KeyError for a name missing from acronyms_dict.
    """
    to_acronym = acronyms_dict.__getitem__
    return serie.apply(to_acronym)
# Maps simulation parameter names to the LaTeX symbols used in the paper's
# axis labels (see the correspondences noted inline).
dict_param_name_to_latex = {"d": "$d$",  # correspondences to the paper
                            "n_perturbing": "$d_{P}$",
                            "n_samples": "$n_{samples}$",
                            "tau": "$\\tau$",
                            "sigma": "$\sigma$"}
if __name__ == "__main__":
    experiment_table = pd.read_pickle(path_experiment_table)
    sns.set(font_scale=1.5) # was 1.5
    sns.set_style("whitegrid")
    # One accuracy plot and one overlap plot per experiment, each rendered
    # twice: once with the legend and once without.
    for i_row, row in experiment_table.iterrows():
        experiment_name = row["experiment_name"]
        n_params = row["n_params"]  # NOTE(review): appears unused below
        params_grid = list(row["param_grid"])  # NOTE(review): appears unused below
        results = pd.read_pickle(get_results_experiment_name_path(experiment_name))
        results["classifier"] = acronym(results["classifier_name"])
        for with_legend in [True, False]:
            output_dir = "simulation_model/results"
            if with_legend:
                legend = "full"
                location = "upper left"
                output_dir_plots = mkDir_if_not(os.path.join(output_dir, "plots", "legend_on"))
            else:
                legend = False
                location = "best"  # should have no effect
                output_dir_plots = mkDir_if_not(os.path.join(output_dir, "plots", "legend_off"))
            # Test balanced accuracy vs. the swept parameter, per classifier.
            plt.figure()
            p = sns.lineplot(data=results.reset_index(), x="param_value", y="test_balanced_accuracy", hue="classifier",
                             legend=legend)
            lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=False, ncol=3)
            p.axes.set_xlabel(dict_param_name_to_latex[pd.unique(results["param_name"]).item()], )
            p.axes.set_ylabel("test balanced accuracy (%)")
            # if pd.unique(results["param_name"]).item() == "sigma_m":
            #     p.axes.set_xscale("symlog", linthresh=min_of_gt_0(results[["param_value"]].values))
            figpath = os.path.join(output_dir_plots, "results_{}.pdf".format(experiment_name))
            plt.tight_layout()
            if with_legend:
                lgd.set_visible(True)
            else:
                lgd.set_visible(False)
            plt.savefig(figpath)
            plt.close()
            # overlap figure (only for the bilinear classifier)
            plt.figure()
            results_of_interest = results.loc[results.classifier_name == bilinear_name]
            p = sns.lineplot(data=results_of_interest.reset_index(), x="param_value", y="overlap", legend=legend)
            lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=False, ncol=3)
            p.axes.set_xlabel(dict_param_name_to_latex[pd.unique(results["param_name"]).item()], )
            p.axes.set_ylabel("overlap")
            p.axes.set_ylim([0, 1])
            # if pd.unique(results["param_name"]).item() == "sigma_m":
            #     p.axes.set_xscale("symlog", linthresh=min_of_gt_0(results[["param_value"]].values))
            figpath = os.path.join(output_dir_plots, "results_overlap_{}.pdf".format(
                experiment_name))
            plt.tight_layout()
            if with_legend:
                lgd.set_visible(True)
            else:
                lgd.set_visible(False)
            plt.savefig(figpath)
            plt.close()
    # results for default parameters (taken from the experiment on d for instance)
    experiment_name = "different_d"
    results = pd.read_pickle(get_results_experiment_name_path(experiment_name))
    results_per_classifier = results.loc[
        results.param_value == default_params["d"], ["test_balanced_accuracy", "classifier_name", "overlap"]].groupby(
        ["classifier_name"]).agg(['mean', 'sem'])
    results_per_classifier.to_csv("simulation_model/results/results_default_parameters.csv")
    #output file for markdown: "mean ± sem" of balanced accuracy, in percent
    outfile_md = "simulation_model/results/results_default_parameters.md"
    out_md = (100*results_per_classifier[("test_balanced_accuracy", "mean")]).map('{:,.0f}'.format) + " ± " + (100*results_per_classifier[("test_balanced_accuracy", "sem")]).map('{:,.0f}'.format)
    out_md = out_md.to_frame(name="test balanced accuracy")
    markdown_table = out_md.to_markdown()
    f = open(outfile_md, "w")
    f.write(markdown_table)
    f.close()
| paolamalsot/optirank | simulation_model/plot_results.py | plot_results.py | py | 5,116 | python | en | code | 0 | github-code | 36 |
def lengthOfLIS(nums):
    """Return the length of the longest strictly increasing subsequence of nums.

    Classic O(n^2) dynamic programming: dp[i] holds the length of the longest
    increasing subsequence that ends at index i.

    Args:
        nums: sequence of comparable values (e.g. ints).

    Returns:
        The LIS length; 0 for an empty input (the original crashed with
        ``ValueError: max() arg is an empty sequence`` on ``[]``).
    """
    if not nums:
        return 0
    dp = [1] * len(nums)
    for i in range(1, len(nums)):
        for j in range(i):
            # Extend the best subsequence ending at j when nums[i] can follow it.
            if nums[i] > nums[j]:
                dp[i] = max(dp[i], 1 + dp[j])
    return max(dp)
if __name__ == "__main__":
    # Demo run (guarded so importing this module produces no output):
    # the LIS of this example is [2, 3, 7, 101], so 4 is printed.
    nums = [10, 9, 2, 5, 3, 7, 101, 18]
    print(lengthOfLIS(nums))
| Gale6/leetcode--codes | lengthOfLIS.py | lengthOfLIS.py | py | 279 | python | en | code | 0 | github-code | 36 |
24682883982 |
from selenium import webdriver
from selenium.common import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

from pages.utils import write_file
class BasePage:
    """Base class for page objects: wraps common Selenium waits and actions.

    All locators are XPath strings unless a ``by`` strategy is passed
    explicitly. Explicit waits share one 5-second ``WebDriverWait``.
    """

    def __init__(self, driver):
        self.driver = driver
        # Shared explicit-wait helper with the project-wide 5 s timeout.
        self.waiter = WebDriverWait(driver=driver, timeout=5)

    def wait_until_displayed(self, by, xpath):
        """Wait until the element is visible and return it; raises TimeoutException on timeout."""
        return self.waiter.until(
            method=expected_conditions.visibility_of_element_located(
                (by, xpath)
            )
        )

    def wait_until_clickable(self, by, xpath):
        """Wait until the element is clickable and return it; raises TimeoutException on timeout."""
        return self.waiter.until(
            method=expected_conditions.element_to_be_clickable((by, xpath)))

    def is_element_exist(self, xpath):
        """Return True if the element is present in the DOM right now (no waiting), else False."""
        try:
            self.driver.find_element(by=By.XPATH, value=xpath)
            return True
        except NoSuchElementException:
            # find_element only raises NoSuchElementException; the builtin
            # TimeoutError the original also caught can never occur here.
            return False

    def is_element_visible(self, xpath):
        """Return True if the element becomes visible within the wait timeout, else False."""
        try:
            self.wait_until_displayed(by=By.XPATH, xpath=xpath)
            return True
        except (TimeoutException, NoSuchElementException):
            # BUG FIX: WebDriverWait raises selenium's TimeoutException, not the
            # builtin TimeoutError the original caught — timeouts used to
            # propagate out of this predicate instead of returning False.
            return False

    def fill_field(self, xpath, value, submit=False):
        """Clear the field located by ``xpath`` and type ``value``.

        ``submit`` (keyword, default False) additionally submits the enclosing
        form — a backward-compatible generalization that lets
        ``fill_field_with_submit`` reuse this implementation.
        """
        element = self.wait_until_clickable(by=By.XPATH, xpath=xpath)
        element.clear()
        element.send_keys(value)
        if submit:
            element.submit()

    def fill_field_with_submit(self, xpath, value):
        """Clear the field, type ``value`` and submit the enclosing form."""
        self.fill_field(xpath=xpath, value=value, submit=True)

    def click(self, xpath):
        """Find the element by XPath and click it (no explicit wait)."""
        self.driver.find_element(by=By.XPATH, value=xpath).click()

    def move_mouse_on_element(self, xpath):
        """Best-effort hover over the element; failures are logged, never raised."""
        try:
            action = webdriver.ActionChains(self.driver)
            element = self.driver.find_element(by=By.XPATH, value=xpath)
            action.move_to_element(element)
            action.perform()
        except Exception as ex:
            # Narrowed from (BaseException, Exception): the original also
            # swallowed KeyboardInterrupt/SystemExit, which must propagate.
            write_file('move_mouse_on_element() Exception = ', ex)

    def switch_to_alert(self, alert_accept_dismiss):
        """Wait for an alert, then accept it if ``alert_accept_dismiss`` is truthy, else dismiss it."""
        self.waiter.until(
            method=expected_conditions.alert_is_present()
        )
        if alert_accept_dismiss:
            self.driver.switch_to.alert.accept()
        else:
            self.driver.switch_to.alert.dismiss()

    def get_element_value(self, xpath):
        """Return the element's ``value`` attribute, or None if the element is absent."""
        if self.is_element_exist(xpath=xpath):
            return self.driver.find_element(By.XPATH, xpath).get_attribute('value')
        return None

    def compare_element_text(self, text, xpath):
        """Return True if the visible element's text equals ``text``."""
        element = self.wait_until_displayed(by=By.XPATH, xpath=xpath)
        return element.text == text
| Flibustyer/TicketsBoard | pages/base.py | base.py | py | 3,434 | python | en | code | 0 | github-code | 36 |
36391585173 | from .method import get_json_ret, json_response_zh
class AuthMiddleware(object):
    """Django middleware that prepares every request for the auth API.

    Responsibilities:
      * parse the body into ``request.json`` (JSON body or form POST),
      * require a negotiated Diffie-Hellman shared secret outside
        ``/negotiate_key`` and derive the 16-byte ``request.DH_key`` from it,
      * require a ``data`` parameter for API endpoints,
      * attach the logged-in user (if any) as ``request.user``.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request, *args, **kwargs):
        # BUG FIX: the Content-Type header may be absent, in which case
        # headers.get() returns None and `"application/json" in None` raised
        # TypeError. A missing header is treated like a form POST.
        content_type = request.headers.get("Content-Type") or ""
        if "application/json" in content_type:
            import json
            request.json = json.loads(request.body)
        else:
            request.json = request.POST
        if not request.path.startswith('/negotiate_key'):
            shared_secret = request.session.get("shared_secret")
            if shared_secret is None:
                return json_response_zh(get_json_ret(42, msg="请先协商密钥"))
            from Crypto.Util.number import long_to_bytes
            # Derive a fixed 16-byte AES key from the negotiated shared secret
            # (truncate to 16 bytes, zero-pad if shorter).
            request.DH_key = long_to_bytes(shared_secret)[:16].ljust(16, b'\x00')
        if not request.path.startswith('/negotiate_key') and not request.path == '/dynamicauth_api3/':
            request.data = request.json.get("data")
            if request.data is None:
                return json_response_zh(get_json_ret(40, msg="请传递 data 参数"))
        user_name = request.session.get("user_name")
        if user_name:
            from UserModel.models import UserModel
            request.user = UserModel.objects.get(user_name=user_name)
            # NOTE(review): a stock Django manager's .get() raises DoesNotExist
            # rather than returning None, so this branch looks unreachable —
            # confirm whether UserModel overrides objects.get.
            if request.user is None:
                request.session["user_name"] = None
        response = self.get_response(request)
        return response
| CryptoCompetition2019-RNG/AuthServer | AuthServer/middleware.py | middleware.py | py | 1,435 | python | en | code | 0 | github-code | 36 |
11687518350 | import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import io
# Load the grayscale occupancy-grid maps (PGM) produced by different runs.
im1 = io.imread('frac3_run2_sp5_map16.pgm')
im2 = io.imread('frac2_run4_sp5_map2.pgm')
im3 = io.imread('frac1A_run2_map5.pgm')
# badim: presumably a known-poor map, imfull: the reference "full" map used
# as ground truth below — TODO confirm against the data set.
badim = io.imread('frac9A_run1_map2.pgm')
imfull = io.imread('frac3A_run2_map8.pgm')
plt.imshow(im1, plt.cm.gray)
plt.savefig("mygraph.png")
rows, cols = im1.shape
print("Size of im1: ",rows,cols)
print("Size of im2: ", im2.shape)
print("Size of im3: ", im3.shape)
# Resize everything to a common 520x500 grid so pixelwise MSE/SSIM are defined.
img1 = resize(im1, (520, 500) )
img2 = resize(im2, (520, 500) )
img3 = resize(im3, (520, 500) )
imgFull = resize(imfull, (520, 500) )
badimg = resize(badim, (520, 500) )
# Save each resized map for visual inspection.
plt.imshow(img1, plt.cm.gray)
plt.savefig("mygraph1.png")
plt.imshow(img2, plt.cm.gray)
plt.savefig("mygraph2.png")
plt.imshow(img3, plt.cm.gray)
plt.savefig("mygraph3.png")
plt.imshow(imgFull, plt.cm.gray)
# NOTE(review): this overwrites the "mygraph.png" written for im1 above —
# confirm whether a distinct filename was intended.
plt.savefig("mygraph.png")
# Pairwise mean squared error; identical pairs (11, 22) act as a sanity check
# and should be 0.
mse_11 = mean_squared_error(img1,img1)
mse_12 = mean_squared_error(img1,img2)
mse_22 = mean_squared_error(img2,img2)
mse_23 = mean_squared_error(img2, img3)
mse_13 = mean_squared_error(img1, img3)
print("MSE Error 11: %f, 12: %f, 22: %f, 23: %f, 13: %f"%(mse_11, mse_12, mse_22, mse_23, mse_13))
# Pairwise structural similarity; identical pairs should be 1.0.
# NOTE(review): ssim23 uses img1's data range although it compares img2/img3 —
# verify this is intentional (skimage expects the shared intensity range).
ssim11 = ssim(img1, img1, data_range=img1.max()-img1.min())
ssim22 = ssim(img2, img2, data_range=img2.max()-img2.min())
ssim12 = ssim(img1, img2, data_range=img1.max()-img1.min())
ssim23 = ssim(img2, img3, data_range=img1.max()-img1.min())
ssim13 = ssim(img1, img3, data_range=img1.max()-img1.min())
print("SSIM 12: ", ssim12, "ssim11 %f, ssim22 %f, ssim23 %f, ssim13: %f"%(ssim11, ssim22, ssim23, ssim13) )
# Compare every map (including the known-bad one) against the full reference.
print("Comparing with FULL map:")
print("MSE 1full: %f, 2full: %f, 3full: %f, badimg-full: %f"%(mean_squared_error(img1, imgFull) ,mean_squared_error(img2, imgFull) ,mean_squared_error(img3, imgFull), mean_squared_error(badimg, imgFull) ) )
print("SSIM with full: 1f: %f, 2f: %f, 3f: %f, badimg-full %f"%( ssim(img1, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(img2, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(img3, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(badimg, imgFull, data_range=imgFull.max()-imgFull.min()) ) )
| aditi741997/robotics_project | plot_nav2d_mapQuality.py | plot_nav2d_mapQuality.py | py | 2,399 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.