seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7759594347 | import os
import sys
from PyQt5.QtWidgets import QFrame, QSizePolicy
def isFloat(s: str):
try:
float(s)
except ValueError:
return False
return True
def isInt(s: str):
try:
int(s)
except ValueError:
return False
return True
def returnFloat(s: str):
try:
float(s)
except ValueError:
return -1
return float(s)
def returnInt(s: str):
try:
int(s)
except ValueError:
return -1
return int(s)
def convertPressureUnits(value: float, fromUnit: str = "Pa", toUnit: str = "Pa"):
conversionFactor = 1
if fromUnit == "Pa":
conversionFactor *= 1
elif fromUnit == "kPa":
conversionFactor *= 1000
elif fromUnit == "MPa":
conversionFactor *= 1000000
elif fromUnit == "bar":
conversionFactor *= 100000
if toUnit == "Pa":
conversionFactor /= 1
elif toUnit == "kPa":
conversionFactor /= 1000
elif toUnit == "MPa":
conversionFactor /= 1000000
elif toUnit == "bar":
conversionFactor /= 100000
return value * conversionFactor
def convertTimeUnits(value: float, fromUnit: str = "s", toUnit: str = "s"):
conversionFactor = 1
if fromUnit == "s":
conversionFactor *= 1
elif fromUnit == "min":
conversionFactor *= 60
elif fromUnit == "h":
conversionFactor *= 3600
elif fromUnit == "ms":
conversionFactor /= 1000
if toUnit == "s":
conversionFactor /= 1
elif toUnit == "min":
conversionFactor /= 60
elif toUnit == "h":
conversionFactor /= 3600
elif toUnit == "ms":
conversionFactor *= 1000
return value * conversionFactor
def find_nth(haystack, needle, n):
start = haystack.find(needle)
while start >= 0 and n > 1:
start = haystack.find(needle, start+len(needle))
n -= 1
return start
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
class QHLine(QFrame):
def __init__(self):
super().__init__()
self.setMinimumWidth(1)
self.setFixedHeight(20)
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
return
| timhenning1997/Serial-Port-Monitor | UsefulFunctions.py | UsefulFunctions.py | py | 2,539 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys._MEIPASS",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"li... |
72699109627 | # -*- coding: UTF-8 –*-
import random
from datetime import datetime
"""This is a random address function"""
def address():
# 小区名,可自行添加
area_address_name = ['蓝湾上林院', '绿城金华御园(别墅)', '紫金湾', '玫瑰星城', '绿城兰园',
'龙庭一品', '江山风华', '中梁首府', '中梁首府', '都市豪园',
'光明湖海城市花园', '金色海塘', '天御花园', '广润翰城', '泰地金水湾',
'新纪元香湖', '绿城金都美地', '中天学府诚品', '金都美苑', '金都美苑',
'香格里拉城市花园', '广天九龙玉府', '中天公元诚品', '南岸名城', '欧景名城',
'御园(东区)', '蝶景湾御江山', '滨江金色蓝庭', '书香名邸', '蓝湾国际花园',
'丽州一品', '丽州一品', '苏桂院(一期)', '环球春江花园', '冠达东方兰庭',
'五星清华园', '鸿基彼岸', '东方明珠花园', '华庭常青墅', '四季尊域',
'尖峰郦园', '金地艺境', '保集蓝郡', '保集蓝郡', '泰瑞家园',
'泰瑞家园', '和信花园', '环球春江花园', '矿泉花园', '环球春江花园',
'福林花园', '海韵嘉园', '万科青岛小镇', '中海蓝庭', '城发长江瑞城',
'麦岛金岸', '城建湖光山色', '青岛印象山', '金帝山庄', '保利海上罗兰',
'东海路9号', '鲁商蓝岸丽舍', '瑞源名嘉汇', '中海清江华府', '万科魅力之城',
'中央国际', '湛园海德公园', '万达悦公馆', '万科如园', '和达璟城紫御',
'上实海上海', '温哥华花园', '金秋泰和郡', '海信珠山小镇', '海逸天成',
'青特小镇', '中海银海一号', '万科春阳花园', '山水名园二期', '晓港名城(五期)',
'浮山后四小区', '万丽海景', '浮山湾花园', '深蓝中心', '万科翡翠长江',
'青铁华润城', '左岸风度', '逍遥花园', '鲁商首府', '鲁德海德堡',
'海尔山海湾', '龙湖悠山郡', '保利百合花园', '浮山后六小区', '锦绣天成',
'万科金色城市', '海尔世纪公馆', '青特赫山', '丽泽花园', '万科城',
'御景峰', '柏悦华府', '依云曦城', '上林一品', '蔚蓝创新天地',
'融创御府', '广佛颐景园', '荔园新天地', '友谊大街18号街坊', '星海岸',
'金地天玺', '翠湖绿洲', '梧桐苑', '弘信山庄', '中海临安府',
'东逸湾(别墅)', '世纪华庭', '宝翠花园', '龙光水悦龙湾', '藏珑华府',
'半岛碧桂园', '都市豪园', '仙湖别墅', '惠景城', '雅居蓝湾',
'尚观嘉园', '阳光山色', '青春小区', '颐山源墅', '颐和国际',
'致越优城', '优山美地', '保利海德公园', '星湖湾', '影都学府',
'绿岛明珠', '天台山庄', '时代领峰', '国际城名苑', '保集半岛',
'保利东御花园', '碧桂园翡翠湾', '保利西雅图', '中恒海晖城', '嘉乐花园',
'金海岸花园', '绿地未来城', '尚辉苑', '南江壹号', '南江壹号',
'长华国际中心', '丽日豪庭', '北滘海琴水岸', '万科沁园', '丽泽花园',
'永盛新阳光', '柳湖花园', '山水庄园', '御景花园', '南江名郡',
'紫金玉澜', '长信东海银湾', '丹灶碧桂园', '青春小区', '青春小区',
'名汇浩湖湾', '广夏花园', '海琴湾', '保利东景花园', '新城云昱',
'天悦湾花园', '美的翰湖苑', '招商臻园', '荟景豪庭', '如意花园',
'同济广场', '金地悦荔', '岭南天地璟廷', '龙光水悦云天', '江山瑞城',
'红星天悦', '保利外滩一号', '金地九珑璧', '碧桂园钻石湾', '泰地世锦园',
'光明花半里', '新君汇花地湾', '鹿璟村', '美的领贤公馆', '君御花园',
'恒大帝景', '帝景蓝湾', '雅丽豪庭', '红星天悦', '鲁能公馆',
'凤起兰庭', '珠水豪庭', '花苑广场', '雅瑶绿洲', '顺德居小区',
'保利花园(六期)', '鼎太风华', '馥室成双(一期)', '二冶小区街坊', '鹿港小镇',
'自由路8号街坊', '恒大华府', '保利罗兰香谷', '保利拉菲公馆(二三期)', '滨海名都',
'东河国际商住城', '日月豪庭', '光辉佳苑', '文雅苑', '迎宾小区',
'万达广场万达小区', '中冶世家', '景苑花园', '保利花园(三期)', '青福新城',
'东方俪城', '富力城(EFG区)', '丰盈小区', '富强路七号街坊', '居然新城',
'锦尚国际', '奥宇新城', '阿尔丁小区', '三江尊园', '现代城',
'文馨苑', '新星壹品', '邻圃道街坊', '惠民小区(昆都仑)', '正翔国际枫景苑',
'新星美地', '维多利华府', '口岸花苑', '凡尔赛颐阁(凡尔赛观邸)', '友谊17号街坊',
'欧风丽景', '保利花园(一期)', '欧鹿生活城', '文脉苑', '东亚香堤丽舍',
'乌兰小区', '佳园小区', '富强路十号街坊', '阳光小区', '翡丽湾',
'檀香湾', '维多利摩尔城', '恒大名都', '友谊大街22号东街坊', '文博苑',
'青山路1号街坊', '京奥港花园', '凯旋豪庭', '六合新城(二区南)', '富丽佳园',
'绿地国际花都', '景富家园(C区)', '中建御澜世家', '阿南小区', '松石国际城石榴花园',
'中冶世家水晶城', '华丽家族', '美室层双', '松石名第', '燕赵锦河湾',
'牡丹花园', '友谊小区(南区)', '合志家', '园文芳苑', '山水佳苑',
'万郡大都城', '华发新城', '包钢友谊十三小区', '丽晶名邸', '金茂豪庭',
'少先路31号街坊', '百兴小区', '佳福小区', '首创加州郡府', '锦林花园',
'昆河壹号', '馥室成双(二期)', '青山路五号街坊', '恒基景苑', '振华二区',
'紫金华府', '保利花园(二期)', '富强路一号街坊', '健康新城', '望园小区',
'嘉园泊景湾', '新元华庭', '金沙华府', '育才小区', '龙熙盛景',
'呼得木林大街10号街坊', '青东华庭', '黄河小区', '呼得木林大街11号街坊', '中冶世家华庭',
'明日星城知情苑', '富力华庭', '锡华世纪花园', '自由路7号街坊', '保利花园(四期)',
'水岸花都', '鹿鸣苑', '青年路8号街坊', '龙苑小区(B区)', '富贵佳园',
'高新花园', '丰景佳苑', '荣资梦乡', '胜达小区', '检察馨苑',
'青山路六号街坊', '居然青年城', '少先路二十二号街坊', '大连新型居住区温馨园',
'保利拉菲公馆(一期)',
'桐荷嘉苑', '远洲国际城', '青11号街坊', '广基花园', '茂业天地',
'和发紫薇园', '城际美景', '丰景御苑', '裕民新城理想城', '东方花园',
'天疆骊城', '纺织社区', '惠德花园', '海威小区(二区)', '青松小区(二区)',
'铭峰佳苑', '景富家园(B区)', '颐和山庄', '大连新型居住区春意园', '鹿景苑',
'青云小区一段', '阳光尚品(南区)', '滨江国际阅江台', '融茂第一城(C1区)', '意城晶华',
'富强路三号街坊', '友谊大街19号街坊(一区)', '大连新型居住区长熙园', '幸福路7号街坊',
'华天云居',
'振翔小区', '神华佳苑', '幸八雅园', '当代左岸绿洲', '江南文枢苑',
'滨江国际澜泊湾', '丰产道一号街坊', '青年路10号街坊', '明日星城知乐苑', '新星水岸花园',
'北梁新区南二区', '幸福路5号街坊(哈达道)', '向阳花苑', '青年路7号街坊', '闽辉禧瑞都',
'友谊大街27号街坊', '瀚星华府', '龙昱华府', '景富家园(F区)', '友谊嘉园(三期)',
'怡然苑', '南排小区', '赛音小区(西区)', '天赐新城(B区)', '万达嘉园',
'金泰花园', '明日星城德景苑', '通顺东二区', '当代菁英国际', '友谊大街25号街坊',
'呼得木林大街7号街坊', '加州郡府融邦', '万新家园', '民馨家园(二区)', '呼得木林新天地2区',
'北大恒苑', '万和城(二期)', '东豪国际城', '自由路5号街坊', '明日星城知雅苑',
'钢铁大街36号街坊', '海威小区(五区)', '呼得木林大街14号街坊', '西五街房管楼小区(体育场南路)',
'碧水嘉苑',
'巨力时代', '民主路5号街坊', '汇金小区', '景晟开元', '瑞春园',
'金辉华府', '恩和小区', '喜瑞都御府', '钢铁大街18号街坊', '国际新城(南区公寓)',
'电力佳苑', '健康阳光城(北区)', '和平路西小区', '祥和苑', '幸福路1号街坊',
'龙苑小区(A区)', '北梁新区南四区', '鑫泰豪庭', '天福广场', '友谊大街23号街坊',
'海威小区(四区)', '银苑小区', '康乐小区(西区)', '中晟华悦', '保利香槟湾(保利香槟花园)',
'兵工华居', '西河景苑', '都兰小区', '友谊大街16号街坊', '公园大道',
'自由路4号街坊', '中冶世家荣园', '园林新村', '内蒙古地勘五院', '恒大帝景',
'苏宁广场', '朝阳小区(一区)', '佳禾公寓', '滨江国际王俯景', '青松小区(五区)',
'一化小区', '民馨家园(六区)', '瑞芬小区', '青年园', '核工业208小区',
'沃土阳光住宅小区', '春光小区(六区)', '华清佳苑', '瑞德花园', '北梁新区西一区',
'万和城(一期)', '明华学府', '青年路12号街坊住宅小区', '呼得木林大街12号街坊', '少先20号街坊',
'友谊大街22号西街坊', '松石国际城', '中和文化广场', '大连新型居住区逸民园', '振华小区',
'九郡嘉园', '世纪佳苑', '民主路3号街坊', '富强路十二号街坊', '西四街小区',
'夏日花园', '宏源鑫都', '明日星城安景苑', '幸福南路16号街坊', '青松小区(六区)',
'龙藏新城福地园', '大连新型居住区怡生园', '钢铁大街37号街坊', '锦绣嘉园', '融茂第一城(A区)',
'矿机小区', '保成上元名府(南区)', '幸福路2号街坊', '幸福路10号街坊', '青宾小区',
'矿办小区', '金泰丽苑', '民馨家园A区', '海湖豪庭', '赛音道五号街坊',
'西脑包康乐小区', '璟华苑', '三电住宅小区', '时代天骄', '明日星城知书苑',
'通顺西二区', '友谊小区(北区)', '山水文苑', '富强路四号街坊', '呼得木林新天地1区',
'当铺佳苑', '香林美地', '华丰园', '鹿城福满园', '明日星城文景苑(北区)',
'青苑小区', '公二街住宅小区', '春阳小区', '边防佳苑', '九中小区',
'鹿苑小区', '丰产道2号街坊', '绿都花庭', '顺鑫望潮苑(别墅)', '绿苑豪庭',
'傲北上城', '胜源滨河新城', '御融公馆(公寓住宅)', '青年路18号街坊', '总部经济园',
'田城康都苑', '赛音道六号街坊', '中慧新城', '美岸华庭(北区)', '呼得木林大街9号街坊',
'友谊大街26号街坊', '海威小区十区', '电力小区(青山)', '南海五村', '警官花园',
'信德雅居', '神力龙园', '怡景园', '三克拉', '天赐新城(A区)',
'贵发山庄(一期)', '步步高东苑', '朝阳小区(二区)', '太阳城', '青年路14号街坊',
'绿苑小区(东河)', '颐和山庄(半山湖)', '友谊大街31号小区', '安富小区', '天安雅居',
'草原小区', '青六新苑', '青云二段', '团结大街11号街坊', '新春小区',
'兵工佳苑', '胜达花苑', '福宇小区', '新桃园小区', '西一街小区',
'兰溪花园', '金桂名园', '福泰嘉苑', '安泰华庭', '保利钻石小区',
'迎宾道一号街坊', '幸福路9号街坊', '东方嘉苑', '永茂泉阳光小区', '横泰佳苑',
'明德花园(公寓住宅)', '少先路29号街坊盛世嘉苑', '友谊大街15号街坊', '御景华庭',
'团结大街8号街坊',
'自由路10号街坊', '青松小区(七区)', '北新街小区(北新苑东区)', '怡荷豪庭', '信合龙湾半岛',
'曹欣小区', '北梁新区西二区', '赛音道一号街坊', '健康阳光城(南区)', '光辉小区(三区)',
'古邑人家', '近水楼台', '龙藏新城雅典苑', '东海花园', '景富家园(A区)',
'国际新城(北区)', '工业路2号街坊', '松石雅居', '钢铁大街24号街坊', '锦裕园小区',
'青甲12号街坊', '团结22号街坊', '景天花园(一期)', '龙丰苑', '保利公园壹号',
'海威小区(六区)', '攀宇小区', '宁馨佳园', '丰产道3号街坊', '方兴观澜壹号']
# 随机选择小区名
area_name = random.choice(area_address_name)
# 随机选择楼栋
build_number = str(random.choice(
[1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32])
)
# 随机选取单元号
unit_number = str(random.choice(
[1, 2, 3, 5, 6, 7])
)
# 随机选择楼层号
floor_number = str(random.choice(
[1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27])
)
# 随机选择门牌号
house_number = str(random.choice(
[1, 2, 3])
)
result_address_name = demo + area_name + build_number + "号" + unit_number + "单元" + floor_number + "0" + house_number + "室"
return result_address_name
"""this is persion input function"""
str1 = "龙洞堡御景新城大田大道"
str2 = "水川镇转青城镇西巴路"
print("##########请勿乱分享,珍惜劳动付出,谢谢!!!!")
while True:
print("* 1 : 龙洞堡御景新城大田大道")
print("* 2 : 水川镇转青城镇西巴路")
while True:
try:
persion_input = int(input('请输入地址头对应的数字:'))
break
except ValueError:
print('!!!!输入有误请重新输入==>[1或者2]!!!')
continue
if persion_input == 1:
demo = str1
print("==> 需要生成的地址前标记为: " + demo)
break
elif persion_input == 2:
demo = str2
print("==> 需要生成的地址前标记为: " + demo)
break
else:
print('您输入的不正确,请重新输入')
continue
while True:
try:
enter_number = int(input('请输入生成地址个数:'))
break
except ValueError:
print("输入有误请重新输入==>[整数数字]")
continue
nt = datetime.now()
day_time = nt.strftime('%Y{y}%m{m}%d{d} %H{h}%M{mm}%S{s}').format(y='年', m='月', d='日', h='时', mm='分', s='秒')
"""This is a Main function"""
with open(day_time + "地址.txt", "w", encoding='utf-8') as f:
for i in range(enter_number):
person_address = address()
f.write(person_address)
f.write('\n')
f.close()
print(
"""
_ _ _
| | | | ( )
| | ___ | |_ |/ ___ __ _ ___
| | / _ \ | __| / __| / _` | / _ \
| |____ | __/ | |_ \__ \ | (_| | | (_) |
\_____/ \___| \__| |___/ \__, | \___/
__/ |
|___/
"""
)
| zzyy8678/stady_python | create_address.py | create_address.py | py | 17,836 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "random.choice",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "random.choice",
"lin... |
34180911472 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 18:27:58 2020
@author: Kollarlab
"""
# import comtypes
import os
import time
#import subprocess
#import re
import scipy
import pylab
#import tarfile
#import struct
#import glob
import numpy
import time
#import pickle
#import datetime
#import itertools
import sys
from Acqiris import Acqiris
def get_color(ind):
colorlist = ['firebrick', 'mediumblue', 'deepskyblue', 'darkgoldenrod', 'forestgreen', 'indigo', 'dodgerblue']
nind = numpy.mod(ind, len(colorlist))
return colorlist[nind]
hardwareAddress = "PXI23::0::0::INSTR"
IVIbinPath = "C:\\Program Files\\IVI Foundation\\IVI\\Bin\\"
sys.path.append(IVIbinPath)
card.activeChannels = [1,2]
card.timeout = 120
avs = 1
segs = 1
card.samples = 1024*125
card.segments = segs
card.averages = 1
#
#card.SetParams() #here is the danger of not using properties to set everything.
##without this here, card.samples isn't
dataMat1 = numpy.zeros((len(delays), card.samples))
dataMat1_av = numpy.zeros((len(delays), card.samples))
dataMat2 = numpy.zeros((len(delays), card.samples))
dataMat2_av = numpy.zeros((len(delays), card.samples))
tMat = numpy.zeros((len(delays), card.samples))
#pretake data to set everything up for test
card.averages = 1
card.triggerDelay = 0
card.SetParams() #pushes default to to card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
data1 = card.ReadAllData() #read data for the active channels.
else:
data1, data2 = card.ReadAllData() #read data for the active channels.
t0 = time.time()
for ind in range(0, avs):
card.averages = 1
card.triggerDelay = 0
card.SetParams() #pushes default to to card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
data1 = card.ReadAllData() #read data for the active channels.
else:
data1, data2 = card.ReadAllData() #read data for the active channels.
ts = ( delay + scipy.arange(0, len(data1),1.)*1/card.sampleRate)
t1 = time.time()
card.averages = 50
card.triggerDelay = 0
card.SetParams() #pushes default to to card if the fields haven't been edited
card.ArmAndWait() #initiates aquisition and calibrates if need be
if len(card.activeChannels) == 1:
avData1 = card.ReadAllData() #read data for the active channels.
else:
avData1, avData2 = card.ReadAllData() #read data for the active channels.
t2 = time.time()
d1 = numpy.round(t1-t0, 3)
d2 = numpy.round(t2-t1, 3)
print('segments = ' + str(segs))
print('averages = ' + str(avs))
print('time for ' + str(avs) + ' single (possibly multiseg) runs = ' + str(d1) )
print('time for ' + str(avs) + ' averages on card = ' + str(d2) )
| MRitter95/Kollar-Lab | Old_scripts_delete_20220804/Control/Acqiris_development/CdriverPythonWrapper/Acqiris_testScript_Averagertiming.py | Acqiris_testScript_Averagertiming.py | py | 2,844 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.mod",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
35910630002 | from typing import Dict
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from app.routers import visualize
app = FastAPI()
app = FastAPI(
title="Model Visualizer",
description="",
version="0.10.0",
)
app.mount("/home/lirakr/repos/rnd-mermaid/app/static", StaticFiles(directory="/home/lirakr/repos/rnd-mermaid/app/static"), name="static")
@app.get(
"/",
summary="Status",
responses={200: {"content": {"application/json": {"example": {"status": "OK"}}}}},
)
async def index() -> Dict[str, str]:
"""
Show application status and docker image details
"""
return {"status": "OK"}
app.include_router(visualize.router)
| LirakR/rnd-mermaid | app/main.py | main.py | py | 684 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.routers",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_numb... |
29214768086 | from django.shortcuts import render
from utils import Word
def home(request):
context = {}
if request.method == "POST":
text = request.POST['text']
context['results'] = Word(text).result()
context['text'] = text
return render(request, 'home.html', context)
| dest81/test-jerry | words_stats/views.py | views.py | py | 296 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.Word",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
}
] |
19025440782 | import os
import sys
import json
import torch
import traceback
# def returnFalse():
# return False
# torch.cuda.is_available = returnFalse
from scipy.io import wavfile
# from python.speaker_diarization.pipeline.speaker_diarization import SpeakerDiarization
class Diarization(object):
def __init__(self, logger, PROD, device, models_manager):
super(Diarization, self).__init__()
self.logger = logger
self.PROD = PROD
self.models_manager = models_manager
self.device = device
self.ckpt_path = None
# self.model = torch.hub.load('pyannote/pyannote-audio', 'dia')
self.model = load_model(f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/hub/')
# self.logger.info(str(self.model))
# self.model = BaseModel.from_pretrained(f'{"./resources/app" if self.PROD else "."}/python/audio_source_separation/assModel.pt')
self.isReady = True
def set_device (self, device):
self.device = device
self.model = self.model.to(device)
self.model.device = device
def runTask (self, data, websocket=None):
return self.diarize(data, websocket)
async def diarize (self, data, websocket):
inPath = data["inPath"]
mergeSameOutput = data["toolSettings"]["mergeSingleOutputFolder"]
outputAudacityLabels = data["toolSettings"]["outputAudacityLabels"]
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": "Reading file"}))
audacity_file = []
# self.logger.info(f'diarization | {data["inPath"]} | {data["outputAudacityLabels"]} | {data} | {outputAudacityLabels}')
out_folder = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/output/'
try:
rate, data = wavfile.read(inPath)
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": "Splitting file"}))
diarization = self.model({'audio': inPath})
out_file_counter = 0
total_tracks = len(diarization._tracks)
for turn, _, speaker in diarization.itertracks(yield_label=True):
if websocket is not None:
await websocket.send(json.dumps({"key": "task_info", "data": f'Outputting chunks: {out_file_counter+1}/{total_tracks}'}))
start_s = turn.start
end_s = turn.end
# Skip audio chunks less than 1 second long
if end_s-start_s < 1:
continue
if outputAudacityLabels:
audacity_file.append('{:.6f}\t{:.6f}\tSpeaker_{}'.format(start_s, end_s, speaker))
split_data = data[int(start_s*rate):int(end_s*rate)]
folder_name = ".".join(inPath.split("/")[-1].split(".")[:-1]).replace(".", "_")
if not mergeSameOutput:
out_folder = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/output/{folder_name}/speaker {speaker}'
os.makedirs(out_folder, exist_ok=True)
if mergeSameOutput:
wavfile.write(f'{out_folder}/{folder_name}_{str(out_file_counter).zfill(7)}.wav', rate, split_data)
else:
wavfile.write(f'{out_folder}/{folder_name}_{speaker}_{str(out_file_counter).zfill(7)}.wav', rate, split_data)
out_file_counter += 1
except:
self.logger.info(traceback.format_exc())
raise
if outputAudacityLabels:
with open(f'{out_folder}/audacity.txt', "w+", encoding="utf8") as f:
f.write("\n".join(audacity_file))
if websocket is not None:
await websocket.send(json.dumps({"key": "tasks_next"}))
#
#
#
# This is a huge mess, but pyannote very much wants models to be downloaded from the internet
# For future-proofing reasons, I don't want that, so I had to change a lot of the library code,
# and the way the models were loaded, such that torchhub isn't used, and instead the local model files are used.
#
#
#
def load_model (_HUB_DIR):
import typing
import shutil
import functools
import yaml
import zipfile
from pyannote.audio.features import Pretrained as _Pretrained
from pyannote.pipeline import Pipeline as _Pipeline
dependencies = ['pyannote.audio', 'torch']
_HUB_REPO = 'https://github.com/pyannote/pyannote-audio-hub'
_ZIP_URL = f'{_HUB_REPO}/raw/master/{{kind}}s/{{name}}.zip'
_PRETRAINED_URL = f'{_HUB_REPO}/raw/master/pretrained.yml'
# path where pre-trained models and pipelines are downloaded and cached
# _HUB_DIR = f'{"./resources/app" if self.PROD else "."}/python/speaker_diarization/hub'
# _HUB_DIR = pathlib.Path(os.environ.get("PYANNOTE_AUDIO_HUB",
# "~/.pyannote/hub")).expanduser().resolve()
# download pretrained.yml if needed
_PRETRAINED_YML = _HUB_DIR + 'pretrained.yml'
print(f'_PRETRAINED_YML, {_PRETRAINED_YML}')
# if not _PRETRAINED_YML.exists():
# msg = (
# f'Downloading list of pretrained models and pipelines '
# f'to "{_PRETRAINED_YML}".'
# )
# print(msg)
# from pyannote.audio.utils.path import mkdir_p
# mkdir_p(_PRETRAINED_YML.parent)
# torch.hub.download_url_to_file(_PRETRAINED_URL,
# _PRETRAINED_YML,
# progress=True)
def _generic(name: str,
duration: float = None,
step: float = 0.25,
batch_size: int = 32,
device: typing.Optional[typing.Union[typing.Text, torch.device]] = None,
pipeline: typing.Optional[bool] = None,
force_reload: bool = False) -> typing.Union[_Pretrained, _Pipeline]:
"""Load pretrained model or pipeline
Parameters
----------
name : str
Name of pretrained model or pipeline
duration : float, optional
Override audio chunks duration.
Defaults to the one used during training.
step : float, optional
Ratio of audio chunk duration used for the internal sliding window.
Defaults to 0.25 (i.e. 75% overlap between two consecutive windows).
Reducing this value might lead to better results (at the expense of
slower processing).
batch_size : int, optional
Batch size used for inference. Defaults to 32.
device : torch.device, optional
Device used for inference.
pipeline : bool, optional
Wrap pretrained model in a (not fully optimized) pipeline.
force_reload : bool
Whether to discard the existing cache and force a fresh download.
Defaults to use existing cache.
Returns
-------
pretrained: `Pretrained` or `Pipeline`
Usage
-----
>>> sad_pipeline = torch.hub.load('pyannote/pyannote-audio', 'sad_ami')
>>> scores = model({'audio': '/path/to/audio.wav'})
"""
# print("name", name)
model_exists = name in _MODELS
pipeline_exists = name in _PIPELINES
# print(f'PRE model_exists, {model_exists}')
# print(f'PRE pipeline_exists, {pipeline_exists}')
if model_exists and pipeline_exists:
# print(f'model_exists and pipeline_exists')
# pass
# if pipeline is None:
# msg = (
# f'Both a pretrained model and a pretrained pipeline called '
# f'"{name}" are available. Use option "pipeline=True" to '
# f'load the pipeline, and "pipeline=False" to load the model.')
# raise ValueError(msg)
if pipeline:
kind = 'pipeline'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _PIPELINES[name]
return_pipeline = True
else:
kind = 'model'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _MODELS[name]
return_pipeline = False
elif pipeline_exists:
# elif False:
# print(f'pipeline_exists')
# pass
# pass
if pipeline is None:
pipeline = True
if not pipeline:
msg = (
f'Could not find any pretrained "{name}" model. '
f'A pretrained "{name}" pipeline does exist. '
f'Did you mean "pipeline=True"?'
)
raise ValueError(msg)
kind = 'pipeline'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _PIPELINES[name]
return_pipeline = True
elif model_exists:
# print(f'model_exists')
# pass
if pipeline is None:
pipeline = False
kind = 'model'
# zip_url = _ZIP_URL.format(kind=kind, name=name)
# sha256 = _MODELS[name]
return_pipeline = pipeline
if name.startswith('emb_') and return_pipeline:
msg = (
f'Pretrained model "{name}" has no associated pipeline. Use '
f'"pipeline=False" or remove "pipeline" option altogether.'
)
raise ValueError(msg)
else:
# print("ERROR====================")
pass
# msg = (
# f'Could not find any pretrained model nor pipeline called "{name}".'
# )
# raise ValueError(msg)
# if sha256 is None:
# msg = (
# f'Pretrained {kind} "{name}" is not available yet but will be '
# f'released shortly. Stay tuned...'
# )
# raise NotImplementedError(msg)
pretrained_dir = _HUB_DIR + f'/{kind}s'
pretrained_subdir = pretrained_dir + f'/{name}'
pretrained_zip = pretrained_dir + f'/{name}.zip'
# import pathlib
# pretrained_subdir = pathlib.Path(pretrained_subdir)
# if not pretrained_subdir.exists() or force_reload:
# if pretrained_subdir.exists():
# shutil.rmtree(pretrained_subdir)
# from pyannote.audio.utils.path import mkdir_p
# mkdir_p(pretrained_zip.parent)
# try:
# msg = (
# f'Downloading pretrained {kind} "{name}" to "{pretrained_zip}".'
# )
# print(msg)
# torch.hub.download_url_to_file(zip_url,
# pretrained_zip,
# hash_prefix=sha256,
# progress=True)
# except RuntimeError as e:
# shutil.rmtree(pretrained_subdir)
# msg = (
# f'Failed to download pretrained {kind} "{name}".'
# f'Please try again.')
# raise RuntimeError(msg)
# # unzip downloaded file
# with zipfile.ZipFile(pretrained_zip) as z:
# z.extractall(path=pretrained_dir)
if kind == 'model':
params_yml = None
params_yml_parent = None
params_yml_c1 = os.listdir(pretrained_subdir)
for c1 in params_yml_c1:
params_yml_c2 = [fold for fold in os.listdir(f'{pretrained_subdir}/{c1}') if os.path.isdir(f'{pretrained_subdir}/{c1}/{fold}')]
for c2 in params_yml_c2:
params_yml_c3 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}')
for c3 in params_yml_c3:
params_yml_c4 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}/{c3}')
for c4 in params_yml_c4:
if c4=="params.yml":
params_yml_parent = f'{pretrained_subdir}/{c1}/{c2}/{c3}'
params_yml = f'{pretrained_subdir}/{c1}/{c2}/{c3}/params.yml'
break
# print(f'----------params_yml, {params_yml}')
# print(f'----------params_yml_parent, {params_yml_parent}')
# params_yml, = pretrained_subdir.glob('*/*/*/*/params.yml')
# pretrained = _Pretrained(validate_dir=params_yml.parent,
pretrained = _Pretrained(validate_dir=params_yml_parent,
duration=duration,
step=step,
batch_size=batch_size,
device=device)
# if return_pipeline:
# if name.startswith('sad_'):
# from pyannote.audio.pipeline.speech_activity_detection import SpeechActivityDetection
# print("HERE PRE SpeechActivityDetection")
# pipeline = SpeechActivityDetection(scores=pretrained)
# print("HERE POST")
# elif name.startswith('scd_'):
# from pyannote.audio.pipeline.speaker_change_detection import SpeakerChangeDetection
# print("HERE PRE SpeakerChangeDetection")
# pipeline = SpeakerChangeDetection(scores=pretrained)
# print("HERE POST")
# elif name.startswith('ovl_'):
# from pyannote.audio.pipeline.overlap_detection import OverlapDetection
# print("HERE PRE OverlapDetection")
# pipeline = OverlapDetection(scores=pretrained)
# print("HERE POST")
# else:
# # this should never happen
# msg = (
# f'Pretrained model "{name}" has no associated pipeline. Use '
# f'"pipeline=False" or remove "pipeline" option altogether.'
# )
# raise ValueError(msg)
# return pipeline.load_params(params_yml)
return pretrained
elif kind == 'pipeline':
from pyannote.audio.pipeline.utils import load_pretrained_pipeline
params_yml = None
params_yml_parent = None
# print(f'START pretrained_subdir, {pretrained_subdir}')
# params_yml_c1 = os.listdir(pretrained_subdir)
params_yml_c1 = [fold for fold in os.listdir(f'{pretrained_subdir}') if os.path.isdir(f'{pretrained_subdir}/{fold}')]
for c1 in params_yml_c1:
# params_yml_c2 = os.listdir(f'{pretrained_subdir}/{c1}'.replace("//","/"))
params_yml_c2 = [fold for fold in os.listdir(f'{pretrained_subdir}/{c1}') if os.path.isdir(f'{pretrained_subdir}/{c1}/{fold}')]
for c2 in params_yml_c2:
params_yml_c3 = os.listdir(f'{pretrained_subdir}/{c1}/{c2}')
for c3 in params_yml_c3:
if c3=="params.yml":
params_yml_parent = f'{pretrained_subdir}/{c1}/{c2}'
params_yml = f'{pretrained_subdir}/{c1}/{c2}/params.yml'
break
# params_yml, *_ = pretrained_subdir.glob('*/*/params.yml')
# return load_pretrained_pipeline(params_yml.parent)
# print("=== ptp PRE")
ptp = load_pretrained_pipeline(params_yml_parent)
# print("=== ptp POST")
return ptp
with open(_PRETRAINED_YML, 'r') as fp:
_pretrained = yaml.load(fp, Loader=yaml.SafeLoader)
# print(f'_pretrained, {_pretrained}')
___stuff = {}
_MODELS = _pretrained['models']
# print(f'_MODELS, {_MODELS}')
for name in _MODELS:
# print(f'_MODELS name, {name}')
# locals()[name] = functools.partial(_generic, name)
___stuff[name] = functools.partial(_generic, name)
_PIPELINES = _pretrained['pipelines']
# print(f'_PIPELINES, {_PIPELINES}')
for name in _PIPELINES:
# print(f'_PIPELINES name, {name}')
# locals()[name] = functools.partial(_generic, name)
___stuff[name] = functools.partial(_generic, name)
_SHORTCUTS = _pretrained['shortcuts']
# print(f'_SHORTCUTS, {_SHORTCUTS}')
for shortcut, name in _SHORTCUTS.items():
# print(f'_SHORTCUTS name, {name}')
# locals()[shortcut] = locals()[name]
___stuff[shortcut] = ___stuff[name]
return ___stuff["dia"]()
| DanRuta/xva-trainer | python/speaker_diarization/model.py | model.py | py | 16,869 | python | en | code | 78 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"l... |
3648010096 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="waterstructureCreator",
version="0.0.1",
author="Nicolas G. Hoermann",
author_email="hoermann@fhi.mpg.de",
description=
"Creation of water structures on substrates",
long_description=long_description,
long_description_content_type="text/markdown",
url="",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'scipy==1.7.1', 'numpy==1.17.5', 'matplotlib==3.1.3', 'ipython==7.26.0', 'scikit-learn==0.24.1', 'ase==3.20.1', 'pymatgen==2020.11.11'
],
extras_require={'testing': ['pytest>=5.0']},
python_requires='==3.8.3',
)
| computationalelectrochemistrygroup/WaterStructureCreator | setup.py | setup.py | py | 868 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
32741409083 | import math
import numba
import numpy as np
def main():
starts, ends, rdf = np.loadtxt("rdf.dat").T
density = 1200 / 1.0**3
n_bins = len(rdf)
bin_width = ends[0] - starts[0]
corrector = np.zeros(n_bins)
kernel = compute_kernel(rdf, bin_width)
for step in range(100):
corrector = bin_width * kernel @ (rdf - 1 - density * corrector)
direct_rdf = np.exp(np.log(rdf) - density * corrector)
for r, raw_gr, direct_gr in zip(starts, rdf, direct_rdf):
print(f"{r:.3f}\t{raw_gr:.3f}\t{direct_gr:.3f}")
def compute_kernel(rdf, bin_width):
n_bins = len(rdf)
kernel = np.zeros((n_bins, n_bins))
@numba.njit
def integrate(r, s, div=1000):
theta_step = math.pi / div
integral = 0
for theta_bin in range(div):
theta = theta_bin * theta_step
distance = math.hypot(s * math.sin(theta), r - s * math.cos(theta))
distance_bin = int(distance / bin_width)
if distance_bin < n_bins:
integral += (rdf[distance_bin] - 1) * math.sin(theta)
integral *= 2 * math.pi * s**2 * theta_step
return integral
for r_bin in range(n_bins):
for s_bin in range(n_bins):
r = bin_width * r_bin
s = bin_width * s_bin
kernel[r_bin, s_bin] = integrate(r, s)
return kernel
main()
| snsinfu/bit5 | test418-ornstein_zernike/oz.py | oz.py | py | 1,377 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 19,... |
6701762608 | import discord
from discord.ext import commands
class HostPlugin(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def host(self, ctx):
await ctx.send("What is the time of the flight?")
flight_time = await self.bot.wait_for('message', check=lambda m: m.author == ctx.author)
await ctx.send("What is the time of departure?")
departure_time = await self.bot.wait_for('message', check=lambda m: m.author == ctx.author)
await ctx.send("Thank you! Announcing in the channel...")
announcement = f"Flight at {flight_time.content} departing at {departure_time.content}."
channel = self.bot.get_channel(991475748756009014)
await channel.send(announcement)
def setup(bot):
bot.add_cog(HostPlugin(bot))
| MayyCookie/swissannc | flighta 2.py | flighta 2.py | py | 822 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 8,
"usage_type": "call"
},
{
"api_... |
17516466448 | import torch
from torch import nn
__all__ = [
'_CONV_DICT',
'_CONV_TRANS_DICT',
'_AVG_POOL_DICT',
'_MAX_POOL_DICT',
'_NORM_DICT',
'_REFLECTION_PAD_DICT',
'_CENTER_CROP_DICT',
'_ACTIVATION_DICT',
'activation_from_str'
]
def center_crop_1d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_width = layer.size()
_, _, target_width = target.size()
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
return layer[:, :,
diff_x:(diff_x + target_width)]
def center_crop_2d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_height, layer_width = layer.size()
_, _, target_height, target_width = target.size()
assert layer_height >= target_height
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
diff_y = (layer_height - target_height) // 2
return layer[:, :,
diff_y:(diff_y + target_height),
diff_x:(diff_x + target_width)]
def center_crop_3d(layer: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
_, _, layer_depth, layer_height, layer_width = layer.size()
_, _, target_depth, target_height, target_width = layer.size()
assert layer_depth >= target_depth
assert layer_height >= target_height
assert layer_width >= target_width
diff_x = (layer_width - target_width) // 2
diff_y = (layer_height - target_height) // 2
diff_z = (layer_depth - target_depth) // 2
return layer[:, :,
diff_z:(diff_z + target_depth),
diff_y:(diff_y + target_height),
diff_x:(diff_x + target_width)]
_CONV_DICT = {
1: nn.Conv1d,
2: nn.Conv2d,
3: nn.Conv3d
}
_CONV_TRANS_DICT = {
1: nn.ConvTranspose1d,
2: nn.ConvTranspose2d,
3: nn.ConvTranspose3d
}
_AVG_POOL_DICT = {
1: nn.AvgPool1d,
2: nn.AvgPool2d,
3: nn.AvgPool3d
}
_MAX_POOL_DICT = {
1: nn.MaxPool1d,
2: nn.MaxPool2d,
3: nn.MaxPool3d
}
_NORM_DICT = {
'batch': {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d
}
}
_REFLECTION_PAD_DICT = {
1: nn.ReflectionPad1d,
2: nn.ReflectionPad2d
}
_CENTER_CROP_DICT = {
1: center_crop_1d,
2: center_crop_2d,
3: center_crop_3d
}
_ACTIVATION_DICT = {
'relu': nn.ReLU(),
'elu': nn.ELU(),
'selu': nn.SELU(),
'sigmoid': nn.Sigmoid(),
'leaky_relu': nn.LeakyReLU(),
'softplus': nn.Softplus()
}
def activation_from_str(activation_str: str):
return _ACTIVATION_DICT[activation_str] | broadinstitute/CellMincer | cellmincer/models/components/functions.py | functions.py | py | 2,557 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.Tensor",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv1d... |
36021786185 | import matplotlib.pyplot as plt;
import numpy as np;
data = np.loadtxt('jV_steady.dat', skiprows=1);
ref = np.loadtxt('G0610_cell3/1suns.dat', skiprows=3);
V_steady = data[:,0];
J_steady = data[:,1];
V_ref = ref[:,0];
J_ref = ref[:,1]*(-10);
V_steady += 0.0;
J_steady += 0;
# Plot all results
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k');
# jV steady
plt.axis((-0.1,1.4,-210,0));
plt.plot(V_steady, J_steady, 'k-');
plt.scatter(V_ref, J_ref, s=50, color='orange');
plt.yscale('linear');
plt.title('Fitting');
plt.grid(True);
| dglowienka/drift-diffusion_mini-modules | Spatial/JV_with_ref.py | JV_with_ref.py | py | 563 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot"... |
27247441921 | revision = '4160ccb58402'
down_revision = None
branch_labels = None
depends_on = None
import json
import os
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
sections = {
'update_authorized_keys': 'local',
'authorized_keys_file': 'local',
'githome_executable': 'local',
'githome_id': 'githome',
}
def upgrade():
con = op.get_bind()
old_cfg = table('configsetting',
column('key', sa.String),
column('json_value', sa.String))
# check we know where to put each key
for key, value in con.execute(old_cfg.select()):
if key not in sections:
raise RuntimeError('Cannot migrate configuration, unknown '
'configuration value: {}'.format(key))
new_cfg = op.create_table('config',
sa.Column('key', sa.String(), nullable=False),
sa.Column('section', sa.String(), nullable=False),
sa.Column('data', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('key', 'section')
)
section = sections[key]
new_recs = [{
'key': key,
'section': sections[key],
'data': value,
} for key, value in con.execute(old_cfg.select())]
op.bulk_insert(new_cfg, new_recs)
import githome
gh_client = os.path.join(os.path.dirname(githome.__file__), 'gh_client')
op.bulk_insert(new_cfg, [
{'section': 'local', 'key': 'authorized_keys_start_marker',
'data': r'"# -- added by githome {}, do not remove these markers --\n"'},
{'section': 'local', 'key': 'authorized_keys_end_marker',
'data': r'"# -- end githome {}. keep trailing newline! --\n"'},
{'section': 'local', 'key': 'use_gh_client',
'data': json.dumps(True)},
{'section': 'local', 'key': 'gh_client_socket',
'data': json.dumps('ghclient.sock')},
{'section': 'local', 'key': 'gh_client_executable',
'data': json.dumps(gh_client)},
])
# rename config key githome_id to id
op.execute(new_cfg.update().where(new_cfg.c['key'] == 'githome_id')
.values(key='id'))
op.rename_table('user', 'users')
op.rename_table('public_key', 'public_keys')
op.drop_table('configsetting')
| mbr/githome | alembic/versions/4160ccb58402_update_from_previous_version.py | 4160ccb58402_update_from_previous_version.py | py | 2,292 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "alembic.op.get_bind",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.sql.table",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.co... |
34832286900 | import cv2
vid = cv2.VideoCapture("my_video.mp4")
while(1):
ret, frame = vid.read()
if ret:
frame = cv2.resize(frame, (0, 0), fx = 1.2, fy = 1.2)
cv2.imshow("video", frame)
else:
break
if cv2.waitKey(10000) == ord("q"):
break | jim2832/Image-Recognition | video2.py | video2.py | py | 277 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number":... |
13000612743 | import os
import time
import numpy as np
import torch
import cv2
import subprocess
import argparse
from PIL import Image, ImageDraw
from facenet_pytorch import MTCNN
from optical_flow import OpticalFlowTracker
parser = argparse.ArgumentParser(description='Face tracking using Optical Flow.')
parser.add_argument('--input', type=str, required=False, help='Path to the video file.', default = "videos/face-demographics-walking-and-pause.mp4")
parser.add_argument('--output', type=str, required=False, help='Path to the directory where output frames will be saved.', default = "tracked_face")
# Get length of video in seconds
def get_length(filename):
result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
"format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return float(result.stdout)
# IOU (Intersection Over Union): Area of overlap/area of union threshold
def calculate_iou(box1, box2):
# Calculate the (x, y)-coordinates of the intersection rectangle
xi1 = max(box1[0], box2[0])
yi1 = max(box1[1], box2[1])
xi2 = min(box1[2], box2[2])
yi2 = min(box1[3], box2[3])
inter_area = max(0, xi2 - xi1 + 1) * max(0, yi2 - yi1 + 1)
# Calculate the area of both rectangles
box1_area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
box2_area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
# Calculate the intersection over union
iou = inter_area / float(box1_area + box2_area - inter_area)
return iou
def main():
args = parser.parse_args()
# Use GPU
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
# Load face detection model
mtcnn = MTCNN(keep_all=True, device=device)
video_dir = args.input
video = cv2.VideoCapture(video_dir)
frames = []
trackers = []
while video.isOpened():
ret, frame = video.read()
if not ret:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(Image.fromarray(frame))
video.release()
frames_dir = args.output
os.makedirs(frames_dir, exist_ok=True)
video_length = get_length(video_dir)
num_frames = len(frames)
fps = num_frames / video_length
print("Video FPS: " + str(fps))
frames_tracked = []
track_face = True
for i, frame in enumerate(frames):
print('\rTracking frame: {}'.format(i + 1), end='')
frame_draw = frame.copy()
draw = ImageDraw.Draw(frame_draw)
frame_np = np.array(frame)
if track_face:
# Detect faces
boxes, _ = mtcnn.detect(frame)
# if a face is detected
if boxes is not None:
# sort by y coordinate of the box (topmost face)
boxes = sorted(boxes, key=lambda y: y[1])
# Only track the topmost face
box = boxes[0]
tracker_exists = False
for tracker in trackers:
iou = calculate_iou(box, tracker.bbox)
if iou > 0.5:
tracker_exists = True
break
if not tracker_exists:
tracker = OpticalFlowTracker(box.tolist(), frame_np, time.time())
tracker.start_frame_idx = i
trackers.append(tracker)
track_face = False
if trackers: # If there is a tracker in the list
tracker = trackers[0]
tracker.end_frame_idx = i
print("\nTracking in process...")
updated_bbox = tracker.update(frame_np)
updated_bbox = updated_bbox.tolist() # convert numpy array to list
# Ensure that the coordinates are valid
print(updated_bbox)
if updated_bbox[0] < updated_bbox[2] and updated_bbox[1] < updated_bbox[3] and updated_bbox[0] > 0 and updated_bbox[0] > 0 and updated_bbox[1] > 0 and updated_bbox[2] > 0 and updated_bbox[3] > 0:
draw.rectangle(updated_bbox, outline=(255, 0, 0), width=1)
else:
# If not valid, calculate wait time, remove tracker and restart face tracking
tracking_duration = (tracker.end_frame_idx - tracker.start_frame_idx + 1) / fps
print(f'Duration of tracking for person: {tracking_duration} seconds')
trackers.remove(tracker)
track_face = True
# Add to frame list
tracked_frame = frame_draw.resize((640, 360), Image.BILINEAR)
frames_tracked.append(tracked_frame)
# Save frame to file
tracked_frame.save(os.path.join(frames_dir, f'frame_{i+1:04d}.png'))
print('\nFinished')
if __name__ == "__main__":
main() | nishadi930313/Labmate | face_tracking.py | face_tracking.py | py | 5,042 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "subproces... |
75070674748 | #! /usr/bin/env python3
import json
def find_if (pred, collection):
try:
return next(filter(pred, collection))
except StopIteration:
return None
class Transition:
def __init__ (self, initial_state_name):
self.initial = initial_state_name
self.states = []
self.current = self.initial
def regist_state (self, state):
self.states.append(state)
DOT_TEMPLATE = """
digraph transition {{
graph [
charset = "UTF-8"
, label = "transition graph"
, labelloc = "t"
, labeljust = "c"
, bgcolor = "#ffffff"
, fontcolor = black
, fontsize = 18
, style = "filled"
, rankdir = TB
, margin = 0.2
, splines = spline
, ranksep = 1.0
, nodesep = 0.9
];
node [
colorscheme = "rdylgn11"
, style = "solid"
, fontsize = 16
, fontcolor = black
, fontname = "Migu 1M"
, color = black
, fillcolor = 7
, fixedsize = true
, height = 0.6
, width = 1.2
];
edge [
style = solid
, fontsize = 10
, fontcolor = black
, fontname = "Migu 1M"
, color = black
, labelfloat = true
, labeldistance = 2.5
, labelangle = 70
];
{nodes}
{edges}
}}
"""
NODE_TEMPLATE = "{0} [shape = box];\n"
EDGE_TEMPLATE = "{0} -> {1} [label = \"{2}\n({3})\", arrowhead = normal];\n"
def to_diagram (self):
nodes = "s [shape = circle, width = 0.1];\n"
edges = Transition.EDGE_TEMPLATE.format("s", self.initial, "", "初期状態")
for st in self.states:
nodes += Transition.NODE_TEMPLATE.format(st.name)
for cond in st.conditions:
edges += Transition.EDGE_TEMPLATE.format(st.name, cond.next, cond.name, cond.comment)
return Transition.DOT_TEMPLATE.format(nodes = nodes, edges = edges)
def to_json (self):
jsondict = {"initial": self.initial, "states": []}
for st in self.states:
statedict = {"name": st.name, "conditions": []}
for cond in st.conditions:
statedict["conditions"].append(\
{"name": cond.name, "next": cond.next, "comment": cond.comment})
jsondict["states"].append(statedict)
return json.dumps(jsondict, ensure_ascii = False)
def from_json (self, jsonstr):
jsondict = json.loads(jsonstr)
self.initial = jsondict["initial"]
self.current = self.initial
self.states = []
statedicts = jsondict["states"]
for st in statedicts:
state = TransitionState(st["name"])
conditiondicts = st["conditions"]
for cond in conditiondicts:
def incomplete_state ():
raise RuntimeError("incomplete state: {0}. (load from json)".format(cond["name"]))
state.regist_condition(cond["name"], cond["next"], incomplete_state, cond["comment"])
self.regist_state(state)
def update_check_fn (self, state_name, condition_name, check_fn):
state_info = find_if(lambda e: e.name == state_name, self.states)
if state_info is None:
raise RuntimeError("unregistered state: {0}".format(state_name))
condition_info = find_if(lambda e: e.name == condition_name, state_info.conditions)
if condition_info is None:
raise RuntimeError("unregistered condition: {0}, at {1}".format(condition_name, state_name))
condition_info.check = check_fn
def fill_check_fn (self, check_fn):
for st in self.states:
for cond in st.conditions:
cond.check = check_fn
def initialize (self):
self.current = self.initial
def transit (self, condition_name):
state_info = find_if(lambda e: e.name == self.current, self.states)
if state_info is None:
raise RuntimeError("unknown state: {0}".format(self.current))
condition_info = find_if(lambda e: e.name == condition_name, state_info.conditions)
if condition_info is None:
raise RuntimeError("unregistered condition: {0}, at {1}".format(condition_name, self.current))
if condition_info.check():
self.current = condition_info.next
print("transit to {0}".format(self.current))
return True
else:
print("fail transit by condition: {0}".format(condition_name))
return False
class TransitionState:
def __init__ (self, name):
self.name = name
self.conditions = []
def regist_condition (self, condition):
self.conditions.append(condition)
def regist_condition (self, name, next_state_name, check_fn, comment):
self.conditions.append(TransitionCondition(name, next_state_name, check_fn, comment))
class TransitionCondition:
def __init__ (self, name, next_state_name, check_fn, comment):
self.name = name
self.next = next_state_name
self.check = check_fn
self.comment = comment
| SPNSPN/state-json | py/transition.py | transition.py | py | 4,337 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 85,
"usage_type": "call"
}
] |
30061421331 | from bs4 import BeautifulSoup
import requests
import json
HEADING_ORDER = [
"defensePhysical",
"defensePhysicalStrike",
"defensePhysicalSlash",
"defensePhysicalPierce",
"defenseMagic",
"defenseFire",
"defenseLightning",
"defenseHoly",
"immunity",
"robustness",
"focus",
"vitality",
"poise",
"weight",
]
def extract_from_html(content, slot):
soup = BeautifulSoup(content, features="html.parser")
for table_row in soup.find_all("tr"):
name_cell = table_row.find("td")
if name_cell is None:
continue
name = name_cell.find_all("a")[-1].get_text().strip()
armor = {
"name": name,
"slot": slot,
"weight": 0,
"poise": 0,
"immunity": 0,
"robustness": 0,
"focus": 0,
"vitality": 0,
"defensePhysical": 0,
"defensePhysicalStrike": 0,
"defensePhysicalSlash": 0,
"defensePhysicalPierce": 0,
"defenseMagic": 0,
"defenseFire": 0,
"defenseLightning": 0,
"defenseHoly": 0,
}
for attribute, cell in zip(HEADING_ORDER, [x for x in table_row.children if x != "\n"][1:len(HEADING_ORDER) + 1]):
cell_text = cell.get_text()
if "defense" in attribute or attribute == "weight":
armor[attribute] = float(cell_text)
else:
armor[attribute] = int(cell_text)
yield armor
if __name__ == "__main__":
armor_data = []
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Helms").text, "head"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Chest+Armor").text, "body"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Gauntlets").text, "arms"))
armor_data.extend(extract_from_html(requests.get(
"https://eldenring.wiki.fextralife.com/Leg+Armor").text, "legs"))
armor_data.sort(key=lambda x: x["name"])
with open("armor_data.json", "w") as f:
json.dump(armor_data, f, indent=2)
| lewisc64/Elden-Ring-Poise-Optimizer | data/sources/wiki/scrape_wiki.py | scrape_wiki.py | py | 2,230 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_... |
71066784189 | from collections import defaultdict
from copy import copy, deepcopy
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum, auto, IntEnum
from typing import List, Tuple, Dict, Optional, Any
from dictdiffer import diff
from blaseball_mike.chronicler import get_entities
from ChangeSource import ChangeSource
from Player import Player
class TimestampSource(Enum):
FEED = auto()
CHRON_PLAYER = auto()
CHRON_GAME_EVENT = auto()
MANUAL = auto()
class ModDuration(IntEnum):
PERMANENT = 0
SEASON = 1
WEEKLY = 2
GAME = 3
ITEM = 4
LEAGUE = 5
class Effect:
def apply(self, player: Player) -> None:
raise NotImplementedError("Don't instantiate Effect")
def _duration_attribute(duration: ModDuration) -> Optional[str]:
if duration == ModDuration.GAME:
return "gameAttr"
elif duration == ModDuration.WEEKLY:
return "weekAttr"
elif duration == ModDuration.SEASON:
return "seasAttr"
elif duration == ModDuration.PERMANENT:
return "permAttr"
return None
@dataclass
class ModEffect(Effect):
from_mod: Optional[str]
to_mod: Optional[str]
type: ModDuration
def apply(self, player: Player) -> None:
attribute = _duration_attribute(self.type)
if attribute is None:
# This signifies that this mod effect is not stored on the player
return
if self.from_mod is not None:
player.data[attribute].remove(self.from_mod)
if self.to_mod is not None:
player.data[attribute].append(self.to_mod)
@dataclass
class SetStateEffect(Effect):
path: List[str]
value: Any
def apply(self, player: Player) -> None:
player.set_state(self.path, self.value)
@dataclass
class IncrementCounterEffect(Effect):
path: List[str]
def apply(self, player: Player) -> None:
player.increment_counter(self.path)
@dataclass
class ResetCounterEffect(Effect):
path: List[str]
def apply(self, player: Player) -> None:
player.reset_counter(self.path)
@dataclass
class Change:
source: ChangeSource
timestamp: datetime
timestamp_source: TimestampSource
effects: List[Effect]
def apply(self, player: Player) -> None:
for effect in self.effects:
effect.apply(player)
def _get_mod_effect(event: dict) -> ModEffect:
metadata = event['metadata']
if event['type'] == 106 or event['type'] == 146:
return ModEffect(from_mod=None,
to_mod=metadata['mod'],
type=ModDuration(metadata['type']))
elif event['type'] == 107 or event['type'] == 147:
return ModEffect(from_mod=metadata['mod'],
to_mod=None,
type=ModDuration(metadata['type']))
elif event['type'] == 148:
return ModEffect(from_mod=metadata['from'],
to_mod=metadata['to'],
type=ModDuration(metadata['type']))
raise ValueError("Not chron mod add/remove/change event")
def _player_id(event: dict) -> str:
assert len(event['playerTags']) == 1
return event['playerTags'][0]
def check_equality_recursive(chron: dict, ours: dict, path=""):
if type(chron) != type(ours):
raise RuntimeError(f"Mismatched type for {path}, expected " +
str(type(ours)) + " but chron has " +
str(type(chron)))
if isinstance(chron, list):
if len(chron) != len(ours):
raise RuntimeError(f"Mismatched length for {path}, expected " +
str(len(ours)) + " but chron has " +
str(len(chron)))
for i, (chron_elem, ours_elem) in enumerate(zip(chron, ours)):
check_equality_recursive(chron_elem, ours_elem, f"{path}.{i}")
if isinstance(chron, dict):
chron_keys = set(chron.keys())
our_keys = set(ours.keys())
if chron_keys - our_keys:
raise RuntimeError(f"Chron has additional key(s) for {path}: " +
", ".join(chron_keys - our_keys))
if our_keys - chron_keys:
raise RuntimeError(f"Chron is missing key(s) for {path}: " +
", ".join(our_keys - chron_keys))
assert chron_keys == our_keys
for key in chron_keys:
check_equality_recursive(chron[key], ours[key], f"{path}.{key}")
class Players:
def __init__(self, start_time: datetime):
self.players: Dict[str, Player] = {}
self.changes: Dict[str, List[Change]] = defaultdict(lambda: [])
for player in get_entities("player",
at=start_time,
cache_time=None):
self.players[player['entityId']] = Player(player)
def associate_chron_updates(self, chron_updates: List[dict]):
assert len(chron_updates) > 0
chron_update_time = chron_updates[0]['validFrom']
for chron_update in chron_updates:
player_id = chron_update['entityId']
player = deepcopy(self.players[player_id])
last_matching_player, last_matching_i = None, None
for i, change in enumerate(self.changes[player_id]):
change.apply(player)
if player.data == chron_update['data']:
last_matching_i = i
last_matching_player = deepcopy(player)
if last_matching_i is None:
print(list(diff(self.players[player_id].data,
chron_update['data'])))
raise RuntimeError("Unable to account for chron change")
# Changes up to last_matching_i are yielded, the rest are saved for
# the next chron update
last_matching_i += 1
changes = self.changes[player_id][:last_matching_i]
self.changes[player_id] = self.changes[player_id][last_matching_i:]
# Verification
for change in changes:
change.apply(self.players[player_id])
assert self.players[player_id].data == last_matching_player.data
yield chron_update, changes
for key, changes in self.changes.items():
for change in changes:
if chron_update_time - change.timestamp > timedelta(seconds=300):
raise RuntimeError("Chron update didn't account for "
f"{len(changes)} changes to ${key}")
def apply_event(self, event: dict) -> None:
print("Applying:", event['description'])
if 'parent' in event['metadata']:
changes = Players._find_change_by_parent_type[
event['metadata']['parent']['type']](self, event)
else:
changes = Players._find_change_by_own_type[
event['type']](self, event)
for player_id, change in changes:
self.changes[player_id].append(change)
def _find_change_superyummy(self, event: dict) -> List[Tuple[str, Change]]:
mod_effect = _get_mod_effect(event)
state_effect = SetStateEffect(path=['permModSources', mod_effect.to_mod],
value=['SUPERYUMMY'])
return [(_player_id(event),
Change(source=ChangeSource.SUPERYUMMY,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[mod_effect, state_effect]))]
def _find_recorded_change_from_score(self, event: dict) \
-> List[Tuple[str, Change]]:
if event['type'] == 107 and event['metadata']['mod'] == 'COFFEE_RALLY':
return [(_player_id(event),
Change(source=ChangeSource.USE_FREE_REFILL,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[_get_mod_effect(event)]))]
raise RuntimeError("Didn't find change type from hit")
def _find_unrecorded_change_from_hit(self, event: dict) \
-> List[Tuple[str, Change]]:
# I hope the player who hit the hit is guaranteed to be first.
return [(event['playerTags'][0],
Change(source=ChangeSource.HIT,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[IncrementCounterEffect(['consecutiveHits'])]))]
def _find_unrecorded_change_from_non_hit(self, event: dict) \
-> List[Tuple[str, Change]]:
# TODO Get the player ID from blarser
return [("",
Change(source=ChangeSource.NON_HIT,
timestamp=event['created'],
timestamp_source=TimestampSource.FEED,
effects=[ResetCounterEffect(['consecutiveHits'])]))]
_find_change_by_parent_type = {
92: _find_change_superyummy,
4: _find_recorded_change_from_score, # stolen base
10: _find_recorded_change_from_score, # hit
}
_find_change_by_own_type = {
7: _find_unrecorded_change_from_non_hit,
# 9 is a home run, which has the same effects as hit
9: _find_unrecorded_change_from_hit,
10: _find_unrecorded_change_from_hit,
}
| beiju/blaseball-player-changes | v1/Players.py | Players.py | py | 9,476 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "enum.auto",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 18,
... |
70264770749 | import subprocess as sp
import pymysql
import pymysql.cursors
import datetime
def search():
try:
# letter = input(First letter)
query = "select H.sport_name from equipment as H where H.quantity in (select max(quantity) from equipment); "
print(query)
cur.execute(query)
con.commit()
print("Sports with max equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def maxEquip():
try:
query = "select H.sport_name from equipment as H where H.quantity in (select max(quantity) from equipment); "
print(query)
cur.execute(query)
con.commit()
print("Sports with max equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def lhostel():
try:
query = "select H.hostel_name from hostel as H where H.no_of_students in (select min(no_of_students) from hostel);"
print(query)
cur.execute(query)
con.commit()
print("Least populated hostel fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def amount():
try:
query = "select sum(P.salary) from professors;"
print(query)
cur.execute(query)
con.commit()
print("Total salary fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def avgStd():
try:
query = "select avg(S.no_of_students) from subjects as S;"
print(query)
cur.execute(query)
con.commit()
print("Avg no. of fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def labSub():
try:
query = "select count(*) from subjects as S where S.labs = 'Y';"
print(query)
cur.execute(query)
con.commit()
print("Subjects having lab fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def studgt30():
try:
query = "select course_id, subject_name from subjects where subjects.no_of_students > 30;"
print(query)
cur.execute(query)
con.commit()
print("Subjects having more than 30 students fetched!")
except Exception as e:
con.rollback()
print("Failed to get required details")
print(">>>>>>>>>>>>>>",e)
return
def nonEquipSports():
try:
query = "select S.sport_name from sports as S left join equipment as E on S.sport_name = E.sport_name where E.quantity is not NULL and E.quantity > 0;"
print(query)
cur.execute(query)
con.commit()
print("Sports with no equipment fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch sport details")
print(">>>>>>>>>>>>>>",e)
return
def deptBuilding():
try:
building_no = int(input("Buildin No: "))
query = "select * form department as D where D.building_no = %d;" % (building_no)
print(query)
cur.execute(query)
con.commit()
print("Department Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch department details")
print(">>>>>>>>>>>>>>",e)
return
def profDetails():
try:
prof_id = int(input("Professor ID: "))
query = "select * form professors as P where P.prof_id = %d;" % (prof_id)
print(query)
cur.execute(query)
con.commit()
print("Professor Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch professor details")
print(">>>>>>>>>>>>>>",e)
return
def studentDetails():
try:
rollno = int(input("Roll No: "))
query = "select * form students as S where S.roll_no = %d;" % (rollno)
print(query)
cur.execute(query)
con.commit()
print("Student Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch student details")
print(">>>>>>>>>>>>>>",e)
return
def equipDetails():
try:
sport = input("Enter Sport Name: ")
query = "select * from equipment as E where E.sport_name = '%s';" % (sport)
print(query)
cur.execute(query)
con.commit()
print("Equipment Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch equipment details")
print(">>>>>>>>>>>>>>",e)
return
def allEquip():
try:
query = "select * from equipment;"
print(query)
cur.execute(query)
con.commit()
print("Equipment Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch equipment details")
print(">>>>>>>>>>>>>>",e)
return
def subDetails():
try:
sub = int(input("Course_id : "))
query = "select * from subjects where course_id = %d;" % (sub)
print(query)
cur.execute(query)
con.commit()
print("Details fetched!")
except Exception as e:
con.rollback()
print("Failed to fetch subject details")
print(">>>>>>>>>>>>>>",e)
return
def newClub():
try:
row = {}
print("Enter Club details: ")
row["name"] = input("Name: ")
row["no_of_members"] = int(input("No. of members: "))
no_of_coords = int(input("No. of Coordinators (max 3): "))
row["coord1"] = input("Coord 1 : ")
if(no_of_coords > 1):
row["coord2"] = input("Coord 2 : ")
else:
row["coord2"] = "NULL"
if(no_of_coords > 2):
row["coord3"] = input("Coord 3 : ")
else:
row["coord3"] = "NULL"
query = " "
print(query)
cur.execute(query)
con.commit()
print("Added new club!")
except Exception as e:
con.rollback()
print("Failed to add new club")
print(">>>>>>>>>>>>>>",e)
return
def recruitProf():
try:
row = {}
print("Enter new proff's details: ")
# name = (input("Name (Fname Minit Lname): ")).split(' ')
name = (input("Name (Fname Minit Lname): "))
# row["Fname"] = name[0]
# row["Minit"] = name[1]
# row["Lname"] = name[2]
row["Prof_id"] = int(input("Prof_id: "))
row["Sex"] = input("Sex(F/M): ")
row["Salary"] = int(input("Salary: "))
row["Bdate"] = input("Birth Date (YYYY-MM-DD): ")
row["Dept"] = (input("Department: "))
row["course_id"] = int(input("course_id: "))
row["super_prof_id"] = int(input("super_prof_id: "))
# derive age
bdate = row["Bdate"]
blist = bdate.split('-')
dob = datetime.date(int(blist[0]),int(blist[1]),int(blist[2]))
today = datetime.date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
query = " INSERT INTO professors values ('%d','%s','%c','%s,'%d','%d','%s','%s,'%d')" % (
row["Prof_id"], name, row["Sex"], row["Dept"], row["Salary"], row["course_id"], row["Bdate"],age, row["super_prof_id"])
print(query)
cur.execute(query)
con.commit()
print("Added Student to the Database!")
except Exception as e:
con.rollback()
print("Failed to insert into database")
print(">>>>>>>>>>>>>", e)
return
def admitAStudent():
try:
row = {}
print("Enter new student's details: ")
# name = (input("Name (Fname Minit Lname): ")).split(' ')
name = (input("Name (Fname Minit Lname): "))
# row["Fname"] = name[0]
# row["Minit"] = name[1]
# row["Lname"] = name[2]
row["Roll_No"] = int(input("Roll No: "))
# row["CGPA"] = input("CGPA: ")
row["Sex"] = input("Sex(F/M): ")
row["Batch"] = input("Batch: ")
row["Bdate"] = input("Birth Date (YYYY-MM-DD): ")
row["Email"] = (input("Email: "))
row["Dept"] = (input("Department: "))
row["Hostel"] = input("Hostel: ")
row["Password"] = (input("Password: "))
# derive age
bdate = row["Bdate"]
blist = bdate.split('-')
dob = datetime.date(int(blist[0]),int(blist[1]),int(blist[2]))
today = datetime.date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
query = " INSERT INTO students values ('%d', NULL,'%c','%s',%d,'%s','%s','%s','%s','%s','%s')" % (
row["Roll_No"], row["Sex"], row["Batch"], age, row["Dept"], row["Email"], row["Bdate"], name, row["Password"], row["Hostel"])
# null is for cgpa
print(query)
cur.execute(query)
con.commit()
print("Added Student to the Database!")
except Exception as e:
con.rollback()
print("Failed to insert into database")
print(">>>>>>>>>>>>>", e)
return
def dispatch(ch):
"""
Function that maps helper functions to option entered
"""
if (ch == 1):
admitAStudent()
elif(ch == 2):
recruitProf()
# elif(ch == 3):
# option3()
# elif(ch == 4):
# option4()
else:
print("Error: Invalid Option")
# Global
while (1):
tmp = sp.call('clear', shell=True)
# Can be skipped if you want to hardcode username and password
username = input("Username: ")
password = input("Password: ")
try:
# Set db name accordingly which have been create by you
# Set host to the server's address if you don't want to use local SQL server
con = pymysql.connect(host='localhost',
port=3306,
user=username,
password=password,
db='project_final',
cursorclass=pymysql.cursors.DictCursor)
tmp = sp.call('clear', shell=True)
if (con.open):
print("Connected")
else:
print("Failed to connect")
tmp = input("Enter any key to CONTINUE>")
with con.cursor() as cur:
while (1):
tmp = sp.call('clear', shell=True)
# Here taking example of Employee Mini-world
print("1. Option 1") # Hire an Employee
print("2. Option 2") # Fire an Employee
print("3. Option 3") # Promote Employee
print("4. Option 4") # Employee Statistics
print("5. Logout")
ch = int(input("Enter choice> "))
tmp = sp.call('clear', shell=True)
if ch == 5:
exit()
else:
dispatch(ch)
tmp = input("Enter any key to CONTINUE>")
except Exception as e:
tmp = sp.call('clear', shell=True)
print(e)
print("Connection Refused: Either username or password is incorrect or user doesn't have access to database")
tmp = input("Enter any key to CONTINUE>")
| VanshMarda/Data-and-Application | Project_Phase_4/MiniWorld.py | MiniWorld.py | py | 11,599 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.date",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "datetime.date... |
13933582750 | from django import forms
from .models import TodoList
class TodoListForm(forms.ModelForm):
class Meta:
model = TodoList
fields = ['task_title', 'task_description', 'task_status']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['task_title'].widget.attrs.update({'class': 'form-control'})
self.fields['task_description'].widget.attrs.update({'class':'form-control'})
| priyanka-infobeans/infoToDoList | infobeans_todolist/todolist_app/forms.py | forms.py | py | 451 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "models.TodoList",
"line_number": 6,
"usage_type": "name"
}
] |
20160930177 | from django.urls import path
from web import views
app_name ="web"
urlpatterns = [
path('',views.index,name="index"),
path("create/",views.create_product,name="create_product"),
path('deleted/<int:id>/',views.deleted_product,name="deleted_product"),
path('edit/<int:id>/',views.edit_product,name="edit_product"),
path('<int:id>/',views.product,name="product"),
]
| Aswathy-G/advanceddjango-project | web/urls.py | urls.py | py | 389 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "web.views.index",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "web.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"... |
11068770479 | from django.http import *
from forms import UploadForm
from django import template
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView, View
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User, Group, Permission
from models import *
from django.db import models
from django.db.models import Count, Min, Sum, Max, Avg
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import unittest
from django.db import connection, transaction
import logging
import hashlib
from google.appengine.api import files
try:
files.gs
except AttributeError:
import gs
files.gs = gs
PERPAGE=50
def checkadminlogin_dispatch(f):
def wrap(request, *args, **kwargs):
if 'IsLogin' in request.session and request.session['IsLogin'] and 'Staff' in request.session and request.session['Staff'].username !="":
staff_list = Admins.objects.filter(username = request.session['Staff_username'], pass_field = hashlib.md5(request.session['Staff_password']).hexdigest())
if staff_list:
request.session['IsLogin'] = True
request.session['Staff'] = staff_list[0]
success = True
else:
return HttpResponseRedirect('/logout')
logging.info('Fetch Started:: %s', staff_list[0])
else:
return HttpResponseRedirect('/logout')
return f(request, *args, **kwargs)
return wrap
class CsrfExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CsrfExemptMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
@method_decorator(checkadminlogin_dispatch)
def dispatch(self,request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
@csrf_exempt
def render_template(request, template, data=None):
errs =""
if request.method == 'GET' and 'err' in request.GET:
data.update({'errs':request.GET['err']})
response = render_to_response(template, data,
context_instance=RequestContext(request))
return response
class CMSClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Extrapages.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Extrapages.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "cms_pages.htm", content)
class CMSEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['pageid']
allpages = Extrapages.objects.get(id=pageid)
content = {'page_title': "Summary",
'allpages':allpages,
}
return render_template(request, "cms_pages_edit.htm", content)
class EmailViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Emails.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Emails.objects.all()[offset-100:offset]
content = {'page_title': "Admin :: Email List",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "email_pages.htm", content)
class EmailEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['id']
allpages = Emails.objects.get(id=pageid)
content = {'page_title': "Admin::Email Edit",
'allpages':allpages,
}
return render_template(request, "email_pages_edit.htm", content)
class CMSAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Summary",}
return render_template(request, "cms_pages_add.htm", content)
class TitlesContentClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Html.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Html.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "titles_content.htm", content)
class ProductWishListClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
#allitems = ProductWaitinglist.objects.annotate(dcount=Count('catalogid')).values('catalogid',
# 'current_stock',
# 'products__catalogid').all()[offset-100:offset]
allitems = ProductWaitinglist.objects.raw('select count(*) as dcount,product_waitinglist.catalogid,products.id,name,current_stock from product_waitinglist,products where product_waitinglist.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductWaitinglist.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_wish_list.htm", content)
class ProductWishViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
itemid = request.GET['itemid']
allitems = ProductWaitinglist.objects.filter(catalogid=itemid).all()[offset-100:offset]
count = ProductWaitinglist.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_wish_list_view_list.htm", content)
class ReviewAllClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = ProductReview.objects.raw('select count(*) as dcount,product_review.catalogid,products.id,name,thumbnail from product_review, products where product_review.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductReview.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_7_reviews.htm", content)
class ProductsReviewsViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.filter(catalogid=itemid).all()
count = ProductReview.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'itemid':itemid,
}
return render_template(request, "products_review_view_list.htm", content)
class ProductsReviewEditFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.get(id=itemid)
content = {'page_title': "Summary",
'allitems':allitems,
#'count':count,
#'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_7_reviews_edit_2_edit.htm", content)
class ApanelViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Profile",}
return render_template(request, "home-page-admin.htm", content)
class CustomersViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = customers.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':customers.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "customers.htm", content)
class CRMViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
if 'status' in request.GET and request.GET['status'] != "":
status = request.GET['status']
else:
status = 1
count = Crm.objects.filter(status=status).count()
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Crm.objects.all().filter(status=status)[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "crm.htm", content)
class CRMEditViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
crmid = request.GET['id']
allitems = Crm.objects.get(id=crmid)
categories = ProductCategory.objects.all()
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "crm_edit.htm", content)
class StaffViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Site Staff",
'customers':Admins.objects.all()[:100],
'count':Admins.objects.count(),}
return render_template(request, "admins.htm", content)
class CategoryViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Category.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':Category.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "categories.htm", content)
class CustomerAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'title': "Add Customer",}
return render_template(request, "customer_add.htm", content)
class CustomerInfoClass(LoginRequiredMixin,TemplateView):
#summary = Customers.objects.all()
def get(self, request, *args, **kwargs):
cid = request.GET['id']
customer = customers.objects.get(contactid=cid)
customeremail= customer.email
customerrewards = CustomerRewards.objects.filter(contactid=cid).all()
totalrewards = CustomerRewards.objects.filter(contactid=cid).aggregate(Sum('points'))
#customers_promocode = SwfCustomerCreditsLog.objects.values_list('customers_promocode', flat=True)
#customers_promocode = customers_promocode['customers_promocode']
#storerewards = SwfCustomerCreditsLog.objects.filter(customers_email_address=customeremail)
storerewards = SwfCustomerCreditsLog.objects.raw('select *,swf_customer_credits_log.id as sid from swf_customer_credits_log , promotions where customers_promocode = coupon AND customers_email_address="'+customeremail+'" AND customers_promocode != ""')
fulldata = list(storerewards)
try:
wish_id = WshWishlist.objects.get(customerid=cid)
wishitems = WsiWishlistitems.objects.filter(wsh_id=wish_id.wsh_id)
except Exception as e:
wishitems = ""
content = {'page_title': "Customers Info",
'customer': customer,
'customerorders':Orders.objects.filter(ocustomerid=cid).all(),
'wishlists':wishitems,
'customerrewards':customerrewards,
'totalrewards':totalrewards,
'storerewards':fulldata,
}
#'count':Admins.objects.count(),}
return render_template(request, "customers_info.htm", content)
class ProductsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Products.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Products.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "products.htm", content)
class ProductViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productedit.htm", content)
class ProductRelatedClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productrelated.htm", content)
class ProductsImagesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "images_products.htm", content)
class ApanelViewOrdersClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
order_status = request.GET['order_status']
if order_status < 1:
order_status = 1
else:
order_status = order_status
count = Orders.objects.filter(order_status=order_status).count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Orders.objects.all().filter(order_status=order_status)[offset-100:offset]
order_status_links = OrderStatus.objects.all().filter(visible='1')
#crm_messages=CrmMessages.objects.select_related(crmid__orderid='8623')
#return HttpResponse(crm_messages)
content = {'page_title': "Orders",
'allitems':allitems,
'count':count,
'page_num':page_num,
'order_status':order_status,
'order_links':order_status_links,}
return render_template(request, "vieworders.htm", content)
class ApanelViewOrdersStatusClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = OrderStatus.objects.all()
content = {'page_title': "Orders Status",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "orders_status.htm", content)
class OrderPageClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
oid = request.GET['oid']
order_status_links = OrderStatus.objects.all().filter(visible='1')
allitems = Orders.objects.get(orderid=oid)
try:
transactions = Transactions.objects.get(orderid=oid)
amount = transactions.amount
totalamt = Oitems.objects.filter(orderid=oid).aggregate(Sum('unitprice'))
totalamt = totalamt['unitprice__sum']
except Exception as e:
transactions = ""
totalamt = 0
amount = 0
alloiitems = Oitems.objects.all().filter(orderid=oid)
finaltotal = (totalamt + int(allitems.oshipcost)) - allitems.coupondiscount
balance = finaltotal - amount
content = {'page_title': "Orders Status",
'allitems':allitems,
'alloiitems':alloiitems,
'order_links':order_status_links,
'totalamt':totalamt,
'finaltotal':finaltotal,
'paidamt':finaltotal,
'transactions':transactions,
'balance':balance,
}
return render_template(request, "orderpage.htm", content)
class AddAdminsFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = Admins.objects.all()
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ""
if "id" in request.GET:
allitems = Admins.objects.get(id=request.GET['id'])
else:
allitems = ""
content = {'page_title': "Add User",
'allitems':allitems,
'mode':mode,}
return render_template(request, "admins_add.htm", content)
class RmaPagesClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Rma.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Rma.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "rma_pages.htm", content)
class RmaViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
rmaid=request.GET['rmaid']
allitems = Rma.objects.get(idrma=rmaid)
content = {'page_title': "View RMA",
'allitems':allitems,}
return render_template(request, "rmaview.htm", content)
class ShippingManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ShippingCategory.objects.all()
content = {'page_title': "Admin: Shipping Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "adminshippingmanager.htm", content)
class TaxManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = Tax.objects.all()
content = {'page_title': "Admin: Tax Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "taxmanager.htm", content)
class GiftCertificatesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = GiftCertificates.objects.all().count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = GiftCertificates.objects.all()[offset-100:offset]
content = {'page_title': "Admin: Gift Certificate View",
'allitems':allitems,
'page_num':page_num,
'count':count,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "giftcertificate_pages.htm", content)
class EditGiftCertificateClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
giftid=request.GET['id']
allitems = GiftCertificates.objects.get(id=giftid)
total = allitems.certificate_amount + allitems.certificate_expenses
content = {'page_title': "Admin :: Edit Gift Certificate",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),
'total':total}
return render_template(request, "edit_giftcertificate.htm", content)
class ProductArticleViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductArticle.objects.all().filter(catalogid=pid)
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_articles.htm", content)
class ProductArticleEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['id']
allpages = ProductArticle.objects.get(id=pid)
content = {'page_title': "Admin :: Edit Article",
'allpages':allpages,}
return render_template(request, "product_article_edit.htm", content)
class ProductArticleAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
content = {'page_title': "Admin :: Add Article",
'pid':pid,}
return render_template(request, "product_article_add.htm", content)
class ProductReviewsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductReview.objects.filter(catalogid=pid).all()
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_reviews.htm", content)
class ProductOptionEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allpages = Products.objects.get(catalogid=pid)
content = {'page_title': "Admin :: Edit Options",
'allpages':allpages,
'prod':pid,}
return render_template(request, "product_options_edit.htm", content)
class BannersViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
allpages = SiteBanners.objects.all()
content = {'page_title': "Admin :: Banner Managements",
'allitems':allpages,}
return render_template(request, "viewbanners.htm", content)
class BannerEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
bid = request.GET['bid']
filename = "/gs/swf_product_images/banner/banner5.png"
allpages = SiteBanners.objects.get(id=bid)
content = {'page_title': "Admin :: Edit banner",
'allpages':allpages,
'bannerpath':filename,}
return render_template(request, "editbanner.htm", content)
class BannersAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
return render_template(request, "addbanner.htm", content)
class GCSfilesClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
file_list = files.listdir('/gs/swf_product_images')
for file_name in file_list:
if not file_name.__contains__('$folder$'):
self.response.write('<a href="https://storage.cloud.google.com/%s">%s<a><br>' %(file_name[4:], file_name[4:]))
#return render_template(request, "gcsfiles.htm", content)
class CouponsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Promotions.objects.count()
if "page" in request.GET and request.GET['page'] != "":
page_num = request.GET['page']
else:
page_num = 1
#pages = count/100
page_num = int(page_num)
offset = page_num * 100
allitems = Promotions.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "viewcoupons.htm", content)
| hughsons/saltwaterfish | admin/views.py | views.py | py | 28,274 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "google.appengine.api.files.gs",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.files",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.files.gs",
"line_number": 28,
"usage_type": "attribute... |
7306633247 | import torch
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from data_loader import get_loader
from CNNtoRNN import CNNtoRNN
def train():
transform = transforms.Compose(
[
transforms.Resize((356, 356)),
transforms.RandomCrop((299, 299)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # mean and std for each channel (RGB)
]
)
train_loader, dataset = get_loader(
root_folder="../images",
captions_file="../captions.txt",
transform=transform,
num_workers=2
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
load_model = False
save_model = False
train_CNN = False
# Hyperparameters
embed_size = 256
hidden_size = 256
vocab_size = len(dataset.vocab)
num_layers = 1
learning_rate = 3e-4
num_epochs = 2
model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for name, param in model.encoderCNN.inception.named_parameters():
if "fc.weight" in name or "fc.bias" in name:
param.requires_grad = True
else:
param.requires_grad = False
model.train()
for epoch in range(num_epochs):
for idx, (imgs, captions) in tqdm(
enumerate(train_loader), total=len(train_loader), leave=False
):
imgs = imgs.to(device)
captions = captions.to(device)
outputs = model(imgs, captions[:-1])
loss = criterion(
outputs.reshape(-1, outputs.shape[2]), captions.reshape(-1)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if __name__ =="__main__":
train()
| KarstenKu-Hub/ImageCaptioning | train.py | train.py | py | 2,050 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 14,
"usage_type": "call"
},
{
... |
42631511615 | #!/usr/bin/env python3
# Program to implement tweet classification
import nltk
import re
import sys
from collections import Counter
import pandas as pd
nltk.download('punkt')
# Read files
train_file = sys.argv[1]
test_file = sys.argv[2]
output_file = sys.argv[3]
train = open(train_file, 'r', errors='ignore').read()
test = open(test_file, 'r', errors='ignore').read()
location_wise_data, location_counts = {},{}
bag_of_words = []
# Preprocessing data by removing spacial characters
def preprocess_data(fileData):
clean_data = re.sub(r'[^a-zA-Z\d,_\s]', '', fileData)
clean_data = re.sub('([_]+)_','_', clean_data)
clean_data = re.sub('([ ]+)',' ', clean_data)
clean_data = clean_data.replace("\n"," ")
return clean_data
# Created a dictionary of dictionary to store
# Location : {word : count}
def populate_train_data(clean_train):
prev_start, prev_city = -1, ''
bag_of_words_str = ''
# Regular expression matches with the format of city,_state
for m in re.compile(r'\w{4,},_\w+ ').finditer(clean_train):
if(prev_start != -1 and prev_city != ''): # empty initially
if prev_city not in location_wise_data:
data = {}
tweet = clean_train[prev_start+len(prev_city)+1:m.start()]
tweet = tweet.replace(",","")
location_wise_data[prev_city] = tweet
location_counts[prev_city] = 1
bag_of_words_str += tweet
else:
data = location_wise_data.get(prev_city)
tweet = clean_train[prev_start+len(prev_city)+1:m.start()]
tweet = tweet.replace(",","")
location_wise_data[prev_city] =location_wise_data.get(prev_city)+ ' ' +tweet
location_counts[prev_city] = location_counts.get(prev_city)+1
bag_of_words_str += tweet
prev_start = m.start()
prev_city = m.group()
prev_city = prev_city.replace(" ","")
bag_of_words_str = re.sub('([ ]+) ',' ', bag_of_words_str)
bag_of_words = bag_of_words_str.split(" ")
# Function to generate tokens from tweet
# Find the probability of each word as count of word in location / number of words in a location
def generate_tokens_prob():
for k,v in (location_wise_data.items()):
list_of_words = v.lower().split(" ")
# Remove stop words
list_of_words = [x for x in list_of_words if x not in ['', '_', ',','\'','a','an','and','are','the','as', 'at', 'be' ,'by' ,'us','it','too','she' ,'for', 'from', 'has','he', 'in', 'yes','is', 'its', 'of', 'on', 'that', 'to', 'was', 'were', 'will', 'with','my','you','mine','yours','we','can','this','our','because','him','his','her']]
total_words = len(list_of_words)
location_wise_data[k] = Counter(list_of_words)
counter_dict = location_wise_data.get(k)
for k2,v2 in counter_dict.items():
counter_dict[k2] = v2 / total_words
clean_train = preprocess_data(train)
clean_test = test
populate_train_data(clean_train)
generate_tokens_prob()
# Test data is stored in dataframe
prev_start, prev_city = -1, ''
cols = ['actual','clean_tweet','tweet', 'predicted']
list_data = []
for m in re.compile(r'\w{4,},_\w+ ').finditer(clean_test):
if(prev_start != -1 and prev_city != ''): # empty initially
tweet = clean_test[prev_start+len(prev_city)+1:m.start()]
clean_tweet = re.sub(r'[^a-zA-Z\d\s]', '', tweet)
list_data.append([prev_city, clean_tweet, tweet, ''])
prev_start = m.start()
prev_city = m.group()
prev_city = prev_city.replace(" ","")
# To store last row
tweet = clean_test[prev_start+len(prev_city)+1:len(clean_test)]
clean_tweet = re.sub(r'[^a-zA-Z\d\s]', '', tweet)
clean_tweet = clean_tweet.replace("\n"," ")
list_data.append([prev_city, clean_tweet, tweet, ''])
test_df = pd.DataFrame(list_data, columns=cols)
# Applying naive bayes to find the probablity of location given list of words and then returning the location having maximum probablity
for index, row in test_df.iterrows():
wordList = row['clean_tweet'].lower().split(" ")
probabilies_by_city = {}
for city in location_counts.keys():
prob = 1
for word in wordList:
try:
# Naive bayes assumes that words are independent given location
prob = prob * location_wise_data.get(city).get(word)
except:
# If a word is not found in the given location, allocate a lowest probability to that word
prob = prob * 0.0000001
# Probablity of any location is 1/length of cities
probabilies_by_city[city] = prob * (1/len(location_wise_data))
row['predicted'] = max(probabilies_by_city, key = probabilies_by_city.get)
# FInding accuracy of test data
correct, wrong = 0, 0
for index, row in test_df.iterrows():
if(row['actual'] == row['predicted']):
correct += 1
else:
wrong +=1
print('Test Accuracy - ', correct/ (correct+wrong)*100)
#Writing to Output
f = open(output_file, "w+")
for index, row in test_df.iterrows():
# Actual tweet is used instead of cleaned tweet data
f.write(row['predicted'] + " " + row['actual'] + " " + row['tweet'])
f.close()
#Printing Top 5 words associated with each location
location_with_top_words = {}
cities = []
top_words = []
for k,v in (location_wise_data.items()):
li = []
cities.append(k)
for k2, v2 in v.most_common(5):
li.append(k2)
top_words.append(li)
location_with_top_words[k] = li
# Used panda tables to display locations having top 5 words
Table = {"Location ":cities, "Top 5 words ":top_words}
TableDF = pd.DataFrame(Table)
print(TableDF)
| tanvi5/Games-and-Bayes | part2/geolocate.py | geolocate.py | py | 5,766 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number"... |
7259480306 | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import cv2
import sys
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(6,200)
self.fc2 = nn.Linear(200,100)
self.fc3 = nn.Linear(100,50)
self.fc4 = nn.Linear(50,4)
def forward(self, x):
x = F.relu(self.fc4(F.relu(self.fc3(F.relu(self.fc2(F.relu(self.fc1(x))))))))
return x
net = Net()
input = Variable(torch.randn(1,6), requires_grad=True)
out = net(input)
import torch.optim as optim
criterion = torch.nn.SmoothL1Loss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
data=[]
f=open('data.csv', "r")
lines = f.readlines()
for line in lines:
line=line.rstrip()
data.append([int(s) for s in line.split(",")])
min_loss=sys.maxsize
for epoch in range(100):
for i, data2 in enumerate(data):
x1, y1,x2,y2,x3,y3, bx1, by1, bx2, by2 = iter(data2)
X, Y = Variable(torch.FloatTensor([x1, y1, x2, y2, x3, y3]), requires_grad=True), Variable(torch.FloatTensor([bx1, by1, bx2, by2]), requires_grad=False)
optimizer.zero_grad()
outputs = net(X)
loss = criterion(outputs, Y)
loss.backward()
optimizer.step()
if (i!=0 and i % 99 == 0):
print("Epoch {} - loss: {}".format(epoch, loss.data))
if(loss<min_loss):
min_loss=loss
torch.save(net.state_dict(), 'model.pth')
(x,y,w,h)=(net(Variable(torch.Tensor([310, 134, 391, 258, 470, 207]))))
print((x,y,w,h))
def draw_humans1(npimg, x, y, w, h, imgcopy=False):
if imgcopy:
npimg = np.copy(npimg)
image_h, image_w = npimg.shape[:2]
cv2.line(npimg, (x,y),(x,y+h),CocoColors[0],4)
cv2.line(npimg, (x,y+h),(x+w,y+h),CocoColors[1],4)
cv2.line(npimg, (x+w,y),(x+w,y+h),CocoColors[2],4)
cv2.line(npimg, (x+w,y),(x,y),CocoColors[3],4)
return npimg
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
oriImg = cv2.imread("images/sample3_cam2_627.jpg")
out = draw_humans1(oriImg,x,y,abs(w-x),abs(h-y))
cv2.imshow('result.png',out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| asbudhkar/Hand-Detector-with-Pose-Estimation | train.py | train.py | py | 2,462 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
5962356858 | from aoc_helpers.perf_helpers import *
from aoc_helpers.input_helpers import *
from collections import defaultdict
from collections import Counter
import string
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
from scipy.spatial import cKDTree
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
def get_bounds(points):
x_max = 0
y_max = 0
x_min = 1000000000000
y_min = 1000000000000
for point in points:
if point[0] < x_min:
x_min = point[0]
if point[0] > x_max:
x_max = point[0]
if point[1] < y_min:
y_min = point[1]
if point[1] > y_max:
y_max = point[1]
return (x_min, x_max, y_min, y_max)
@timeit
def get_solution():
input_strings = input_lines("test_input.txt")
data = list(map(lambda line: [int(n) for n in line.split(", ")], input_strings))
points = np.array(data)
vor = Voronoi(points)
# print(vor.regions)
# print(vor.vertices)
# print(vor.point_region)
# for each item in vor.regions
# if the region is finite
# get the corresponding point from vor.point_region
# and associate it with points
largest_area = 0
largest_area_index = -1
for i in range(len(vor.regions)):
if i == 0:
continue
# print(np.where(vor.point_region == i)[0][0])
region = vor.regions[i]
if -1 in region:
# Region is not finite
continue
# Region with point indexed at `i` is finite
# Compute area
verts = [vor.vertices[n] for n in region]
area = PolygonArea(verts)
# print(verts)
# print(area)
if area > largest_area:
largest_area = area
# largest_area_index = i
largest_area_index = np.where(vor.point_region == i)[0][0]
print("Largest finite region comes from point {0} and has an area of {1}".format(largest_area_index, largest_area))
bounds = get_bounds(points)
sampling_points = []
points_str = ""
for y in range(bounds[2] - 1, bounds[3] + 1):
line_str = ""
for x in range(bounds[0] - 1, bounds[1] + 1):
line_str += "({0}, {1})".format(x + 0.5, y + 0.5)
sampling_points.append([x + 0.5, y + 0.5])
points_str += line_str + "\n"
print("Bounds: {0}".format(bounds))
print("Sampling Points:\n{0}".format(points_str))
voronoi_kdtree = cKDTree(points)
test_point_dist, test_point_regions = voronoi_kdtree.query(sampling_points)
f = list(map(lambda x: string.ascii_uppercase[x], test_point_regions))
print(Counter(f).most_common(26))
print(f)
# for y in range(bounds[2] - 1, bounds[3] + 1):
# for x in range(bounds[0] - 1, bounds[1] + 1):
# pass
# print(Counter(test_point_regions))
# print(Counter(test_point_regions).most_common(1))
print("Sampled area of largest finite poly is {0}".format(test_point_regions[largest_area_index + 1]))
voronoi_plot_2d(vor)
plt.show()
print(get_solution())
| colejd/AdventOfCode2018 | day_06/day6part1_borked.py | day6part1_borked.py | py | 3,325 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.Voronoi",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.cKDTree",... |
75163131066 | import pprint, random, datetime
class Cliente():
_nomes = ['ERIC RUIZ', 'ROBERTA DE LIMA', 'DEIVIDI SCALZAVARA', 'ADOLFO NETO', 'JOSE MONESTEL', 'WAGNER CORREIA', 'JACEGUAY ZUKOSKI', 'MICHEL SOUZA', 'MAYRA RODRIGUES', 'MICHEL DUARTE', 'MARCIO FOSSA', 'MARCEL BORNANCIN', 'ELOISA PERIN', 'TIAGO WIPPEL', 'LUCAS FISCHER', 'DIEGO PRANDO', 'ADRIANO WEIGUERT NAGASAVA', 'FERNANDO MIRANDA', 'LUIS MONTES', 'MARCELO DE SOUZA']
_ruas = ['Av. Brasil', 'Rua Uruguai', 'Rua das Acácias', 'Rua Bulcão Viana', 'Av Marcos Konder']
_cidades = ['Itajaí', 'Florianópolis', 'Brusque', 'Navegantes']
_paises = ['Brasil']
def __init__(self):
self.nome = random.choice(self._nomes)
self.email = self.generateEmail()
self.endereco = {}
self.endereco['pais'] = random.choice(self._paises)
self.endereco['cidade'] = random.choice(self._cidades)
self.endereco['rua'] = random.choice(self._ruas)
self.endereco['numero'] = random.randint(10,999)
self.endereco['complemento'] = ''
def generateEmail(self, domain="fakemail.net"):
return self.nome.lower().replace(' ', '_') + "@" + domain
def getRandom():
clientes = []
for n in Cliente._nomes:
clientes.append({
'nome': n,
'contato': {
'email': n.lower().replace(' ', '_') + "@fakemail.net",
'telefone': '9' + str(random.randint(80000000, 99999999))
},
'endereco': {
'cidade': random.choice(Cliente._cidades),
'complemento': '',
'numero': random.randint(1, 999),
'pais': random.choice(Cliente._paises),
'rua': random.choice(Cliente._ruas)
}
})
return clientes
class Venda():
_clientes = [
{"_id": "5dc58145cfb83d37c2e6d1d8", "nome": "ERIC RUIZ"},
{"_id": "5dc58145cfb83d37c2e6d1d9", "nome": "ROBERTA DE LIMA"},
{"_id": "5dc58145cfb83d37c2e6d1da", "nome": "DEIVIDI SCALZAVARA"},
{"_id": "5dc58145cfb83d37c2e6d1db", "nome": "ADOLFO NETO"},
{"_id": "5dc58145cfb83d37c2e6d1dc", "nome": "JOSE MONESTEL"},
{"_id": "5dc58145cfb83d37c2e6d1dd", "nome": "WAGNER CORREIA"},
{"_id": "5dc58145cfb83d37c2e6d1de", "nome": "JACEGUAY ZUKOSKI"},
{"_id": "5dc58145cfb83d37c2e6d1df", "nome": "MICHEL SOUZA"},
{"_id": "5dc58145cfb83d37c2e6d1e0", "nome": "MAYRA RODRIGUES"},
{"_id": "5dc58145cfb83d37c2e6d1e1", "nome": "MICHEL DUARTE"},
{"_id": "5dc58145cfb83d37c2e6d1e2", "nome": "MARCIO FOSSA"},
{"_id": "5dc58145cfb83d37c2e6d1e3", "nome": "MARCEL BORNANCIN"},
{"_id": "5dc58145cfb83d37c2e6d1e4", "nome": "ELOISA PERIN"},
{"_id": "5dc58145cfb83d37c2e6d1e5", "nome": "TIAGO WIPPEL"},
{"_id": "5dc58145cfb83d37c2e6d1e6", "nome": "LUCAS FISCHER"},
{"_id": "5dc58145cfb83d37c2e6d1e7", "nome": "DIEGO PRANDO"},
{"_id": "5dc58145cfb83d37c2e6d1e8", "nome": "ADRIANO WEIGUERT NAGASAVA"},
{"_id": "5dc58145cfb83d37c2e6d1e9", "nome": "FERNANDO MIRANDA"},
{"_id": "5dc58145cfb83d37c2e6d1ea", "nome": "LUIS MONTES"},
{"_id": "5dc58145cfb83d37c2e6d1eb", "nome": "MARCELO DE SOUZA"}
]
_produtos = {
'smartphone': [
{'nome': 'Galaxy s10', 'valor_unitario': 999.99},
{'nome': 'Xiaomi Redmi', 'valor_unitario': 768.89},
{'nome': 'iPhone 11 pro', 'valor_unitario': 6899.0},
{'nome': 'LG K9', 'valor_unitario': 648.99},
{'nome': 'Moto G7 Play', 'valor_unitario': 829.90}
],
'notebook': [
{'nome': 'Lenovo Carbon', 'valor_unitario': 9999.98},
{'nome': 'Mac Book Air', 'valor_unitario': 4680.0},
{'nome': 'Dell XPS', 'valor_unitario': 7699.79},
{'nome': 'Alienware', 'valor_unitario': 12350.0},
{'nome': 'Positivo Motion', 'valor_unitario': 1450.0},
],
'tablet': [
{'nome': 'Galaxy Tab A10', 'valor_unitario': 899},
{'nome': 'Multilaser M7S', 'valor_unitario': 375.5},
{'nome': 'Amazon Fire7', 'valor_unitario': 359.99},
{'nome': 'iPad', 'valor_unitario': 2159.89},
{'nome': 'Acer Iconia', 'valor_unitario': 499.0}
],
'monitor': [
{'nome': 'LG Led 20-M37', 'valor_unitario': 1289.0},
{'nome': 'Samsung 32 Curve', 'valor_unitario': 2790.99},
{'nome': 'Philips LED 185', 'valor_unitario': 269.9},
{'nome': 'AOC 24 Freesync', 'valor_unitario': 619.29}
],
'câmera digital': [
{'nome': 'Canon Rebel SL2', 'valor_unitario': 3000},
{'nome': 'Sony W800', 'valor_unitario': 659},
{'nome': 'Leica V-lux t114', 'valor_unitario': 12300},
{'nome': 'Nikon Coolpix S8100', 'valor_unitario': 899},
],
'headset': [
{'nome': 'Razer Kraken', 'valor_unitario': 328.9},
{'nome': 'AKG K92', 'valor_unitario': 219.90},
{'nome': 'Sony MDR-5A', 'valor_unitario': 414.62},
{'nome': 'Apple Beats Studio', 'valor_unitario': 1599}
],
'carregador': [
{'nome': 'Qi wireless 10w', 'valor_unitario': 12.99},
{'nome': 'Universal 3 USB 3A', 'valor_unitario': 27.8},
{'nome': 'Qualcomm Turbo 3A', 'valor_unitario': 36.5}
]
}
def getRandom():
classificacao_produto = random.choice(list(Venda._produtos.keys()))
produto = random.choice(Venda._produtos[classificacao_produto])
cliente = random.choice(Venda._clientes)
return {
'nome_produto': produto['nome'],
'valor_unitario': produto['valor_unitario'],
'classificacao_produto': classificacao_produto,
'quantidade': random.choice([1,1,1,1,1,2,2,2,3,4]),
'nome_cliente': cliente['nome'],
'id_cliente': cliente['_id'],
'data_venda': datetime.date(
random.randint(2017,2019),
random.randint(1,12),
random.randint(1,28)
).isoformat()
}
def getRandomss():
c = random.choice(Venda._clientes)
for c in Venda._clientes:
vendas = random.randint(4,7)
while vendas > 0:
venda = Venda.getRandom()
venda['id_cliente'] = c['_id']
venda['nome_cliente'] = c['nome']
pp.pprint(venda)
vendas = vendas - 1
# def getRandom():
# return {
# 'nome_produto':
# 'valor_unitario':
# 'classificacao':
# 'quantidade':
# 'nome_cliente':
# 'id_cliente':
# 'data_venda':
# }
pp = pprint.PrettyPrinter()
# pp.pprint(Cliente.nomes)
| e-ruiz/big-data | 01-NoSQL/atividade-03/big_data_atividade_3.py | big_data_atividade_3.py | py | 7,003 | python | pt | code | 1 | github-code | 6 | [
{
"api_name": "random.choice",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_n... |
14755463895 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
# ==========================Core Module================================
class conv_block(nn.Module):
def __init__(self, ch_in, ch_out):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True),
nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
def __init__(self, ch_in, ch_out):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.up(x)
return x
class U_Net(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
self.Conv2 = conv_block(ch_in=32, ch_out=64)
self.Conv3 = conv_block(ch_in=64, ch_out=128)
self.Conv4 = conv_block(ch_in=128, ch_out=256)
self.Conv5 = conv_block(ch_in=256, ch_out=512)
self.Up5 = up_conv(ch_in=512, ch_out=256)
self.Up_conv5 = conv_block(ch_in=512, ch_out=256)
self.Up4 = up_conv(ch_in=256, ch_out=128)
self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
self.Up3 = up_conv(ch_in=128, ch_out=64)
self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
self.Up2 = up_conv(ch_in=64, ch_out=32)
self.Up_conv2 = conv_block(ch_in=64, ch_out=32)
self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
def forward(self, x):
# encoding path
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
# print("x4", x2.shape)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
# print("x4", x3.shape)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
# print("x4", x4.shape)
x5 = self.Maxpool(x4)
x5 = self.Conv5(x5)
# print("x4", x5.shape)
# decoding + concat path
d5 = self.Up5(x5)
# print("x4", d5.shape)
d5 = torch.cat((x4, d5), dim=1)
# print("x4", d5.shape)
d5 = self.Up_conv5(d5)
# print("x4", d5.shape)
d4 = self.Up4(d5)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.Conv_1x1(d2)
# d1 = F.softmax(d1,dim=1) # mine
# return d1
out = nn.Sigmoid()(d1)
return out
#
# if __name__ == '__main__':
# net =U_Net(img_ch=3, output_ch=1)
# print(net)
# x = torch.rand((2, 3, 224, 224))
# print(net.forward(x).shape)
# from torchstat import stat
#
# model = U_Net()
# stat(model, (3, 224, 224)) | ikkbic/My-Repositories | segmentionn_models_trans/UNet-1.py | UNet-1.py | py | 3,430 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
36643660386 | # -*- coding: utf-8 -*-
"""
@author: QgZhan
@contact: zhanqg@foxmail.com
@file: cifar.py
@time: 2022/4/19 11:19
"""
import os
from torch.utils.data import Dataset
from dataloader.dataloader_utils import *
from torchvision import datasets, transforms
from spikingjelly.datasets import cifar10_dvs
from torch.utils.data.sampler import SubsetRandomSampler
# your own data dir
DIR = {'CIFAR10': '/data/zhan/CV_data/cifar10',
'CIFAR10DVS': '/data/zhan/Event_Camera_Datasets/CIFAR10DVS',
'CIFAR10DVS_CATCH': '/data/zhan/Event_Camera_Datasets/CIFAR10DVS_dst_cache'
}
def get_cifar10(batch_size, train_set_ratio=1.0):
"""
get the train loader and test loader of cifar10.
:return: train_loader, test_loader
"""
trans_train = transforms.Compose([transforms.Resize(48),
transforms.RandomCrop(48, padding=4),
transforms.RandomHorizontalFlip(), # 随机水平翻转
CIFAR10Policy(), # TODO: 待注释
transforms.ToTensor(),
# transforms.RandomGrayscale(), # 随机变为灰度图
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), # 归一化
# transforms.Normalize((0., 0., 0.), (1, 1, 1)),
# Cutout(n_holes=1, length=16) # 随机挖n_holes个length * length的洞
])
trans_test = transforms.Compose([transforms.Resize(48),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
train_data = datasets.CIFAR10(DIR['CIFAR10'], train=True, transform=trans_train, download=True)
test_data = datasets.CIFAR10(DIR['CIFAR10'], train=False, transform=trans_test, download=True)
# take train set by train_set_ratio
n_train = len(train_data)
split = int(n_train * train_set_ratio)
indices = list(range(n_train))
random.shuffle(indices)
train_sampler = SubsetRandomSampler(indices[:split])
if train_set_ratio < 1.0:
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=False, num_workers=8, drop_last=True,
sampler=train_sampler, pin_memory=True)
else:
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=True,
pin_memory=True)
test_dataloader = DataLoaderX(test_data, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=False,
pin_memory=True)
return train_dataloader, test_dataloader
def get_cifar10_DVS(batch_size, T, split_ratio=0.9, train_set_ratio=1, size=48, encode_type='TET'):
"""
get the train loader and test loader of cifar10.
:param batch_size:
:param T:
:param split_ratio: the ratio of train set: test set
:param train_set_ratio: the real used train set ratio
:param size:
:param encode_type:
:return: train_loader, test_loader
"""
if encode_type is "spikingjelly":
trans = DVSResize((size, size), T)
train_set_pth = os.path.join(DIR['CIFAR10DVS_CATCH'], f'train_set_{T}_{split_ratio}_{size}.pt')
test_set_pth = os.path.join(DIR['CIFAR10DVS_CATCH'], f'test_set_{T}_{split_ratio}_{size}.pt')
if os.path.exists(train_set_pth) and os.path.exists(test_set_pth):
train_set = torch.load(train_set_pth)
test_set = torch.load(test_set_pth)
else:
origin_set = cifar10_dvs.CIFAR10DVS(root=DIR['CIFAR10DVS'], data_type='frame', frames_number=T,
split_by='number', transform=trans)
train_set, test_set = split_to_train_test_set(split_ratio, origin_set, 10)
if not os.path.exists(DIR['CIFAR10DVS_CATCH']):
os.makedirs(DIR['CIFAR10DVS_CATCH'])
torch.save(train_set, train_set_pth)
torch.save(test_set, test_set_pth)
elif encode_type is "TET":
path = '/data/zhan/Event_Camera_Datasets/CIFAR10DVS/temporal_effecient_training_0.9'
train_path = path + '/train'
test_path = path + '/test'
train_set = DVSCifar10(root=train_path)
test_set = DVSCifar10(root=test_path)
elif encode_type is "3_channel":
path = '/data/zhan/Event_Camera_Datasets/CIFAR10DVS/temporal_effecient_training_0.9'
train_path = path + '/train'
test_path = path + '/test'
train_set = Channel_3_DVSCifar10(root=train_path)
test_set = Channel_3_DVSCifar10(root=test_path)
# take train set by train_set_ratio
n_train = len(train_set)
split = int(n_train * train_set_ratio)
indices = list(range(n_train))
random.shuffle(indices)
train_sampler = SubsetRandomSampler(indices[:split])
# valid_sampler = SubsetRandomSampler(indices[split:])
# generate dataloader
# train_data_loader = DataLoaderX(dataset=train_set, batch_size=batch_size, shuffle=True, drop_last=True,
# num_workers=8, pin_memory=True)
train_data_loader = DataLoaderX(dataset=train_set, batch_size=batch_size, shuffle=False, drop_last=True,
sampler=train_sampler, num_workers=8,
pin_memory=True) # SubsetRandomSampler 自带shuffle,不能重复使用
test_data_loader = DataLoaderX(dataset=test_set, batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=8, pin_memory=True)
return train_data_loader, test_data_loader
def get_cifar100(batch_size):
"""
get the train loader and test loader of cifar100.
:return: train_loader, test_loader
"""
trans_t = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[n / 255. for n in [129.3, 124.1, 112.4]],
std=[n / 255. for n in [68.2, 65.4, 70.4]]),
Cutout(n_holes=1, length=16)
])
trans = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[n / 255. for n in [129.3, 124.1, 112.4]],
std=[n / 255. for n in [68.2, 65.4, 70.4]])])
train_data = datasets.CIFAR100(DIR['CIFAR100'], train=True, transform=trans_t, download=True)
test_data = datasets.CIFAR100(DIR['CIFAR100'], train=False, transform=trans, download=True)
train_dataloader = DataLoaderX(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
test_dataloader = DataLoaderX(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
return train_dataloader, test_dataloader
class DVSCifar10(Dataset):
# This code is form https://github.com/Gus-Lab/temporal_efficient_training
def __init__(self, root, train=True, transform=True, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.resize = transforms.Resize(size=(48, 48)) # 48 48
self.tensorx = transforms.ToTensor()
self.imgx = transforms.ToPILImage()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
data, target = torch.load(self.root + '/{}.pt'.format(index))
# if self.train:
new_data = []
for t in range(data.size(0)):
new_data.append(self.tensorx(self.resize(self.imgx(data[t, ...]))))
data = torch.stack(new_data, dim=0)
if self.transform:
flip = random.random() > 0.5
if flip:
data = torch.flip(data, dims=(3,))
off1 = random.randint(-5, 5)
off2 = random.randint(-5, 5)
data = torch.roll(data, shifts=(off1, off2), dims=(2, 3))
if self.target_transform is not None:
target = self.target_transform(target)
return data, target.long().squeeze(-1)
def __len__(self):
return len(os.listdir(self.root))
class Channel_3_DVSCifar10(Dataset):
def __init__(self, root, train=True, transform=True, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.resize = transforms.Resize(size=(48, 48)) # 48 48
self.tensorx = transforms.ToTensor()
self.imgx = transforms.ToPILImage()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
data, target = torch.load(self.root + '/{}.pt'.format(index))
T, C, H, W = data.shape
# if self.train:
new_data = []
for t in range(T):
tmp = data[t, ...] # (2, H, W)
tmp = torch.cat((tmp, torch.zeros(1, H, W)), dim=0) # (3, H, W)
mask = (torch.randn((H, W)) > 0).to(data)
tmp[2].data = tmp[0].data * mask + tmp[1].data * (1 - mask)
new_data.append(self.tensorx(self.resize(self.imgx(tmp))))
data = torch.stack(new_data, dim=0)
if self.transform:
flip = random.random() > 0.5
if flip:
data = torch.flip(data, dims=(3,))
off1 = random.randint(-5, 5)
off2 = random.randint(-5, 5)
data = torch.roll(data, shifts=(off1, off2), dims=(2, 3))
if self.target_transform is not None:
target = self.target_transform(target)
return data, target.long().squeeze(-1)
def __len__(self):
return len(os.listdir(self.root))
| rtao499/SAANet | dataloader/cifar.py | cifar.py | py | 10,609 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 29,
"usage_type": "call"
},
{
... |
43381026267 | import boto3
def lambda_handler(event, context):
sns = boto3.client('sns')
message = event.get('message', 'Default message')
params = {
'Message': message,
'TopicArn': 'arn:aws:sns:us-east-1:896553604990:LiveScore'
}
try:
response = sns.publish(**params)
message_id = response['MessageId']
print('Message published:', message_id)
return response
except Exception as e:
print('Error publishing message:', str(e))
raise e
| bayarbayasgalanj/cloud_computing | Project/lambda_function.py | lambda_function.py | py | 512 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 4,
"usage_type": "call"
}
] |
71969293949 | import argparse
import logging
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='ML Trainer')
parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
parser.add_argument('--region', type=str, help='Which zone to run the analyzer.')
parser.add_argument('--cluster', type=str, help='The name of the cluster to run job.')
parser.add_argument('--package', type=str,
help='GCS Path of XGBoost distributed trainer package.')
parser.add_argument('--output', type=str, help='GCS path to use for output.')
parser.add_argument('--conf', type=str, help='GCS path of the training json config file.')
parser.add_argument('--rounds', type=int, help='Number of rounds to train.')
parser.add_argument('--workers', type=int, help='Number of workers to use for training.')
parser.add_argument('--train', type=str, help='GCS path of the training libsvm file pattern.')
parser.add_argument('--eval', type=str, help='GCS path of the eval libsvm file pattern.')
parser.add_argument('--analysis', type=str, help='GCS path of the analysis input.')
parser.add_argument('--target', type=str, help='Target column name.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
api = _utils.get_client()
logging.info('Submitting job...')
spark_args = [args.conf, str(args.rounds), str(args.workers), args.analysis, args.target,
args.train, args.eval, args.output]
job_id = _utils.submit_spark_job(
api, args.project, args.region, args.cluster, [args.package],
'ml.dmlc.xgboost4j.scala.example.spark.XGBoostTrainer', spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(api, args.project, args.region, job_id)
with open('/output.txt', 'w') as f:
f.write(args.output)
logging.info('Job completed.')
if __name__== "__main__":
main()
| kubeflow/kfp-tekton-backend | components/deprecated/dataproc/train/src/train.py | train.py | py | 1,939 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "common._ut... |
4630502954 | import tkinter as Tk
from math import floor
import numpy as np
from PIL import Image,ImageTk
## ---------------------- ##
##| CLASSES |##
## ---------------------- ##
class Texture:
def __init__(self,path):
self._img = Tk.PhotoImage(file=path)
def getImg(self): return self._img
class Textures:
def __init__(self):
dirt = Texture('.\\Textures\\dirt.gif')
rock = Texture('.\\Textures\\rock.gif')
water = Texture('.\\Textures\\water.gif')
grass = Texture('.\\Textures\\grass.gif')
snowyGrass = Texture('.\\Textures\\snowyGrass.gif')
sand = Texture('.\\Textures\\sand.gif')
wood = Texture('.\\Textures\\wood.gif')
leaf = Texture('.\\Textures\\leaf.gif')
redFlower = Texture('.\\Textures\\redFlower.gif')
self.__textures = {'dirt':dirt,'rock':rock,'water':water,'grass':grass,'sand':sand,'wood':wood,'leaf':leaf,'redFlower':redFlower,'snowyGrass':snowyGrass}
def getDict(self): return self.__textures
class Camera:
def __init__(self,can,env):
self.__height = int(can['height'])
self.__width = int(can['width'])
self.__can = can
self.__env = env
self.__scale = 40 # Block rendering size - DO NOT CHANGE WITHOUT RESIZE TEXTURES
# Camera position when starting
self.__posx = 8
self.__posy = 25
self.__chunkNumber = floor(self.__posx/16)
# Options
self.__renderingDistanceInChunks = 2
self.__moveVertical = 8
self.__moveHorizontal = 16
self.__skyUpdateTime = 1
self.__horzCamFollowing = 5
self.__vertCamFollowing = 5
# Data sets
self.__skies = dict()
self.__brightnesses = dict()
# skyRendering initialization
self.computeAndLoadImages()
backgroundImage = self.__skies['sky-0']
brightnessImage = self.__brightnesses['br-0']
self.__sky = self.__can.create_image(self.__width//2,self.__height//2,image=backgroundImage)
self.__brightness = self.__can.create_image(self.__width//2,self.__height//2,image=brightnessImage)
# Get useful values
def getScale(self): return self.__scale
def getPosx(self): return self.__posx
def getPosy(self): return self.__posy
# Convert a frame position into canvas position
def position2pixel(self,x,y):
xc = self.__posx
yc = self.__posy
xr = x-xc
yr = y-yc
px = self.__width//2 + xr*self.__scale
py = self.__height//2 - yr*self.__scale
return (px,py)
# Display stuff
def displayBlock(self,block):
x = block.getx()
y = block.gety()
(px1,py1) = self.position2pixel(x,y)
self.__can.delete(block.getDisplayAdress())
try:
img = block.getImg()
adress = self.__can.create_image(px1+self.__scale//2,py1-self.__scale//2,image=img)
except:
px2 = px1 + self.__scale
py2 = py1 - self.__scale
adress = self.__can.create_rectangle(px1,py1,px2,py2,fill=block.getColor())
block.setDisplayAdress(adress)
def displayChunk(self,chunk):
chunk.activate()
for blk in chunk.getBlocks().items():
self.displayBlock(blk[1])
def displayPlayer(self,player):
x1 = player.getPosx() - 0.25
y1 = player.getPosy() -0.9
x2 = x1 + 0.5
y2 = y1 + 1.8
(px1,py1) = self.position2pixel(x1,y1)
(px2,py2) = self.position2pixel(x2,y2)
displayAdress = self.__can.create_rectangle(px1,py1,px2,py2,fill='black')
player.setDisplayAdress(displayAdress)
def displayEnv(self,env):
for chunk in env.getChunks().items():
self.displayChunk(chunk[1])
# Move stuff
def moveBlock(self,block,dx,dy):
self.__can.move(block.getDisplayAdress(),dx*self.__scale,-dy*self.__scale)
def moveChunk(self,chunk,dx,dy):
for blk in chunk.getBlocks().items():
self.moveBlock(blk[1],dx,dy)
def movePlayer(self,player,dx,dy):
self.__can.move(player.getDisplayAdress(),dx*self.__scale,-dy*self.__scale)
self.__can.tag_raise(player.getDisplayAdress())
def moveEnv(self,dx,dy):
for chunk in self.__env.getChunks().items():
self.moveChunk(chunk[1],dx,dy)
# Chunk rendering methods
def eraseChunk(self,chunk):
chunk.disactivate()
for blk in chunk.getBlocks().items():
self.__can.delete(blk[1].getDisplayAdress())
def updateChunkRendeering(self,player):
playerChunk = player.getChunkNumber()
for chunk in self.__env.getChunks().items():
if abs(chunk[1].getChunkNumber() - playerChunk) > self.__renderingDistanceInChunks:
self.eraseChunk(chunk[1])
for n in range(-self.__renderingDistanceInChunks+playerChunk,self.__renderingDistanceInChunks+playerChunk):
if str(n) in self.__env.getChunks().keys():
if not self.__env.getChunks()[str(n)].isActive():
self.displayChunk(self.__env.getChunks()[str(n)])
else:
self.__env.createChunk(n)
self.displayChunk(self.__env.getChunks()[str(n)])
# Sky and brightness
def computeAndLoadImages(self):
print(' Creating and loading skies...')
T = self.__env.getDayAndNightCyclesDuration()
for t in range(0,T,self.__skyUpdateTime):
try:
self.__skies['sky-'+str(t)] = Tk.PhotoImage(file=".\\skies\\sky-"+str(int(t))+".gif")
except:
skyColor(t,self.__width,self.__height,T)
self.__skies['sky-'+str(t)] = Tk.PhotoImage(file=".\\skies\\sky-"+str(int(t))+".gif")
print(' Creating and loading brightnesses...')
for t in range(0,T,self.__skyUpdateTime):
try:
self.__brightnesses['br-'+str(t)] = Tk.PhotoImage(file=".\\brightnesses\\br-"+str(int(t))+".png")
except:
brightness(t,self.__width,self.__height,T)
self.__brightnesses['br-'+str(t)] = Tk.PhotoImage(file=".\\brightnesses\\br-"+str(int(t))+".png")
def updateSkyAndBrightnessRendering(self,t1,t2):
if floor(t1/self.__skyUpdateTime) == floor(t2/self.__skyUpdateTime) -1:
self.__can.delete(self.__sky)
self.__can.delete(self.__brightness)
T = self.__env.getDayAndNightCyclesDuration()
backgroundImage = self.__skies['sky-'+str(int(t2%T))]
brightnessImage = self.__brightnesses['br-'+str(int(t2%T))]
self.__sky = self.__can.create_image(self.__width//2,self.__height//2,image=backgroundImage)
self.__brightness = self.__can.create_image(self.__width//2,self.__height//2,image=brightnessImage)
self.reorder()
# Set all stuff on the good plane
def reorder(self):
self.__can.tag_lower(self.__sky)
self.__can.tag_raise(self.__brightness)
# Camera function call
def bind(self,player,env,t1,t2):
playerPosx = player.getPosx()
camPosx = self.__posx
playerPosy = player.getPosy()
camPosy = self.__posy
diffx = playerPosx-camPosx
diffy = playerPosy-camPosy
if (diffx/self.__horzCamFollowing)**2 + (diffy/self.__vertCamFollowing)**2 > 1:
self.moveEnv(-diffx,-diffy)
self.__posx += diffx
self.__posy += diffy
self.__can.delete(player.getDisplayAdress())
self.displayPlayer(player)
self.updateSkyAndBrightnessRendering(t1,t2)
## ---------------------- ##
##| ADDITIONAL FUNCTIONS |##
## ---------------------- ##
# Create the sky images
def skyColor(time,w,h,dayAndNightCycleTime):
T = dayAndNightCycleTime
transitionTime = dayAndNightCycleTime//6
size = (100,100)
img = Image.new('RGB', size)
upColor = [[0,0,0],[0,7,107],[0,65,163],[0,7,107]]
downColor = [[0,0,0],[250,196,0],[150, 192, 255],[250,196,0]]
if time < T//4 - transitionTime//2:
Cu1 = upColor[0]
Cu2 = upColor[1]
Cd1 = downColor[0]
Cd2 = downColor[1]
alpha = 0
elif time < T//4:
Cu1 = upColor[0]
Cu2 = upColor[1]
Cd1 = downColor[0]
Cd2 = downColor[1]
alpha = (time-(T//4 - transitionTime//2))/(transitionTime//2)
elif time < T//4 + transitionTime//2:
Cu1 = upColor[1]
Cu2 = upColor[2]
Cd1 = downColor[1]
Cd2 = downColor[2]
alpha = (time-T//4)/(transitionTime//2)
elif time < 3*T//4 - transitionTime//2:
Cu1 = upColor[2]
Cu2 = upColor[2]
Cd1 = downColor[2]
Cd2 = downColor[2]
alpha = 0
elif time < 3*T//4:
Cu1 = upColor[2]
Cu2 = upColor[3]
Cd1 = downColor[2]
Cd2 = downColor[3]
alpha = (time-(3*T//4 - transitionTime//2))/(transitionTime//2)
elif time < 3*T//4 + transitionTime//2:
Cu1 = upColor[3]
Cu2 = upColor[0]
Cd1 = downColor[3]
Cd2 = downColor[0]
alpha = (time-3*T//4)/(transitionTime//2)
else:
Cu1 = upColor[0]
Cu2 = upColor[0]
Cd1 = downColor[0]
Cd2 = downColor[0]
alpha = 1
R = np.linspace(Cu1[0]+(Cu2[0]-Cu1[0])*alpha,Cd1[0]+(Cd2[0]-Cd1[0])*alpha,100)
G = np.linspace(Cu1[1]+(Cu2[1]-Cu1[1])*alpha,Cd1[1]+(Cd2[1]-Cd1[1])*alpha,100)
B = np.linspace(Cu1[2]+(Cu2[2]-Cu1[2])*alpha,Cd1[2]+(Cd2[2]-Cd1[2])*alpha,100)
for i in range(100):
for j in range(100):
color = (int(R[j]),int(G[j]),int(B[j]))
img.putpixel((i,j),color)
img = img.resize((w*2,h*2))
img.save('.\\skies\\sky-'+str(int(time))+'.gif', "GIF")
# Create the brightness images
def brightness(time,w,h,dayAndNightCycleTime):
T = dayAndNightCycleTime
transitionTime = dayAndNightCycleTime//6
size = (w,h)
maxOpacity = 200
if time <T//4 - transitionTime//2:
transparency = maxOpacity
elif time < T//4 + transitionTime//2:
transparency = int(-(time-(T//4 - transitionTime//2))/transitionTime*maxOpacity+maxOpacity)
elif time < 3*T//4 - transitionTime//2:
transparency = 0
elif time < 3*T//4 + transitionTime//2:
transparency = int((time-(3*T//4 - transitionTime//2))/transitionTime*maxOpacity)
else:
transparency = maxOpacity
print(transparency)
img = Image.new('RGBA', size,(0,0,0,transparency))
img.save('.\\brightnesses\\br-'+str(int(time))+'.png', "PNG")
| MaximePerriquet/PyCraft | rendering.py | rendering.py | py | 10,623 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.PhotoImage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tkinter.PhotoImage",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tkinter.PhotoImage"... |
36185365035 | '''
Ahmad Abu Hanifah
A1C020026
Teknik Otomasi Pertanaian
'''
import numpy as np
import matplotlib.pyplot as plt
dOsp = 6.5
vmin = 0 # kecepatan aliran udara (L/s)
vmax = 2 # kecepatan aliran udara (L/s)
V = 1000000 #Volume sistem (L)
kLa = 0.045 # per menit
n = 4 # Jumlah aerator
# a = 0.4 # Luas permukaan antarmuka udara-air (m2/liter)
a = 400 # Luas permukaan kolam (m2)
def NilaidO(dOi, tn, ti, ):
dOn = dOi + (tn-ti)*((v*n*a*2.5)/V-(kLa/60)*dOi)
return dOn
time = np.linspace(1, 3000, 100)
dOact = np.zeros(time.size)
dOsetp = np.zeros(time.size)
i = 0
dO0 = 3
dOi = dO0
dOn = dO0
dOsetp[:] = dOsp
print("time", "error", "v", "DO aktual")
for t in time:
dOi = dOn
# menghitung error
err = dOi - dOsp
# kontroller OnOff
if err < 0:
v = vmax # pemanas hidup -> On
else:
v = vmin # pemanas mati -> Off
if i == 0:
ti = 0
# Hitung respon sistem
dOn = NilaidO(dOi, t, ti)
ti = t
print(f"{t}, {err}, {v}, {dOn}")
dOact[i] = dOn
# perulangan waktu selesai dan kembali ke atas
i = i + 1
# Plot hasil simulasi
plt.title("Simulasi Sistem Kontrol On-Off")
plt.xlabel("Waktu (s)")
plt.ylabel("DO (mg/L)")
plt.plot(time, dOact, "-b", label="DO Aktual")
plt.plot(time, dOsetp, "--r", label="DO Set-point")
plt.legend(loc="lower right", frameon=False)
plt.show()
| AbuHanifah1878/Teknik_Otomasi_Pertanian | KontrolDOOnOff.py | KontrolDOOnOff.py | py | 1,349 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
... |
27089285988 | import sys
import json
unique = {}
start = ['a','f','l','q','u']
end = ['e','k','p','t','z']
if(len(sys.argv) != 4):
print("========================================================================================================")
print("SORRY!! Please provide the path to the INPUT json file, the OUTPUT file, alphabet selection number [0-5]")
print("========================================================================================================")
print("Example: python3 Fan_in.py ./dummy.json ./output.txt 2 ")
print("========================================================================================================")
sys.exit()
f = open(sys.argv[1])
index = int(sys.argv[3])
if(index < 0 or index > 5):
print("INDEX should be between 0 and 5 only")
sys.exit()
for line in f:
data = json.loads(line)
try:
if(data is None or data['created_time'] is None):
continue
if(data['message'] is None):
continue
if('actor' not in data or 'username' not in data['actor'] or 'transactions' not in data or data['transactions'] is None or 'target' not in data['transactions'][0] or 'username' not in data['transactions'][0]['target']):
continue
tusername = data['transactions'][0]['target']['username']
username = data['actor']['username']
ltuser = tusername[0].lower()
if(index != 5 and (ltuser < start[index] or ltuser > end[index])):
continue
if(index == 5 and (ltuser >= 'a' or ltuser <= 'z')):
continue
if(tusername not in unique):
unique[tusername] = {'T':0,'users':set()}
if(username not in unique[tusername]):
unique[tusername]['users'].add(username.strip())
unique[tusername]['T'] += 1
except Exception as e:
continue
f.close()
outputfile1 = open(sys.argv[2] + str(index),"w")
for k,v in unique.items():
s = str(len(v['users']))+ " " + str(v['T'])
outputfile1.write(s + "\n")
outputfile1.close()
| STEELISI/Venmo | Fan_in.py | Fan_in.py | py | 2,106 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,... |
10233665355 | from __future__ import annotations
import datetime
from typing import Optional, Union, TYPE_CHECKING, List, Dict
from . import enums
from .utils import parse_timestamp
from .user import BitLeaderboardUser, PartialUser, User
if TYPE_CHECKING:
from .http import TwitchHTTP
__all__ = (
"BitsLeaderboard",
"Clip",
"CheerEmote",
"CheerEmoteTier",
"GlobalEmote",
"ChannelEmote",
"HypeTrainContribution",
"HypeTrainEvent",
"BanEvent",
"FollowEvent",
"SubscriptionEvent",
"Marker",
"VideoMarkers",
"Game",
"ModEvent",
"AutomodCheckMessage",
"AutomodCheckResponse",
"Extension",
"MaybeActiveExtension",
"ActiveExtension",
"ExtensionBuilder",
"Video",
"Tag",
"WebhookSubscription",
"Prediction",
"Predictor",
"PredictionOutcome",
"Schedule",
"ScheduleSegment",
"ScheduleCategory",
"ScheduleVacation",
"Stream",
"Team",
"ChannelTeams",
"ChannelInfo",
"Poll",
"PollChoice",
"Goal",
"ChatSettings",
"Raid",
"ChatterColor",
"Timeout",
"Ban",
"ShieldStatus",
"ChatBadge",
"ChatBadgeVersions",
"ContentClassificationLabel",
"CharityValues",
"CharityCampaign",
"ChannelFollowerEvent",
"ChannelFollowingEvent",
)
class BitsLeaderboard:
"""
Represents a Bits leaderboard from the twitch API.
Attributes
------------
started_at: Optional[:class:`datetime.datetime`]
The time the leaderboard started.
ended_at: Optional[:class:`datetime.datetime`]
The time the leaderboard ended.
leaders: List[:class:`BitLeaderboardUser`]
The current leaders of the Leaderboard.
"""
__slots__ = "_http", "leaders", "started_at", "ended_at"
def __init__(self, http: "TwitchHTTP", data: dict):
self._http = http
self.started_at = (
parse_timestamp(data["date_range"]["started_at"]) if data["date_range"]["started_at"] else None
)
self.ended_at = parse_timestamp(data["date_range"]["ended_at"]) if data["date_range"]["ended_at"] else None
self.leaders = [BitLeaderboardUser(http, x) for x in data["data"]]
def __repr__(self):
return f"<BitsLeaderboard started_at={self.started_at} ended_at={self.ended_at}>"
class CheerEmoteTier:
"""
Represents a Cheer Emote tier.
Attributes
-----------
min_bits: :class:`int`
The minimum bits for the tier
id: :class:`str`
The ID of the tier
colour: :class:`str`
The colour of the tier
images: :class:`dict`
contains two dicts, ``light`` and ``dark``. Each item will have an ``animated`` and ``static`` item,
which will contain yet another dict, with sizes ``1``, ``1.5``, ``2``, ``3``, and ``4``.
Ex. ``cheeremotetier.images["light"]["animated"]["1"]``
can_cheer: :class:`bool`
Indicates whether emote information is accessible to users.
show_in_bits_card: :class`bool`
Indicates whether twitch hides the emote from the bits card.
"""
__slots__ = "min_bits", "id", "color", "images", "can_cheer", "show_in_bits_card"
def __init__(self, data: dict):
self.min_bits: int = data["min_bits"]
self.id: str = data["id"]
self.color: str = data["color"]
self.images = data["images"] # TODO types
self.can_cheer: bool = data["can_cheer"]
self.show_in_bits_card: bool = data["show_in_bits_card"]
def __repr__(self):
return f"<CheerEmoteTier id={self.id} min_bits={self.min_bits}>"
class CheerEmote:
"""
Represents a Cheer Emote
Attributes
-----------
prefix: :class:`str`
The string used to Cheer that precedes the Bits amount.
tiers: :class:`~CheerEmoteTier`
The tiers this Cheer Emote has
type: :class:`str`
Shows whether the emote is ``global_first_party``, ``global_third_party``, ``channel_custom``, ``display_only``, or ``sponsored``.
order: :class:`str`
Order of the emotes as shown in the bits card, in ascending order.
last_updated :class:`datetime.datetime`
The date this cheermote was last updated.
charitable: :class:`bool`
Indicates whether this emote provides a charity contribution match during charity campaigns.
"""
__slots__ = "_http", "prefix", "tiers", "type", "order", "last_updated", "charitable"
def __init__(self, http: "TwitchHTTP", data: dict):
self._http = http
self.prefix: str = data["prefix"]
self.tiers = [CheerEmoteTier(x) for x in data["tiers"]]
self.type: str = data["type"]
self.order: str = data["order"]
self.last_updated = parse_timestamp(data["last_updated"])
self.charitable: bool = data["is_charitable"]
def __repr__(self):
return f"<CheerEmote prefix={self.prefix} type={self.type} order={self.order}>"
class GlobalEmote:
"""
Represents a Global Emote
Attributes
-----------
id: :class:`str`
The ID of the emote.
name: :class:`str`
The name of the emote.
images: :class:`dict`
Contains the image URLs for the emote. These image URLs will always provide a static (i.e., non-animated) emote image with a light background.
format: List[:class:`str`]
The formats that the emote is available in.
scale: List[:class:`str`]
The sizes that the emote is available in.
theme_mode: List[:class:`str`]
The background themes that the emote is available in.
"""
__slots__ = ("id", "name", "images", "format", "scale", "theme_mode", "template")
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
self.images: dict = data["images"]
self.format: List[str] = data["format"]
self.scale: List[str] = data["scale"]
self.theme_mode: List[str] = data["theme_mode"]
def __repr__(self):
return f"<GlobalEmote id={self.id} name={self.name}"
class ChannelEmote(GlobalEmote):
"""
Represents a Channel Emote
Attributes
-----------
id: :class:`str`
The ID of the emote.
name: :class:`str`
The name of the emote.
images: :class:`dict`
Contains the image URLs for the emote. These image URLs will always provide a static (i.e., non-animated) emote image with a light background.
tier: :class:`str`
The subscriber tier at which the emote is unlocked.
type: :class:`str`
The type of emote.
set_id: :class:`str`
An ID that identifies the emote set that the emote belongs to.
format: List[:class:`str`]
The formats that the emote is available in.
scale: List[:class:`str`]
The sizes that the emote is available in.
theme_mode: List[:class:`str`]
The background themes that the emote is available in.
"""
__slots__ = ("tier", "type", "set_id")
def __init__(self, http: "TwitchHTTP", data: dict):
super().__init__(http, data)
self.tier: str = data["tier"]
self.type: str = data["emote_type"]
self.set_id: str = data["emote_set_id"]
def __repr__(self):
return f"<ChannelEmote id={self.id} name={self.name} type={self.type}>"
class Clip:
"""
Represents a Twitch Clip
Attributes
-----------
id: :class:`str`
The ID of the clip.
url: :class:`str`
The URL of the clip.
embed_url: :class:`str`
The URL to embed the clip with.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the clip was created on.
creator: :class:`~twitchio.PartialUser`
The user who created the clip.
video_id: :class:`str`
The ID of the video the clip is sourced from.
game_id: :class:`str`
The ID of the game that was being played when the clip was created.
language: :class:`str`
The language, in an `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ format, of the stream when the clip was created.
title: :class:`str`
The title of the clip.
views: :class:`int`
The amount of views this clip has.
created_at: :class:`datetime.datetime`
When the clip was created.
thumbnail_url: :class:`str`
The url of the clip thumbnail.
duration: :class:`float`
Duration of the Clip in seconds (up to 0.1 precision).
vod_offset: Optional[:class:`int`]
The zero-based offset, in seconds, to where the clip starts in the video (VOD) or stream.
This can be None if the parent no longer exists
is_featured: :class:`bool`
Indicates if the clip is featured or not.
"""
__slots__ = (
"id",
"url",
"embed_url",
"broadcaster",
"creator",
"video_id",
"game_id",
"language",
"title",
"views",
"created_at",
"thumbnail_url",
"duration",
"vod_offset",
"is_featured",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.url: str = data["url"]
self.embed_url: str = data["embed_url"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.creator = PartialUser(http, data["creator_id"], data["creator_name"])
self.video_id: str = data["video_id"]
self.game_id: str = data["game_id"]
self.language: str = data["language"]
self.title: str = data["title"]
self.views: int = data["view_count"]
self.created_at = parse_timestamp(data["created_at"])
self.thumbnail_url: str = data["thumbnail_url"]
self.duration: float = data["duration"]
self.vod_offset: Optional[int] = data["vod_offset"]
self.is_featured: bool = data["is_featured"]
def __repr__(self):
return f"<Clip id={self.id} broadcaster={self.broadcaster} creator={self.creator}>"
class HypeTrainContribution:
"""
A Contribution to a Hype Train
Attributes
-----------
total: :class:`int`
Total aggregated amount of all contributions by the top contributor. If type is ``BITS``, total represents aggregate amount of bits used.
If type is ``SUBS``, aggregate total where 500, 1000, or 2500 represent tier 1, 2, or 3 subscriptions respectively.
For example, if top contributor has gifted a tier 1, 2, and 3 subscription, total would be 4000.
type: :class:`str`
Identifies the contribution method, either BITS, SUBS or OTHER.
user: :class:`~twitchio.PartialUser`
The user making the contribution.
"""
__slots__ = "total", "type", "user"
def __init__(self, http: "TwitchHTTP", data: dict):
self.total: int = data["total"]
self.type: str = data["type"]
self.user = PartialUser(http, id=data["user"], name=None) # we'll see how this goes
def __repr__(self):
return f"<HypeTrainContribution total={self.total} type={self.type} user={self.user}>"
class HypeTrainEvent:
"""
Represents a Hype Train Event (progression)
Attributes
-----------
id: :class:`str`
The ID of the event.
event_id: :class:`str`
The ID of the Hype Train.
type: :class:`str`
The type of the event. Currently only ``hypetrain.progression``.
version: :class:`str`
The version of the endpoint.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the Hype Train is occurring on.
timestamp: :class:`datetime.datetime`
The time the event happened at.
cooldown_end_time: :class:`datetime.datetime`
The time that another Hype Train can happen at.
expiry: :class:`datetime.datetime`
The time that this Hype Train expires at.
started_at: :class:`datetime.datetime`
The time that this Hype Train started at.
last_contribution: :class:`HypeTrainContribution`
The last contribution to this Hype Train.
level: :class:`int`
The level reached on this Hype Train (1-5).
top_contributions: List[:class:`HypeTrainContribution`]
The top contributors to the Hype Train.
contributions_total: :class:`int`
The total score towards completing the goal.
goal: :class:`int`
The goal for the next Hype Train level
"""
__slots__ = (
"id",
"type",
"timestamp",
"version",
"broadcaster",
"expiry",
"event_id",
"goal",
"level",
"started_at",
"top_contributions",
"contributions_total",
"cooldown_end_time",
"last_contribution",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.event_id: str = data["event_data"]["id"]
self.type: str = data["event_type"]
self.version: str = data["version"]
self.broadcaster = PartialUser(http, id=data["event_data"]["broadcaster_id"], name=None)
self.timestamp = parse_timestamp(data["event_timestamp"])
self.cooldown_end_time = parse_timestamp(data["event_data"]["cooldown_end_time"])
self.expiry = parse_timestamp(data["expires_at"])
self.started_at = parse_timestamp(data["event_data"]["started_at"])
self.last_contribution = HypeTrainContribution(http, data["event_data"]["last_contribution"])
self.level: int = data["event_data"]["level"]
self.top_contributions = [HypeTrainContribution(http, x) for x in data["event_data"]["top_contributions"]]
self.contributions_total: int = data["event_data"]["total"]
self.goal: int = data["event_data"]["goal"]
def __repr__(self):
return f"<HypeTrainEvent id={self.id} type={self.type} level={self.level} broadcaster={self.broadcaster}>"
class BanEvent:
"""
This has been deprecated.
Represents a user being banned from a channel.
Attributes
-----------
id: :class:`str`
The event ID.
type: :class:`str`
Type of ban event. Either ``moderation.user.ban`` or ``moderation.user.unban``.
timestamp: :class:`datetime.datetime`
The time the action occurred at.
version: :class:`float`
The version of the endpoint.
broadcaster: :class:`~twitchio.PartialUser`
The user whose channel the ban/unban occurred on.
user: :class:`~twichio.PartialUser`
The user who was banned/unbanned.
moderator: :class:`~twitchio.PartialUser`
The user who performed the action.
expires_at: Optional[:class:`datetime.datetime`]
When the ban expires.
reason: :class:`str`
The reason the moderator banned/unbanned the user.
"""
__slots__ = "id", "type", "timestamp", "version", "broadcaster", "user", "expires_at", "moderator", "reason"
def __init__(self, http: "TwitchHTTP", data: dict, broadcaster: Optional[Union[PartialUser, User]]):
self.id: str = data["id"]
self.type: str = data["event_type"]
self.timestamp = parse_timestamp(data["event_timestamp"])
self.version: float = float(data["version"])
self.reason: str = data["event_data"]["reason"]
self.broadcaster = broadcaster or PartialUser(
http, data["event_data"]["broadcaster_id"], data["event_data"]["broadcaster_name"]
)
self.user = PartialUser(http, data["event_data"]["user_id"], data["event_data"]["user_name"])
self.moderator = PartialUser(http, data["event_data"]["moderator_id"], data["event_data"]["moderator_name"])
self.expires_at = (
parse_timestamp(data["event_data"]["expires_at"]) if data["event_data"]["expires_at"] else None
)
def __repr__(self):
return f"<BanEvent id={self.id} type={self.type} broadcaster={self.broadcaster} user={self.user}>"
class FollowEvent:
"""
Represents a Follow Event.
Attributes
-----------
from_user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that followed another user.
to_user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that was followed.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "from_user", "to_user", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
from_: Union[User, PartialUser] = None,
to: Union[User, PartialUser] = None,
):
self.from_user: Union[User, PartialUser] = from_ or PartialUser(http, data["from_id"], data["from_name"])
self.to_user: Union[User, PartialUser] = to or PartialUser(http, data["to_id"], data["to_name"])
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<FollowEvent from_user={self.from_user} to_user={self.to_user} followed_at={self.followed_at}>"
class ChannelFollowerEvent:
"""
Represents a ChannelFollowEvent Event.
Attributes
-----------
user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that followed another user.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "user", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
):
self.user: Union[User, PartialUser] = PartialUser(http, data["user_id"], data["user_login"])
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<ChannelFollowerEvent user={self.user} followed_at={self.followed_at}>"
class ChannelFollowingEvent:
"""
Represents a ChannelFollowEvent Event.
Attributes
-----------
broadcaster: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that is following another user.
followed_at: :class:`datetime.datetime`
When the follow happened.
"""
__slots__ = "broadcaster", "followed_at"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
):
self.broadcaster: Union[User, PartialUser] = PartialUser(
http, data["broadcaster_id"], data["broadcaster_login"]
)
self.followed_at = parse_timestamp(data["followed_at"])
def __repr__(self):
return f"<ChannelFollowerEvent user={self.broadcaster} followed_at={self.followed_at}>"
class SubscriptionEvent:
"""
Represents a Subscription Event
Attributes
-----------
broadcaster: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user that was subscribed to.
user: Union[:class:`~twitchio.User`, :class:`~twitchio.PartialUser`]
The user who subscribed.
tier: :class:`int`
The tier at which the user subscribed. Could be ``1``, ``2``, or ``3``.
plan_name: :class:`str`
Name of the description. (twitch docs aren't helpful, if you know what this is specifically please PR :) ).
gift: :class:`bool`
Whether the subscription is a gift.
"""
__slots__ = "broadcaster", "gift", "tier", "plan_name", "user"
def __init__(
self,
http: "TwitchHTTP",
data: dict,
broadcaster: Union[User, PartialUser] = None,
user: Union[User, PartialUser] = None,
):
self.broadcaster: Union[User, PartialUser] = broadcaster or PartialUser(
http, data["broadcaster_id"], data["broadcaster_name"]
)
self.user: Union[User, PartialUser] = user or PartialUser(http, data["user_id"], data["user_name"])
self.tier: int = round(int(data["tier"]) / 1000)
self.plan_name: str = data["plan_name"]
self.gift: bool = data["is_gift"]
def __repr__(self):
return (
f"<SubscriptionEvent broadcaster={self.broadcaster} user={self.user} tier={self.tier} "
f"plan_name={self.plan_name} gift={self.gift}>"
)
class Marker:
"""
Represents a stream Marker
Attributes
-----------
id: :class:`str`
The ID of the marker.
created_at: :class:`datetime.datetime`
When the marker was created.
description: :class:`str`
The description of the marker.
position: :class:`int`
The position of the marker, in seconds.
url: Optional[:class:`str`]
The url that leads to the marker.
"""
__slots__ = "id", "created_at", "description", "position", "url"
def __init__(self, data: dict):
self.id: str = data["id"]
self.created_at = parse_timestamp(data["created_at"])
self.description: str = data["description"]
self.position: int = data["position_seconds"]
self.url: Optional[str] = data.get("URL")
def __repr__(self):
return f"<Marker id={self.id} created_at={self.created_at} position={self.position} url={self.url}>"
class VideoMarkers:
"""
Represents markers contained in a video
Attributes
-----------
id: :class:`str`
The video id.
markers: List[:class:`Marker`]
The markers contained in the video.
"""
__slots__ = "id", "markers"
def __init__(self, data: dict):
self.id: str = data["video_id"]
self.markers = [Marker(d) for d in data["markers"]]
def __repr__(self):
return f"<VideoMarkers id={self.id}>"
class Game:
"""
Represents a Game on twitch
Attributes
-----------
id: :class:`int`
Game ID.
name: :class:`str`
Game name.
box_art_url: :class:`str`
Template URL for the game's box art.
igdb_id: Optional[:class:`int`]
The IGDB ID of the game. If this is not available to Twitch it will return None
"""
__slots__ = "id", "name", "box_art_url", "igdb_id"
def __init__(self, data: dict):
self.id: int = int(data["id"])
self.name: str = data["name"]
self.box_art_url: str = data["box_art_url"]
self.igdb_id: Optional[int] = data.get("igdb_id") and int(data["igdb_id"])
def __repr__(self):
return f"<Game id={self.id} name={self.name}>"
def art_url(self, width: int, height: int) -> str:
"""
Adds width and height into the box art url
Parameters
-----------
width: :class:`int`
The width of the image
height: :class:`int`
The height of the image
Returns
--------
:class:`str`
"""
return self.box_art_url.format(width=width, height=height)
class ModEvent:
"""
Represents a mod add/remove action
Attributes
-----------
id: :class:`str`
The ID of the event.
type: :class:`~twitchio.ModEventEnum`
The type of the event.
timestamp: :class:`datetime.datetime`
The timestamp of the event.
version: :class:`str`
The version of the endpoint.
broadcaster: Union[:class:`~twitchio.PartialUser`, :class:`~twitchio.User`]
The user whose channel the event happened on.
user: :class:`~twitchio.PartialUser`
The user being removed or added as a moderator.
"""
__slots__ = "id", "type", "timestamp", "version", "broadcaster", "user"
def __init__(self, http: "TwitchHTTP", data: dict, broadcaster: Union[PartialUser, User]):
self.id: str = data["id"]
self.type = enums.ModEventEnum(value=data["event_type"])
self.timestamp = parse_timestamp(data["event_timestamp"])
self.version: str = data["version"]
self.broadcaster = broadcaster
self.user = PartialUser(http, data["event_data"]["user_id"], data["event_data"]["user_name"])
def __repr__(self):
return f"<ModEvent id={self.id} type={self.type} broadcaster={self.broadcaster} user={self.user}>"
class AutomodCheckMessage:
"""
Represents the message to check with automod
Attributes
-----------
id: :class:`str`
Developer-generated identifier for mapping messages to results.
text: :class:`str`
Message text.
user_id: :class:`int`
User ID of the sender.
"""
__slots__ = "id", "text", "user_id"
def __init__(self, id: str, text: str, user: Union[PartialUser, int]):
self.id = id
self.text = text
self.user_id = user.id if isinstance(user, PartialUser) else user
def _to_dict(self):
return {"msg_id": self.id, "msg_text": self.text, "user_id": str(self.user_id)}
def __repr__(self):
return f"<AutomodCheckMessage id={self.id} user_id={self.user_id}>"
class AutomodCheckResponse:
"""
Represents the response to a message check with automod
Attributes
-----------
id: :class:`str`
The message ID passed in the body of the check
permitted: :class:`bool`
Indicates if this message meets AutoMod requirements.
"""
__slots__ = "id", "permitted"
def __init__(self, data: dict):
self.id: str = data["msg_id"]
self.permitted: bool = data["is_permitted"]
def __repr__(self):
return f"<AutomodCheckResponse id={self.id} permitted={self.permitted}>"
class Extension:
"""
Represents an extension for a specified user
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
active: :class:`bool`
Activation state of the extension, for each extension type (component, overlay, mobile, panel).
"""
__slots__ = "id", "active", "version", "_x", "_y"
def __init__(self, data):
self.id: str = data["id"]
self.version: str = data["version"]
self.active: bool = data["active"]
self._x = None
self._y = None
def __repr__(self):
return f"<Extension id={self.id} version={self.version} active={self.active}>"
@classmethod
def new(cls, active: bool, version: str, id: str, x: int = None, y: int = None) -> "Extension":
self = cls.__new__(cls)
self.active = active
self.version = version
self.id = id
self._x = x
self._y = y
return self
def _to_dict(self):
v = {"active": self.active, "id": self.id, "version": self.version}
if self._x is not None:
v["x"] = self._x
if self._y is not None:
v["y"] = self._y
return v
class MaybeActiveExtension(Extension):
"""
Represents an extension for a specified user that could be may be activated
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
name: :class:`str`
Name of the extension.
can_activate: :class:`bool`
Indicates whether the extension is configured such that it can be activated.
types: List[:class:`str`]
Types for which the extension can be activated.
"""
__slots__ = "id", "version", "name", "can_activate", "types"
def __init__(self, data):
self.id: str = data["id"]
self.version: str = data["version"]
self.name: str = data["name"]
self.can_activate: bool = data["can_activate"]
self.types: List[str] = data["type"]
def __repr__(self):
return f"<MaybeActiveExtension id={self.id} version={self.version} name={self.name}>"
class ActiveExtension(Extension):
"""
Represents an active extension for a specified user
Attributes
-----------
id: :class:`str`
ID of the extension.
version: :class:`str`
Version of the extension.
active: :class:`bool`
Activation state of the extension.
name: :class:`str`
Name of the extension.
x: :class:`int`
(Video-component Extensions only) X-coordinate of the placement of the extension. Could be None.
y: :class:`int`
(Video-component Extensions only) Y-coordinate of the placement of the extension. Could be None.
"""
__slots__ = "id", "active", "name", "version", "x", "y"
def __init__(self, data):
self.active: bool = data["active"]
self.id: Optional[str] = data.get("id", None)
self.version: Optional[str] = data.get("version", None)
self.name: Optional[str] = data.get("name", None)
self.x: Optional[int] = data.get("x", None) # x and y only show for component extensions.
self.y: Optional[int] = data.get("y", None)
def __repr__(self):
return f"<ActiveExtension id={self.id} version={self.version} name={self.name}>"
class ExtensionBuilder:
"""
Represents an extension to be updated for a specific user
Attributes
-----------
panels: List[:class:`~twitchio.Extension`]
List of panels to update for an extension.
overlays: List[:class:`~twitchio.Extension`]
List of overlays to update for an extension.
components: List[:class:`~twitchio.Extension`]
List of components to update for an extension.
"""
__slots__ = "panels", "overlays", "components"
def __init__(
self, panels: List[Extension] = None, overlays: List[Extension] = None, components: List[Extension] = None
):
self.panels = panels or []
self.overlays = overlays or []
self.components = components or []
def _to_dict(self):
return {
"panel": {str(x): y._to_dict() for x, y in enumerate(self.panels)},
"overlay": {str(x): y._to_dict() for x, y in enumerate(self.overlays)},
"component": {str(x): y._to_dict() for x, y in enumerate(self.components)},
}
class Video:
"""
Represents video information
Attributes
-----------
id: :class:`int`
The ID of the video.
user: :class:`~twitchio.PartialUser`
User who owns the video.
title: :class:`str`
Title of the video
description: :class:`str`
Description of the video.
created_at: :class:`datetime.datetime`
Date when the video was created.
published_at: :class:`datetime.datetime`
Date when the video was published.
url: :class:`str`
URL of the video.
thumbnail_url: :class:`str`
Template URL for the thumbnail of the video.
viewable: :class:`str`
Indicates whether the video is public or private.
view_count: :class:`int`
Number of times the video has been viewed.
language: :class:`str`
Language of the video.
type: :class:`str`
The type of video.
duration: :class:`str`
Length of the video.
"""
__slots__ = (
"_http",
"id",
"user",
"title",
"description",
"created_at",
"published_at",
"url",
"thumbnail_url",
"viewable",
"view_count",
"language",
"type",
"duration",
)
def __init__(self, http: "TwitchHTTP", data: dict, user: Union[PartialUser, User] = None):
self._http = http
self.id: int = int(data["id"])
self.user = user or PartialUser(http, data["user_id"], data["user_name"])
self.title: str = data["title"]
self.description: str = data["description"]
self.created_at = parse_timestamp(data["created_at"])
self.published_at = parse_timestamp(data["published_at"])
self.url: str = data["url"]
self.thumbnail_url: str = data["thumbnail_url"]
self.viewable: str = data["viewable"]
self.view_count: int = data["view_count"]
self.language: str = data["language"]
self.type: str = data["type"]
self.duration: str = data["duration"]
def __repr__(self):
return f"<Video id={self.id} title={self.title} url={self.url}>"
async def delete(self, token: str):
"""|coro|
Deletes the video. For bulk deletion see :func:`Client.delete_videos`
Parameters
-----------
token: :class:`str`
The users oauth token with the channel:manage:videos
"""
await self._http.delete_videos(token, ids=[str(self.id)])
class Tag:
"""
Represents a stream tag
Attributes
-----------
id: :class:`str`
An ID that identifies the tag.
auto: :class:`bool`
Indicates whether the tag is an automatic tag.
localization_names: Dict[:class:`str`, :class:`str`]
A dictionary that contains the localized names of the tag.
localization_descriptions: :class:`str`
A dictionary that contains the localized descriptions of the tag.
"""
__slots__ = "id", "auto", "localization_names", "localization_descriptions"
def __init__(self, data: dict):
self.id: str = data["tag_id"]
self.auto: bool = data["is_auto"]
self.localization_names: Dict[str, str] = data["localization_names"]
self.localization_descriptions: Dict[str, str] = data["localization_descriptions"]
def __repr__(self):
return f"<Tag id={self.id}>"
class WebhookSubscription:
__slots__ = "callback", "expires_at", "topic"
def __init__(self, data: dict):
self.callback: str = data["callback"]
self.expires_at = parse_timestamp(data["expired_at"])
self.topic: str = data["topic"]
def __repr__(self):
return f"<WebhookSubscription callback={self.callback} topic={self.topic} expires_at={self.expires_at}>"
class Stream:
"""
Represents a Stream
Attributes
-----------
id: :class:`int`
The current stream ID.
user: :class:`~twitchio.PartialUser`
The user who is streaming.
game_id: :class:`int`
Current game ID being played on the channel.
game_name: :class:`str`
Name of the game being played on the channel.
type: :class:`str`
Whether the stream is "live" or not.
title: :class:`str`
Title of the stream.
viewer_count: :class:`int`
Current viewer count of the stream
started_at: :class:`datetime.datetime`
UTC timestamp of when the stream started.
language: :class:`str`
Language of the channel.
thumbnail_url: :class:`str`
Thumbnail URL of the stream.
tag_ids: List[:class:`str`]
Tag IDs that apply to the stream.
.. warning::
This field will be deprecated by twitch in 2023.
is_mature: :class:`bool`
Indicates whether the stream is intended for mature audience.
tags: List[:class:`str`]
The tags applied to the channel.
"""
__slots__ = (
"id",
"user",
"game_id",
"game_name",
"type",
"title",
"viewer_count",
"started_at",
"language",
"thumbnail_url",
"tag_ids",
"is_mature",
"tags",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: int = data["id"]
self.user = PartialUser(http, data["user_id"], data["user_name"])
self.game_id: int = data["game_id"]
self.game_name: str = data["game_name"]
self.type: str = data["type"]
self.title: str = data["title"]
self.viewer_count: int = data["viewer_count"]
self.started_at = parse_timestamp(data["started_at"])
self.language: str = data["language"]
self.thumbnail_url: str = data["thumbnail_url"]
self.tag_ids: List[str] = data["tag_ids"] or []
self.is_mature: bool = data["is_mature"]
self.tags: List[str] = data["tags"]
def __repr__(self):
return f"<Stream id={self.id} user={self.user} title={self.title} started_at={self.started_at}>"
class ChannelInfo:
"""
Represents a channel's current information
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user whose channel information was requested.
game_id: :class:`int`
Current game ID being played on the channel.
game_name: :class:`str`
Name of the game being played on the channel.
title: :class:`str`
Title of the stream.
language: :class:`str`
Language of the channel.
delay: :class:`int`
Stream delay in seconds.
This defaults to 0 if the broadcaster_id does not match the user access token.
tags: List[:class:`str`]
The tags applied to the channel.
content_classification_labels: List[:class:`str`]
The CCLs applied to the channel.
is_branded_content: :class:`bool`
Boolean flag indicating if the channel has branded content.
"""
__slots__ = (
"user",
"game_id",
"game_name",
"title",
"language",
"delay",
"tags",
"content_classification_labels",
"is_branded_content",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.game_id: int = data["game_id"]
self.game_name: str = data["game_name"]
self.title: str = data["title"]
self.language: str = data["broadcaster_language"]
self.delay: int = data["delay"]
self.tags: List[str] = data["tags"]
self.content_classification_labels: List[str] = data["content_classification_labels"]
self.is_branded_content: bool = data["is_branded_content"]
def __repr__(self):
return f"<ChannelInfo user={self.user} game_id={self.game_id} game_name={self.game_name} title={self.title} language={self.language} delay={self.delay}>"
class Prediction:
"""
Represents channel point predictions
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user who is streaming.
prediction_id: :class:`str`
ID of the Prediction.
title: :class:`str`
Title for the Prediction.
winning_outcome_id: :class:`str`
ID of the winning outcome
outcomes: List[:class:`~twitchio.PredictionOutcome`]
List of possible outcomes for the Prediction.
prediction_window: :class:`int`
Total duration for the Prediction (in seconds).
prediction_status: :class:`str`
Status of the Prediction.
created_at: :class:`datetime.datetime`
Time for when the Prediction was created.
ended_at: :class:`datetime.datetime`
Time for when the Prediction ended.
locked_at: :class:`datetime.datetime`
Time for when the Prediction was locked.
"""
__slots__ = (
"user",
"prediction_id",
"title",
"winning_outcome_id",
"outcomes",
"prediction_window",
"prediction_status",
"created_at",
"ended_at",
"locked_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["broadcaster_id"], data["broadcaster_name"])
self.prediction_id: str = data["id"]
self.title: str = data["title"]
self.winning_outcome_id: str = data["winning_outcome_id"]
self.outcomes: List[PredictionOutcome] = [PredictionOutcome(http, x) for x in data["outcomes"]]
self.prediction_window: int = data["prediction_window"]
self.prediction_status: str = data["status"]
self.created_at = self._parse_time(data, "created_at")
self.ended_at = self._parse_time(data, "ended_at")
self.locked_at = self._parse_time(data, "locked_at")
def _parse_time(self, data, field) -> Optional["Datetime"]:
if field not in data or data[field] is None:
return None
time = data[field].split(".")[0]
return datetime.datetime.fromisoformat(time)
def __repr__(self):
return f"<Prediction user={self.user} prediction_id={self.prediction_id} winning_outcome_id={self.winning_outcome_id} title={self.title}>"
class Predictor:
"""
Represents a predictor
Attributes
-----------
user: :class:`~twitchio.PartialUser`
The user who is streaming.
channel_points_used: :class:`int`
Number of Channel Points used by the user.
channel_points_won: :class:`int`
Number of Channel Points won by the user.
"""
__slots__ = ("channel_points_used", "channel_points_won", "user")
def __init__(self, http: "TwitchHTTP", data: dict):
self.channel_points_used: int = data["channel_points_used"]
self.channel_points_won: int = data["channel_points_won"]
self.user = PartialUser(http, data["user_id"], data["user_login"])
def __repr__(self):
return f"<Predictor user={self.user} channel_points_used={self.channel_points_used} channel_points_won={self.channel_points_won}>"
class PredictionOutcome:
"""
Represents a prediction outcome
Attributes
-----------
outcome_id: :class:`str`
ID for the outcome.
title: :class:`str`
Text displayed for outcome.
channel_points: :class:`int`
Number of Channel Points used for the outcome.
color: :class:`str`
Color for the outcome.
users: :class:`int`
Number of unique uesrs that chose the outcome.
top_predictors: List[:class:`~twitchio.Predictor`]
List of the top predictors. Could be None.
"""
__slots__ = ("outcome_id", "title", "channel_points", "color", "users", "top_predictors")
def __init__(self, http: "TwitchHTTP", data: dict):
self.outcome_id: str = data["id"]
self.title: str = data["title"]
self.channel_points: int = data["channel_points"]
self.color: str = data["color"]
self.users: int = data["users"]
if data["top_predictors"]:
self.top_predictors: List[Predictor] = [Predictor(http, x) for x in data["top_predictors"]]
else:
self.top_predictors: List[Predictor] = None
def __repr__(self):
return f"<PredictionOutcome outcome_id={self.outcome_id} title={self.title} channel_points={self.channel_points} color={self.color} users={self.users}>"
@property
def colour(self) -> str:
"""The colour of the prediction. Alias to color."""
return self.color
def __repr__(self):
return f"<PredictionOutcome outcome_id={self.outcome_id} title={self.title} channel_points={self.channel_points} color={self.color}>"
class Schedule:
"""
Represents a channel's stream schedule
Attributes
-----------
segments: List[:class:`~twitchio.ScheduleSegment`]
List of segments of a channel's stream schedule.
user: :class:`~twitchio.PartialUser`
The user of the channel associated to the schedule.
vacation: :class:`~twitchio.ScheduleVacation`
Vacation details of stream schedule.
"""
__slots__ = ("segments", "user", "vacation")
def __init__(self, http: "TwitchHTTP", data: dict):
self.segments = [ScheduleSegment(d) for d in data["data"]["segments"]] if data["data"]["segments"] else []
self.user = PartialUser(http, data["data"]["broadcaster_id"], data["data"]["broadcaster_login"])
self.vacation = ScheduleVacation(data["data"]["vacation"]) if data["data"]["vacation"] else None
def __repr__(self):
return f"<Schedule segments={self.segments} user={self.user} vacation={self.vacation}>"
class ScheduleSegment:
"""
Represents a list segments of a channel's stream schedule
Attributes
-----------
id: :class:`str`
The ID for the scheduled broadcast.
start_time: :class:`datetime.datetime`
Scheduled start time for the scheduled broadcast
end_time: Optional[:class:`datetime.datetime`]
Scheduled end time for the scheduled broadcast
title: :class:`str`
Title for the scheduled broadcast.
canceled_until: :class:`datetime.datetime`
Used with recurring scheduled broadcasts. Specifies the date of the next recurring broadcast.
category: :class:`~twitchio.ScheduleCategory`
The game or category details for the scheduled broadcast.
is_recurring: :class:`bool`
Indicates if the scheduled broadcast is recurring weekly.
"""
__slots__ = ("id", "start_time", "end_time", "title", "canceled_until", "category", "is_recurring")
def __init__(self, data: dict):
self.id: str = data["id"]
self.start_time = parse_timestamp(data["start_time"])
self.end_time = parse_timestamp(data["end_time"]) if data["end_time"] else None
self.title: str = data["title"]
self.canceled_until = parse_timestamp(data["canceled_until"]) if data["canceled_until"] else None
self.category = ScheduleCategory(data["category"]) if data["category"] else None
self.is_recurring: bool = data["is_recurring"]
def __repr__(self):
return f"<ScheduleSegment id={self.id} start_time={self.start_time} end_time={self.end_time} title={self.title} canceled_until={self.canceled_until} category={self.category} is_recurring={self.is_recurring}>"
class ScheduleCategory:
"""
Game or category details of a stream's schedule
Attributes
-----------
id: :class:`str`
The game or category ID.
name: :class:`str`
The game or category name.
"""
__slots__ = ("id", "name")
def __init__(self, data: dict):
self.id: str = data["id"]
self.name: str = data["name"]
def __repr__(self):
return f"<ScheduleCategory id={self.id} name={self.name}>"
class ScheduleVacation:
"""
A schedule's vacation details
Attributes
-----------
start_time: :class:`datetime.datetime`
Start date of stream schedule vaction.
end_time: :class:`datetime.datetime`
End date of stream schedule vaction.
"""
__slots__ = ("start_time", "end_time")
def __init__(self, data: dict):
self.start_time = parse_timestamp(data["start_time"])
self.end_time = parse_timestamp(data["end_time"])
def __repr__(self):
return f"<ScheduleVacation start_time={self.start_time} end_time={self.end_time}>"
class Team:
"""
Represents information for a specific Twitch Team
Attributes
-----------
users: List[:class:`~twitchio.PartialUser`]
List of users in the specified Team.
background_image_url: :class:`str`
URL for the Team background image.
banner: :class:`str`
URL for the Team banner.
created_at: :class:`datetime.datetime`
Date and time the Team was created.
updated_at: :class:`datetime.datetime`
Date and time the Team was last updated.
info: :class:`str`
Team description.
thumbnail_url: :class:`str`
Image URL for the Team logo.
team_name: :class:`str`
Team name.
team_display_name: :class:`str`
Team display name.
id: :class:`str`
Team ID.
"""
__slots__ = (
"users",
"background_image_url",
"banner",
"created_at",
"updated_at",
"info",
"thumbnail_url",
"team_name",
"team_display_name",
"id",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.users: List[PartialUser] = [PartialUser(http, x["user_id"], x["user_login"]) for x in data["users"]]
self.background_image_url: str = data["background_image_url"]
self.banner: str = data["banner"]
self.created_at = parse_timestamp(data["created_at"].split(" ")[0])
self.updated_at = parse_timestamp(data["updated_at"].split(" ")[0])
self.info: str = data["info"]
self.thumbnail_url: str = data["thumbnail_url"]
self.team_name: str = data["team_name"]
self.team_display_name: str = data["team_display_name"]
self.id = data["id"]
def __repr__(self):
return f"<Team users={self.users} team_name={self.team_name} team_display_name={self.team_display_name} id={self.id} created_at={self.created_at}>"
class ChannelTeams:
"""
Represents the Twitch Teams of which the specified channel/broadcaster is a member
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
background_image_url: :class:`str`
URL for the Team background image.
banner: :class:`str`
URL for the Team banner.
created_at: :class:`datetime.datetime`
Date and time the Team was created.
updated_at: :class:`datetime.datetime`
Date and time the Team was last updated.
info: :class:`str`
Team description.
thumbnail_url: :class:`str`
Image URL for the Team logo.
team_name: :class:`str`
Team name.
team_display_name: :class:`str`
Team display name.
id: :class:`str`
Team ID.
"""
__slots__ = (
"broadcaster",
"background_image_url",
"banner",
"created_at",
"updated_at",
"info",
"thumbnail_url",
"team_name",
"team_display_name",
"id",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster: PartialUser = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.background_image_url: str = data["background_image_url"]
self.banner: str = data["banner"]
self.created_at = parse_timestamp(data["created_at"].split(" ")[0])
self.updated_at = parse_timestamp(data["updated_at"].split(" ")[0])
self.info: str = data["info"]
self.thumbnail_url: str = data["thumbnail_url"]
self.team_name: str = data["team_name"]
self.team_display_name: str = data["team_display_name"]
self.id = data["id"]
def __repr__(self):
return f"<ChannelTeams user={self.broadcaster} team_name={self.team_name} team_display_name={self.team_display_name} id={self.id} created_at={self.created_at}>"
class Poll:
"""
Represents a list of Polls for a broadcaster / channel
.. note::
Twitch have removed support for voting with bits.
By default bits_votes, bits_voting_enabled and bits_per_vote will be received as either 0 or False.
Attributes
-----------
id: :class:`str`
ID of a poll.
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
title: :class:`str`
Question displayed for the poll.
choices: List[:class:`~twitchio.PollChoice`]
The poll choices.
bits_voting_enabled: :class:`bool`
Indicates if Bits can be used for voting.
.. warning::
Twitch have removed support for voting with bits.
This will return as False
bits_per_vote: :class:`int`
Number of Bits required to vote once with Bits.
.. warning::
Twitch have removed support for voting with bits.
This will return as 0
channel_points_voting_enabled: :class:`bool`
Indicates if Channel Points can be used for voting.
channel_points_per_vote: :class:`int`
Number of Channel Points required to vote once with Channel Points.
status: :class:`str`
Poll status. Valid values: ACTIVE, COMPLETED, TERMINATED, ARCHIVED, MODERATED, INVALID
duration: :class:`int`
Total duration for the poll (in seconds).
started_at: :class:`datetime.datetime`
Date and time the poll was started.
ended_at: :class:`datetime.datetime`
Date and time the poll was ended.
"""
__slots__ = (
"id",
"broadcaster",
"title",
"choices",
"channel_points_voting_enabled",
"channel_points_per_vote",
"status",
"duration",
"started_at",
"ended_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.title: str = data["title"]
self.choices: List[PollChoice] = [PollChoice(d) for d in data["choices"]] if data["choices"] else []
self.channel_points_voting_enabled: bool = data["channel_points_voting_enabled"]
self.channel_points_per_vote: int = data["channel_points_per_vote"]
self.status: str = data["status"]
self.duration: int = data["duration"]
self.started_at: datetime.datetime = parse_timestamp(data["started_at"])
try:
self.ended_at: Optional[datetime.datetime] = parse_timestamp(data["ended_at"])
except KeyError:
self.ended_at = None
def __repr__(self):
return f"<Polls id={self.id} broadcaster={self.broadcaster} title={self.title} status={self.status} duration={self.duration} started_at={self.started_at} ended_at={self.ended_at}>"
class PollChoice:
"""
Represents a polls choices
Attributes
-----------
id: :class:`str`
ID for the choice.
title: :class:`str`
Text displayed for the choice.
votes: :class:`int`
Total number of votes received for the choice across all methods of voting.
channel_points_votes: :class:`int`
Number of votes received via Channel Points.
bits_votes: :class:`int`
Number of votes received via Bits.
.. warning::
Twitch have removed support for voting with bits.
This will return as 0
"""
__slots__ = ("id", "title", "votes", "channel_points_votes", "bits_votes")
def __init__(self, data: dict):
self.id: str = data["id"]
self.title: str = data["title"]
self.votes: int = data["votes"]
self.channel_points_votes: int = data["channel_points_votes"]
self.bits_votes: int = data["bits_votes"]
def __repr__(self):
return f"<PollChoice id={self.id} title={self.title} votes={self.votes} channel_points_votes={self.channel_points_votes} bits_votes={self.bits_votes}>"
class Goal:
"""
Represents a list of Goals for a broadcaster / channel
Attributes
-----------
id: :class:`str`
An ID that uniquely identifies this goal.
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster.
type: :class:`str`
The type of goal.
Valid values: follower, subscription, subscription_count, new_subscription and new_subscription_count.
description: :class:`str`
A description of the goal, if specified.
current_amount: :class:`int`
The current value.
target_amount: :class:`int`
Number of Bits required to vote once with Bits.
created_at: :class:`datetime.datetime`
Date and time of when the broadcaster created the goal.
"""
__slots__ = (
"id",
"broadcaster",
"type",
"description",
"current_amount",
"target_amount",
"created_at",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.id: str = data["id"]
self.broadcaster = PartialUser(http, data["broadcaster_id"], data["broadcaster_login"])
self.type: str = data["type"]
self.description: str = data["description"]
self.current_amount: int = data["current_amount"]
self.target_amount: int = data["target_amount"]
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
def __repr__(self):
return f"<Goal id={self.id} broadcaster={self.broadcaster} description={self.description} current_amount={self.current_amount} target_amount={self.target_amount} created_at={self.created_at}>"
class ChatSettings:
"""
Represents current chat settings of a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
User of the broadcaster. Only returns the ID.
emote_mode: :class:`bool`
Indicates whether emote only mode is enabled.
follower_mode: :class:`bool`
Indicates whether follower only chat is enabled.
follower_mode_duration: Optional[:class:`int`]
The length of time, in minutes, that the followers must have followed the broadcaster to participate in chat.
slow_mode: :class:`bool`
Indicates whether the chat is in slow mode.
slow_mode_wait_time: Optional[:class:`int`]
The amount of time, in seconds, that users need to wait between sending messages.
subscriber_mode: :class:`bool`
Indicates whether only users that subscribe to the broadcaster's channel can talk in chat.
unique_chat_mode: :class:`bool`
Indicates whether the broadcaster requires users to post only unique messages in the chat room.
moderator: Optional[:class:`~twitchio.PartialUser`]
The User of the moderator, if provided. Only returns the ID.
non_moderator_chat_delay: Optional[:class:`bool`]
Indicates whether the broadcaster adds a short delay before chat messages appear in the chat room.
non_moderator_chat_delay_duration: Optional[:class:`int`]
The amount of time, in seconds, that messages are delayed from appearing in chat.
"""
__slots__ = (
"broadcaster",
"emote_mode",
"follower_mode",
"follower_mode_duration",
"slow_mode",
"slow_mode_wait_time",
"subscriber_mode",
"unique_chat_mode",
"moderator",
"non_moderator_chat_delay",
"non_moderator_chat_delay_duration",
)
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.emote_mode: bool = data["emote_mode"]
self.follower_mode: bool = data["follower_mode"]
self.follower_mode_duration: Optional[int] = data.get("follower_mode_duration")
self.slow_mode: bool = data["slow_mode"]
self.slow_mode_wait_time: Optional[int] = data.get("slow_mode_wait_time")
self.subscriber_mode: bool = data["subscriber_mode"]
self.unique_chat_mode: bool = data["unique_chat_mode"]
self.non_moderator_chat_delay: Optional[bool] = data.get("non_moderator_chat_delay")
self.non_moderator_chat_delay_duration: Optional[int] = data.get("non_moderator_chat_delay_duration")
try:
self.moderator = PartialUser(http, data["moderator_id"], None)
except KeyError:
self.moderator = None
def __repr__(self):
return f"<ChatSettings broadcaster={self.broadcaster} emote_mode={self.emote_mode} follower_mode={self.follower_mode} slow_mode={self.slow_mode} subscriber_mode={self.subscriber_mode} unique_chat_mode={self.unique_chat_mode}>"
class ChatterColor:
"""
Represents chatters current name color.
Attributes
-----------
user: :class:`~twitchio.PartialUser`
PartialUser of the chatter.
color: :class:`str`
The color of the chatter's name.
"""
__slots__ = ("user", "color")
def __init__(self, http: "TwitchHTTP", data: dict):
self.user = PartialUser(http, data["user_id"], data["user_login"])
self.color: str = data["color"]
def __repr__(self):
return f"<ChatterColor user={self.user} color={self.color}>"
class Raid:
"""
Represents a raid for a broadcaster / channel
Attributes
-----------
created_at: :class:`datetime.datetime`
Date and time of when the raid started.
is_mature: :class:`bool`
Indicates whether the stream being raided is marked as mature.
"""
__slots__ = ("created_at", "is_mature")
def __init__(self, data: dict):
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
self.is_mature: bool = data["is_mature"]
def __repr__(self):
return f"<Raid created_at={self.created_at} is_mature={self.is_mature}>"
class Ban:
"""
Represents a ban for a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster whose chat room the user was banned from chatting in.
moderator: :class:`~twitchio.PartialUser`
The moderator that banned the user.
user: :class:`~twitchio.PartialUser`
The user that was banned.
created_at: :class:`datetime.datetime`
Date and time of when the ban was created.
"""
__slots__ = ("broadcaster", "moderator", "user", "created_at")
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.moderator = PartialUser(http, data["moderator_id"], None)
self.user = PartialUser(http, data["user_id"], None)
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
def __repr__(self):
return f"<Ban broadcaster={self.broadcaster} user={self.user} created_at={self.created_at}>"
class Timeout:
"""
Represents a timeout for a broadcaster / channel
Attributes
-----------
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster whose chat room the user was timed out from chatting in.
moderator: :class:`~twitchio.PartialUser`
The moderator that timed the user out.
user: :class:`~twitchio.PartialUser`
The user that was timed out.
created_at: :class:`datetime.datetime`
Date and time of when the timeout was created.
end_time: :class:`datetime.datetime`
Date and time of when the timeout will end.
"""
__slots__ = ("broadcaster", "moderator", "user", "created_at", "end_time")
def __init__(self, http: "TwitchHTTP", data: dict):
self.broadcaster = PartialUser(http, data["broadcaster_id"], None)
self.moderator = PartialUser(http, data["moderator_id"], None)
self.user = PartialUser(http, data["user_id"], None)
self.created_at: datetime.datetime = parse_timestamp(data["created_at"])
self.end_time: datetime.datetime = parse_timestamp(data["end_time"])
def __repr__(self):
return f"<Timeout broadcaster={self.broadcaster} user={self.user} created_at={self.created_at} end_time={self.end_time}>"
class ShieldStatus:
"""
Represents a Shield Mode activation status.
Attributes
-----------
moderator: :class:`~twitchio.PartialUser`
The moderator that last activated Shield Mode.
display_name: :class:`str`
The moderator's display name. Is an empty string if Shield Mode hasn't been previously activated.
last_activated_at: :class:`datetime.datetime`
The UTC datetime of when Shield Mode was last activated.
Is an empty string if Shield Mode hasn't been previously activated.
is_active: :class:`bool`
A Boolean value that determines whether Shield Mode is active.
Is true if the broadcaster activated Shield Mode; otherwise, false.
"""
__slots__ = ("moderator", "display_name", "last_activated_at", "is_active")
def __init__(self, http: "TwitchHTTP", data: dict):
self.moderator: Optional[PartialUser] = (
PartialUser(http, data["moderator_id"], data["moderator_login"]) if data["moderator_id"] else None
)
self.display_name: Optional[str] = data.get("moderator_name")
self.is_active: bool = data["is_active"]
self.last_activated_at: Optional[datetime.datetime] = (
parse_timestamp(data["last_activated_at"]) if data["last_activated_at"] else None
)
def __repr__(self):
return f"<ShieldStatus moderator={self.moderator} is_active={self.is_active} last_activated_at={self.last_activated_at}>"
class ChatBadge:
"""
Represents chat badges.
Attributes
-----------
set_id: :class:`str`
An ID that identifies this set of chat badges. For example, Bits or Subscriber.
versions: List[:class:`~twitchio.ChatBadgeVersions`]
The list of chat badges in this set.
"""
__slots__ = ("set_id", "versions")
def __init__(self, data: dict):
self.set_id: str = data["set_id"]
self.versions: List[ChatBadgeVersions] = [ChatBadgeVersions(version_data) for version_data in data["versions"]]
def __repr__(self):
return f"<ChatBadge set_id={self.set_id} versions={self.versions}>"
class ChatBadgeVersions:
"""
Represents the different versions of the chat badge.
Attributes
-----------
id: :class:`str`
An ID that identifies this version of the badge. The ID can be any value.
image_url_1x: :class:`str`
URL to the small version (18px x 18px) of the badge.
image_url_2x: :class:`str`
URL to the medium version (36px x 36px) of the badge.
image_url_4x: :class:`str`
URL to the large version (72px x 72px) of the badge.
title: :class:`str`
The title of the badge.
description: :class:`str`
The description of the badge.
click_action: Optional[:class:`str`]
The action to take when clicking on the badge. This can be None if no action is specified
click_url: Optional[:class:`str`]
The URL to navigate to when clicking on the badge. This can be None if no URL is specified.
"""
__slots__ = (
"id",
"image_url_1x",
"image_url_2x",
"image_url_4x",
"title",
"description",
"click_url",
"click_action",
)
def __init__(self, data: dict):
self.id: str = data["id"]
self.image_url_1x: str = data["image_url_1x"]
self.image_url_2x: str = data["image_url_2x"]
self.image_url_4x: str = data["image_url_4x"]
self.title: str = data["title"]
self.description: str = data["description"]
self.click_action: Optional[str] = data.get("click_action")
self.click_url: Optional[str] = data.get("click_url")
def __repr__(self):
return f"<ChatBadgeVersions id={self.id} title={self.title}>"
class ContentClassificationLabel:
"""
Represents a Content Classification Label.
Attributes
-----------
id: :class:`str`
Unique identifier for the CCL.
description: :class:`str`
Localized description of the CCL.
name: :class:`str`
Localized name of the CCL.
"""
__slots__ = ("id", "description", "name")
def __init__(self, data: dict):
self.id: str = data["id"]
self.description: str = data["description"]
self.name: str = data["name"]
def __repr__(self):
return f"<ContentClassificationLabel id={self.id}>"
class CharityValues:
"""
Represents the current/target funds of a charity campaign.
Attributes
-----------
value: :class:`int`
The value of the campaign (either so far, or the target value).
decimal_places: :class:`int`
The decimal places to be inserted into :attr:`.value`.
currency: :class:`str`
The currency this charity is raising funds in. eg ``USD``, ``GBP``, ``EUR``.
"""
__slots__ = ("value", "decimal_places", "currency")
def __init__(self, data: dict) -> None:
self.value: int = data["value"]
self.decimal_places: int = data["decimal_places"]
self.currency: str = data["currency"]
def __repr__(self) -> str:
return f"<CharityValues value={self.value} decimal_places={self.decimal_places} currency={self.currency}>"
class CharityCampaign:
"""
Represents a Charity Campaign on a channel.
Attributes
-----------
campaign_id: :class:`str`
The ID of the running charity campaign.
broadcaster: :class:`~twitchio.PartialUser`
The broadcaster running the campaign.
user: :class:`~twitchio.PartialUser`
The user who donated.
charity_name: :class:`str`
The name of the charity.
charity_description: :class:`str`
The description of the charity.
charity_logo: :class:`str`
The logo of the charity.
charity_website: :class:`str`
The websiet of the charity.
current: :class:`CharityValues`
The current funds raised by this campaign.
target: :class:`CharityValues`
The target funds to be raised for this campaign.
"""
__slots__ = (
"campaign_id",
"broadcaster",
"charity_name",
"charity_description",
"charity_logo",
"charity_website",
"current",
"target",
)
def __init__(self, data: dict, http: TwitchHTTP, broadcaster: PartialUser | None = None) -> None:
self.campaign_id: str = data["campaign_id"]
self.broadcaster: PartialUser = broadcaster or PartialUser(
http, data["broadcaster_id"], data["broadcaster_name"]
)
self.charity_name: str = data["charity_name"]
self.charity_description: str = data["charity_description"]
self.charity_logo: str = data["charity_logo"]
self.charity_website: str = data["charity_website"]
self.current: CharityValues = CharityValues(data["current_amount"])
self.target: CharityValues = CharityValues(data["target_amount"])
def __repr__(self) -> str:
return f"<CharityCampaign broadcaster={self.broadcaster} campaign_id={self.campaign_id} charity_name={self.charity_name}>"
| PythonistaGuild/TwitchIO | twitchio/models.py | models.py | py | 69,250 | python | en | code | 714 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utils.parse_timestamp",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "utils.parse_timestamp",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "user.... |
11728318125 | import numpy as np
from tabulate import tabulate
from clustering.external_evaluation import calculate_purity
from clustering.k_means import KMeans
from data_preparation.inverted_index import InvertedIndex
from data_preparation.pre_processing import parse_corpus, pre_process_corpus
corpus, y_true, titles = parse_corpus()
preprocessed_corpus = pre_process_corpus(corpus)
y_true = [y_true.index(l) for l in y_true]
def generate_matrix(preprocessed_corpus):
inverted_index = InvertedIndex()
for i in range(len(preprocessed_corpus)):
for term in preprocessed_corpus[i].split():
inverted_index.parse_term(term, i)
document_term_matrix = np.array(inverted_index.make_document_by_term_matrix())
return document_term_matrix
matrix = generate_matrix(preprocessed_corpus)
k = KMeans(5, 1000)
document_clusters = k.assign_documents_to_cluster(matrix)
y_pred = document_clusters[0]
clusters = document_clusters[1]
cluster_tightness = document_clusters[2]
top_documents = document_clusters[3]
def write_clusters():
with open("clusters.txt", "w") as f:
for i in range(len(clusters)):
data = []
f.write(
"Cluster #%d contains the following %d documents: "
% (i, len(clusters[i]))
)
f.write("\n\n")
for j in range(len(clusters[i])):
id = clusters[i][j]
data.append([id, titles[id]])
f.write(tabulate(data, headers=["Document ID", "Document Title"]))
f.write("\n\n")
def sort_tuples(tuples):
# sort tuples in ascending order by the second element
# (distance from the centroid), which acts as the key
tuples.sort(key=lambda x: x[1])
return tuples
def show_summary():
for i in range(len(top_documents)):
data = []
print("The top 3 documents in cluster #%d are:\n " % i)
sortedTuples = sort_tuples(top_documents[i])[:3]
for j in sortedTuples:
data.append([j[0], titles[j[0]]])
print(tabulate(data, headers=["Document ID", "Document Title"]))
print()
def show_RSS():
data = []
for i in range(len(cluster_tightness)):
data.append([i, cluster_tightness[i]])
print(tabulate(data, headers=["Cluster ID", "RSS"]))
print("\nThe total RSS is %.2f." % sum(cluster_tightness))
def show_purity():
purity = calculate_purity(y_pred, y_true)
print("The purity is %.2f." % (100 * purity))
def display_menu():
# display menu shown to user
print("")
print(60 * "-", "Menu", 60 * "-")
print("1. Show Cluster Summary")
print("2. Calculate RSS")
print("3. Calculate Purity")
print("4. Write Clusters")
print("5. Exit")
print(127 * "-")
print("")
def wait_for_input():
input("\nPlease press Enter to continue...")
status = True
# main loop to display the menu
while status:
display_menu()
selection = input("Please enter your selection (1-4): ")
print()
if selection == "1":
show_summary()
wait_for_input()
elif selection == "2":
show_RSS()
wait_for_input()
elif selection == "3":
show_purity()
wait_for_input()
elif selection == "4":
write_clusters()
wait_for_input()
elif selection == "5":
print("\nThe program will now terminate.")
status = False
else:
# prompt user for a valid selection
input("Please select a valid option from the menu.\n")
| nzabdelke/News-Clustering | main.py | main.py | py | 3,536 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "data_preparation.pre_processing.parse_corpus",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "data_preparation.pre_processing.pre_process_corpus",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "data_preparation.inverted_index.InvertedIndex",
... |
72497450429 | from bs4 import BeautifulSoup
import requests, os
#Configuration Variables
search_refs = True
build_path = "API"
API_URL = "https://pythonapi.upbge.org/"
#Further addons
headers = {"bge" + os.sep + "types.py" : """
import mathutils
inf = 0
class CListValue:
def __init__(self, ctype):
self.__ret__ = ctype
self.__i__ = None
self.__itf__ = False
def __instanceme__(self):
if self.__i__ == None:
self.__i__ = self.__ret__()
return self.__i__
def __getitem__(self, key): return self.__instanceme__()
def __setitem__(self, key, val): return self.__instanceme__()
def get(self, key): return self.__instanceme__()
def __iter__(self): return self
def __next__(self):
self.__itf__ = not self.__itf__
if self.__itf__: return self.__instanceme__()
else: raise StopIteration
""",
"bge" + os.sep + "logic.py" :
"""globalDict = {}
keyboard = None
mouse = None
joysticks = []
"""}
erase = {"bge" + os.sep + "logic.py" : [
"""globalDict = None
keyboard = None
mouse = None
joysticks = None"""]}
fixes = {
"RandomMusic": [(", transition=(5)", ", transition=(5,0,0))")]
}
def dataToPath(dp):
i = dp.rfind(".")
return os.path.normpath(dp[:len(dp) if i == -1 else i].replace(".", "/") + ".py")
class File:
done_files = []
done_urls = []
registred_class = {}
def __init__(self, url, recursive=False, prefix=""):
self.current_class = ""
self.current_module = ""
self.recursive = recursive
self.makePage(url, recursive=recursive, prefix=prefix)
def getType(self, dl, noerror=False):
if dl==None: raise Exception("dl should not be None")
if type(dl)!=str:
try: t = dl.dd.table.tbody.tr.td.get_text()
except Exception: return "None"
else: t=dl
t=t.replace("\t", "")
t=t.replace("‘s", "")
#Correctors
if t == "MeshProxy": t = "KX_MeshProxy"
if t == "boolen": t = "bool"
#Registred
if t == self.current_class: return "self"
if t in File.registred_class.keys():
m = File.registred_class[t]
if self.current_module == m: return t + "()"
else: return m + '.' + t +"()"
for k, v in File.registred_class.items():
m = v+'.'+k
if m == t: return m + "()"
#Direct addressing
if t in ["float", "int", "bool"]: return t + "()"
if t in ["boolean", "boolean.", "bool"]: return "bool()"
if t == "double": return "float()"
if t in ["integer", "bitfield"]: return "int()"
if t in ["string", "str"]: return "str()"
if t in ["matrix", "Matrix", "mathutils.Matrix"]:
if self.current_module != "mathutils": return "mathutils.Matrix()"
else: return "Matrix()"
if t in ["vector", "Vector", "mathutils.Vector"]:
if self.current_module != "mathutils": return "mathutils.Vector()"
else: return "Vector()"
if t == "list" and not noerror: return "list()"
if t == "dict" and not noerror: return "dict()"
if t == "tuple" and not noerror: return "tuple()"
if t == "Quaternion":
if self.current_module != "mathutils": return "mathutils.Quaternion()"
else: return "Quaternion()"
#Special cases
if t == "list of functions and/or methods": return "list()"
if t == "3d vector.": return "mathutils.Vector()"
if t == "3-tuple (float, 3-tuple (x, y, z), 3-tuple (x, y, z))": return "(float, (0,0,0), (0,0,0))"
if t.startswith("\n3-tuple (KX_GameObject, 3-tuple (x, y, z), 3-tuple (nx, ny, nz))"):
return "(KX_GameObject, (0,0,0), (0,0,0), KX_PolyProxy, (0,0))"
if t == "list [x, y]": return "[0,0]"
if t in ["(integer, integer)", "(int,int)", "(int, int)"]: return "(0,0)"
if t == "list [str]": return "[str()]"
if t == "list [r, g, b]": return "[0,0,0]"
if t == "list[x, y, z]": return "[0,0,0]"
if t == "(Vector, float) pair": return "(Vector(), float())"
if t == "Matrix4x4 (read only)": return "mathutils.Matrix()"
if t == "tuple of two ints": return "(0,0)"
if t == "sequence of two ints": return "[0,0]"
if t == "sequence of two floats": return "[0.0,0.0]"
if t == "sequence of three ints": return "[0,0,0]"
if t == "sequence of four sequences of two ints": return "[[0,0],[0,0],[0,0],[0,0]]"
if t == "sequence of four sequences of five ints": return "[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]"
if t == "Buffer\n": return "bgl.Buffer()"
if t == "sequence supporting index/string lookups and iteration.": return "dict()"
#Addressing of containers
for st in ["list of ", "CListValue of "]:
if t.startswith(st):
h=self.getType(t[len(st):], True)
if h != "None":
if h.endswith("()"): h=h[:-2]
if h=="self": h=self.current_class
if self.current_module == "bge.types":
return "CListValue(" + h + ")"
else: return "bge.types.CListValue(" + h + ")"
if t.startswith("Vector"):
if self.current_module != "mathutils": return "mathutils.Vector()"
else: return "Vector()"
#Last chances to get it right
for ch in ['\n', ' ', ',']:
if ch in t:
for x in t.split(ch):
h=self.getType(x, True)
if h!="None": return h
for x in ["non-negative", "None"]:
if x in t: return "None"
if not noerror:
if type(dl) != str and search_refs:
links = dl.dd.table.tbody.tr.td.find_all("a")
url = File.done_urls[-1]
base_url = url[:url.rfind("/")+1]
for l in links:
link = l["href"]
if not "CListValue" in link:
link = base_url + link[:link.rfind("#")]
File(link, recursive=True)
return self.getType(dl, noerror)
print("Unknown type:", t)
return "None"
def getReturnType(self, o):
if o.dd.table == None: return "None"
for tr in o.dd.table.tbody.find_all("tr"):
if tr.th.string=="Return type:":
return self.getType(tr.td.get_text())
return "None"
def makePage(self, url, tab='', recursive=False, prefix=""):
if url in File.done_urls: return
else: File.done_urls.append(url)
if not url.endswith(".html"):
print("Skipped:", url)
return
print("Building page: ", url)
r = requests.get(url).text
soup = BeautifulSoup(r, "html.parser")
body = soup.body.find("h1").parent
if body.p.get_text().startswith("base class"):
link = body.p.a["href"]
link = url[:url.rfind("/")+1] + link[:link.rfind("#")]
if recursive==True:
File(link, recursive)
#Get current module, autodetect class vs module using case sensitive.
self.current_module = prefix + url[url.rfind('/')+1:url.rfind(".html")]
i = self.current_module.rfind(".")
if i != -1:
if not self.current_module.split(".")[-1][0:1].islower():
self.current_module = self.current_module[:i]
dest = url[url.rfind('/')+1:url.rfind(".html")]
else: dest = self.current_module + "."
else: dest = self.current_module + "."
#Identify Class or Module level data
code = ""
for dl in soup.find_all("dl"):
dtype=dl.get("class")
if dtype[0]=="class":
code += '\n' + self.makePythonClass(dl) + '\n'
if dtype[0]=="data":
name = dl.dt["id"]
#Make sure it's at module level
if len(name.split('.')) == len(self.current_module.split('.'))+1:
value = "None"
for th in dl.find_all("th"):
if th.get_text() == "Value:":
value = th.parent.td.get_text()
code += name.split('.')[-1] + " = " + value + "\n"
if dtype[0]=="function":
name = dl.dt["id"]
if len(name.split('.')) == len(self.current_module.split('.'))+1:
code += self.writeFunction(dl, False, '')
#Write the file
odest = dataToPath(dest)
dest = build_path + os.sep + odest
if os.sep in dest:
os.makedirs(os.path.dirname(dest), exist_ok=True)
if dest in File.done_files:
with open(dest, "a+", encoding="utf-8") as out: out.write(code)
else:
try: code = headers[odest] + code
except KeyError: pass
try:
for x in erase[odest]: code=code.replace(x, "")
except KeyError: pass
with open(dest, "w", encoding="utf-8") as out: out.write(code)
File.done_files.append(dest)
def makePythonClassTitle(self, dt):
cn = dt["id"]
self.current_class = cn[cn.rfind(".")+1:]
File.registred_class[self.current_class] = self.current_module
code = "class " + self.current_class + '('
for x in dt.find_all("em"):
if x.get("class"): continue
if not x.string[0].isupper(): continue
if x.string in ["A", "B", "C", "D", "E", "F"]: continue
code += x.string + ','
if code.endswith(","): return code[:-1] + '):\n'
else: return code [:-1]+ ":\n"
def makePythonClass(self, dl, tab=''):
tab+='\t'
docstring = '"""' + dl.dd.p.get_text() + '"""'
code = self.makePythonClassTitle(dl.dt) + tab + docstring + '\n\n'
temp_code = tab + "def __init__(self, "
for x in dl.dt.find_all("em"):
if x.get("class"): continue
if not x.string[0].islower() and not x.string in ["A", "B", "C", "D", "E", "F"]: continue
if not "=" in x.string: temp_code += x.string+"=None, "
else:
if x.string.split("=")[1][0]== '<':
temp_code += x.string.split("=")[0] + "=None, "
else:
temp_code += x.string + ', '
temp_code = temp_code[:-2] + "):\n"
tab+='\t'
for o in dl.dd.find_all("dl"):
if o["class"][0]=="data":
temp_code += tab + "self." + o.dt.code.string + " = int()\n"
if o["class"][0]=="attribute":
temp_code += tab + "self." + o.dt.code.string + " = " + self.getType(o) + '\n'
if not temp_code.endswith(":\n"): code += temp_code
tab=tab[:-1]
for o in dl.dd.find_all("dl"):
if o["class"][0]=="method":
code += self.writeFunction(o, True, tab)
if self.current_class in fixes:
for el in fixes[self.current_class]:
x, y = el
code = code.replace(x, y)
return code
def writeFunction(self, o, is_method=True, tab='\t'):
if is_method:
code = '\n' + tab + "def " + o.dt.code.string + "(self, "
else:
code = '\n' + tab + "def " + o.dt.find_all("code")[-1].string + "("
for arg in o.dt.find_all("em"):
m = arg.string.split("=")
if len(m)>1 and any([m[1].startswith(x) for x in ["KX_", "IMB_"]]):
code += m[0] + '=None, '
else: code += arg.string + ', '
if code.endswith("("): code += "):"
else: code = code[:-2]+"):"
try:
docstring = '"""' + o.dd.p.get_text() + '"""'
code += '\n' + tab + '\t' + docstring + '\n'
except Exception: code += " pass\n"
rt = self.getReturnType(o)
if rt != "None":
if code.endswith(" pass\n"): code=code[:-len(" pass\n")]+"\n"
tab+='\t'
if "bge." in rt: code += tab + "import bge\n"
code += tab + "return " + rt + '\n'
tab=tab[:-1]
if "deprecated" in code or "Deprecated" in code: return ""
return code
def build(url): File(url, recursive=True, prefix="core." if "api/" in url else "")
def build_bge(url):
build(url + "mathutils.html")
build(url + "bge.types.KX_MeshProxy.html")
build(url + "bge.types.KX_CharacterWrapper.html")
build(url + "bge.types.KX_VehicleWrapper.html")
build(url + "bge.types.SCA_PythonController.html")
build(url + "bge.types.KX_Scene.html")
build(url + "bge.logic.html")
build(url + "bge.texture.html")
build(url + "bge.events.html")
build(url + "bge.app.html")
build(url + "bge.constraints.html")
init="from . import logic, types, texture, events, app, constraints"
init_path = build_path + os.sep + "bge" + os.sep + "__init__.py"
with open(init_path, "w", encoding="utf-8") as out: out.write(init)
def build_core(url):
build(url + "api/media.html")
build(url + "api/event.html")
build(url + "api/sequencer.html")
build(url + "api/utils.html")
init="from . import media, event, utils, sequencer\nmedia.music=media.AudioFile()"
init_path = build_path + os.sep + "core" + os.sep + "__init__.py"
with open(init_path, "w", encoding="utf-8") as out: out.write(init)
def test():
test_bge()
test_core()
def test_bge():
import traceback
sys.path.append(build_path)
try:
import mathutils, bge
v=mathutils.Vector()
m=mathutils.Matrix()
scn = bge.logic.getCurrentScene()
o = scn.objects["some"]
a=o.isPlayingAction()
b=o.parent.addDebugProperty("LOL")
o.endObject()
print("Test BGE: OK")
except Exception: traceback.print_exc()
def test_core():
import traceback
sys.path.append(build_path)
try:
import core
core.media.music.filepath = ""
print("Test CORE: OK")
except Exception: traceback.print_exc()
build_path = os.path.normpath(build_path)
import sys
if len(sys.argv) == 1:
build_bge(API_URL)
build_core("http://coredoc.royalwebhosting.net/")
test()
print("Done.")
if len(sys.argv) == 2:
if sys.argv[1] == "-test": test()
| elmeunick9/UPBGE-CommunityAddon | documentation/BGEMockGen/make.py | make.py | py | 14,473 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "os.sep",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.normpath",
"line_numb... |
6757308419 | # exclude from patching
DONT_PATCH_MY_STAR_IMPORTS = True
from mods.RiftOptimizer.Patcher import *
import threading
import queue
import Level
import LevelGen
import inspect
import logging
import SteamAdapter
import Game
import os
import pygame
import dill as pickle
import mods.RiftOptimizer.RiftOptimizer as RiftOptimizer
####################################################
# Importing RiftWizard.py |
# Credit to trung on discord |
# |
#---------------------------------------------- |
import inspect # |
def get_RiftWizard(): # |
# Returns the RiftWizard.py module object |
for f in inspect.stack()[::-1]: # |
if "file 'RiftWizard.py'" in str(f): # |
return inspect.getmodule(f[0]) # |
# |
return inspect.getmodule(f[0]) # |
# |
RiftWizard = get_RiftWizard() # |
# |
# |
####################################################
import sys
need_to_setup_print_logs = False
if 'print' in sys.argv:
need_to_setup_print_logs = True
# Level.py calls both logging.debug and Logger.debug which are distinct apparently
original_logging_debug = logging.debug
def logging_debug(self, *args, **kwargs):
channel.put((original_logging_debug, (self, *args, *kwargs)))
Level.logging.debug = logging_debug
logging.debug = logging_debug
original_debug = logging.Logger.debug
def log_debug(self, *args, **kwargs):
channel.put((original_debug, (self, *args, *kwargs)))
def local_setup_logging(self):
# Clear handlers if they exist
for h in list(self.combat_log.handlers):
self.combat_log.removeHandler(h)
if need_to_setup_print_logs:
self.combat_log.addHandler(logging.StreamHandler(sys.stdout))
self.combat_log.addHandler(logging.FileHandler(os.path.join(self.logdir if self.logdir else '.', 'combat_log.txt'), mode='a'))
LevelGen.level_logger.debug = log_debug.__get__(LevelGen.level_logger,logging.Logger)
RiftWizard.mem_log.debug = log_debug.__get__(RiftWizard.mem_log,logging.Logger)
SteamAdapter.stats_log.debug = log_debug.__get__(SteamAdapter.stats_log,logging.Logger)
def setup_logging(self, logdir, level_num):
self.combat_log = logging.getLogger("damage")
self.combat_log.setLevel(logging.DEBUG)
self.combat_log.propagate = False
self.combat_log.debug = log_debug.__get__(self.combat_log,logging.Logger)
self.logdir = logdir
self.level_no = level_num
channel.put((local_setup_logging, (self)))
Level.Level.setup_logging = setup_logging
original_next_log_turn = Level.Level.next_log_turn
def next_log_turn(self, *args, **kwargs):
channel.put((original_next_log_turn, (self, *args, *kwargs)))
Level.Level.next_log_turn = next_log_turn
def write_finalize_level(stats, run_number, level_number):
filename = os.path.join('saves', str(run_number), 'stats.level_%d.txt' % level_number)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'w') as outfile:
outfile.write(''.join(stats))
def finalize_level(self, victory):
self.total_turns += self.cur_level.turn_no
stats = []
stats.append("Realm %d\n" % self.level_num)
if self.trial_name:
stats.append(self.trial_name + "\n")
stats.append("Outcome: %s\n" % ("VICTORY" if victory else "DEFEAT"))
stats.append("\nTurns taken:\n")
stats.append("%d (L)\n" % self.cur_level.turn_no)
stats.append("%d (G)\n" % self.total_turns)
counts = sorted(self.cur_level.spell_counts.items(), key=lambda t: -t[1])
spell_counts = [(s, c) for (s, c) in counts if not s.item]
if spell_counts:
stats.append("\nSpell Casts:\n")
for s, c in spell_counts:
stats.append("%s: %d\n" % (s.name, c))
dealers = sorted(self.cur_level.damage_dealt_sources.items(), key=lambda t: -t[1])
if dealers:
stats.append("\nDamage to Enemies:\n")
for s, d in dealers[:5]:
stats.append("%d %s\n" % (d, s))
if len(dealers) > 6:
total_other = sum(d for s,d in dealers[5:])
stats.append("%d Other\n" % total_other)
sources = sorted(self.cur_level.damage_taken_sources.items(), key=lambda t: -t[1])
if sources:
stats.append("\nDamage to Wizard:\n")
for s, d in sources[:5]:
stats.append("%d %s\n" % (d, s))
if len(sources) > 6:
total_other = sum(d for s,d in sources[5:])
stats.append("%d Other\n" % total_other)
item_counts = [(s, c) for (s, c) in counts if s.item]
if item_counts:
stats.append("\nItems Used:\n")
for s, c in item_counts:
stats.append("%s: %d\n" % (s.name, c))
if self.recent_upgrades:
stats.append("\nPurchases:\n")
for u in self.recent_upgrades:
fmt = u.name
if getattr(u, 'prereq', None):
fmt = "%s %s" % (u.prereq.name, u.name)
stats.append("%s\n" % fmt)
self.recent_upgrades.clear()
channel.put((write_finalize_level, (stats, self.run_number, self.level_num)))
RiftOptimizer.replace_only_vanilla_code(Game.Game.finalize_level,finalize_level)
def threaded_screenshot(surface, filename, run_number, level_number):
filename = os.path.join('saves', str(run_number), filename % level_number)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
pygame.image.save(surface, filename)
def make_level_screenshot(self):
self.draw_level()
self.draw_character()
fake_portal = Level.Portal(self.game.cur_level.gen_params)
self.examine_target = fake_portal
self.draw_examine()
channel.put((threaded_screenshot, (self.screen.copy(), 'level_%d_begin.png', self.game.run_number, self.game.level_num)))
self.examine_target = None
self.draw_examine()
RiftOptimizer.replace_only_vanilla_code(RiftWizard.PyGameView.make_level_screenshot,make_level_screenshot)
def make_level_end_screenshot(self):
self.draw_level()
self.draw_character()
self.examine_display.fill((0, 0, 0))
self.draw_panel(self.examine_display)
self.draw_level_stats()
self.screen.blit(self.examine_display, (self.screen.get_width() - self.h_margin, 0))
channel.put((threaded_screenshot, (self.screen.copy(), 'level_%d_finish.png', self.game.run_number, self.game.level_num)))
RiftOptimizer.replace_only_vanilla_code(RiftWizard.PyGameView.make_level_end_screenshot,make_level_end_screenshot)
def setup_logger_thread(channel):
try:
# let's wait for the first message
try:
msg = channel.get(timeout=1)
except queue.Empty:
print("\nthe ThreadedIO queue was empty after 1 second. the main thread might have crashed. will give up in 10 more seconds")
# TODO - should this be configurable?
giveup_timer = 10
while giveup_timer > 0:
try:
msg = channel.get(timeout=1)
print("communication reestablished\n")
break
except queue.Empty:
giveup_timer -= 1
if giveup_timer <= 3 and giveup_timer > 0:
print(giveup_timer)
if giveup_timer <= 0:
# TODO - revert to default functions first?
return
if not handle_message(msg):
return
# messages arrive and are executed sequentially in the same order as the main thread sent them
while True:
msg = channel.get()
if not handle_message(msg):
return
except:
# just crash the whole game if the io thread crashes
if not root_window:
back_channel.put("crash")
root_window.running = False
raise
def handle_message(msg):
if msg == "quit":
back_channel.put("quitting")
return False
elif hasattr(msg, '__len__') and len(msg) == 2 and callable(msg[0]):
if hasattr(msg[1], '__iter__'):
msg[0](*msg[1])
else:
msg[0](msg[1])
elif isinstance(msg, RiftWizard.PyGameView):
root_window = msg
else:
print("unknown message to IO thread:")
print(msg)
return True
channel = queue.Queue()
back_channel = queue.Queue()
original_run = RiftWizard.PyGameView.run
io_thread = threading.Thread(target=setup_logger_thread, args=(channel,), name="WriterThread")
io_thread.start()
# override RiftWizard.run() in order to close thread, handle crashes, etc
def run(self):
try:
try:
channel.put(self)
back_channel.get(False)
print("closing main thread due to ThreadedIO crash")
return
except queue.Empty:
pass
except:
raise
original_run(self)
except:
# make sure thread is killed if any error occurs
channel.put("quit")
io_thread.join()
raise
channel.put("quit")
# give the io thread time to close
try:
back_channel.get(timeout=2)
except queue.Empty:
pass
io_thread.join()
RiftWizard.PyGameView.run = run | anotak/RiftOptimizer | ThreadedIO.py | ThreadedIO.py | py | 9,785 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "inspect.stack",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "inspect.getmodule",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "inspect.getmodule",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"lin... |
3919530622 | # standard python
import base64
import bz2
import datetime
import json
import multiprocessing
import optparse
import os
import re
import socket
import sys
import time
import urllib.parse
import urllib.request
# custom browser driver
from webxray.ChromeDriver import ChromeDriver
class Client:
def __init__(self, server_url, pool_size=None):
"""
Init allows us to set a custom pool_size, otherwise
we base on CPU count.
"""
self.server_url = server_url
if pool_size:
self.pool_size = pool_size
else:
self.pool_size = multiprocessing.cpu_count()
# __init__
def get_and_process_client_tasks(self,proc_num):
"""
This is the main loop that should run indefintely. Purpose is to
send server "ready" message to get tasks which are either wait,
get_scan, or get_policy. If unable to get commands it will
wait and try again in 5 seconds. If command is get_scan or
get_policy, the appropriate action will be taken and results
will be sent as POST data back to server.
"""
local_test = False
debug = True
if local_test:
client_id = 'local_client'
wbxr_server_url = 'http://127.0.0.1:5000/'
else:
client_id = socket.gethostname()
wbxr_server_url = self.server_url
if debug: print(f'{client_id} [{proc_num}]\t😀 starting')
# main loop
while True:
# set up request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
data = urllib.parse.urlencode({'ready':True,'client_id':client_id})
data = data.encode('utf8')
# attempt to get commands
if debug: print(f'[{proc_num}]\t📥 fetching commands')
try:
command_params = json.loads(urllib.request.urlopen(request,data,timeout=60).read().strip().decode('utf-8'))
except:
print(f'[{proc_num}]\t👎 Unable to contact server, will wait and try again.')
time.sleep(5)
continue
# process commands
task = command_params['task']
print('[%s]\t👉 TASK IS: %s' % (proc_num, task))
if task == 'wait':
time.sleep(10)
continue # restarts main loop
elif task == 'get_scan' or task == 'get_policy' or task == 'get_crawl' or task == 'get_random_crawl':
target = command_params['target']
client_config = command_params['client_config']
else:
print(f'[{proc_num}]\t🥴 CANNOT READ COMMAND SET, EXITING')
return
if debug: print('[%s]\t🚗 setting up driver' % proc_num)
if client_config['client_browser_type'] == 'chrome':
browser_driver = ChromeDriver(client_config, port_offset=proc_num)
else:
print('[%s]\t🥴 INVALID BROWSER TYPE, HARD EXIT!' % proc_num)
exit()
print(f'[{proc_num}]\t🏃♂️ GOING TO {task} on {str(target)[:30]}...')
if task == 'get_scan':
task_result = browser_driver.get_scan(target)
elif task == 'get_crawl':
task_result = browser_driver.get_crawl(target)
elif task == 'get_policy':
task_result = browser_driver.get_scan(target, get_text_only=True)
elif task == 'get_random_crawl':
task_result = browser_driver.get_random_crawl(target)
# unpack result
success = task_result['success']
task_result = task_result['result']
# if scan was successful we will have a big chunk of data
# so we compress it to speed up network xfer and reduce disk
# utilization while it is in the result queue
if success:
if debug: print(f'[{proc_num}]\t🗜️ compressing output for {str(target)[:30]}...')
task_result = base64.urlsafe_b64encode(bz2.compress(bytes(json.dumps(task_result),'utf-8')))
# build request to post results to server
if debug: print(f'[{proc_num}]\t📤 returning output')
data = urllib.parse.urlencode({
'client_id' : client_id,
'success' : json.dumps(success),
'target' : json.dumps(target),
'task' : task,
'task_result' : task_result
})
data = data.encode('utf-8')
# send the request
request = urllib.request.Request(
wbxr_server_url,
headers = {
'User-Agent' : 'wbxr_client_v0_0',
}
)
# adding charset parameter to the Content-Type header.
request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
# note we can lose this result
try:
print(f'[{proc_num}]\t📥 RESPONSE: %s' % (urllib.request.urlopen(request,data,timeout=600).read().decode('utf-8')))
continue
except:
print(f'[{proc_num}]\t😖 Unable to post results!!!')
time.sleep(5)
return
# get_and_process_client_tasks
def run_client(self):
if sys.platform == 'darwin' and multiprocessing.get_start_method(allow_none=True) != 'forkserver':
multiprocessing.set_start_method('forkserver')
# processes all need a number, this also gets
# used as a port offset
proc_nums = []
for i in range(0,self.pool_size):
proc_nums.append(i)
# start workers
myPool = multiprocessing.Pool(self.pool_size)
myPool.map(self.get_and_process_client_tasks, proc_nums)
# run_client
# Client | thezedwards/webXray | webxray/Client.py | Client.py | py | 4,988 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "multiprocessing.cpu_count",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "urllib.parse.request.Request",
"line_number": 60,
"usage_type": "call"
},
{
"api_nam... |
73734292027 | import json
import os
from account import Account
home_path = os.getenv("HOME")
config = json.load(open(os.path.join(home_path, ".config", "revChatGPT", "config.json")))
cache = json.load(open(os.path.join(home_path, ".cache", "revChatGPT", "config.json")))
# 从配置读取 token
session_token = config['accounts'][0]['session_token']
access_token = cache['access_token']
account = Account("fkxxyz", "fkxxyz@xxxx.com", "xxxxxxxx", session_token, "/tmp", config['proxy'])
# 尝试用 access_token 访问
is_logged_in = account.login_with_session_info()
# 用 session_token 登录得到 access_token
if not is_logged_in:
is_logged_in = account.login()
| fkxxyz/rev-chatgpt-web | test.py | test.py | py | 662 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"us... |
2965742594 | from odoo import http
from odoo.http import request
from odoo.addons.web.controllers.main import ensure_db
import werkzeug
import logging
_logger = logging.getLogger(__name__)
class SimpleUrlController(http.Controller):
@http.route('/redir', type='http', auth="user")
def redirect(self, **args):
ensure_db()
if not request.session.uid:
return werkzeug.utils.redirect('/web/login', 303)
request.uid = request.session.uid
if len(args) != 1:
_logger.debug("Wrong number of GET parameters ({})".format(args))
return werkzeug.utils.redirect('/web')
key, value = args.popitem()
rule_model = request.env['base_simple_urls.redirect_rule']
matching_rule = rule_model.search([('get_variable', '=', key)])
if not matching_rule:
_logger.debug(
"Redirect rule for GET parameters not found ({})".format(args)
)
return werkzeug.utils.redirect('/web')
if len(matching_rule) > 1:
_logger.debug(
"Multiple rules for GET parameters found ({})".format(args)
)
return werkzeug.utils.redirect('/web')
''' Do a case insensitive search to the model and field defined in the
redirect rule, e.g. product.product's default_code field '''
target_model = request.env[matching_rule[0].model_id.model]
if matching_rule[0].field_id.ttype == 'integer':
matching_ids = target_model.search(
[(matching_rule[0].field_id.name, '=', value)]
)
else:
matching_ids = target_model.search(
[(matching_rule[0].field_id.name, '=ilike', value)]
)
if len(matching_ids) != 1:
_logger.debug(
"Wrong number of search results. GET params: {}".format(args)
)
return werkzeug.utils.redirect('/web')
''' Form the URL and redirect the user '''
url_params = {
'view_type': 'form',
'model': matching_rule[0].model_id.model,
'id': matching_ids[0].id,
'action': matching_rule[0].action_id.id,
}
url_string = '/web#{}'.format(werkzeug.url_encode(url_params))
return werkzeug.utils.redirect(url_string)
| Tawasta/server-tools | base_simple_urls/controllers/simple_urls.py | simple_urls.py | py | 2,344 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "odoo.http.Controller",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "odoo.http",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo.addons.web.... |
13895462756 | ########################################################################################
# Module with functions for parametric estimation of GC
########################################################################################
import numpy as np
import scipy.linalg
from .tools import *
def YuleWalker(X, m, maxlags=100):
'''
Estimate the VAR model coefficients by solving the YW equations.
Inputs:
> X : Data with size [Number of variables, Number of observations].
> m : Model order
Outputs:
> AR_yw : Coefficient matrix
> eps_yw:
'''
Nvars = X.shape[0]
N = X.shape[1]
# Compute cross-correlations matrices for each lag
lag, Rxx = xcorr(X,X,maxlags)
# Reorganizing data to compute crosscorrelation matrix
b = X.T[m:]
A = np.zeros([N-m,Nvars*m])
count = 0
for i in np.arange(0,m):
for j in range(0,Nvars):
A[:,count] = X.T[m-i-1:N-i-1,j]
count += 1
r = np.matmul(A.T,b)/N#np.reshape( Rxx[1:m+1], (Nvars*m,Nvars) )
R = np.matmul(A.T, A)/N
AR_yw = np.matmul(scipy.linalg.inv(R).T,r).T
AR_yw = AR_yw.T.reshape((m,Nvars,Nvars))
eps_yw = Rxx[0]
for i in range(m):
eps_yw += np.matmul(-AR_yw[i].T,Rxx[i+1])
return AR_yw, eps_yw
def compute_transfer_function(AR, sigma, f, Fs):
m = AR.shape[0]
Nvars = AR.shape[1]
H = np.zeros([Nvars,Nvars,f.shape[0]]) * (1 + 1j)
S = np.zeros([Nvars,Nvars,f.shape[0]]) * (1 + 1j)
for i in range(0,m+1):
comp = np.exp(-1j * f * 2 * np.pi * i/Fs)
if i == 0:
for j in range(comp.shape[0]):
H[:,:,j] += np.eye(Nvars) * comp[j]
else:
for j in range(comp.shape[0]):
H[:,:,j] += -AR[i-1].T * comp[j]
for i in range(f.shape[0]):
H[:,:,i] = np.linalg.inv(H[:,:,i])
for i in range(f.shape[0]):
S[:,:,i] = np.matmul( np.matmul(H[:,:,i], sigma), np.conj(H[:,:,i]).T )
return H, S
| ViniciusLima94/pyGC | pygc/parametric.py | parametric.py | py | 1,814 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number... |
18307501302 | import warnings
from copy import deepcopy
from typing import Union, List, Tuple, Dict
import numpy as np
from aequilibrae.matrix import AequilibraeMatrix
from aequilibrae.paths.graph import Graph
from aequilibrae.paths.results import AssignmentResults
class TrafficClass:
"""Traffic class for equilibrium traffic assignment
.. code-block:: python
>>> from aequilibrae import Project
>>> from aequilibrae.matrix import AequilibraeMatrix
>>> from aequilibrae.paths import TrafficClass
>>> project = Project.from_path("/tmp/test_project")
>>> project.network.build_graphs()
>>> graph = project.network.graphs['c'] # we grab the graph for cars
>>> graph.set_graph('free_flow_time') # let's say we want to minimize time
>>> graph.set_skimming(['free_flow_time', 'distance']) # And will skim time and distance
>>> graph.set_blocked_centroid_flows(True)
>>> proj_matrices = project.matrices
>>> demand = AequilibraeMatrix()
>>> demand = proj_matrices.get_matrix("demand_omx")
>>> demand.computational_view(['matrix'])
>>> tc = TrafficClass("car", graph, demand)
>>> tc.set_pce(1.3)
"""
def __init__(self, name: str, graph: Graph, matrix: AequilibraeMatrix) -> None:
"""
Instantiates the class
:Arguments:
**name** (:obj:`str`): UNIQUE class name.
**graph** (:obj:`Graph`): Class/mode-specific graph
**matrix** (:obj:`AequilibraeMatrix`): Class/mode-specific matrix. Supports multiple user classes
"""
if not np.array_equal(matrix.index, graph.centroids):
raise ValueError("Matrix and graph do not have compatible sets of centroids.")
if matrix.matrix_view.dtype != graph.default_types("float"):
raise TypeError("Matrix's computational view need to be of type np.float64")
self.__config = {}
self.graph = graph
self.logger = graph.logger
self.matrix = matrix
self.pce = 1.0
self.vot = 1.0
self.mode = graph.mode
self.class_flow: np.array
self.results = AssignmentResults()
self.fixed_cost = np.zeros(graph.graph.shape[0], graph.default_types("float"))
self.fixed_cost_field = ""
self.fc_multiplier = 1.0
self._aon_results = AssignmentResults()
self._selected_links = {} # maps human name to link_set
self.__id__ = name
graph_config = {
"Mode": graph.mode,
"Block through centroids": graph.block_centroid_flows,
"Number of centroids": graph.num_zones,
"Links": graph.num_links,
"Nodes": graph.num_nodes,
}
self.__config["Graph"] = str(graph_config)
mat_config = {
"Source": matrix.file_path or "",
"Number of centroids": matrix.zones,
"Matrix cores": matrix.view_names,
}
if len(matrix.view_names) == 1:
mat_config["Matrix totals"] = {
nm: np.sum(np.nan_to_num(matrix.matrix_view)[:, :]) for nm in matrix.view_names
}
else:
mat_config["Matrix totals"] = {
nm: np.sum(np.nan_to_num(matrix.matrix_view)[:, :, i]) for i, nm in enumerate(matrix.view_names)
}
self.__config["Matrix"] = str(mat_config)
def set_pce(self, pce: Union[float, int]) -> None:
"""Sets Passenger Car equivalent
:Arguments:
**pce** (:obj:`Union[float, int]`): PCE. Defaults to 1 if not set
"""
if not isinstance(pce, (float, int)):
raise ValueError("PCE needs to be either integer or float ")
self.pce = pce
def set_fixed_cost(self, field_name: str, multiplier=1):
"""Sets value of time
:Arguments:
**field_name** (:obj:`str`): Name of the graph field with fixed costs for this class
**multiplier** (:obj:`Union[float, int]`): Multiplier for the fixed cost. Defaults to 1 if not set
"""
if field_name not in self.graph.graph.columns:
raise ValueError("Field does not exist in the graph")
self.fc_multiplier = float(multiplier)
self.fixed_cost_field = field_name
if np.any(np.isnan(self.graph.graph[field_name].values)):
self.logger.warning(f"Cost field {field_name} has NaN values. Converted to zero")
if self.graph.graph[field_name].min() < 0:
msg = f"Cost field {field_name} has negative values. That is not allowed"
self.logger.error(msg)
raise ValueError(msg)
def set_vot(self, value_of_time: float) -> None:
"""Sets value of time
:Arguments:
**value_of_time** (:obj:`Union[float, int]`): Value of time. Defaults to 1 if not set
"""
self.vot = float(value_of_time)
def set_select_links(self, links: Dict[str, List[Tuple[int, int]]]):
"""Set the selected links. Checks if the links and directions are valid. Translates link_id and
direction into unique link id used in compact graph.
Supply links=None to disable select link analysis.
:Arguments:
**links** (:obj:`Union[None, Dict[str, List[Tuple[int, int]]]]`): name of link set and
Link IDs and directions to be used in select link analysis"""
self._selected_links = {}
for name, link_set in links.items():
if len(name.split(" ")) != 1:
warnings.warn("Input string name has a space in it. Replacing with _")
name = str.join("_", name.split(" "))
link_ids = []
for link, dir in link_set:
if dir == 0:
query = (self.graph.graph["link_id"] == link) & (
(self.graph.graph["direction"] == -1) | (self.graph.graph["direction"] == 1)
)
else:
query = (self.graph.graph["link_id"] == link) & (self.graph.graph["direction"] == dir)
if not query.any():
raise ValueError(f"link_id or direction {(link, dir)} is not present within graph.")
# Check for duplicate compressed link ids in the current link set
for comp_id in self.graph.graph[query]["__compressed_id__"].values:
if comp_id in link_ids:
warnings.warn(
"Two input links map to the same compressed link in the network"
f", removing superfluous link {link} and direction {dir} with compressed id {comp_id}"
)
else:
link_ids.append(comp_id)
self._selected_links[name] = np.array(link_ids, dtype=self.graph.default_types("int"))
self.__config["select_links"] = str(links)
@property
def info(self) -> dict:
config = deepcopy(self.__config)
return {self.__id__: config}
def __setattr__(self, key, value):
if key not in [
"graph",
"logger",
"matrix",
"pce",
"mode",
"class_flow",
"results",
"_aon_results",
"__id__",
"vot",
"fixed_cost",
"fc_multiplier",
"fixed_cost_field",
"_selected_links",
"_TrafficClass__config",
]:
raise KeyError("Traffic Class does not have that element")
self.__dict__[key] = value
| AequilibraE/aequilibrae | aequilibrae/paths/traffic_class.py | traffic_class.py | py | 7,635 | python | en | code | 140 | github-code | 6 | [
{
"api_name": "aequilibrae.paths.graph.Graph",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "aequilibrae.matrix.AequilibraeMatrix",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.array_equal",
"line_number": 50,
"usage_type": "call"
},
{
... |
30729211140 | import threading
from random import randint
import pika
import time
from src.klein_queue.errors import KleinQueueError
from src.klein_queue.rabbitmq.publisher import Publisher
from src.klein_queue.rabbitmq.consumer import Consumer
from klein_config.config import EnvironmentAwareConfig
test_config = {
"rabbitmq": {
"host": ["localhost"],
"port": 5672,
"username": "doclib",
"password": "doclib",
}
}
class TestConsumer:
def test_consumption(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
assert msg == {'msg': 'test_message'}
assert properties.delivery_mode == 2
event.set()
cons.stop()
return handler_fn
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.consume",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.consume"
}
})
consumer = Consumer(config, "consumer")
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# timeout = 10 seconds on waiting for message to arrive
message_received_in_time = event.wait(10)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_exchange_creation(self):
test_message = {"id": "d5d581bb-8b42-4d1e-bbf9-3fee91ab5920"}
delivery = pika.spec.Basic.Deliver()
def handler_fn(msg, basic_deliver=None, **kwargs):
nonlocal delivery, waiting
delivery = basic_deliver
waiting = False
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.test-queue",
"auto_acknowledge": False,
"concurrency": 3,
"exchange": "test-exchange"
},
"publisher": {
"queue": "pytest.test-queue",
"exchange": "test-exchange"
},
})
consumer = Consumer(config, "consumer", handler_fn)
consumer.start()
test_publisher = Publisher(config, "publisher")
test_publisher.start()
test_publisher.publish(test_message)
waiting = True
while waiting:
pass
assert delivery.exchange == "test-exchange"
assert delivery.routing_key == "pytest.test-queue"
test_publisher.stop()
consumer.stop()
def test_worker_concurrency(self):
workers = randint(2, 5)
events = []
def handler_fn(msg, **kwargs):
event_id = msg['event']
events[event_id].set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.concurrency",
"concurrency": workers,
"auto_acknowledge": True
},
"publisher": {
"queue": "pytest.concurrency"
}
})
consumer = Consumer(config, "consumer", handler_fn)
# check number of threads spawned
assert len(consumer._consumer._workers) == workers
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
for i in range(workers):
# send one message for each worker
events.append(threading.Event())
publisher.publish({'event': i})
for i in range(workers):
message_received_in_time = events[i].wait(5)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_default_exception_handler(self):
retries = 0
waiting = True
expected_retries = 10
def handler_fn(msg, **kwargs):
nonlocal waiting, retries
retries += 1
if retries >= expected_retries:
# Stop waiting and don't requeue
waiting = False
raise KleinQueueError("forced error")
else:
# Requeue the message
raise KleinQueueError("forced error", requeue=True)
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.default_exceptions",
"auto_acknowledge": False,
"concurrency": 3,
},
"publisher": {
"queue": "pytest.default_exceptions"
}
})
consumer = Consumer(config, "consumer", handler_fn)
consumer.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish("message")
timeout = time.time() + 60
while waiting:
if time.time() > timeout:
# Fails this test if the expected number of retries has not been reached within the time limit.
assert False
time.sleep(1)
pass
consumer.stop()
publisher.stop()
def test_error_publishing_exception_handler(self):
test_message = {"id": "d5d581bb-8b42-4d1e-bbf9-3fee91ab5920"}
error_message = ""
error_properties = pika.BasicProperties()
message_properties = pika.BasicProperties()
def handler_fn(msg, properties=None, **kwargs):
nonlocal message_properties
message_properties = properties
raise KleinQueueError("forced error")
def error_handler_fn(msg, properties=None, **kwargs):
nonlocal waiting, error_message, error_properties
error_message = msg
error_properties = properties
waiting = False
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.exceptions",
"auto_acknowledge": False,
"concurrency": 3,
},
"publisher": {
"queue": "pytest.exceptions"
},
"error_publisher": {
"queue": "errors"
},
"error_consumer": {
"queue": "errors",
"auto_acknowledge": True
}
})
error_publisher = Publisher(config, "error_publisher")
error_publisher.start()
upstream_publisher = Publisher(config, "consumer")
upstream_publisher.start()
from src.klein_queue.rabbitmq.exceptions import new_error_publishing_exception_handler
exception_handler = new_error_publishing_exception_handler("consumer", upstream_publisher, error_publisher)
consumer = Consumer(config, "consumer", handler_fn, exception_handler=exception_handler)
consumer.start()
error_consumer = Consumer(config, "error_consumer", error_handler_fn)
error_consumer.start()
test_publisher = Publisher(config, "publisher")
test_publisher.start()
test_publisher.publish(test_message)
waiting = True
while waiting:
pass
test_publisher.stop()
upstream_publisher.stop()
error_publisher.stop()
consumer.stop()
error_consumer.stop()
assert message_properties.delivery_mode == 2
assert message_properties.headers['x-retry'] == 3
assert test_message == error_message
assert error_properties.delivery_mode == 2
assert error_properties.headers['x-consumer'] == "consumer"
assert "KleinQueueError" in error_properties.headers['x-exception']
assert error_properties.headers['x-message'] == "forced error"
assert error_properties.headers['x-queue'] == 'pytest.exceptions'
assert "forced error" in error_properties.headers['x-stack-trace']
assert error_properties.headers["x-original-routing-key"] == "pytest.exceptions"
assert error_properties.headers["x-original-exchange"] == ""
def test_on_empty_queue_callback_should_run_once_single_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn(tracker=[]): # make use of shared instance of list
event.set()
tracker.append(1)
assert len(tracker) == 1 # Run the first time only
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_empty_queue_callback_should_run_once_single_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_empty_queue_callback_should_run_once_single_msg"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# on waiting for message to arrive and then hit empty queue
message_received_in_time = event.wait(10)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_on_empty_queue_callback_should_be_called_once_multiple_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn(tracker=[]):
event.set()
tracker.append(1)
assert len(tracker) == 1 # Run once only
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.consume",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.consume"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
publisher.publish({'msg': 'test_message'})
publisher.publish({'msg': 'test_message'})
c = threading.Thread(target=consumer.run)
c.start()
# waiting for message to arrive and then hit empty queue
message_received_in_time = event.wait(30)
assert message_received_in_time
consumer.stop()
publisher.stop()
def test_on_empty_queue_callback_should_not_be_called(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_empty_queue_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_empty_not_called",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_empty_not_called"
}
})
consumer = Consumer(config, "consumer", on_empty_queue_fn=on_empty_queue_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
# timeout = 60 seconds. event should not be reached as no message is sent
message_received_in_time = event.wait(10)
assert not message_received_in_time
consumer.stop()
def test_on_stop_callback_should_be_called_after_closed_no_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_no_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_no_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
time.sleep(1) # Give the thread time to do its thing
consumer.stop()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert message_received_in_time
def test_on_stop_callback_should_not_be_called_before_closed_no_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_no_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_no_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert not message_received_in_time
consumer.stop()
def test_on_stop_callback_should_be_called_after_closed_with_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_with_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_be_called_after_closed_with_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
time.sleep(1) # Give the thread time to do its thing
publisher.stop()
consumer.stop()
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert message_received_in_time
def test_on_stop_callback_should_not_be_called_before_closed_with_msg(self):
event = threading.Event()
def handle_handle(cons):
def handler_fn(msg, properties=None, **kwargs):
pass
return handler_fn
def on_stop_fn():
event.set()
config = EnvironmentAwareConfig({
**test_config,
"consumer": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_with_msg",
"auto_acknowledge": True,
"create_on_connect": True
},
"publisher": {
"queue": "pytest.on_stop_callback_should_not_be_called_before_closed_with_msg"
}
})
consumer = Consumer(config, "consumer", on_stop_fn=on_stop_fn)
consumer.set_handler(handle_handle(consumer))
c = threading.Thread(target=consumer.run)
c.start()
publisher = Publisher(config, "publisher")
publisher.start()
publisher.publish({'msg': 'test_message'})
# timeout = 60 seconds.
message_received_in_time = event.wait(10)
assert not message_received_in_time
publisher.stop()
consumer.stop()
| mdcatapult/py-queue | tests/rabbitmq/test_consumer.py | test_consumer.py | py | 17,024 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "threading.Event",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "klein_config.config.EnvironmentAwareConfig",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "src.klein_queue.rabbitmq.consumer.Consumer",
"line_number": 46,
"usage_type": ... |
29431482505 | import os
import numpy as np
import cv2
import glob
srcw, srch = 1920, 1080
x, y, w, h = 6, 599, 517, 421
app_name = 'gpu_math.exe'
app_dir = 'D:\\Code\\gpu_tracking\\gpu-object-tracking\\build\\bin'
yuv_file = '%s\\test.yuv'%app_dir
roi_file = '%s\\dump.gpu-roi.0000.517x421.yuv'%app_dir
aff_file = '%s\\dump.gpu-affine.0000.517x421.yuv'%app_dir
proc_file = '%s\\dump.0000.gpu-preproc.1034x421.txt'%app_dir
cos2d_file = '%s\\dump.0000.gpu-cos2d.517x421.txt'%app_dir
R_file = '%s\\dump.0000.gpu-r.1034x421.txt'%app_dir
def execute(cmd):
print('#'*8, cmd)
os.system(cmd)
def dump_result(data, tag):
filename = '%s\\dump_%s_%dx%d.txt' % (app_dir, tag, data.shape[1], data.shape[0])
np.savetxt(filename, data, fmt='%+.18e', delimiter=', ')
def verify_affine():
# gpu result
cmd = 'cd %s && %s' % (app_dir, app_name)
execute(cmd)
frame = np.fromfile(roi_file, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite('%s\\roi.bmp' % app_dir, frame)
frame = np.fromfile(aff_file, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite('%s\\aff.bmp' % app_dir, frame)
# ref result
yuv = np.fromfile(yuv_file, dtype=np.uint8, count=srcw*srch).reshape((srch, srcw))
a = yuv[y:y+h, x:x+w]
T = np.array([[1.021916, -0.021326, -1.176091], [0.039830, 0.923501, 5.806976]])
b = cv2.warpAffine(a, T, (w, h), flags = cv2.INTER_LINEAR, borderMode = cv2.BORDER_REFLECT)
cv2.imwrite('%s\\ref.bmp'%app_dir, b)
def verify_fft():
def gaussian2(w, h, sigma=2.0):
xs, ys = np.meshgrid(np.arange(w), np.arange(h))
center_x, center_y = w / 2, h / 2
dist = ((xs - center_x) ** 2 + (ys - center_y) ** 2) / (sigma**2)
g = np.exp(-0.5*dist).astype(np.float64)
return g
def get_input(w, h):
filename = 'dump.0000.input.%dx%d.txt' % (w, h)
data = np.genfromtxt('%s\\%s'%(app_dir, filename), dtype=np.float64, delimiter=",")
data = data[::, :-1:]
return data.reshape((h, w))
def ref_fft(w, h):
g = get_input(w, h) # gaussian2(w, h)
dump_result(g, 'input')
# G = cv2.dft(g, flags = cv2.DFT_COMPLEX_OUTPUT)
G = np.fft.fft2(g)
result = np.zeros((h, w*2), dtype=np.float64)
result[:, 0::2] = G.real
result[:, 1::2] = G.imag
return result
def gpu_fft(w, h):
app_cmd = '%s %d %d' % (app_name, w, h)
cmd = 'cd %s && %s' % (app_dir, app_cmd)
execute(cmd)
filename = 'dump.0000.gpu-fft.%dx%d.txt' % (w*2, h)
result = np.genfromtxt('%s\\%s'%(app_dir, filename), dtype=np.float64, delimiter=",")
result = result[::, :-1:]
# r, i = result[:, 0::2], result[:, 1::2]
return result
w, h = 53, 31
gpu = gpu_fft(w, h)
dump_result(gpu, 'gpu')
ref = ref_fft(w, h)
dump_result(ref, 'ref')
# print('INFO: [%dx%d] sum of delta = %f, max = %f' % (w, h, np.sum(np.abs(ref - gpu)), np.max(np.abs(ref - gpu))))
def verify_preproc():
# x, y, w, h = 0, 0, 4, 4
# gpu result
args = '%s, %s, %s, %s' % (x, y, w, h)
cmd = 'cd %s && %s %s' % (app_dir, app_name, args)
execute(cmd)
# reference result
yuv = np.fromfile(yuv_file, dtype=np.uint8, count=srcw*srch).reshape((srch, srcw))
crop = yuv[y:y+h, x:x+w].astype(np.uint8)
crop.tofile('%s\\ref_crop.yuv'%app_dir)
norm = np.log(np.float64(crop)+1)
dump_result(norm, 'ref_norm')
def yuv_to_image():
for yuvfile in glob.glob('%s\\dump.*.yuv'%app_dir):
imgfile = '%s.bmp' % yuvfile
data = np.fromfile(yuvfile, dtype=np.uint8, count=w*h).reshape((h, w))
cv2.imwrite(imgfile, data)
def find_max():
r = np.genfromtxt(R_file, dtype=float, delimiter=',')
r = r[:, 0::2]
idx = np.unravel_index(r.argmax(), r.shape)
print(idx)
# yuv_to_image()
# verify_affine()
# verify_fft()
verify_preproc()
# find_max()
print('done') | mintaka33/gpu-object-tracking | run.py | run.py | py | 3,904 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number... |
73474178748 | from fastapi import APIRouter, Depends, FastAPI
from src.dependencies.auth import firebase_authentication
from src.routes.audios import views as audios_views
from src.routes.auth import views as auth_views
from src.routes.users import views as users_views
api_router = APIRouter()
api_router.include_router(auth_views.router, tags=['Authentication'])
api_router.include_router(users_views.router,
tags=['Reciter'],
prefix='/reciters',
dependencies=[Depends(firebase_authentication)])
api_router.include_router(audios_views.router,
tags=['Audios'],
prefix='/audios',
dependencies=[Depends(firebase_authentication)])
def init_api(app: FastAPI) -> None:
app.include_router(api_router)
| CrowdsourcingApps/Crowdsourcing-Ayat | src/routes/__init__.py | __init__.py | py | 846 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "src.routes.auth.views.router",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "src.routes.auth.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name":... |
8463884764 | from django.utils import timezone
from rest_framework import status
from rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from logistics.models import Logistic, LogisticRate
from receivers.models import Receiver
from senders.models import Sender
from users.authentications import CustomTokenAuthentication
from users.permissions import CustomPermission
from . import models, serializers
class OrderCreateView(APIView):
authentication_classes = [CustomTokenAuthentication]
permission_classes = [CustomPermission]
def post(self, request, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
serializer = serializers.OrderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
order_data = serializer.create(serializer.validated_data)
order_db = models.Order()
updated_order_data = order_db.update_order(order_data["id"], {"user_id": user["id"]})
logistics_db = Logistic()
logistic = logistics_db.get_logistic("id", updated_order_data["logistic_id"])
if not logistic:
return Response({"error": "Logistics Not Available"}, status=status.HTTP_400_BAD_REQUEST)
receiver_id = updated_order_data["receiver_id"]
sender_id = updated_order_data["sender_id"]
receiver_db = Receiver()
receiver = receiver_db.get_receiver("id", receiver_id)
to_region = receiver["region"]
sender_db = Sender()
sender = sender_db.get_sender("id", sender_id)
from_region = sender["region"]
logistic_id = logistic["id"]
logistic_rate_db = LogisticRate()
logistic_rate = logistic_rate_db.get_logistics_rate_price(
from_region=from_region, to_region=to_region, logistic_id=logistic_id
)
price = logistic_rate[0]["price"]
print(price)
order_db = models.Order()
updated_order_data = order_db.update_order(order_data["id"], {"price": price})
serializer = serializers.OrderSerializer(data=updated_order_data)
if serializer.is_valid():
response_data = {"data": serializer.data, "message": "Order Created Successfully"}
return Response(response_data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class OrderDetailView(RetrieveUpdateDestroyAPIView):
authentication_classes = [CustomTokenAuthentication]
permission_classes = [CustomPermission]
def get(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
order_db = models.Order()
data = order_db.get_order("id", id)
if data["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
serializer = serializers.OrderSerializer(data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_200_OK)
def update(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
data = request.data
serializer = serializers.OrderSerializer(data=data)
if serializer.is_valid(raise_exception=True):
order_db = models.Order()
order = order_db.get_order("id", id)
if order["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
order = order_db.update_order(id, serializer.validated_data)
order = order_db.update_order(id, {"updated_at": str(timezone.now())})
serializer = serializers.OrderSerializer(data=order)
if serializer.is_valid():
data = {"data": serializer.data, "message": "Order Updated Successfully"}
return Response(data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, id, *args, **kwargs):
user, _ = self.authentication_classes[0]().authenticate(request)
data = request.data
order_db = models.Order()
order = order_db.get_order("id", id)
if order["user_id"] != user["id"]:
return Response({"error": "You do not have the permission"}, status=status.HTTP_401_UNAUTHORIZED)
order = order_db.update_order(id, {"is_active": False, "updated_at": str(timezone.now())})
data = {"data": order, "message": "Order set as inactive"}
return Response(data, status=status.HTTP_200_OK)
| Duade10/ditosell-api | orders/views.py | views.py | py | 4,947 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "users.authentications.CustomTokenAuthentication",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "users.permissions.CustomPermission",
"line_number": 18,
"usa... |
24486961270 | from airflow import DAG
from airflow.providers.http.operators.http import SimpleHttpOperator
from airflow.hooks.base import BaseHook
from airflow.operators.python import PythonOperator
import datetime
import requests
import json
dag = DAG(
dag_id='533_api_generate_report',
schedule_interval='0 0 * * *',
start_date=datetime.datetime(2021, 1, 1),
catchup=False,
dagrun_timeout=datetime.timedelta(minutes=60),
tags=['example', 'example2'],
params={"example_key": "example_value"},
)
business_dt = {'dt':'2022-05-06'}
nickname = 'ddd.z.2000'
cohort = '8'
api_token = '5f55e6c0-e9e5-4a9c-b313-63c01fc31460'
headers = {
"X-API-KEY": api_token,
"X-Nickname": nickname,
"X-Cohort": cohort
}
def create_files_request(headers):
api_conn = 'create_files_api'
api_endpoint = 'd5dg1j9kt695d30blp03.apigw.yandexcloud.net'
method_url = '/generate_report'
r = requests.post('https://'+api_endpoint+method_url, headers=headers)
response_dict = json.loads(r.content)
print(f"task_id is {response_dict['task_id']}")
return response_dict['task_id']
task = PythonOperator(task_id='create_files_request',
python_callable = create_files_request,
op_kwargs = {'headers':headers},
dag=dag)
task | Artem-ne-Artem/Data-engineering-by-Yandex-Practicum | s3-lessons/Theme_5/Task_5.3.3.py | Task_5.3.3.py | py | 1,229 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "airflow.DAG",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.post",
... |
19594690506 | import tkinter
import mysql.connector
from tkinter import *
from tkinter import ttk
from tkinter.ttk import Treeview
from tkinter import messagebox
from PIL import Image, ImageTk
db = mysql.connector.connect(
host="localhost",
user="root",
password="1234",
database="bmh204"
)
mycursor = db.cursor()
def login():
if Eku.get() == "" or Esif.get() == "":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
try:
mycursor.execute("select * from Maykod where tc ='%s' and sifre = %s " % (Eku.get(), Esif.get()))
row = mycursor.fetchone()
if row == None:
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
formgiris.destroy()
giris()
except EXCEPTION as es:
messagebox.showerror("Hata", f"Kullanıcı Veya Şifrenizi Kontrol Ediniz:{str(es)}")
db.commit()
def giris():
anasayfa()
def admin():
def admingir():
if aku.get() == "" or asif.get() == "":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
elif aku.get() != "a" or asif.get() != "1":
messagebox.showerror("Hata", "Kullanıcı Veya Şifrenizi Kontrol Ediniz")
else:
Login()
admin.destroy()
admin = Toplevel()
admin.title("Yetkili Formu")
admin.geometry('600x400')
my_picet = Image.open("adminbg.jpg")
resized = my_picet.resize((600, 400), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(admin, image=new_picet)
my_laben.place(x=0, y=0)
frame1 = Frame(admin, bg="#e53c09")
frame1.place(relx=0.2, rely=0.15, relwidth=0.6, relheight=0.2)
cv = Canvas(admin, bg='white', width=420, height=200)
cv.place(x=100, y=100)
yetki = Label(admin, text="Admin Panel Login", fg="black", bg="#e53c09", font="Times 18 italic").place(x=130, y=65)
aku = Label(admin, text="Kullanıcı Adı:", fg="black", bg="white", font="Times 22 italic").place(x=150, y=100)
aku = Entry(admin, bd=1, width=25)
aku.place(x=150, y=150)
asif = Label(admin, text="Şifre :", fg="black", bg="white", font="Times 22 italic").place(x=150, y=190)
asif = Entry(admin, bd=1, width=25)
asif.place(x=150, y=250)
Kaydet = Button(admin, text="Giriş Yap", fg="Blue", bg="white", font="Times 22 italic", command=admingir)
Kaydet.place(x=180, y=280)
admin.mainloop()
def anasayfa():
ana = Tk()
ana.title('MAYKOD ANASAYFA')
ana.geometry('1550x900')
def yonetim():
top = Toplevel()
top.geometry('1000x1000')
top.title('Yönetim Ekibi')
top.iconbitmap("maykod.ico")
cmy = Canvas(top, width=1000, height=1000)
L1 = Label(top, text="YÖNETİM EKİBİ", bg="#00bcdd", font="Times 45 ").place(x=150, y=40)
img = PhotoImage(file="ybg.png")
my_image = cmy.create_image(0, 0, anchor=NW, image=img)
cmy.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
cmy.pack()
photo12 = PhotoImage(file='maykod.png')
photoRezised12 = photo12.subsample(2, 2)
cmy.create_image(75, 100, image=photoRezised12)
photo22 = PhotoImage(file='mşü1.png')
photoRezised22 = photo22.subsample(2, 2)
cmy.create_image(900, 100, image=photoRezised22)
frame1 = Frame(top, bg="#1608d6")
frame1.place(relx=0.35, rely=0.1, relwidth=0.25, relheight=0.18)
frame2 = Frame(top, bg="#1608d6")
frame2.place(relx=0.15, rely=0.3, relwidth=0.18, relheight=0.18)
frame3 = Frame(top, bg="#1608d6")
frame3.place(relx=0.55, rely=0.3, relwidth=0.18, relheight=0.18)
frame4 = Frame(top, bg="#1608d6")
frame4.place(relx=0.15, rely=0.55, relwidth=0.18, relheight=0.18)
frame5 = Frame(top, bg="#1608d6")
frame5.place(relx=0.55, rely=0.55, relwidth=0.18, relheight=0.18)
frame6 = Frame(top, bg="#1608d6")
frame6.place(relx=0.05, rely=0.75, relwidth=0.18, relheight=0.18)
frame7 = Frame(top, bg="#1608d6")
frame7.place(relx=0.35, rely=0.75, relwidth=0.18, relheight=0.18)
frame8 = Frame(top, bg="#1608d6")
frame8.place(relx=0.65, rely=0.75, relwidth=0.18, relheight=0.18)
photo = PhotoImage(frame1, file='pp.png')
photoRezised = photo.subsample(4, 4)
cmy.create_image(480, 250, image=photoRezised)
lbl=Label(top,text="Mehmet Can ARSLAN \n MAYKOD Başkanı", font="Comic 13 italic")
lbl.place(x=370, y=320)
photo2 = PhotoImage(frame2, file='esra.png')
photoRezised2 = photo2.subsample(2, 2)
cmy.create_image(250, 450, image=photoRezised2)
lbl = Label(top, text="Esra YILDIRIM \n MAYKOD Başkan Yardımcısı", font="Comic 13 italic")
lbl.place(x=140, y=505)
photo3 = PhotoImage(frame3, file='Volkan.png')
photoRezised3 = photo3.subsample(2, 2)
cmy.create_image(700, 450, image=photoRezised3)
lbl = Label(top, text="Volkan AKGÖL \n MAYKOD Başkan Yardımcısı", font="Comic 13 italic")
lbl.place(x=590, y=505)
photo4 = PhotoImage(frame4, file='merve.png')
photoRezised4 = photo4.subsample(2, 2)
cmy.create_image(250, 650, image=photoRezised4)
lbl = Label(top, text="Merve OT \n MAYKOD Yazman", font="Comic 13 italic")
lbl.place(x=140, y=705)
photo5 = PhotoImage(frame5, file='beyda.png')
photoRezised5= photo5.subsample(3, 3)
cmy.create_image(700, 650, image=photoRezised5)
lbl = Label(top, text="Beyda ÇETİN \n MAYKOD Sayman", font="Comic 13 italic")
lbl.place(x=590, y=705)
photo6 = PhotoImage(frame6, file='alper.png')
photoRezised6 = photo6.subsample(2, 2)
cmy.create_image(150, 850, image=photoRezised6)
lbl = Label(top, text="Alper KOÇAK \n Kurucu Üye", font="Comic 13 italic")
lbl.place(x=80, y=905)
photo7 = PhotoImage(frame7, file='neşe.png')
photoRezised7 = photo7.subsample(2, 2)
cmy.create_image(460, 850, image=photoRezised7)
lbl = Label(top, text="Neşe VUROL \n MAYKOD Sekteteri", font="Comic 13 italic")
lbl.place(x=350, y=905)
photo8 = PhotoImage(frame8, file='eda.png')
photoRezised8 = photo8.subsample(2, 2)
cmy.create_image(830, 850, image=photoRezised8)
lbl = Label(top, text="Edanur TAŞÇI \n Denetleme Kurul Üyesi", font="Comic 13 italic")
lbl.place(x=720, y=905)
top.mainloop()
def iletisim():
ilet = Toplevel()
ilet.geometry('1000x900')
ilet.title('Yönetim Ekibi')
ilet.iconbitmap("maykod.ico")
cv = Canvas(ilet, bg='white', width=10000, height=10000)
cv.pack()
cv.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
img = PhotoImage(file="ana.png")
my_image = cv.create_image(0, 0, anchor=NW, image=img)
photo7 = PhotoImage(file='mşü1.png')
photoRezised7 = photo7.subsample(2, 2)
cv.create_image(900, 100, image=photoRezised7)
photo = PhotoImage(file='mail.png')
photoRezised = photo.subsample(3, 3)
cv.create_image(65, 400, image=photoRezised)
photo6 = PhotoImage(file='maykod.png')
photoRezised6 = photo6.subsample(2, 2)
cv.create_image(75, 100, image=photoRezised6)
photo5 = PhotoImage(file='okul.png')
photoRezised5 = photo5.subsample(6, 6)
cv.create_image(65, 500, image=photoRezised5)
photo2 = PhotoImage(file="twiter.png")
photoRezised2 = photo2.subsample(12, 12)
cv.create_image(65, 720, image=photoRezised2)
photo3 = PhotoImage(file="insta.png")
photoRezised3 = photo3.subsample(12, 12)
cv.create_image(65, 632, image=photoRezised3)
photo4 = PhotoImage(file="tel.png")
photoRezised4 = photo4.subsample(5, 5)
cv.create_image(65, 825, image=photoRezised4)
frame1 = Frame(ilet, bg="#1608d6")
frame1.place(relx=0.07, rely=0.2, relwidth=0.8, relheight=0.05)
L1 = Label(ilet, text="İLETİŞİM", bg="white", font="Times 45 ").place(x=150, y=40)
Lf1 = Label(ilet, text="MUŞ ALPARSLAN ÜNİVERSİTESİ YAZILIM KULÜBÜ", bg="#1608d6", fg="white", font="Comic 20 italic").place(x=145, y=180)
Lf2 = Label(ilet, text="E-MAİL Adresi:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=380)
Lf2yan = Label(ilet, text="maykod@alparslan.edu.tr", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=600, y=380)
Lf3 = Label(ilet, text="Muş Alparslan Üniversitesi", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=150, y=450)
Lf3yan = Label(ilet, text="https://www.alparslan.edu.tr/tr", bg="#0d0075",fg="white", font="Comic 20 italic").place(x=600, y=450)
Lbu = Label(ilet, text="Bize Ulaşın", bg="#0d0075",fg="white", font="Times 30 italic").place(x=150, y=500)
Lf4 = Label(ilet, text="Instagram adresimiz:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=600)
Lf4yan = Label(ilet, text="maykodmsu", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=600, y=600)
Lf5 = Label(ilet, text="twitter adresimiz:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=700)
Lf5yan = Label(ilet, text="@MaykodMSU", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=600, y=700)
Lf6 = Label(ilet, text="Yönetici tel:", bg="#0d0075", fg="white",font="Comic 20 italic").place(x=150, y=800)
Lf6yan = Label(ilet, text="0 (545) 720 28 66", fg="white",bg="#0d0075", font="Comic 20 italic").place(x=600, y=800)
ilet.mainloop()
def hakkında():
root = Toplevel()
root.geometry("1100x1000")
mycanvas = Canvas(root, bg="white", width=1100, height=1000)
mycanvas.create_rectangle(1550, 120, 0, 0, fill='#00bcdd')
mlabel = Label(mycanvas, text="KULÜP FAALİYETLERİMİZ", bg="#00bcdd", font="Times 35 ").place(x=150, y=40)
mycanvas.pack()
photo1 = PhotoImage(file='maykod.png')
photoRezised1 = photo1.subsample(2, 2)
mycanvas.create_image(75, 100, image=photoRezised1)
photo2 = PhotoImage(file='mşü1.png')
photoRezised2 = photo2.subsample(2, 2)
mycanvas.create_image(1000, 100, image=photoRezised2)
root.mainloop()
canvas = Canvas(ana, width=1550, height=900)
image = ImageTk.PhotoImage(Image.open("ana.png"))
canvas.create_image(0, 0, anchor=NW, image=image)
canvas.pack()
canvas.create_rectangle(1550, 120, 0, 20, fill='#00bcdd')
my_picet = Image.open("mşü.png")
resized = my_picet.resize((1349, 124), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_picet)
my_laben.place(x=100, y=750)
AnaSayfa = Button(ana, text="Anasayfa", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic")
AnaSayfa.place(x=320, y=50)
Kulup = Button(ana, text="Kulüp", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=hakkında)
Kulup.place(x=500, y=50)
yonet = Button(ana, text="Yönetim", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=yonetim)
yonet.place(x=650, y=50)
foto = Button(ana, text="Fotoğraf Galerisi", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic")
foto.place(x=800, y=50)
iletisim = Button(ana, text="İletişim", bg='#00bcdd', borderwidth='0', fg='#00007f', font="Times 20 italic", command=iletisim)
iletisim.place(x=1050, y=50)
my_pice = Image.open("maykod.png")
resized = my_pice.resize((130, 130), Image.ANTIALIAS)
new_pice = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_pice)
my_laben.place(x=50, y=50)
my_pic = Image.open("mşü2.png")
resized = my_pic.resize((130, 130), Image.ANTIALIAS)
new_pic = ImageTk.PhotoImage(resized)
my_labe = Label(image=new_pic)
my_labe.place(x=1370, y=50)
my_picen = Image.open("admin.png")
resized = my_picen.resize((90, 60), Image.ANTIALIAS)
new_picen = ImageTk.PhotoImage(resized)
Admin = Button(image=new_picen, text="Giriş", fg="red", borderwidth='0', bg='#00007f', command=admin, font="arial 25")
Admin.place(x=1225, y=40)
my_ana = Image.open("s1.png")
resizedana = my_ana.resize((1124, 400), Image.ANTIALIAS)
new_picana = ImageTk.PhotoImage(resizedana)
my_labeana = Label(image=new_picana)
my_labeana.place(x=250, y=250)
def icice():
messagebox.showinfo("aliş")
menu = Menu(ana)
ana.config(menu=menu)
def quit():
ana.destroy()
subMenu = Menu(menu)
menu.add_cascade(label="File", font="Times 20", menu=subMenu)
subMenu.add_command(label="Admin", font="Times 13", command=admin)
subMenu.add_command(label="Destek", font="Times 13", command=icice)
subMenu.add_separator()
subMenu.add_command(label="EXIT", font="Times 13", command=quit)
ana.mainloop()
formgiris = Tk()
formgiris.title('MAYKOD')
formgiris.geometry('1600x650')
formgiris.iconbitmap('maykod.ico')
canvas = Canvas(formgiris, width=5000, height=5000, bg="white")
canvas.pack()
frame_orta=Frame(formgiris, bg="yellow")
frame_orta.place(relx=0.427, rely=0, relwidth=0.005, relheight=1)
my_picet = Image.open("yen3.jpg")
resized = my_picet.resize((900, 650), Image.ANTIALIAS)
new_picet = ImageTk.PhotoImage(resized)
my_laben = Label(image=new_picet)
my_laben.place(x=690, y=0)
def kayitolma():
kayitol = Toplevel()
kayitol.title("Kayıt Olma Formu")
kayitol.geometry('1600x566')
canvasa = Canvas(kayitol, width=5000, height=5000, bg="white")
canvasa.pack()
img = PhotoImage(file="yen3.png")
my_image = canvasa.create_image(0, 0, anchor=NW, image=img)
frame_orta = Frame(kayitol, bg="yellow")
frame_orta.place(relx=0.485, rely=0, relwidth=0.005, relheight=1)
formgiris.withdraw()
def kaydet():
if (etck.get() == "" or esifre.get() == "" or eadi.get() == "" or esoyadi.get() == "" or eemail.get() == "" or eil.get() == "" or eilce.get() == "" or ebolum.get() == ""):
messagebox.showerror("Hata", "Lütfen Bütün Alanları Doldurun")
else:
try:
mycursor.execute("INSERT INTO Maykod (tc,sifre,adi,soyadi,email,il,ilce,bolum) VALUES " \
"('%s','%s','%s','%s','%s','%s','%s','%s')" % (
etck.get(), esifre.get(), eadi.get(), esoyadi.get(), eemail.get(), eil.get(),
eilce.get(), ebolum.get()))
messagebox.showinfo("Durum", "Kaydınız Başarıyla Tamamlanmıştır")
formgiris.deiconify()
kayitol.destroy()
db.commit()
except EXCEPTION as es:
messagebox.showerror("Hata", f"Boş alanları kontrol ediniz:{str(es)}")
def geri():
formgiris.deiconify()
kayitol.destroy()
tck = Label(kayitol, text=" Kullanıcı Adı:", fg="black", bg="white", font="Times 20 italic").place(x=1000, y=80)
etck = Entry(kayitol, bd=1, width=25)
etck.place(x=1280, y=80)
lsifre = Label(kayitol, text="Şifre :", fg="black", bg="white", font="Times 20 italic").place(x=1090, y=120)
esifre = Entry(kayitol, bd=1, width=25)
esifre.place(x=1280, y=120)
ladi = Label(kayitol, text="Adı :", fg="black", bg="white", font="Times 20 italic").place(x=1100, y=160)
eadi = Entry(kayitol, bd=1, width=25)
eadi.place(x=1280, y=160)
lsoyadi = Label(kayitol, text="Soyadi :", fg="black", bg="white", font="Times 20 italic").place(x=1060, y=200)
esoyadi = Entry(kayitol, bd=1, width=25)
esoyadi.place(x=1280, y=200)
lemail = Label(kayitol, text="Email :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=240)
eemail = Entry(kayitol, bd=1, width=25)
eemail.place(x=1280, y=240)
lil = Label(kayitol, text="İL :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=280)
eil = Entry(kayitol, bd=1, width=25)
eil.place(x=1280, y=280)
lilce = Label(kayitol, text="İlçe :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=320)
eilce = Entry(kayitol, bd=1, width=25)
eilce.place(x=1280, y=320)
lbolum = Label(kayitol, text="Bölüm :", fg="black", bg="white", font="Times 20 italic").place(x=1070, y=360)
ebolum = Entry(kayitol, bd=1, width=25)
ebolum.place(x=1280, y=360)
Kaydet = Button(kayitol, text="Kaydol", fg="black", bg="white", font="Times 20 italic", command=kaydet)
Kaydet.place(x=1280, y=400)
geri = Button(kayitol, text="Geri Dön", fg="black", bg="white", font="Times 20 italic", command=geri)
geri.place(x=1400, y=400)
kayitol.mainloop()
def Login():
def listele():
liste.delete(*liste.get_children())
mycursor.execute('select * from Maykod')
results = mycursor.fetchall()
for row in results:
sifre = row[2]
adi = row[3]
soyadi = row[4]
email = row[5]
il = row[6]
ilce = row[7]
bolum = row[8]
liste.insert("", 0, text=row[0], values=(row[1], sifre, adi, soyadi, email, il, ilce, bolum))
def ekle():
mycursor.execute("INSERT INTO Maykod (tc,sifre,adi,soyadi,email,il,ilce,bolum) VALUES "\
"('%s','%s','%s','%s','%s','%s','%s','%s')" % (
Etc.get(), Esif.get(), Ead.get(), Esad.get(), Email.get(), Eil.get(), Eilce.get(), Ebolum.get()))
db.commit()
listele()
def guncelle():
mycursor.execute("UPDATE Maykod SET tc='%s',sifre='%s',adi='%s',soyadi='%s',email='%s',il='%s',ilce='%s',bolum='%s'" \
" WHERE id='%s'" % (Etc.get(), Esif.get(), Ead.get(), Esad.get(), Email.get(), Eil.get(), Eilce.get(), Ebolum.get(), Eid.get()))
db.commit()
listele()
def sil():
mycursor.execute("DELETE FROM Maykod WHERE id=%s " % (Eid.get()))
db.commit()
listele()
def getir(event):
idno = liste.item(liste.selection()[0])['text']
mycursor.execute("SELECT * FROM Maykod WHERE id = %s" % (idno))
results = mycursor.fetchone()
Eid.delete(0, END)
Eid.insert(0, results[0])
Etc.delete(0, END)
Etc.insert(0, results[1])
Esif.delete(0, END)
Esif.insert(0, results[2])
Ead.delete(0, END)
Ead.insert(0, results[3])
Esad.delete(0, END)
Esad.insert(0, results[4])
Email.delete(0, END)
Email.insert(0, results[5])
Eil.delete(0, END)
Eil.insert(0, results[6])
Eilce.delete(0, END)
Eilce.insert(0, results[7])
Ebolum.delete(0, END)
Ebolum.insert(0, results[8])
def listetikla(event):
idtext = liste.item(liste.selection()[0])['values'][0]
tctext = liste.item(liste.selection()[0])['values'][1]
sifretext = liste.item(liste.selection()[0])['values'][2]
adtext = liste.item(liste.selection()[0])['values'][3]
soyadtext = liste.item(liste.selection()[0])['values'][4]
emailtext = liste.item(liste.selection()[0])['values'][5]
iltext = liste.item(liste.selection()[0])['values'][6]
ilcetext = liste.item(liste.selection()[0])['values'][7]
bolumtext = liste.item(liste.selection()[0])['values'][8]
Eid.delete(0, END)
Eid.insert(0, idtext)
Etc.delete(0, END)
Etc.insert(0, tctext)
Esif.delete(0, END)
Esif.insert(0, sifretext)
Ead.delete(0, END)
Ead.insert(0, adtext)
Esad.delete(0, END)
Esad.insert(0, soyadtext)
Email.delete(0, END)
Email.insert(0, emailtext)
Eil.delete(0, END)
Eil.insert(0, iltext)
Eilce.delete(0, END)
Eilce.insert(0, ilcetext)
Ebolum.delete(0, END)
Ebolum.insert(0, bolumtext)
form = Toplevel()
form.title('Maykod')
form.geometry('1500x800')
form.configure(background="grey")
my_pice = Image.open("adminpanel.jpg")
resizede = my_pice.resize((1300, 650), Image.ANTIALIAS)
new_pice = ImageTk.PhotoImage(resizede)
my_laben = Label(form,image=new_pice)
my_laben.place(x=100, y=50)
Lid = Label(form, text="ID", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=120)
Eid = Entry(form, bd=1)
Eid.place(x=1150, y=150)
ltc = Label(form, text="TC", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=170)
Etc = Entry(form, bd=1)
Etc.place(x=1150, y=200)
Lad = Label(form, text="ADI", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=220)
Ead = Entry(form, bd=1)
Ead.place(x=1150, y=250)
Lsad = Label(form, text="SOYADI", bg="#454f50",fg="white", font="Times 15 italic").place(x=1120, y=270)
Esad = Entry(form, bd=1)
Esad.place(x=1150, y=300)
Lsif = Label(form, text="ŞİFRE", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=320)
Esif = Entry(form, bd=1)
Esif.place(x=1150, y=350)
Lmail = Label(form, text="E-MAİL", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=370)
Email = Entry(form, bd=1)
Email.place(x=1150, y=400)
Lil = Label(form, text="İL", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=420)
Eil = Entry(form, bd=1)
Eil.place(x=1150, y=450)
Lilce = Label(form, text="İLÇE", bg="#454f50", fg="white",font="Times 15 italic").place(x=1120, y=470)
Eilce = Entry(form, bd=1)
Eilce.place(x=1150, y=500)
lbolum = Label(form, text="BÖLÜM", bg="#454f50", fg="white", font="Times 15 italic").place(x=1120, y=520)
Ebolum = Entry(form, bd=1)
Ebolum.place(x=1150, y=550)
Kaydet = Button(form, text="Kaydet", command=ekle)
Kaydet.place(x=1100, y=650)
sil = Button(form, text="Sil", command=sil)
sil.place(x=1180, y=650)
guncelle = Button(form, text="Güncelle", command=guncelle)
guncelle.place(x=1230, y=650)
liste = Treeview(form, height=10, selectmode="extended")
liste["columns"] = ('sut1', 'sut2', 'sut3', 'sut4', 'sut5', 'sut6', 'sut7', 'sut8')
liste.place(x=120, y=100)
liste.column("#0", width=50)
liste.heading("#0", text="id",)
liste.column("sut1", width=100)
liste.heading("sut1", text="tc")
liste.column("sut2", width=90)
liste.heading("sut2", text="sifre")
liste.column("sut3", width=120)
liste.heading("sut3", text="adi")
liste.column("sut4", width=120)
liste.heading("sut4", text="soyadi")
liste.column("sut5", width=120)
liste.heading("sut5", text="email")
liste.column("sut6", width=90)
liste.heading("sut6", text="il")
liste.column("sut7", width=120)
liste.heading("sut7", text="ilce")
liste.column("sut8", width=120)
liste.heading("sut8", text="bolum")
liste.bind('<ButtonRelease-1>', getir)
style = ttk.Style()
style.theme_use("default")
style.configure("Treeview",
background="yellow",
foreground="black",
fieldbackground="silver"
)
style.map('Treeview',
background=[('selected', 'blue')])
listele()
form.mainloop()
lgir = Label(formgiris, text="MAYKOD", fg="black", bg="white", font="Times 40 italic").place(x=200, y=0)
Lkul = Label(formgiris, text="Kullanıcı Adı:", fg="black", bg="white", font="Times 18 italic").place(x=150, y=180)
Eku = Entry(formgiris, bd=1, width=25)
Eku.place(x=150, y=210)
Lsif = Label(formgiris, text="Şifre :", fg="black", bg="white", font="Times 18 italic").place(x=150, y=250)
Esif = Entry(formgiris, bd=1, width=25)
Esif.place(x=150, y=280)
Kaydet = Button(formgiris, text="Giriş Yap", fg="black", bg="white", font="Times 22 italic", command=login)
Kaydet.place(x=150, y=330)
Kayit = Button(formgiris, text="Kayıt Ol", fg="black", bg="white", font="Times 22 italic", command=kayitolma)
Kayit.place(x=300, y=330)
formgiris.mainloop() | arslncanm/Kulup_otomasyon_Python_tkinter | MAYKOD/main.py | main.py | py | 24,326 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 10,
"usage_type": "name"
},
{
"... |
42812438276 | from __future__ import print_function
import numpy as np
from skimage import io
from tqdm import tqdm
import argparse
import os
from config import palette, invert_palette
def convert_to_color(arr_2d, palette=palette):
""" grayscale labels to RGB-color encoding """
arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)
for c, i in palette.items():
m = arr_2d == c
arr_3d[m] = i
return arr_3d
def convert_from_color(arr_3d, palette=invert_palette):
""" RGB-color encoding to grayscale labels """
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("images", help="Images to process (at least one)",
nargs='+')
parser.add_argument("--to-color",
help="Convert from grayscale labels"
"to RGB encoded labels",
action="store_true")
parser.add_argument("--from-color",
help="Convert from RGB encoded labels"
"to grayscale labels",
action="store_true")
parser.add_argument("--out",
help="Folder where to save the modified images",
type=str)
args = parser.parse_args()
files = args.images
if args.to_color and args.from_color:
raise ValueError("Cannot specify both --from-color"
"and --to-color at the same time")
elif args.to_color:
convert_fun = convert_to_color
elif args.from_color:
convert_fun = convert_from_color
else:
raise ValueError("You need to specify whether to convert"
"from or to the RGB color labels")
if args.out is None:
OUTPUT_FOLDER = './out'
else:
OUTPUT_FOLDER = args.out
if os.path.isdir(OUTPUT_FOLDER):
print("WARNING : output folder {} exists !".format(OUTPUT_FOLDER))
else:
os.mkdir(OUTPUT_FOLDER)
for f in tqdm(files):
filename = f.split('/')[-1]
img = io.imread(f)
new_img = convert_fun(img)
io.imsave(OUTPUT_FOLDER + '/' + filename, new_img)
| nshaud/DeepNetsForEO | legacy/notebooks/convert_gt.py | convert_gt.py | py | 2,413 | python | en | code | 468 | github-code | 6 | [
{
"api_name": "config.palette",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "config.palette.items",
... |
23361190894 | import os
import numpy as np
from numpy import array, zeros, diag, diagflat, dot
import numpy
from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import copy
from os.path import join, dirname, realpath
UPLOAD_FOLDER = './uploads/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
UPLOADS_PATH = join(dirname(realpath(__file__)), 'static/uploads/..')
ITERATION_LIMIT = 1000
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def check_roots(A, B, x):
result=list()
answer = list()
message = None
print(x)
print(A)
print(B)
for row in range(len(A)):
line_result = 0.0
for col in range(len(A)):
check = A[row][col] * x[col]
line_result += check
result.append(round(line_result))
print(result)
for i in range(len(result)):
if result[i] == B[i]:
answer.append(True)
else:
answer.append(False)
print(answer)
if len(answer) == 3:
if answer == [True, True, True]:
message = 'Root is correct!'
else:
message = 'Root is incorrect!'
else:
if answer == [True, True]:
message = 'Root is correct!'
else:
message = 'Root is incorrect!'
return message
def dd(X):
result = None
D = np.diag(np.abs(X)) # Find diagonal coefficients
S = np.sum(np.abs(X), axis=1) - D # Find row sum without diagonal
if np.all(D > S):
result = 'Matrix is diagonally dominant!'
else:
result = 'Matrix is not diagonally dominant!'
return result
def Jacobi(A, b):
x=None
if x is None:
x = zeros(len(A[0]))
D = diag(A)
print(D)
print(diagflat(D))
R = A - diagflat(D)
for i in range(ITERATION_LIMIT):
x = (b - dot(R,x)) / D
return x.tolist()
def Jordan_Causs(n, a, b):
j = 0
for i in a:
length = len(b)
i.append(b[j])
j+=1
x = np.zeros(n)
for i in range(n):
if a[i][i] == 0.0:
sys.exit('Divide by zero detected!')
for j in range(n):
if i != j:
ratio = a[j][i]/a[i][i]
for k in range(n+1):
a[j][k] = a[j][k] - ratio * a[i][k]
for i in range(n):
x[i] = a[i][n]/a[i][i]
return x
def TakeMatrix(Matrix_a):
array = Matrix_a[0].split(' ')
delta1 = list()
for i in array:
delta1.append(int(i))
print(delta1)
size = len(delta1)
line1 = list()
line2 = list()
line3 = list()
line4 = list()
delta = list()
if size == 9:
for i in delta1[:3]:
line1.append(i)
delta.append(line1)
for i in delta1[3:6]:
line2.append(i)
delta.append(line2)
for i in delta1[6:9]:
line3.append(i)
delta.append(line3)
if size == 4:
for i in delta1[:2]:
line1.append(i)
delta.append(line1)
for i in delta1[2:]:
line2.append(i)
delta.append(line2)
# delta = [[delta1[0], delta1[1], delta1[2]],[delta1[3], delta1[4], delta1[5]], [delta1[6], delta1[7], delta1[8]]]
return delta
def TakeB(Matrix_b):
array = Matrix_b[0].split(' ')
delta1 = list()
for i in array:
delta1.append(int(i))
return delta1
def SwapRows(A, B, row1, row2):
A[row1], A[row2] = A[row2], A[row1]
B[row1], B[row2] = B[row2], B[row1]
def DivideRow(A, B, row, divider):
A[row] = [a / divider for a in A[row]]
B[row] /= divider
def CombineRows(A, B, row, source_row, weight):
A[row] = [(a + k * weight) for a, k in zip(A[row], A[source_row])]
B[row] += B[source_row] * weight
def Gauss(A, B):
column = 0
while (column < len(B)):
current_row = None
for r in range(column, len(A)):
if current_row is None or abs(A[r][column]) > abs(A[current_row][column]):
current_row = r
if current_row is None:
return None
if current_row != column:
SwapRows(A, B, current_row, column)
DivideRow(A, B, column, A[column][column])
for r in range(column + 1, len(A)):
CombineRows(A, B, r, column, -A[r][column])
column += 1
X = [0 for b in B]
for i in range(len(B) - 1, -1, -1):
X[i] = B[i] - sum(x * a for x, a in zip(X[(i + 1):], A[i][(i + 1):]))
return X
def Zeidel(A, b):
x = [.0 for i in range(len(A))]
Iteration = 0
converge = False
pogr = 0.
while not converge:
x_new = copy.copy(x)
for i in range(len(A)):
s1 = sum(A[i][j] * x_new[j] for j in range(i))
s2 = sum(A[i][j] * x[j] for j in range(i + 1, len(A)))
x_new[i] = (b[i] - s1 - s2) / A[i][i]
pogr = sum(abs(x_new[i] - x[i]) for i in range(len(A)))
converge = pogr < 1e-6
Iteration += 1
x = x_new
return x
@app.route('/')
def hello_world():
return render_template('main_menu.html')
@app.route('/task_1', methods=['post', 'get'])
def Task_One():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
if request.method == 'POST':
check = request.form.get('check')
print(check)
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
M3 = numpy.array(array)
v3 = numpy.array(array1)
result = numpy.linalg.solve(M3, v3)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_1.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_2', methods=['post', 'get'])
def Task_Two():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch= None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
result = Gauss(array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_2.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_3', methods=['post', 'get'])
def Task_3():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
check_matrix = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
if a != None and b != None:
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
check_matrix = dd(array)
result = Zeidel(array, array1)
else:
result = None
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_3.html', result=result, check_matrix=check_matrix, ch=ch)
@app.route('/task_4', methods=['post', 'get'])
def Task_4():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
result = Jordan_Causs(3, array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_4.html', array=array, array1=array1, result=result, ch=ch)
@app.route('/task_5', methods=['post', 'get'])
def Task_5():
a_list = list()
b_list = list()
array = []
array1 = []
result = []
ch = None
check_matrix = None
if request.method == 'POST':
check = request.form.get('check')
a = request.form.get('A')
b = request.form.get('B')
a_list.append(a)
b_list.append(b)
array = TakeMatrix(a_list)
array1 = TakeB(b_list)
check_matrix = dd(array)
result = Jacobi(array, array1)
if check == 'on':
ch = check_roots(array, array1, result)
else:
pass
return render_template('task_5.html', result=result, check_matrix=check_matrix, ch=ch)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['post', 'get'])
def Read_From_File():
pick = None
filename= None
list_a = list()
list_b = list()
result = None
if request.method == 'POST':
option = request.form.get('op')
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
with open('./uploads/'+filename, 'r') as file:
line = file.read()[2:].split('\n')
for lin in line:
new_list = lin.split(' ')
arr = list()
for i in new_list:
arr.append(float(i))
list_a.append(arr)
for i in list_a:
list_b.append(i[-1])
del i[-1]
pick = option
print(pick)
if pick == '1':
M3 = numpy.array(list_a)
v3 = numpy.array(list_b)
result = numpy.linalg.solve(M3, v3)
if pick == '2':
result = Gauss(list_a, list_b)
if pick == '3':
result = Zeidel(list_a, list_b)
if pick == '4':
result = Jordan_Causs(3, list_a, list_b)
if pick == '5':
result = Jacobi(list_a, list_b)
print(result)
return render_template('upload_file.html', pick=pick, list_a=list_a, list_b=list_b, result=result)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
| konstantinkonstantinovich/Numerical-Methods-Sprint01- | Sprint01/app.py | app.py | py | 9,328 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line... |
855878754 | #!/usr/bin/env python
# This example shows how to extract portions of an unstructured grid
# using vtkExtractUnstructuredGrid. vtkConnectivityFilter is also used
# to extract connected components.
#
# The data found here represents a blow molding process. Blow molding
# requires a mold and parison (hot, viscous plastic) which is shaped
# by the mold into the final form. The data file contains several steps
# in time for the analysis.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create a reader to read the unstructured grid data. We use a
# vtkDataSetReader which means the type of the output is unknown until
# the data file is read. So we follow the reader with a
# vtkCastToConcrete and cast the output to vtkUnstructuredGrid.
reader = vtk.vtkDataSetReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
reader.SetScalarsName("thickness9")
reader.SetVectorsName("displacement9")
castToUnstructuredGrid = vtk.vtkCastToConcrete()
castToUnstructuredGrid.SetInputConnection(reader.GetOutputPort())
warp = vtk.vtkWarpVector()
warp.SetInput(castToUnstructuredGrid.GetUnstructuredGridOutput())
# The connectivity filter extracts the first two regions. These are
# know to represent the mold.
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(warp.GetOutputPort())
connect.SetExtractionModeToSpecifiedRegions()
connect.AddSpecifiedRegion(0)
connect.AddSpecifiedRegion(1)
moldMapper = vtk.vtkDataSetMapper()
moldMapper.SetInputConnection(reader.GetOutputPort())
moldMapper.ScalarVisibilityOff()
moldActor = vtk.vtkActor()
moldActor.SetMapper(moldMapper)
moldActor.GetProperty().SetColor(.2, .2, .2)
moldActor.GetProperty().SetRepresentationToWireframe()
# Another connectivity filter is used to extract the parison.
connect2 = vtk.vtkConnectivityFilter()
connect2.SetInputConnection(warp.GetOutputPort())
connect2.SetExtractionModeToSpecifiedRegions()
connect2.AddSpecifiedRegion(2)
# We use vtkExtractUnstructuredGrid because we are interested in
# looking at just a few cells. We use cell clipping via cell id to
# extract the portion of the grid we are interested in.
extractGrid = vtk.vtkExtractUnstructuredGrid()
extractGrid.SetInputConnection(connect2.GetOutputPort())
extractGrid.CellClippingOn()
extractGrid.SetCellMinimum(0)
extractGrid.SetCellMaximum(23)
parison = vtk.vtkGeometryFilter()
parison.SetInputConnection(extractGrid.GetOutputPort())
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(parison.GetOutputPort())
normals2.SetFeatureAngle(60)
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 0.66667)
parisonMapper = vtk.vtkPolyDataMapper()
parisonMapper.SetInputConnection(normals2.GetOutputPort())
parisonMapper.SetLookupTable(lut)
parisonMapper.SetScalarRange(0.12, 1.0)
parisonActor = vtk.vtkActor()
parisonActor.SetMapper(parisonMapper)
# graphics stuff
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(parisonActor)
ren.AddActor(moldActor)
ren.SetBackground(1, 1, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(60)
ren.GetActiveCamera().Roll(-90)
ren.GetActiveCamera().Dolly(2)
ren.ResetCameraClippingRange()
renWin.SetSize(500, 375)
iren.Initialize()
renWin.Render()
iren.Start()
| VisTrails/VisTrails | examples/vtk_examples/VisualizationAlgorithms/ExtractUGrid.py | ExtractUGrid.py | py | 3,366 | python | en | code | 100 | github-code | 6 | [
{
"api_name": "vtk.util.misc.vtkGetDataRoot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "vtk.vtkDataSetReader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "vtk.vtkCastToConcrete",
"line_number": 24,
"usage_type": "call"
},
{
"api_name"... |
71608494589 | # coding=utf-8
import logging
from datetime import datetime
import markupsafe
from playhouse.shortcuts import dict_to_model, model_to_dict
from app import components
from app.notes.model import Note, TaggedNote
from app.tags import tagService
from app.categories import categoryService
class NoteService(components.Service):
name = "notes"
model_class = Note
def __init__(self):
super().__init__()
def fetch_all_items(self, category_filter, milestone_filter):
user_id = components.current_user_id()
category_select = categoryService.category_filter_helper(Note, user_id, category_filter)
milestone_select = []
# milestone_filter == "all"
# milestone_filter == "unassigned"
# else ...
return Note.select(Note).where(
Note.is_deleted == False,
*category_select,
*milestone_select,
Note.owner_id == user_id
).order_by(Note.edited.desc()).objects()
def create_item(self, item_json):
(item_json, tags) = self._select_and_sanitize_tags(item_json)
# Check if user has ownership over the category given
if ("category" in item_json and item_json["category"] and not categoryService.read_item(item_json["category"])):
raise components.BadRequestError()
item = dict_to_model(Note, item_json)
item.content = markupsafe.escape(markupsafe.Markup(item.content))
item.owner = components.current_user()
item.save(force_insert=True)
item.tags.add(tags)
return item
def update_item(self, item_id, item_json):
myItem = self.read_item(item_id)
(item_json, tags) = self._select_and_sanitize_tags(item_json)
item = dict_to_model(Note, item_json)
with components.DB.atomic():
item.id = int(myItem.id)
item.changed()
item.save()
item.tags.clear()
item.tags.add(tags)
return item
raise RuntimeError("Could not update note")
def serialize_item(self, item):
item_json = model_to_dict(item, exclude=(
Note.is_deleted,
Note.owner,
Note.tags
), recurse=False)
tags = [tag for tag in item.tags]
item_json["tags"] = [tag.tag for tag in tags]
return item_json
def sanitize_fields(self, item_json):
if "due_date" in item_json:
due_date = datetime.fromtimestamp(int(item_json["due_date"])).date() if item_json["due_date"] else None
item_json["due_date"] = due_date
return super().sanitize_fields(item_json)
def _select_and_sanitize_tags(self, item_json):
tags = []
item_json = self.sanitize_fields(item_json)
if "tags" in item_json:
tags = tagService.bulk_search_or_insert(item_json["tags"])
del item_json["tags"]
logging.debug("Selected tags:" + ",".join([tag.tag for tag in tags]))
return (item_json, tags)
noteService = NoteService()
# ----------------------------------------
class Module(components.Module):
from app.notes.controller import NoteListController, NoteController
name = "notes"
services = [noteService]
models = [Note, TaggedNote]
controllers = [NoteListController, NoteController]
module = Module()
| caiwan/cai-notepad | backend/app/notes/__init__.py | __init__.py | py | 3,356 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "app.components.Service",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "app.components",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "app.notes.model.Note",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "app.c... |
30727489542 | from pydantic import BaseModel, Field, validator
class Address(BaseModel):
region: str
city: str
street_type: str
street: str
house_type: str
house: str
value: str
lat: float
lng: float
class Salary(BaseModel):
from_: int = Field(alias='from')
to: int
currency: str
gross: bool
class Contacts(BaseModel):
fullName: str
phone: str
email: str
'''тривиальная проверка почты'''
@validator('email')
def at_in_email(cls, v: str) -> str:
if not '@' in v:
raise ValueError('Email некорректный')
return v
class CandidateInfo(BaseModel):
description: str
employment: str
address: Address
name: str
salary: Salary
contacts: Contacts
class Experience(BaseModel):
id = "noMatter"
class ChangedCoordinates(BaseModel):
latitude: float
longitude: float
class Phone(BaseModel):
city: str
country: str
number: str
class ChangedContacts(BaseModel):
email: str
name: str
phone: Phone
class ChangedSalary(BaseModel):
from_: int = Field(alias='from')
to: int
class Schedule(BaseModel):
id: str
class ResultInfo(BaseModel):
address: str
allow_messages = True
billing_type = "packageOrSingle"
business_area = 1
contacts: ChangedContacts
coordinates: ChangedCoordinates
description: str
experience: Experience
html_tags = True
image_url = "https://img.hhcdn.ru/employer-logo/3410666.jpeg"
name: str
salary: int
salary_range: ChangedSalary
schedule: Schedule
| SayKonstantin/data_validation | models.py | models.py | py | 1,625 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseMode... |
10565146032 | from matplotlib import pyplot
import matplotlib.pyplot as plt
import random, operator, math
from collections import defaultdict
def import_data(filename):
with open (filename, "r") as f:
dataPoints = [(float(line.split()[1]), float(line.split()[2])) \
for line in f if '#' not in line]
return dataPoints
def absolute_distance(x, y):
return abs(x[0] - y[0])
def squared_euclidean_distance(x, y):
dist = sum([(a-b)**2 for (a,b) in zip(x,y)])
return dist
# Calculate the z-score of each data point
def normalize(dataPoints):
new_pts = []
for dim_pts in zip(*dataPoints):
total = sum(dim_pts)
mean = total/len(dataPoints)
square_diffs = [(pt-mean)**2 for pt in dim_pts]
variance = sum(square_diffs)/len(dataPoints)
std_dev = math.sqrt(variance)
new_pts.append([(pt - mean)/std_dev for pt in dim_pts])
return list(zip(*new_pts))
# Args:
# dataPts, an array of tuples
# numClusters: the number of clusters to partition the data into
# Returns:
# A dictionary of the form cluster_id => list of dataPts indices
def kmeans(dataPts, numClusters):
dims = len(dataPts[0])
dataPts = normalize(dataPts)
if(dims == 1):
metric = absolute_distance
elif(dims == 2):
metric = squared_euclidean_distance
# Initialize by selecting random points as centers
means = random.sample(dataPts, numClusters)
while True:
clusters = defaultdict(list)
# Calculate cluster assignment for each point
for pt_idx, pt in enumerate(dataPts):
# Calculate the distance to each mean
distances = [metric(pt, m) for m in means]
# Assign to the cluster with the closest mean
min_idx, min_value = min(enumerate(distances), key=operator.itemgetter(1))
clusters[min_idx].append(pt_idx)
# Calculate the new means
new_means = []
for cluster_idx, pts_idx in clusters.items():
pts = [dataPts[idx] for idx in pts_idx]
n = len(pts)
m = [sum(dim)/n for dim in zip(*pts)]
new_means.append(m)
# check if we have converged
if new_means == means:
break
means = new_means
return clusters
# Calculate the VRC value for the given data points and k
def vrc(dataPoints, k):
clusters = kmeans(dataPoints, k)
dataPoints = normalize(dataPoints)
cluster_pts = [[dataPoints[idx] for idx in pts_idx] for pts_idx in clusters.values()]
metric = squared_euclidean_distance
grand_mean = [sum(pts)/len(dataPoints) for pts in zip(*dataPoints)]
ssb = 0
ssw = 0
for cluster in cluster_pts:
n = len(cluster)
center = [sum(pts)/n for pts in zip(*cluster)]
ssb += metric(grand_mean, center)*n
ssw += sum([metric(center, pt) for pt in cluster])
return (ssb/(k-1))/(ssw/(len(dataPoints)-k))
# Find the best k for the given data points
def min_vrc(dataPoints):
vrcs = {k: vrc(dataPoints, k) for k in range(2, 11)}
min_val = float("inf")
best_k = 0
for k in range(3, 10):
val = ((vrcs[k+1] - vrcs[k]) - (vrcs[k] - vrcs[k-1]))
if val < min_val:
min_val = val
best_k = k
return best_k
# Plot a single cluster
def plot_cluster(dataPoints, colour):
x = [point[0] for point in dataPoints]
y = [point[1] for point in dataPoints]
pyplot.scatter(x, y, color=colour) #ro meant red+dot
# Plot all clusters
def plot_clusters(clusters):
cluster_pts = []
color = ['Red', 'Green', 'Blue', 'Orange', 'Purple', 'Magenta', 'Black', 'Pink', 'Brown']
for cluster_idx, pts_idx in clusters.items():
cluster_pts.append([dataPoints[idx] for idx in pts_idx])
for idx, cluster in enumerate(cluster_pts):
plot_cluster(cluster, color[idx])
pyplot.show()
dataPoints = import_data('Exercise-8.dat')
# one dimensional clustering
xs = [(pt[0],) for pt in dataPoints]
ys = [(pt[1],) for pt in dataPoints]
#clusters = kmeans(xs, 2)
#clusters = kmeans(ys, 2)
# multi-dimensional clustering
clusters = kmeans(dataPoints, 6)
plot_clusters(clusters) | steffervescency/compling | exercise8/coli_ex_8.py | coli_ex_8.py | py | 4,401 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",... |
73727858748 | #!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
import matplotlib.pyplot as plt
CAMPAIGN_NAME = "Conduction."
def refine_sampling_plan(campaign, analysis, number_of_refinements):
"""
Refine the sampling plan.
Parameters
----------
number_of_refinements (int)
The number of refinement iterations that must be performed.
Returns
-------
None. The new accepted indices are stored in analysis.l_norm and the admissible indices
in sampler.admissible_idx.
"""
sampler = campaign.get_active_sampler()
for _ in range(number_of_refinements):
# compute the admissible indices
sampler.look_ahead(analysis.l_norm)
print(f"Code will be evaluated {sampler.n_new_points[-1]} times")
# run the ensemble
campaign.execute().collate(progress_bar=True)
# accept one of the multi indices of the new admissible set
data_frame = campaign.get_collation_result()
analysis.adapt_dimension("T", data_frame)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
def plot_grid_2D(campaign, analysis, i, filename="out.pdf"):
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)
ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], "o")
ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], "o")
ax1.set_title(f"iteration {i}")
fig.tight_layout()
fig.savefig(filename)
def custom_moments_plot(results, filename, i):
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.set_title("iteration " + str(i))
ax.legend()
fig.savefig(filename)
def first_time_setup():
encoder = boutvecma.BOUTEncoder(
template_input="../../models/conduction/data/BOUT.inp"
)
# decoder = boutvecma.LogDataBOUTDecoder(variables=["T"])
decoder = boutvecma.SimpleBOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
actions = uq.actions.local_execute(
encoder,
os.path.abspath(
"../../build/models/conduction/conduction -q -q -q -q -d . |& tee run.log"
),
decoder,
root=".",
)
campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.5, 1.5),
"T:gauss_centre": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),
}
sampler = uq.sampling.SCSampler(
vary=vary,
polynomial_order=1,
quadrature_rule="C",
sparse=True,
growth=True,
midpoint_level1=True,
dimension_adaptive=True,
)
campaign.set_sampler(sampler)
print(f"Output will be in {campaign.campaign_dir}")
sampler = campaign.get_active_sampler()
print(f"Computing {sampler.n_samples} samples")
time_start = time.time()
campaign.execute().collate(progress_bar=True)
# Create an analysis class and run the analysis.
analysis = create_analysis(campaign)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
plot_grid_2D(campaign, analysis, 0, f"{campaign.campaign_dir}/grid0.png")
for i in np.arange(1, 10):
refine_once(campaign, analysis, i)
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
return campaign
def create_analysis(campaign):
return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=["T"])
def refine_once(campaign, analysis, iteration):
refine_sampling_plan(campaign, analysis, 1)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
results = campaign.last_analysis
plot_grid_2D(
campaign,
analysis,
iteration,
f"{campaign.campaign_dir}/grid{iteration:02}.png",
)
moment_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"moments{iteration:02}.png"
)
sobols_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"sobols_first{iteration:02}.png"
)
results.plot_sobols_first(
"T",
ylabel=f"iteration{iteration}",
xlabel=r"$\rho$",
filename=sobols_plot_filename,
)
plt.ylim(0, 1)
plt.savefig(f"{campaign.campaign_dir}/sobols{iteration:02}.png")
custom_moments_plot(results, moment_plot_filename, iteration)
with open(f"{campaign.campaign_dir}/last_iteration", "w") as f:
f.write(f"{iteration}")
def plot_results(campaign, moment_plot_filename, sobols_plot_filename):
results = campaign.get_last_analysis()
results.plot_sobols_first("T", xlabel=r"$\rho$", filename=sobols_plot_filename)
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.legend()
fig.savefig(moment_plot_filename)
print(f"Results are in:\n\t{moment_plot_filename}\n\t{sobols_plot_filename}")
def reload_campaign(directory):
"""Reload a campaign from a directory
Returns the campaign, analysis, and last iteration number
"""
campaign = uq.Campaign(
name=CAMPAIGN_NAME,
db_location=f"sqlite:///{os.path.abspath(directory)}/campaign.db",
)
analysis = create_analysis(campaign)
analysis.load_state(f"{campaign.campaign_dir}/analysis.state")
with open(f"{campaign.campaign_dir}/last_iteration", "r") as f:
iteration = int(f.read())
return campaign, analysis, iteration
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"conduction_sc",
description="Adaptive dimension refinement for 1D conduction model",
)
parser.add_argument(
"--restart", type=str, help="Restart previous campaign", default=None
)
parser.add_argument(
"-n", "--refinement-num", type=int, default=1, help="Number of refinements"
)
args = parser.parse_args()
if args.restart is None:
first_time_setup()
else:
campaign, analysis, last_iteration = reload_campaign(args.restart)
for iteration in range(
last_iteration + 1, last_iteration + args.refinement_num + 1
):
refine_once(campaign, analysis, iteration)
| boutproject/VECMA-hackathon | workflows/sc_adaptive_restartable/example_restartable_sc_adaptive.py | example_restartable_sc_adaptive.py | py | 8,019 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": ... |
3476194370 | from collections.abc import MutableMapping
from collections.abc import MutableSequence
from dpath import options
from dpath.exceptions import InvalidKeyName
import dpath.segments
_DEFAULT_SENTINAL = object()
MERGE_REPLACE = (1 << 1)
MERGE_ADDITIVE = (1 << 2)
MERGE_TYPESAFE = (1 << 3)
def __safe_path__(path, separator):
'''
Given a path and separator, return a tuple of segments. If path is
already a non-leaf thing, return it.
Note that a string path with the separator at index[0] will have the
separator stripped off. If you pass a list path, the separator is
ignored, and is assumed to be part of each key glob. It will not be
stripped.
'''
if not dpath.segments.leaf(path):
segments = path
else:
segments = path.lstrip(separator).split(separator)
# FIXME: This check was in the old internal library, but I can't
# see a way it could fail...
for i, segment in enumerate(segments):
if (separator and (separator in segment)):
raise InvalidKeyName("{} at {}[{}] contains the separator '{}'"
"".format(segment, segments, i, separator))
if options.CONVERT_INT_LIKE_SEGMENTS:
# Attempt to convert integer segments into actual integers.
final = []
for segment in segments:
try:
final.append(int(segment))
except:
final.append(segment)
segments = final
return segments
def new(obj, path, value, separator='/', creator=None):
'''
Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys
creator allows you to pass in a creator method that is
responsible for creating missing keys at arbitrary levels of
the path (see the help for dpath.path.set)
'''
segments = __safe_path__(path, separator)
if creator:
return dpath.segments.set(obj, segments, value, creator=creator)
return dpath.segments.set(obj, segments, value)
def delete(obj, glob, separator='/', afilter=None):
'''
Given a obj, delete all elements that match the glob.
Returns the number of deleted objects. Raises PathNotFound if no paths are
found to delete.
'''
globlist = __safe_path__(glob, separator)
def f(obj, pair, counter):
(segments, value) = pair
# Skip segments if they no longer exist in obj.
if not dpath.segments.has(obj, segments):
return
matched = dpath.segments.match(segments, globlist)
selected = afilter and dpath.segments.leaf(value) and afilter(value)
if (matched and not afilter) or selected:
key = segments[-1]
parent = dpath.segments.get(obj, segments[:-1])
try:
# Attempt to treat parent like a sequence.
parent[0]
if len(parent) - 1 == key:
# Removing the last element of a sequence. It can be
# truly removed without affecting the ordering of
# remaining items.
#
# Note: In order to achieve proper behavior we are
# relying on the reverse iteration of
# non-dictionaries from dpath.segments.kvs().
# Otherwise we'd be unable to delete all the tails
# of a list and end up with None values when we
# don't need them.
del parent[key]
else:
# This key can't be removed completely because it
# would affect the order of items that remain in our
# result.
parent[key] = None
except:
# Attempt to treat parent like a dictionary instead.
del parent[key]
counter[0] += 1
[deleted] = dpath.segments.foldm(obj, f, [0])
if not deleted:
raise dpath.exceptions.PathNotFound("Could not find {0} to delete it".format(glob))
return deleted
def set(obj, glob, value, separator='/', afilter=None):
'''
Given a path glob, set all existing elements in the document
to the given value. Returns the number of elements changed.
'''
globlist = __safe_path__(glob, separator)
def f(obj, pair, counter):
(segments, found) = pair
# Skip segments if they no longer exist in obj.
if not dpath.segments.has(obj, segments):
return
matched = dpath.segments.match(segments, globlist)
selected = afilter and dpath.segments.leaf(found) and afilter(found)
if (matched and not afilter) or (matched and selected):
dpath.segments.set(obj, segments, value, creator=None)
counter[0] += 1
[changed] = dpath.segments.foldm(obj, f, [0])
return changed
def get(obj, glob, separator='/', default=_DEFAULT_SENTINAL):
'''
Given an object which contains only one possible match for the given glob,
return the value for the leaf matching the given glob.
If the glob is not found and a default is provided,
the default is returned.
If more than one leaf matches the glob, ValueError is raised. If the glob is
not found and a default is not provided, KeyError is raised.
'''
if glob == '/':
return obj
globlist = __safe_path__(glob, separator)
def f(obj, pair, results):
(segments, found) = pair
if dpath.segments.match(segments, globlist):
results.append(found)
if len(results) > 1:
return False
results = dpath.segments.fold(obj, f, [])
if len(results) == 0:
if default is not _DEFAULT_SENTINAL:
return default
raise KeyError(glob)
elif len(results) > 1:
raise ValueError("dpath.util.get() globs must match only one leaf : %s" % glob)
return results[0]
def values(obj, glob, separator='/', afilter=None, dirs=True):
'''
Given an object and a path glob, return an array of all values which match
the glob. The arguments to this function are identical to those of search().
'''
yielded = True
return [v for p, v in search(obj, glob, yielded, separator, afilter, dirs)]
def search(obj, glob, yielded=False, separator='/', afilter=None, dirs=True):
'''
Given a path glob, return a dictionary containing all keys
that matched the given glob.
If 'yielded' is true, then a dictionary will not be returned.
Instead tuples will be yielded in the form of (path, value) for
every element in the document that matched the glob.
'''
globlist = __safe_path__(glob, separator)
def keeper(segments, found):
'''
Generalized test for use in both yielded and folded cases.
Returns True if we want this result. Otherwise returns False.
'''
if not dirs and not dpath.segments.leaf(found):
return False
matched = dpath.segments.match(segments, globlist)
selected = afilter and afilter(found)
return (matched and not afilter) or (matched and selected)
if yielded:
def yielder():
for segments, found in dpath.segments.walk(obj):
if keeper(segments, found):
yield (separator.join(map(dpath.segments.int_str, segments)), found)
return yielder()
else:
def f(obj, pair, result):
(segments, found) = pair
if keeper(segments, found):
dpath.segments.set(result, segments, found, hints=dpath.segments.types(obj, segments))
return dpath.segments.fold(obj, f, {})
def merge(dst, src, separator='/', afilter=None, flags=MERGE_ADDITIVE):
'''
Merge source into destination. Like dict.update() but performs deep
merging.
NOTE: This does not do a deep copy of the source object. Applying merge
will result in references to src being present in the dst tree. If you do
not want src to potentially be modified by other changes in dst (e.g. more
merge calls), then use a deep copy of src.
NOTE that merge() does NOT copy objects - it REFERENCES. If you merge
take these two dictionaries:
>>> a = {'a': [0] }
>>> b = {'a': [1] }
... and you merge them into an empty dictionary, like so:
>>> d = {}
>>> dpath.util.merge(d, a)
>>> dpath.util.merge(d, b)
... you might be surprised to find that a['a'] now contains [0, 1].
This is because merge() says (d['a'] = a['a']), and thus creates a reference.
This reference is then modified when b is merged, causing both d and
a to have ['a'][0, 1]. To avoid this, make your own deep copies of source
objects that you intend to merge. For further notes see
https://github.com/akesterson/dpath-python/issues/58
flags is an OR'ed combination of MERGE_ADDITIVE, MERGE_REPLACE,
MERGE_TYPESAFE.
* MERGE_ADDITIVE : List objects are combined onto one long
list (NOT a set). This is the default flag.
* MERGE_REPLACE : Instead of combining list objects, when
2 list objects are at an equal depth of merge, replace
the destination with the source.
* MERGE_TYPESAFE : When 2 keys at equal levels are of different
types, raise a TypeError exception. By default, the source
replaces the destination in this situation.
'''
filtered_src = search(src, '**', afilter=afilter, separator='/')
def are_both_mutable(o1, o2):
mapP = isinstance(o1, MutableMapping) and isinstance(o2, MutableMapping)
seqP = isinstance(o1, MutableSequence) and isinstance(o2, MutableSequence)
if mapP or seqP:
return True
return False
def merger(dst, src, _segments=()):
for key, found in dpath.segments.kvs(src):
# Our current path in the source.
segments = _segments + (key,)
if len(key) == 0 and not options.ALLOW_EMPTY_STRING_KEYS:
raise InvalidKeyName("Empty string keys not allowed without "
"dpath.options.ALLOW_EMPTY_STRING_KEYS=True: "
"{}".format(segments))
# Validate src and dst types match.
if flags & MERGE_TYPESAFE:
if dpath.segments.has(dst, segments):
target = dpath.segments.get(dst, segments)
tt = type(target)
ft = type(found)
if tt != ft:
path = separator.join(segments)
raise TypeError("Cannot merge objects of type"
"{0} and {1} at {2}"
"".format(tt, ft, path))
# Path not present in destination, create it.
if not dpath.segments.has(dst, segments):
dpath.segments.set(dst, segments, found)
continue
# Retrieve the value in the destination.
target = dpath.segments.get(dst, segments)
# If the types don't match, replace it.
if ((type(found) != type(target)) and (not are_both_mutable(found, target))):
dpath.segments.set(dst, segments, found)
continue
# If target is a leaf, the replace it.
if dpath.segments.leaf(target):
dpath.segments.set(dst, segments, found)
continue
# At this point we know:
#
# * The target exists.
# * The types match.
# * The target isn't a leaf.
#
# Pretend we have a sequence and account for the flags.
try:
if flags & MERGE_ADDITIVE:
target += found
continue
if flags & MERGE_REPLACE:
try:
target['']
except TypeError:
dpath.segments.set(dst, segments, found)
continue
except:
raise
except:
# We have a dictionary like thing and we need to attempt to
# recursively merge it.
merger(dst, found, segments)
merger(dst, filtered_src)
return dst
| gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs | myenve/Lib/site-packages/dpath/util.py | util.py | py | 12,695 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "dpath.segments.leaf",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dpath.segments",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "dpath.exceptions.InvalidKeyName",
"line_number": 32,
"usage_type": "call"
},
{
"api_name"... |
33126999128 |
from sklearn.model_selection import train_test_split
from src.config import config
from mindspore import Tensor
import mindspore
class ModelDataProcessor:
def __init__(self):
self.get_dict()
def get_dict(self):
self.word_dict = {}
with open(config.vocab_file, 'r') as f:
cnt = 0
for line in f:
line = line.rstrip()
self.word_dict[line] = cnt
cnt += 1
def process_file(self, file_name:str):
setences_list = []
with open(file_name, 'r', encoding='Windows-1252') as f:
for line in f:
text = line.rstrip().split()
setences_list.append(text)
return setences_list
def process_data(self, file_name_pos, file_name_neg):
setences_list_pos = self.process_file(file_name_pos)
setences_list_neg = self.process_file(file_name_neg)
# 添加标签
setences_list = setences_list_pos + setences_list_neg
labels = [1 for i in range(len(setences_list_pos))] + [0 for i in range(len(setences_list_neg))]
# 制作数据集
X_train, X_test, y_train, y_test = train_test_split(setences_list, labels, test_size=0.3, shuffle=True, random_state=0, stratify=labels)
return X_train, X_test, y_train, y_test
def get_data(self):
# 提供给训练文件获取分割好的数据集
file_name_pos = './data/rt-polaritydata/pos.txt'
file_name_neg = './data/rt-polaritydata/neg.txt'
X_train, X_test, y_train, y_test = self.process_data(file_name_pos, file_name_neg)
return X_train, X_test, y_train, y_test
def get_data_loader(self):
X_train, X_test, y_train, y_test = self.get_data()
# 中间应该还增加对文本的编码
train_text_ids = [[self.word_dict[word] for word in item] for item in X_train]
test_text_ids = [[self.word_dict[word] for word in item] for item in X_test]
return train_text_ids, test_text_ids, y_train, y_test
def get_batch(self, x, y):
assert len(x) == len(y) , "error shape!"
n_batches = int(len(x) / config.batch_size) # 统计共几个完整的batch
for i in range(n_batches - 1):
x_batch = x[i*config.batch_size: (i + 1)*config.batch_size]
y_batch = y[i*config.batch_size: (i + 1)*config.batch_size]
lengths = [len(seq) for seq in x_batch]
max_length = max(lengths)
for i in range(len(x_batch)):
x_batch[i] = x_batch[i] + [0 for j in range(max_length-len(x_batch[i]))]
yield x_batch, y_batch
if __name__ == '__main__':
data_processor = ModelDataProcessor()
X_train, X_test, y_train, y_test = data_processor.get_data_loader()
for x_batch, y_batch in data_processor.get_batch(X_train, y_train):
x_batch = Tensor(x_batch, mindspore.int32)
y_batch = Tensor(y_batch, mindspore.int32)
print(x_batch)
print(y_batch) | Xie-Minghui/DPCNN_MS0 | src/data_loader.py | data_loader.py | py | 3,040 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "src.config.config.vocab_file",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "src.config.config",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 42,
"usage_type": "call"
... |
26253976434 | import re
from bowler import Query
from fissix.pytree import Node, Leaf
from fissix.fixer_util import FromImport, Name, Comma, is_import
from bowler.types import Capture, Filename
def update_regex_to_path(regex: str) -> str:
match = re.findall(r"\(\?P<(\w+)>([^\)]+)\)", regex)
if match:
for name, exp in match:
converted = ""
if exp == r"\d+" or exp == "[0-9]+":
converted = f"<int:{name}>"
if converted:
regex = regex.replace(f"(?P<{name}>{exp})", converted)
regex = re.sub(r"[\^\$]", "", regex)
return regex
return re.sub(r"[\^\$]", "", regex)
def convert_regex_to_path_modifier(
node: Node, capture: Capture, filename: Filename
) -> None:
# Replace the import
if is_import(node):
name_leafs = [
Name("path", prefix=" "),
Comma(),
Name("re_path", prefix=" "),
]
node.replace([FromImport("django.url", name_leafs=name_leafs)])
# And function calls from url to path, re_path
if capture and "function_arguments" in capture:
function_node: Node = next(node.leaves())
args = capture.get("function_arguments")
regex_leaf: Leaf = next(args[0].leaves())
converted = update_regex_to_path(regex_leaf.value)
if converted == regex_leaf.value:
function_node.replace(Name("re_path", prefix=function_node.prefix))
else:
function_node.replace(Name("path", prefix=function_node.prefix))
regex_leaf.value = update_regex_to_path(regex_leaf.value)
def run(urls, interactive: bool = False) -> Query:
convert_to_path = (
Query(urls).select_function("url").modify(convert_regex_to_path_modifier)
)
return convert_to_path.diff(interactive=interactive)
| aalekseev/healthy-projects | src/django_patches/url_2_path/patch.py | patch.py | py | 1,835 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.findall",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fissix.pytree.Node",
"line_number": 24,
... |
42493210531 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 13:15:49 2018
@author: michal
"""
import networkx as nx
from copy import deepcopy
#import numpy as np
from solution import Solution
from random import sample
class PublicationMatcher:
def primitiveMaxPointsOfRest(self, publications):
allPointsOfRest = self.countMaxPublicationPoints(publications)
result = []
for p in publications:
allPointsOfRest -= self.publicationDict[p].points
result.append(allPointsOfRest)
return result
def maxPointsOfRestFromFlowTheory(self, publications, maxW):
result = []
for i in range(len(publications)):
result.append( self.maxPointsFromFlowTheory( publications[i:], maxW ) )
return result
def buildFlowGraph(self, publications):
flowG = nx.DiGraph()
flowG.add_node("s")
flowG.add_node("t")
pubs = publications
allAuthors = []
for p in pubs:
publication = self.publicationDict[p]
flowG.add_edge("s", p , capacity = publication.size, weight = - int(publication.points /publication.size) )
authors = list(self.pubGraph.neighbors(p))
allAuthors += authors
for a in authors:
flowG.add_edge(p, a)
allAuthors = list(set(allAuthors))
for a in allAuthors:
flowG.add_edge(a, "t", capacity = self.authorsDict[a].slots )
return flowG
def maxPointsFromFlowTheory(self, publications, maxW, returnDict =False):
W = int(100*maxW)
flowG = self.buildFlowGraph(publications)
maxFlow, flowDict = nx.maximum_flow(flowG, "s", "t")
if maxFlow < W:
W = maxFlow
flowG.nodes["s"]["demand"] = -W
flowG.nodes["t"]["demand"] = W
flowCost, flowDict = nx.network_simplex(flowG)
if returnDict:
data = { "maxPoints" : -flowCost/100, "maxSlots" : W/100, "flowGraph" : flowG, "flowDict" : flowDict}
return data
return -flowCost
def maxPointsIncludingSolution(self, solution, publications, maxW):
# W = int(100*maxW)
flowG = self.buildFlowGraph(publications)
p2a = solution.publication2authors
i = 0
for p in p2a:
flowG.remove_edge(p, p2a[p])
newSink = "s" + str(i)
newVent = "t" + str(i)
flowG.add_node( newVent, demand = self.publicationDict[p].size )
flowG.add_edge(p, newVent)
flowG.add_node( newSink, demand = -self.publicationDict[p].size )
flowG.add_edge( newSink, p2a[p])
i+=1
maxFlow, flowDict = nx.maximum_flow(flowG, "s", "t")
if maxFlow < maxW:
maxW = maxFlow
flowG.nodes["s"]["demand"] = -maxW
flowG.nodes["t"]["demand"] = maxW
flowCost, flowDict = nx.network_simplex(flowG)
return -flowCost
def getSortedPublicationByAuthor(self):
author2allPublications, author2pubNo = self.generateAuthor2Publications()
author2publications = self.generateSingleAuthor2PubDict()
publications = self.getAllPublicationsFromMainGraph()
pubOut = []
pubUsed = set()
for a in author2publications:
uniquePubs = author2publications[a]
pubOut += uniquePubs
pubUsed |= set(uniquePubs)
restPubs = author2allPublications[a]
restPubs = list( set(restPubs) - pubUsed)
pubOut += restPubs
pubUsed |= set(restPubs)
rest = list( set(publications) - pubUsed)
pubOut += rest
return pubOut
def getSortedPublicationByPoints(self):
publications = self.getAllPublicationsFromMainGraph()
sortedPubObjects = sorted( self.publicationList , key=lambda x: x.points, reverse=True)
outList = []
for p in sortedPubObjects:
# print( p.points)
if p.id in publications:
outList.append(p.id)
return outList
def branchAndBoundHeuristic(self, maxWeight, minimalPoints = 0, maxSolutionsNo = 20000, publications = [], maxPoints = []):
minimalPoints = int(round(minimalPoints*100))
if not publications:
publications = self.getAllPublicationsFromMainGraph()
maxPointsOfRest = maxPoints
if not maxPoints :
maxPoints = self.maxPointsOfRestFromFlowTheory(publications, maxWeight)
# print(maxPoints)
print("Maksymalne punkty z teori przeplywu - obliczone")
print(maxPoints)
maxWeight = int(round(maxWeight*100))
minSizePerWeight = int( maxSolutionsNo/maxWeight )
queue = [ Solution() ]
pubLen = len(publications)
progressFile = open("progress.log", 'w' )
progressFile.close()
inpossibleBranches = 0
toHeavyBranches = 0
toCheapBranches = 0
bestPointsForWeight = {}
for n, publication in enumerate(publications):
authors = list(self.pubGraph.neighbors(publication))
maxPointsOfRest = maxPoints[n]
newQueue = []
for solution in queue:
for author in authors:
newSolution = deepcopy(solution)
solutionPossible = newSolution.addConnection(self.authorsDict[ author], self.publicationDict[publication] )
if not solutionPossible:
inpossibleBranches += 1
continue
##
if newSolution.actualWeight > maxWeight:
toHeavyBranches += 1
continue
#
if newSolution.actualPoints + maxPointsOfRest < minimalPoints:
toCheapBranches += 1
continue
weight = newSolution.actualWeight
if weight in bestPointsForWeight:
if newSolution.actualPoints > bestPointsForWeight[weight]:
bestPointsForWeight[weight] = newSolution.actualPoints
else:
bestPointsForWeight[weight] = newSolution.actualPoints
points = newSolution.actualPoints
if len(queue) > 0.5*maxSolutionsNo:
if bestPointsForWeight[weight] * 0.9 > points:
continue
newQueue.append(deepcopy(newSolution))
if solution.actualPoints + maxPointsOfRest >= minimalPoints:
newQueue.append(deepcopy(solution))
else:
toCheapBranches += 1
queue = newQueue
if len(queue) > maxSolutionsNo:
newQueue = []
for solution in queue:
weight = solution.actualWeight
points = solution.actualPoints
if bestPointsForWeight[weight] * 0.9 < points:
newQueue.append(solution)
queue = newQueue
if len(newQueue) > maxSolutionsNo:
mass2solutions = {}
for solution in newQueue:
weight2dict = solution.actualWeight
if not weight2dict in mass2solutions:
mass2solutions[weight2dict] = [ solution ]
else:
mass2solutions[weight2dict].append(solution)
newQueue = []
for mass in mass2solutions:
if len(mass2solutions[mass]) <= minSizePerWeight:
newQueue += mass2solutions[mass]
else:
newQueue += sample( mass2solutions[mass], minSizePerWeight )
queue = newQueue
progressFile = open("progress.log", 'a' )
progressFile.write("#########################\n")
progressFile.write(str(float(n/pubLen)*100) + " % "+str(n)+"\n")
progressFile.write("in queue: " + str(len(queue))+"\n")
progressFile.write("impossible branches: "+ str(inpossibleBranches)+"\n")
progressFile.write("to heavy branches: "+ str(toHeavyBranches)+"\n")
progressFile.write("to cheap branches: "+ str(toCheapBranches)+"\n")
progressFile.close()
if not queue:
print("nic nie znaleziono!")
return
bestSolution = None
bestPoints = 0
lowestPoints = 10000
# print("wszystkie rozwiazania: ", len(queue))
for solution in queue:
if solution.actualPoints > bestPoints:
bestPoints = solution.actualPoints
bestSolution = solution
if solution.actualPoints < lowestPoints:
lowestPoints = solution.actualPoints
return bestSolution
def branchAndBound(self, maxWeight, minimalPoints = 0, publications = [], maxPoints = []):
minimalPoints = int(round(minimalPoints*100))
if not publications:
publications = self.getAllPublicationsFromMainGraph()
maxPointsOfRest = maxPoints
if not maxPoints :
maxPoints = self.maxPointsOfRestFromFlowTheory(publications, maxWeight)
# print(maxPoints)
print("Maksymalne punkty z teori przeplywu - obliczone")
print(maxPoints)
maxWeight = int(round(maxWeight*100))
queue = [ Solution() ]
pubLen = len(publications)
progressFile = open("progress.log", 'w' )
progressFile.close()
inpossibleBranches = 0
toHeavyBranches = 0
toCheapBranches = 0
for n, publication in enumerate(publications):
authors = list(self.pubGraph.neighbors(publication))
maxPointsOfRest = maxPoints[n]
newQueue = []
for solution in queue:
for author in authors:
newSolution = deepcopy(solution)
solutionPossible = newSolution.addConnection(self.authorsDict[ author], self.publicationDict[publication] )
if not solutionPossible:
inpossibleBranches += 1
continue
##
if newSolution.actualWeight > maxWeight:
toHeavyBranches += 1
continue
#
if newSolution.actualPoints + maxPointsOfRest < minimalPoints:
toCheapBranches += 1
continue
newQueue.append(deepcopy(newSolution))
if solution.actualPoints + maxPointsOfRest >= minimalPoints:
newQueue.append(deepcopy(solution))
else:
toCheapBranches += 1
queue = newQueue
progressFile = open("progress.log", 'a' )
progressFile.write("#########################\n")
progressFile.write(str(float(n/pubLen)*100) + " % "+str(n)+"\n")
progressFile.write("in queue: " + str(len(queue))+"\n")
progressFile.write("impossible branches: "+ str(inpossibleBranches)+"\n")
progressFile.write("to heavy branches: "+ str(toHeavyBranches)+"\n")
progressFile.write("to cheap branches: "+ str(toCheapBranches)+"\n")
progressFile.close()
if not queue:
print("nic nie znaleziono!")
return
bestSolution = None
bestPoints = 0
lowestPoints = 10000
# print("wszystkie rozwiazania: ", len(queue))
for solution in queue:
if solution.actualPoints > bestPoints:
bestPoints = solution.actualPoints
bestSolution = solution
if solution.actualPoints < lowestPoints:
lowestPoints = solution.actualPoints
return bestSolution
def countIdenticalElements( vector2test, vectorKnown):
count = 0
for el in vectorKnown:
if el in vector2test:
count +=1
return count
| chemiczny/pubMatch | pubMatch/publicationMatcher.py | publicationMatcher.py | py | 13,317 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "networkx.DiGraph",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "networkx.maximum_flow",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "networkx.network_simplex",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "solut... |
40189093783 | __author__ = 'eladron'
import folium
#variables
lat = 32.12830
long = 34.79269
loc = [lat,long]
zs = 18
tls = 'Stamen Terrain'
map_path = 'App2-Leaflet_Webmaps/map_test.html'
map = folium.Map(location=loc, zoom_start = zs)
map.simple_marker(location=loc, popup='My address' , marker_color='purple')
map.create_map(map_path)
| Elad73/PythonTutorials | python/Udemy/Mega_Course/App2-Leaflet_Webmaps/map.py | map.py | py | 334 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "folium.Map",
"line_number": 15,
"usage_type": "call"
}
] |
74150991549 | import numpy as np
import pygame as pyg
from math import cos, sin
from src.objects.point import Point
class Cube(Point):
def __init__(self, x: int, y: int, z: int, side:int, rotation: str = 'xyz', static: bool = False) -> None:
super().__init__(x, y, z, rotation, static)
self.center = self.vector
self.vertexes = [Point(
side*(1 if i in (1, 2, 5, 6) else 0) + x-side/2,
side*(1 if i in (2, 3, 6, 7) else 0) + y-side/2,
side*(1 if i in (4, 5, 6, 7) else 0) + z-side/2,
rotation, static, self.center
) for i in range(8)]
for j in (0, 2):
for i in (1, 3, 4+j):
self.vertexes[j].attachedPoints.append(self.vertexes[i])
for i in (1+j, 4, 6):
self.vertexes[j+5].attachedPoints.append(self.vertexes[i])
def update(self, angle: float) -> None:
for i in self.vertexes:
i.update(angle)
return super().update(angle)
def draw_ortho(self, screen: pyg.Surface, scale: int) -> None:
for i in self.vertexes:
i.draw_ortho(screen, scale)
return super().draw_ortho(screen, scale)
| FukuInTheCode/pythonMath | src/objects/cube.py | cube.py | py | 1,309 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "src.objects.point.Point",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "src.objects.point.Point",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 37,
"usage_type": "attribute"
}
] |
27937808825 | import logging
import time
import sys
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.service import Service as EdgeService
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from selenium.common.exceptions import TimeoutException
class WhatsappBot(object):
def __init__(self, config):
self.config = config
# set options as you wish
self.options = Options()
self.options.add_argument("--disable-infobars")
self.options.add_argument("start-maximized")
self.options.add_argument("--disable-extensions")
if self.config.user_dir_folder:
self.options.add_argument("--user-data-dir=" + self.config.user_dir_folder)
# setup Edge Driver
self.browser = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()), options=self.options)
def send_message(self, to, message=""):
# identify contact / group
name_argument = f"//span[contains(@title,'{to}')]"
title = self.wait.until(EC.presence_of_element_located((By.XPATH, name_argument)))
title.click()
# many a times class name or other HTML properties changes so keep a track of current class name for input box by using inspect elements
input_path = '//*[@id="main"]/footer//p[@class="selectable-text copyable-text"]'
box = self.wait.until(EC.presence_of_element_located((By.XPATH, input_path)))
# wait for security
time.sleep(1)
# send your message followed by an Enter
box.send_keys(message + Keys.ENTER)
# wait for security
time.sleep(2)
def get_back(self):
"""
Simulate a back action on browser.
"""
self.browser.back()
def login(self):
try:
self.browser.get("https://web.whatsapp.com/")
self.browser.maximize_window()
self.wait = WebDriverWait(driver=self.browser, timeout=900)
# wait 5s until leanding page displays
try :
landing = WebDriverWait(driver=self.browser, timeout=20).until(
EC.presence_of_element_located((By.XPATH, '//div[@class="landing-main"]'))
)
if landing:
print("Scan QR Code, And then Enter")
input()
print("Logged In")
except TimeoutException as e:
print("No need to authenticate !")
except Exception as e:
logging.info("There was some error while logging in.")
logging.info(sys.exc_info()[0])
exit()
def close_and_quit(self):
"""
Close current browser page and quit browser instance
"""
self.browser.close()
self.browser.quit()
| Zyniel/DansePlanningManager | src/app/whatsapp_bot.py | whatsapp_bot.py | py | 3,082 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.edge.options.Options",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Edge",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 29,
"usage_type": "name"
},
{
... |
40883369274 | import sys
from kubernetes import client, config
pods_templates = [
"authservice-",
"cluster-local-",
"istio-citadel-",
"istio-galley-",
"istio-ingressgateway-",
"istio-nodeagent-",
"istio-pilot-",
"istio-policy-",
"istio-security-post-install-",
"istio-sidecar-injector-",
"istio-telemetry-",
"kfserving-ingressgateway-",
"prometheus-",
"admission-webhook-deployment-",
"application-controller-stateful-set-",
"argo-ui-",
"centraldashboard-",
"jupyter-web-app-deployment-",
"katib-controller-",
"katib-db-manager-",
"katib-mysql-",
"katib-ui-",
"kfserving-controller-manager-",
"minio-",
"ml-pipeline-ml-pipeline-visualizationserver-",
"ml-pipeline-persistenceagent-",
"ml-pipeline-scheduledworkflow-",
"ml-pipeline-ui-",
"ml-pipeline-viewer-controller-deployment-",
"ml-pipeline-",
"mysql-",
"notebook-controller-deployment-",
"profiles-deployment-",
"pytorch-operator-",
"seldon-controller-manager-",
"spartakus-volunteer-",
"tf-job-operator-",
"workflow-controller-",
"dex-"
]
config.load_kube_config()
v1 = client.CoreV1Api()
pod_list = v1.list_namespaced_pod("istio-system")
pods = pod_list.items
pod_list = v1.list_namespaced_pod("kubeflow")
pods.extend(pod_list.items)
pod_list = v1.list_namespaced_pod("auth")
pods.extend(pod_list.items)
for pod in pods:
name = pod.metadata.name
status = pod.status.phase
if status == 'Succeeded' or (status == 'Running' and pod.status.container_statuses[0].ready):
for template in pods_templates:
if name.startswith(template):
pods_templates.remove(template)
break
sys.exit(len(pods_templates))
| dzhyrov/private-manifests-1.3 | private-manifests/utils/pods-validator.py | pods-validator.py | py | 1,763 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "kubernetes.config.load_kube_config",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "kubernetes.config",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "kubernetes.client.CoreV1Api",
"line_number": 47,
"usage_type": "call"
},
{
"... |
37123499778 | import nltk
from Model.Model import Model
from View.View import View
from docx import Document
from datetime import datetime
from classes.Document import MyDocument
import os
import string
import pymorphy2
from tkinter import filedialog
from tkinter import messagebox
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import math
import numpy as np
import re
import heapq
class Controller:
def __init__(self, root):
self.model = Model()
self.view = View(root,self)
def __punctuation(self, str):
punctuation = string.punctuation
translator = str.maketrans('', '', punctuation)
result = str.translate(translator)
characters_to_remove = ['"', '“', '”', '«', '»']
for char in characters_to_remove:
result = result.replace(char, '')
return result
def __get_synonyms(self,word):
morph = pymorphy2.MorphAnalyzer()
normal_form = morph.parse(word)[0].normal_form
synonyms = []
for synset in morph.parse(normal_form)[0].lexeme:
synonyms.append(synset.word)
return synonyms
def create_dictionary_by_documents(self):
dictionary = []
documents = self.model.get_documents()
for doc in documents:
dictionary+=self.__punctuation(doc.text.lower()).split()
dictionary = list(set(dictionary))
self.model.set_dictionary(dictionary)
def create_binary_vector_documents(self):
dictionary = self.model.get_dictionary()
docs = self.model.get_documents()
matrix_of_docs = []
for doc in docs:
vector_doc = []
for word in dictionary:
vector_doc.append(1 if word in doc.text else 0)
matrix_of_docs.append(vector_doc)
self.model.set_docs_vectors(matrix_of_docs)
def create_binary_vector_query(self, query):
query = self.__punctuation(query).lower()
query = query.split()
query_termins_synonyms = []
for word in query:
query_termins_synonyms+= list(set(self.__get_synonyms(word)))
dictionary = self.model.get_dictionary()
vector_binary_query = []
for word in dictionary:
vector_binary_query.append(1 if word in query_termins_synonyms else 0)
self.model.set_query_vector(vector_binary_query)
def calculate_similar(self):
matrix_docs = self.model.get_docs_vectors()
query_vector = np.array(self.model.get_query_vector())
e_query_vector = np.linalg.norm(query_vector)
similar = {}
id = 0
for vector in matrix_docs:
vec = np.array(vector)
e_vec = np.linalg.norm(vec)
if (e_vec * e_query_vector) != 0:
query_equals_doc = (np.dot(vec, query_vector))/(e_vec * e_query_vector)
similar[id]=query_equals_doc
id+=1
else:
query_equals_doc = "Nan"
similar[id] = query_equals_doc
id += 1
sorted_similar = {k: v for k, v in sorted(similar.items(),reverse=True, key=lambda item: item[1])}
self.model.set_result_similar(sorted_similar)
def open_word_file(self):
documents = []
file_path = filedialog.askopenfilenames(filetypes=[("Word Files", "*.docx")])
if file_path:
for path in file_path:
doc = Document(path)
doc_name = os.path.basename(path)
doc_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])
doc_created_date = datetime.fromtimestamp(os.path.getctime(path)).strftime('%H:%M - %d.%m.%Y').split(
"-")
document = MyDocument(doc_name, path, doc_content, doc_created_date[1], doc_created_date[0])
documents.append(document)
self.model.set_documents(documents)
self.update_log("Files uploaded")
def update_log(self, message):
self.view.log_text.config(state=tk.NORMAL) # Делаем текстовое поле активным
self.view.log_text.insert(tk.END, message + "\n") # Добавляем запись
self.view.log_text.config(state=tk.DISABLED) # Делаем текстовое поле неактивным
self.view.log_text.see(tk.END)
def check_is_nan(self, similar):
for key, value in similar.items():
if value == "Nan":
self.update_log("Совпадения не найдены.")
return False
else: return True
def start(self):
if not self.model.get_documents():
messagebox.showinfo("Ошибка", "Вы не загрузили документы")
return 0
if not self.view.query_entry.get():
messagebox.showinfo("Ошибка", "Введите языковой запрос")
return 0
self.create_dictionary_by_documents()
self.create_binary_vector_documents()
self.create_binary_vector_query(self.view.query_entry.get())
self.calculate_similar()
if not self.check_is_nan(self.model.get_result_similar()):
return 0
docs_id = list(self.model.get_result_similar().keys())
self.update_log("Наиболее подходящие документы:")
for id in range(len(docs_id)):
self.update_log(f"{id+1}. "+self.model.get_document_by_id(docs_id[id]).title + f": {self.model.get_result_similar()[docs_id[id]]}")
self.view.show_open_files_button()
def generate_annotation(self):
path = f"../docs/"
article_text = ""
selected_index = self.view.listbox.curselection()
if selected_index:
selected_file = self.view.listbox.get(selected_index[0])
file_path = os.path.join(path, selected_file)
print(file_path)
try:
if file_path.endswith('.docx'):
doc = Document(file_path)
for paragraph in doc.paragraphs:
article_text += paragraph.text + '\n'
elif file_path.endswith('.txt'):
with open(file_path, 'r', encoding='utf-8') as file:
article_text = file.read()
else:
print("Неподдерживаемый формат файла.")
except Exception as e:
print(f"Произошла ошибка при чтении файла: {e}")
print(article_text)
article_text = re.sub(r'\[[0-9]*\]', ' ', article_text)
article_text = re.sub(r'\s+', ' ', article_text)
formatted_article_text = re.sub('[^а-яА-Я]', ' ', article_text)
formatted_article_text = re.sub(r'\s+', ' ', formatted_article_text)
sentence_list = nltk.sent_tokenize(article_text)
stopwords = nltk.corpus.stopwords.words('russian')
word_frequencies = {}
for word in nltk.word_tokenize(formatted_article_text):
if word not in stopwords:
if word not in word_frequencies.keys():
word_frequencies[word] = 1
else:
word_frequencies[word] += 1
print(word_frequencies.values())
maximum_frequency = max(word_frequencies.values())
for word in word_frequencies.keys():
word_frequencies[word] = (word_frequencies[word] / maximum_frequency)
sentence_scores = {}
for sent in sentence_list:
for word in nltk.word_tokenize(sent.lower()):
if word in word_frequencies.keys():
if len(sent.split(' ')) < 30:
if sent not in sentence_scores.keys():
sentence_scores[sent] = word_frequencies[word]
else:
sentence_scores[sent] += word_frequencies[word]
summary_sentences = heapq.nlargest(3, sentence_scores, key=sentence_scores.get)
summary = ' '.join(summary_sentences)
self.update_log(f"\n{selected_file}: {summary}")
def update_file_list(self):
docs_id = list(self.model.get_result_similar().keys())
self.view.listbox.delete(0, tk.END)
for id in range(len(docs_id)):
self.view.listbox.insert(tk.END, self.model.get_document_by_id(docs_id[id]).title)
def open_new_files(self):
path = f"../docs/"
selected_index = self.view.listbox.curselection()
if selected_index:
selected_file = self.view.listbox.get(selected_index[0])
os.startfile(path+selected_file)
def recall_metric(self, a, c): # and average precision
return a/(a+c) #r
def precision_metric(self, a, b):
return a/(a+b) # p
def accuracy_metric(self, a, b, c, d):
return (a+d)/(a+b+c+d)
def error_metric(self, a, b, c, d):
return (b+c)/(a+b+c+d)
def f_measure_metric(self,r, p):
return 2/((1/p)+(1/r))
def precision_n_metric(self,a):
return a/3
def r_precision_metric(self, a):
return 2/a
def grafik(self):
recall = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 1.0])
p = []
for el in recall:
if el > 0.5:
p.append(0)
else: p.append(1)
p = np.array(p)
# Сортируем оценки уверенности в порядке убывания
sorted_indices = np.argsort(recall)[::-1]
p_sorted = p[sorted_indices]
# Инициализируем списки для хранения точности и полноты на 11 уровнях
precision_at_recall = []
recall_levels = np.linspace(0, 1, 11) # 11 равномерно распределенных уровней полноты от 0 до 1
# Вычисляем точность на каждом уровне полноты
for recall_level in recall_levels:
cutoff = int(recall_level * len(p_sorted))
y_true_cutoff = p_sorted[:cutoff]
precision = np.sum(y_true_cutoff) / (
cutoff + 1e-9) # Добавляем маленькое значение для избегания деления на ноль
precision_at_recall.append(precision)
# Интерполируем значения
interpolated_precision = np.maximum.accumulate(precision_at_recall[::-1])[::-1]
# Создаем фигуру matplotlib
fig = Figure(figsize=(8, 6))
ax = fig.add_subplot(111)
# Построение кривой полноты/точности с точками и интерполированными значениями
ax.step(recall_levels, precision_at_recall, marker='o', label='Точки')
ax.plot(recall_levels, interpolated_precision, linestyle='--', label='Интерполированная линия')
ax.set_xlabel('Полнота (Recall)')
ax.set_ylabel('Точность (Precision)')
ax.set_title('Кривая полноты/точности с интерполированными значениями')
ax.grid(True)
ax.legend()
canvas = FigureCanvasTkAgg(fig, master=self.view.metrics_window)
canvas_widget = canvas.get_tk_widget()
canvas_widget.pack()
def calculate_metrics(self):
amount_relevant_docs = len(self.model.get_relevant_documents()) # a
amount_irrelevant_docs = len(self.model.get_irrelevant_documents()) # b
amount_bad_relevant_docs = len(self.model.get_bad_relevant_documents()) # d
not_finded_docs = 0 # c
reccal = self.recall_metric(amount_relevant_docs,not_finded_docs)
precision = self.precision_metric(amount_relevant_docs, amount_irrelevant_docs)
accuracy = self.accuracy_metric(amount_relevant_docs,amount_irrelevant_docs, not_finded_docs, amount_bad_relevant_docs)
error = self.error_metric(amount_relevant_docs,amount_irrelevant_docs, not_finded_docs, amount_bad_relevant_docs)
f_measure = self.f_measure_metric(reccal, precision)
precision_n = self.precision_n_metric(amount_relevant_docs)
r_precision = self.r_precision_metric(amount_relevant_docs)
txt = f"Recall: {reccal} \n" \
f"Precision: {precision}\n" \
f"Average precision: {reccal}\n" \
f"Accuracy: {accuracy}\n" \
f"F-measure: {f_measure}\n" \
f"Precision by n: {[precision_n]}\n" \
f"R-precision: {r_precision}\n"
self.view.label_metrics.config(text=txt)
def calculate_idfs(self):
# Создайте словарь для хранения числа документов, содержащих каждый термин
term_document_count = {}
documents = self.model.get_documents
total_documents = len(documents)
termins = []
for doc in documents:
unique_terms = set(doc.text.split())
for term in unique_terms:
termins.append(term)
for doc in documents:
for term in termins:
if term in set(doc.text.split()):
term_document_count[term] = term_document_count.get(term, 0) + 1
idf_values = {}
for term, doc_count in term_document_count.items():
idf = math.log(total_documents / (doc_count + 1)) # Добавляем 1, чтобы избежать деления на 0
idf_values[term] = idf
self.model.set_IDFS(idf_values)
def calculated_weight_termins_and_L_vector_in_documents(self):
documents = self.model.get_documents()
IDFS = self.model.get_IDFS()
WTDS = []
L_vector = []
if not IDFS:
return False
for doc in documents:
term_document_count = {}
Li = []
for key in IDFS:
term_document_count[key] = doc.text.count(key) * IDFS[key]
if key in doc.text:
Li.append(1)
else:
Li.append(0)
L_vector.append(Li)
WTDS.append(term_document_count)
self.model.set_L_vector(L_vector)
self.model.set_WTDS(WTDS)
def search_query_transformation(self, user_query):
user_termins = set(user_query.split())
IDFS = self.model.get_IDFS()
query_vector = []
for termin in user_termins:
if termin in IDFS:
value = IDFS[termin] * user_query.count(termin)
query_vector.append(value)
self.model.set_query_vector(query_vector)
| F1linnn/info-search-system | Controller/Controller.py | Controller.py | py | 14,983 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Model.Model.Model",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "View.View.View",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pymorphy2.Mo... |
39434532575 | from collections import Counter
import zarr
from fastai.tabular.all import *
from fastai.data.all import *
from fastai.vision.gan import *
from fastai import *
from tsai.all import *
from torch import nn
import numpy as np
import seaborn as sns
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import torch.nn.functional as F
from model import stagerNetAAE, stagerNetCritic
from utils import LossAttrMetric, GetLatentSpace, norm_batch, UnfreezeFcCrit, \
SwitchAttribute, distrib_regul_regression, hist_lab, plot_results
# Load the config file
config_file = 'config.json'
with open(config_file, 'r') as file:
config = json.load(file)
# Set the device on which you want to train the model
device = torch.device(config['device'])
torch.cuda.set_device(device)
lab_area = torch.Tensor(np.load(f'{config["labels_path"]}/area_db.npy'))[:,None]
lab_arousal = torch.Tensor(np.load(f'{config["labels_path"]}/arousal_db.npy'))[:,None]
lab_duration = torch.Tensor(np.load(f'{config["labels_path"]}/duration_db.npy'))[:,None]
# Define the labels
# 1) discrete labels
lab_area = torch.Tensor(np.load(f'{config["labels_path"]}/area_db.npy'))[:,None]
lab_arousal = torch.Tensor(np.load(f'{config["labels_path"]}/arousal_db.npy'))[:,None]
lab_duration = torch.Tensor(np.load(f'{config["labels_path"]}/duration_db.npy'))[:,None]
lab_all = torch.Tensor(4*lab_area + 2*lab_arousal + lab_duration)
lab_discrete = torch.hstack((lab_area,lab_duration,lab_arousal))
# 2) switch to match the desired encoding
tmp = copy(lab_all)
lab_all[tmp==3] = 4
lab_all[tmp==4] = 3
# 3) 3-level labels ("low", "medium", "high")
lab3 = deepcopy(lab_all)
lab3[:] = 0
lab3[lab_all>1] = 1
lab3[lab_all>5] = 2
# 4) 4-level labels ("all metrics at low level", "1 metrics at high level", "2 metrics at high level", "all metrics at high level")
lab4 = deepcopy(lab_all)
lab4[lab_all>0] = 1
lab4[lab_all>3] = 2
lab4[lab_all==7] = 3
# 5) normalize the label values
lab_norm_area = torch.Tensor(np.load(f'{config["labels_path"]}/norm_area_db.npy')).unsqueeze(-1)
lab_norm_duration = torch.Tensor(np.load(f'{config["labels_path"]}/norm_duration_db.npy')).unsqueeze(-1)
lab_norm = torch.hstack((lab_norm_area,lab_norm_duration,lab_arousal))
#normalize the binary arousal value with respect to the std of area and duration labels
lab_arousal_tmp = torch.Tensor([-1 if x==0 else 1 for x in lab_arousal]).unsqueeze(-1)
lab_norm_arousal = lab_arousal_tmp * (lab_norm_area.std() + lab_norm_duration.std()) / 2
lab_gather = torch.hstack((lab_norm_area,lab_norm_duration,lab_norm_arousal))
lab_gather = lab_gather.mean(dim=1).unsqueeze(-1) # mean of all metrics
# 6) Gather all the labels in a list in right order
label_stack = torch.hstack((lab_gather, lab_area, lab_duration, lab_arousal, lab3, lab4))
# Define dls
if config['load_dls']:
dls = torch.load(config['dls_path']) # should be a .pkl file
else:
# Read your data (.zarr file)
path = Path(config['data_path'])
X = zarr.open(path, mode='r')
t = torch.Tensor(X)
print('data properly read')
# Define splitter
n_train_samples = round(len(t)*config['trainset_part'])
n_total_samples = len(t)
splits = (L(range(n_train_samples), use_list=True),
L(np.arange(n_train_samples, n_total_samples), use_list=True))
splitter = IndexSplitter(splits[1])
getters = [ItemGetter(0), ItemGetter(1)]
dblock = DataBlock(blocks=(TSTensorBlock,TSTensorBlock),
getters=getters,
splitter=splitter,
batch_tfms=norm_batch())
src = itemify(t.to('cpu'),label_stack.to('cpu'))
dls = dblock.dataloaders(src, bs=config['bs'], val_bs=config['val_bs'], drop_last=True)
torch.save(dls, config['dls_path'])
# free memory space
del X
time.sleep(.2)
torch.cuda.empty_cache()
print('memory flushed')
dls = dls.to(device)
print('dls:')
print(dls.one_batch())
### Train the AutoEncoder part ###
acc_factor = config['acc_factor']
latent_dim = config['latent_dim']
model = stagerNetAAE(latent_dim=latent_dim,acc_factor=acc_factor)
model = model.to(device)
if config['train_ae']:
metrics = [rmse]
learn = Learner(dls, model, loss_func = model.ae_loss_func, metrics=metrics, opt_func=ranger)
learning_rate = learn.lr_find()
learn.fit_flat_cos(n_epoch=config['n_epoch'], lr=learning_rate.valley,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(),
SaveModelCallback(fname=config['ae_filename']),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'])])
state_dict = torch.load(f'models/{config["ae_filename"]}.pth') # load the best weights
### Train the Classifier part ###
classif_filename = config['classif_filename']
model.load_state_dict(state_dict, strict=False)
#define the metrics to show
metrics = [LossAttrMetric("gather_loss"), LossAttrMetric("simple_loss"),
LossAttrMetric("area_loss"), LossAttrMetric("duration_loss"),
LossAttrMetric("arousal_loss"), LossAttrMetric("ord_loss")]
#freeze the discriminator weights
for name, param in model.named_parameters():
if "fc_crit" in name:
param.requires_grad_(False)
if config['train_classif_discrete']:
#define the losses to montitor
monitor_loss = ['area_loss','duration_loss','arousal_loss']
#set the learning rates
learning_rates = [1e-3,5e-4,2e-4]
# Start curriculum learning
total_cycles = config['nb_of_metrics']
for i in range(total_cycles):
curr_filename = str(classif_filename)+'_level'+str(i+1)
model.level = i+1
met = metrics[1:i+3] + metrics[-1:]
learn = Learner(dls, model, loss_func=model.classif_loss_func,
metrics=met, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=learning_rates[i],
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor=monitor_loss[i]),
SaveModelCallback(fname=curr_filename,monitor=monitor_loss[i]),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor=monitor_loss[i]),
SwitchAttribute(attribute_name='global_loss', switch_every=5)
])
learn.load(curr_filename)
model.load_state_dict(learn.model.state_dict())
state_dict = torch.load(f'models/{classif_filename}_level3.pth') # load the best weights
model.load_state_dict(state_dict, strict=False)
if config['train_regress']:
model.level = 0
model.dropout_rate = .1
learn = Learner(dls, model, loss_func=model.classif_loss_func,
metrics=metrics, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=1e-3,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor='gather_loss'),
SaveModelCallback(fname=classif_filename, monitor='gather_loss'),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor='gather_loss'),
SwitchAttribute(attribute_name='global_loss', switch_every=5)])
np.save('results/'+str(classif_filename)+'_losses.npy', learn.recorder.losses)
np.save('results/'+str(classif_filename)+'_values.npy', learn.recorder.values)
state_dict = torch.load(f'models/{config["classif_filename"]}.pth') # load the best weights
### Train the Adversarial part ###
model.load_state_dict(state_dict, strict=False)
adv_filename = config['aae_filename']
if config['train_aae']:
metrics = [LossAttrMetric("classif_loss"), LossAttrMetric("recons_loss"),
LossAttrMetric("adv_loss")]
learn = Learner(dls, model, loss_func=model.aae_loss_func,
metrics=metrics, opt_func=ranger)
learn.fit_flat_cos(config['n_epoch'], lr=1e-3,
cbs=[
GradientAccumulation(n_acc=dls.bs*acc_factor),
TrackerCallback(monitor='classif_loss'),
SaveModelCallback(fname=adv_filename, monitor='classif_loss'),
EarlyStoppingCallback(min_delta=1e-4,patience=config['patience'],monitor='classif_loss'),
UnfreezeFcCrit(switch_every=2),
SwitchAttribute(attribute_name='global_loss', switch_every=5)])
state_dict = torch.load(f'models/{adv_filename}.pth') # load the best weights
### Extract the latent space ###
result_filename = config['result_filename']
model.load_state_dict(state_dict, strict=False)
learn = Learner(dls,model,loss_func=model.aae_loss_func)
if config['load_latent_space']:
new_zi = torch.load(f'data/z_{result_filename}.pt')
print(f'latent space loaded with shape {new_zi.shape}')
else:
learn.zi_valid = torch.tensor([]).to(device)
learn.get_preds(ds_idx=0,cbs=[GetLatentSpace(cycle_len=1)])
new_zi = learn.zi_valid
learn.zi_valid = torch.tensor([]).to(device)
learn.get_preds(ds_idx=1,cbs=[GetLatentSpace(cycle_len=1)])
new_zi = torch.vstack((new_zi,learn.zi_valid))
print("new_zi shape: "+str(new_zi.shape))
torch.save(new_zi,f'data/z_{result_filename}.pt')
### Display the latent space ###
plot_results(new_zi.to(device),lab_gather,learn,result_filename) | numediart/xAAEnet | main.py | main.py | py | 9,638 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda.set_device",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
... |
41550408574 | import time
from enum import IntEnum
from .. util import log
from .. project import attributes, load
DEFAULT_FPS = 24
class STATE(IntEnum):
ready = 0
running = 1
complete = 2
canceled = 3
max_steps = 4
timeout = 5
class Runner(object):
def __init__(self, *, amt=1, fps=0, sleep_time=0, max_steps=0,
until_complete=False, max_cycles=0, seconds=None,
threaded=False, main=None, flat_out=False,
repeats=None, **kwds):
attributes.check(kwds, 'run')
if max_steps < 0:
log.error('max_steps %s < 0', max_steps)
max_steps = 0
if sleep_time < 0:
log.error('sleep_time %s < 0', sleep_time)
sleep_time = 0
if max_cycles < 0:
log.error('max_cycles %s < 0', max_cycles)
max_cycles = 0
if fps < 0:
log.error('fps %s < 0', fps)
fps = 0
if repeats and repeats < 0:
log.error('repeats %s < 0', repeats)
repeats = None
if sleep_time and fps:
log.error('sleep_time=%s and fps=%s cannot both be set',
sleep_time, fps)
sleep_time = 0
if seconds and max_steps:
log.error('seconds=%s and max_steps=%s cannot both be set',
seconds, max_steps)
max_steps = 0
self.amt = amt
if fps:
self.sleep_time = 1 / fps
elif sleep_time:
self.sleep_time = sleep_time
else:
self.sleep_time = 1 / DEFAULT_FPS
self.until_complete = until_complete
self.seconds = seconds
self.run_start_time = 0
self.max_steps = max_steps
self.max_cycles = max_cycles
self.seconds = seconds
self.threaded = threaded
self.flat_out = flat_out
self.main = load.code(main)
if repeats is not None:
self.until_complete = True
self.max_cycles = repeats
self.repeats = repeats
self.time = time.time
def set_project(self, project):
if self.flat_out:
project.flat_out()
self.time = project.clock.time
@property
def fps(self):
return 1 / self.sleep_time
@fps.setter
def fps(self, fps):
self.sleep_time = 1 / fps
def compute_state(self, cur_step, state):
if self.seconds:
elapsed = self.time() - self.run_start_time
if elapsed >= self.seconds:
return STATE.timeout
elif self.max_steps:
if cur_step >= self.max_steps:
return STATE.max_steps
elif not self.until_complete:
if state == STATE.complete:
# Ignore STATE.complete if until_complete is False
return STATE.running
return state
| ManiacalLabs/BiblioPixel | bibliopixel/animation/runner.py | runner.py | py | 2,884 | python | en | code | 263 | github-code | 6 | [
{
"api_name": "enum.IntEnum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "project.attributes.check",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "project.attributes",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "util.log.erro... |
17634455157 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from re import I
from flask import Flask
from flask import request
import chromadb
from chromadb.config import Settings
app = Flask(__name__)
client = chromadb.Client(Settings(chroma_api_impl='rest',
chroma_server_host='localhost',
chroma_server_http_port=8000))
@app.route('/collections', methods=['GET', 'POST','DELETE'])
def create_or_get_collections():
collection_name = request.args.get('name')
collection = client.create_collection(collection_name,
get_or_create=True)
if request.method == 'DELETE':
client.delete_collection(collection_name)
return
return dict(collection)
@app.route('/collections/<string:collection_name>', methods=['GET',
'POST'])
def add_or_query_collection(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
if request.method == 'POST':
request_data = request.get_json()
collection_documents = request_data['documents']
collection_ids = request_data['ids']
collection.add(documents=collection_documents,
ids=collection_ids)
return 'Documents successfully added to collection'
else:
query = request.args.get('query')
result = collection.query(query_texts=query, n_results=1)
return result['documents'][0][0]
@app.route('/collections/<string:collection_name>/all', methods='GET')
def get_collection(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
total_count = collection.count()
return dict(collection.peek(limit=total_count))
@app.route('/collections/<string:collection_name>', methods=['GET','DELETE'])
def delete_document(collection_name):
collection = client.create_collection(collection_name,
get_or_create=True)
ids = request.args.get('ids')
if request.method == 'GET':
return dict(collection.get(ids=ids))
else:
collection.delete(ids=ids)
if __name__ == '__main__':
app.run(host='192.168.144.129')
| aravindcz/mygpt-chromadbwrapper | controller/controller.py | controller.py | py | 2,166 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "chromadb.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "chromadb.config.Settings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.arg... |
2116138484 | """
Tests for QCFractals CLI
"""
import os
import time
import tempfile
import pytest
from qcfractal import testing
from qcfractal.cli.cli_utils import read_config_file
import yaml
# def _run_tests()
_options = {"coverage": True, "dump_stdout": True}
_pwd = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def qcfractal_base_init(postgres_server):
tmpdir = tempfile.TemporaryDirectory()
args = [
"qcfractal-server", "init", "--base-folder",
str(tmpdir.name), "--db-own=False", "--clear-database",
f"--db-port={postgres_server.config.database.port}"
]
assert testing.run_process(args, **_options)
yield f"--base-folder={tmpdir.name}"
@testing.mark_slow
def test_cli_server_boot(qcfractal_base_init):
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", qcfractal_base_init, port]
assert testing.run_process(args, interupt_after=10, **_options)
@testing.mark_slow
def test_cli_upgrade(qcfractal_base_init):
args = ["qcfractal-server", "upgrade", qcfractal_base_init]
assert testing.run_process(args, interupt_after=10, **_options)
@pytest.mark.skip(reason="Failing on Travis for unknown reasons.")
@testing.mark_slow
def test_cli_server_local_boot(qcfractal_base_init):
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", "--local-manager=1", port, qcfractal_base_init]
assert testing.run_process(args, interupt_after=10, **_options)
@pytest.fixture(scope="module")
def active_server(request, qcfractal_base_init):
port = str(testing.find_open_port())
args = ["qcfractal-server", "start", qcfractal_base_init, f"--port={port}"]
assert testing.run_process(args, interupt_after=10, **_options)
with testing.popen(args, **_options) as server:
time.sleep(2)
server.test_uri_cli = "--fractal-uri=localhost:" + port
yield server
@testing.mark_slow
@pytest.mark.parametrize("log_apis", [0, 1])
def test_with_api_logging(postgres_server, log_apis):
tmpdir = tempfile.TemporaryDirectory()
args = [
"qcfractal-server", "init", "--base-folder",
str(tmpdir.name), "--db-own=False", "--clear-database",
f"--db-port={postgres_server.config.database.port}",
f"--log-apis={log_apis}"
]
assert testing.run_process(args, **_options)
port = "--port=" + str(testing.find_open_port())
args = ["qcfractal-server", "start", f"--base-folder={tmpdir.name}", port]
assert testing.run_process(args, interupt_after=10, **_options)
@testing.mark_slow
def test_manager_local_testing_process():
assert testing.run_process(["qcfractal-manager", "--adapter=pool", "--test", "--tasks-per-worker=2"], **_options)
@testing.mark_slow
def test_manager_executor_manager_boot(active_server):
args = [
"qcfractal-manager", active_server.test_uri_cli, "--adapter=pool", "--tasks-per-worker=2", "--verify=False"
]
assert testing.run_process(args, interupt_after=7, **_options)
@testing.mark_slow
def test_manager_executor_manager_boot_from_file(active_server, tmp_path):
yaml_file = """
common:
adapter: pool
tasks_per_worker: 4
cores_per_worker: 4
server:
fractal_uri: {}
verify: False
""".format(active_server.test_uri_cli.split("=")[1])
p = tmp_path / "config.yaml"
p.write_text(yaml_file)
args = ["qcfractal-manager", "--config-file={}".format(p)]
assert testing.run_process(args, interupt_after=7, **_options)
def cli_manager_runs(config_data, tmp_path):
temp_config = tmp_path / "temp_config.yaml"
temp_config.write_text(yaml.dump(config_data))
args = ["qcfractal-manager", f"--config-file={temp_config}", "--test"]
assert testing.run_process(args, **_options)
def load_manager_config(adapter, scheduler):
config = read_config_file(os.path.join(_pwd, "manager_boot_template.yaml"))
config["common"]["adapter"] = adapter
config["cluster"]["scheduler"] = scheduler
return config
@testing.mark_slow
@pytest.mark.parametrize(
"adapter,scheduler",
[
("pool", "slurm"),
pytest.param("dask", "slurm", marks=testing.using_dask_jobqueue),
pytest.param("dask", "PBS", marks=testing.using_dask_jobqueue),
pytest.param("dask", "MoAb", marks=testing.using_dask_jobqueue),
pytest.param("dask", "SGE", marks=testing.using_dask_jobqueue),
pytest.param("dask", "lSf", marks=testing.using_dask_jobqueue),
pytest.param("parsl", "slurm", marks=testing.using_parsl),
pytest.param("parsl", "PBS", marks=testing.using_parsl),
pytest.param("parsl", "MoAb", marks=testing.using_parsl),
pytest.param("parsl", "SGE", marks=testing.using_parsl),
pytest.param("parsl", "lSf", marks=[testing.using_parsl, pytest.mark.xfail]), # Invalid combination
pytest.param("NotAParser", "slurm", marks=pytest.mark.xfail), # Invalid Parser
pytest.param("pool", "NotAScheduler", marks=pytest.mark.xfail), # Invalid Scheduler
])
def test_cli_managers(adapter, scheduler, tmp_path):
"""Test that multiple adapter/scheduler combinations at least can boot up in Managers"""
config = load_manager_config(adapter, scheduler)
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@testing.using_parsl
def test_cli_manager_parsl_launchers(tmp_path):
config = load_manager_config("parsl", "slurm")
config["parsl"]["provider"].update({"launcher": {"launcher_class": "singleNODELauncher"}})
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@pytest.mark.parametrize("adapter", [
pytest.param("dask", marks=testing.using_dask_jobqueue),
pytest.param("parsl", marks=testing.using_parsl),
])
def test_cli_managers_missing(adapter, tmp_path):
"""Test that the manager block missing correctly sets defaults"""
config = load_manager_config(adapter, "slurm")
config.pop(adapter, None)
cli_manager_runs(config, tmp_path)
@testing.mark_slow
@pytest.mark.parametrize("adapter", [
pytest.param("dask", marks=testing.using_dask_jobqueue),
pytest.param("parsl", marks=testing.using_parsl),
])
def test_cli_managers_none(adapter, tmp_path):
"""Test that manager block set to None correctly assigns the defaults"""
config = load_manager_config(adapter, "slurm")
config[adapter] = None
cli_manager_runs(config, tmp_path)
def test_cli_managers_help():
"""Test that qcfractal_manager --help works"""
args = ["qcfractal-manager", "--help"]
testing.run_process(args, **_options)
def test_cli_managers_schema():
"""Test that qcfractal_manager --schema works"""
args = ["qcfractal-manager", "--schema"]
testing.run_process(args, **_options)
| yudongqiu/QCFractal | qcfractal/cli/tests/test_cli.py | test_cli.py | py | 6,785 | python | en | code | null | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirect... |
15251411062 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nearsight', '0003_auto_20170718_1326'),
]
operations = [
migrations.AlterField(
model_name='layer',
name='layer_uid',
field=models.CharField(default='Unknown', max_length=100),
),
]
| venicegeo/nearsight | nearsight/migrations/0004_auto_20170718_1327.py | 0004_auto_20170718_1327.py | py | 426 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
72064206267 | # -*- coding: utf-8 -*-
# @Time : 2022/7/24 15:46
# @Author : 4v1d
# @File : 中国招标网.py
# @Software: PyCharm
import httpx
url = 'https://www.baidu.com'
res = httpx.get(url)
print(res.text) | daweiTech/Spider | 爬虫/01-网络爬虫通讯原理/demo1.py | demo1.py | py | 217 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "httpx.get",
"line_number": 11,
"usage_type": "call"
}
] |
4970677598 | # Importing Modules
import matplotlib.pyplot as plt
#%matplotlib inline
# Graph Rev 7
x_values = range(1, 1001)
y_values = [x**2 for x in x_values]
plt.style.use('seaborn')
#fig, ax = plt.subplots()
fig, ax = plt.subplots(figsize=(5,3))
# Using Colormap
# Colormap references:
ax.scatter(x_values, y_values, c = y_values, cmap = plt.cm.plasma, s = 10)
# Setting titles and axes names
ax.set_title('Square Numbers', fontsize = 15)
ax.set_xlabel('Value', fontsize = 10)
ax.set_ylabel('Square of Values', fontsize = 10)
# Set size of the ticks labels
ax.tick_params(axis='both', which='major', labelsize = 10)
# Set the range for each axis
ax.axis([0, 1100, 0, 1100000])
plt.show()
fig.savefig('../../outputs/generating data/scatter_squares/scatter_output7.png', bbox_inches = 'tight') | RaulMaya/Data-Visualization | python_programs/generating data/scatter_squares.py | scatter_squares.py | py | 791 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
33093309616 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Decoder(nn.Module):
''' This class contains the implementation of Decoder Module.
Args:
embedding_dim: A integer indicating the embedding size.
output_dim: A integer indicating the size of output dimension.
hidden_dim: A integer indicating the hidden size of rnn.
n_layers: A integer indicating the number of layers in rnn.
dropout: A float indicating the dropout.
'''
def __init__(self, embedding_dim, output_dim, hidden_dim, n_layers, dropout):
super().__init__()
self.embedding_dim = embedding_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(output_dim, embedding_dim)
self.rnn = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first = False).to(device)
self.linear = nn.Linear(hidden_dim, output_dim).to(device)
self.dropout = nn.Dropout(dropout).to(device)
def forward(self, input, hidden, cell):
# input is of shape [batch_size]
# hidden is of shape [n_layer * num_directions, batch_size, hidden_size]
# cell is of shape [n_layer * num_directions, batch_size, hidden_size]
input = input.unsqueeze(0)
# input shape is [1, batch_size]. reshape is needed rnn expects a rank 3 tensors as input.
# so reshaping to [1, batch_size] means a batch of batch_size each containing 1 index.
embedded = self.embedding(input)
embedded = self.dropout(embedded)
# embedded is of shape [1, batch_size, embedding_dim]
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
# generally output shape is [sequence_len, batch_size, hidden_dim * num_directions]
# generally hidden shape is [num_layers * num_directions, batch_size, hidden_dim]
# generally cell shape is [num_layers * num_directions, batch_size, hidden_dim]
# sequence_len and num_directions will always be 1 in the decoder.
# output shape is [1, batch_size, hidden_dim]
# hidden shape is [num_layers, batch_size, hidden_dim]
# cell shape is [num_layers, batch_size, hidden_dim]
predicted = F.log_softmax(self.linear(output), dim = 2) # linear expects as rank 2 tensor as input
# predicted shape is [batch_size, output_dim]
return predicted, hidden, cell
class AttnDecoder(nn.Module):
def __init__(self, embedding_dim, output_dim, hidden_dim, n_layers, dropout, max_length):
super(AttnDecoder, self).__init__()
self.hidden_size = hidden_dim
self.output_dim = output_dim
self.embedding = nn.Embedding(output_dim, embedding_dim)
self.num_layers = n_layers
self.max_length = max_length
self.dropout_p = dropout
self.attn = nn.Linear(self.hidden_size + embedding_dim, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size + embedding_dim, embedding_dim)
self.dropout = nn.Dropout(self.dropout_p)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, self.num_layers, dropout=dropout)
self.linear = nn.Linear(hidden_dim, output_dim)
def forward(self, input, hidden, cell, encoder_outputs):
embedded = self.embedding(input)
encoder_outputs = encoder_outputs.view(-1, self.hidden_size, self.max_length)
attn_weights = F.softmax(self.attn(torch.cat((embedded, hidden[0]), 1)), dim=1).unsqueeze(0).view(-1, self.max_length, 1)
#encoder_outputs = encoder_outputs.view(-1, self.hidden_size, self.max_length)
attn_applied = torch.bmm(encoder_outputs, attn_weights)
output = torch.cat((embedded, attn_applied[:, :, 0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output, (hidden, cell) = self.lstm(output, (hidden, cell))
predicted = F.log_softmax(self.linear(output), dim = 2)
return predicted, hidden, cell
class RecurrentEncoder(nn.Module):
''' Sequence to sequence networks consists of Encoder and Decoder modules.
This class contains the implementation of Encoder module.
Args:
input_dim: A integer indicating the size of input dimension.
emb_dim: A integer indicating the size of embeddings.
hidden_dim: A integer indicating the hidden dimension of RNN layers.
n_layers: A integer indicating the number of layers.
dropout: A float indicating dropout.
'''
def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, bi_directional=False):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=False)
self.hrnn = nn.LSTM(hidden_dim,hidden_dim, n_layers, dropout = dropout, bidirectional = False)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src is of shape [sentence_length, batch_size], it is time major
# embedded is of shape [sentence_length, batch_size, embedding_size]
embedded = self.embedding(src)
embedded = self.dropout(embedded)
# Decode the hidden state of the last time step
# inputs to the rnn is input, (h, c); if hidden, cell states are not passed means default initializes to zero.
# input is of shape [sequence_length, batch_size, input_size]
# hidden is of shape [num_layers * num_directions, batch_size, hidden_size]
# cell is of shape [num_layers * num_directions, batch_size, hidden_size]
outputs, (hidden, cell) = self.rnn(embedded)
outputs, (hidden, cell) = self.hrnn(outputs)
# outputs are always from the top hidden layer, if bidirectional outputs are concatenated.
# outputs shape [sequence_length, batch_size, hidden_dim * num_directions]
return outputs, hidden, cell
class Encoder(nn.Module):
''' Sequence to sequence networks consists of Encoder and Decoder modules.
This class contains the implementation of Encoder module.
Args:
input_dim: A integer indicating the size of input dimension.
emb_dim: A integer indicating the size of embeddings.
hidden_dim: A integer indicating the hidden dimension of RNN layers.
n_layers: A integer indicating the number of layers.
dropout: A float indicating dropout.
'''
def __init__(self, input_dim, emb_dim, hidden_dim, n_layers, dropout, bi_directional=False):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.dropout = dropout
self.bi_directional = bi_directional
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.LSTM(emb_dim, hidden_dim, n_layers, dropout=dropout, bidirectional=bi_directional)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src is of shape [sentence_length, batch_size], it is time major
# embedded is of shape [sentence_length, batch_size, embedding_size]
embedded = self.embedding(src)
embedded = self.dropout(embedded)
# Decode the hidden state of the last time step
# inputs to the rnn is input, (h, c); if hidden, cell states are not passed means default initializes to zero.
# input is of shape [sequence_length, batch_size, input_size]
# hidden is of shape [num_layers * num_directions, batch_size, hidden_size]
# cell is of shape [num_layers * num_directions, batch_size, hidden_size]
outputs, (hidden, cell) = self.rnn(embedded)
# outputs are always from the top hidden layer, if bidirectional outputs are concatenated.
# outputs shape [sequence_length, batch_size, hidden_dim * num_directions]
if self.bi_directional:
outputs = outputs[:, :, self.hidden_dim:] + outputs[:, :, :self.hidden_dim]
hidden = hidden[:2,:,:] + hidden[2:,:,:]
cell = cell[:2,:,:] + cell[2:,:,:]
#hidden = hidden.view(self.n_layers,-1,self.hidden_dim)
#cell = cell.view(self.n_layers,-1,self.hidden_dim)
return outputs, hidden, cell
| facebookresearch/UNLU | codes/rnn.py | rnn.py | py | 8,608 | python | en | code | 34 | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
... |
1688662512 | from selenium import webdriver
import time
import csv
# driver = webdriver.Chrome(r'path\to\the\chromedriver.exe')
driver = webdriver.Chrome()
# Go to the page that we want to scrape
driver.get("https://blog.feedspot.com/usa_news_websites/")
#close the pop up
time.sleep(2)
close_button = driver.find_element_by_xpath('//*[@id="wp_subscribe_popup"]/button')
close_button.click()
time.sleep(2)
csvfile = open('feedspot_data.csv', 'w', encoding='utf-8')
writer = csv.DictWriter(csvfile,fieldnames=['title','info', 'frequency number', 'frequency period', 'facebook fans', 'twitter followers'])
writer.writeheader()
infos = driver.find_elements_by_xpath('//p[@class="trow trow-wrap"]')
titles = driver.find_elements_by_xpath('//h3/a')
for i,info in enumerate(infos):
# print('\n\n info list: \n{}\n\n'.format(info.text))
# print('\n\n info len: \n{}\n\n'.format(len(info.text.split('\n'))))
#split info
# rawfrequency = info.text[info.text.find('\nFrequency ')+11:info.text.find('\nWebsite')-1] #careful with variable name
rawfrequency = info.text[info.text.find('\nFrequency ')+11:info.text.find('.',info.text.find('Frequency ')+11)]
freqnumber = rawfrequency.split()[1]
freqperiod = rawfrequency.split()[-1]
facebookrawnum = info.text[info.text.find('\nFacebook fans ')+14:info.text.find('. Twitter followers')-1]
facebooknum = facebookrawnum.replace(',', '')
twitterrawnum = info.text[info.text.find('Twitter followers ')+18:info.text.find('.',info.text.find('Twitter followers ')+18)]
twitternum = twitterrawnum.replace(',', '')
writer.writerow({
'title':titles[i].text,
'info':info.text,
'frequency number':freqnumber,
'frequency period':freqperiod,
'facebook fans':facebooknum,
'twitter followers':twitternum
#'about':info.text.split('\n')[0],
# 'frequency':info[1],
# 'website': info[2],
# 'popularity': info[3]
})
# for title in titles:
# print(title.text)
# print(infos[0].text.split('\n'))
# print(infos[1])
# for info in infos:
# print(info.text)
csvfile.close()
driver.close()
| skyyaya28/NYCDSA-Webscraping | feedspot_seleium.py | feedspot_seleium.py | py | 2,100 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
15837575627 | # -*- coding: utf-8 -*-
# @Time : 2022/6/17 15:05
# @Author : renyumeng
# @Email : 2035328756@qq.com
# @File : Solve.py
# @Project : ProbabilityTheoryAndMathematicalStatisticsExperiments
import numpy as np
import scipy.stats as sts
class Solve:
def __init__(self, N) -> None:
self.n: int = N
self._random_num: np.ndarray = self.get_normal_num
self._describe_num: tuple = self.get_describe
self.mean: float = self.get_mean
self._describe_variance: float = self._describe_num[-3]
self.func_variance: float = self.get_variance
def __str__(self) -> str:
return f"""使用describe函数得到的方差:{self._describe_variance}\n使用公式计算出的方差:{self.func_variance}"""
@property
def get_normal_num(self) -> np.ndarray:
_normal_num: np.array = sts.norm.rvs(loc=0, scale=1, size=self.n)
return _normal_num
@property
def get_describe(self) -> tuple:
_describe_ans: tuple = sts.describe(self._random_num)
return _describe_ans
@property
def get_mean(self) -> float:
_mean: float = self._random_num.mean()
return _mean
@property
def get_variance(self) -> float:
temp_array: np.ndarray = self._random_num.copy()
_mean: float = self.mean
ans: float = 0
for i in range(len(temp_array)):
ans += (temp_array[i] - _mean) ** 2
ans /= (self.n - 1)
return ans
if __name__ == "__main__":
newSolve: Solve = Solve(10)
print(newSolve)
| renyumeng1/ProbabilityTheoryAndMathematicalStatisticsExperiments | firstExper/第三题/Solve.py | Solve.py | py | 1,551 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.norm.rvs",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.stats.... |
37974301119 | '''
Time calculations
Author: Howard Webb
Date: 2/9/2023
'''
from datetime import datetime
import time
import math
from MARSFarm_Util import *
def get_day(start_date):
# calculate number of days since start_date (as timestamp)
now = datetime.now().timestamp()
dif = now - start_date
days = math.ceil(dif/(60*60*24))
return days
def get_week(start_date):
# calaculate number of weeks since start_date
days = get_day(start_date)
weeks = math.ceil(days/7)
return weeks
def get_time_struct(start_date):
# build record time structure, start_time is None if not in trial
ts = datetime.now().timestamp()
tstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if start_date is not None:
time = {TIMESTAMP:ts, TIME_STR:tstr, DAY:get_day(start_date), WEEK:get_week(start_date)}
else:
time = {TIMESTAMP:ts, TIME_STR:tstr}
return time
def get_time_str(timestamp):
dt = datetime.fromtimestamp(timestamp)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def test():
print("Time Util Test")
start_date = datetime.strptime("2023-1-2", "%Y-%m-%d").timestamp()
print("Day", get_day(start_date))
print("Week", get_week(start_date))
print(start_date, get_time_struct(start_date))
print("None", get_time_struct(None))
print("Time Str", get_time_str(time.time()))
print("Done")
if __name__=="__main__":
test()
| webbhm/MARSFarm-VX | Time_Util.py | Time_Util.py | py | 1,423 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"li... |
23364168677 | """Module for parse svg file and return the
position of different elements"""
# Global variables ------------------------------------------------------------
GRAPH_PATH = "../ressources/graphes/"
# Imports ---------------------------------------------------------------------
import os
import random
import xml.etree.ElementTree as ET
from .node import Node, Arrow
# Classes ---------------------------------------------------------------------
class Parser:
"""Class for get node's and arrow's coordinates
Also give the minimum size for the canvas"""
def __init__(self, window, path=None):
self.__select_file(path)
self.parser = ET.parse(self.path)
self.root = self.parser.getroot()
self.window_size = window
self.graph_width = self.root.attrib['width'].replace('pt', '')
self.graph_height = self.root.attrib['height'].replace('pt', '')
def get_nodes(self):
"""Return all nodes in the svg file"""
nodes = list()
for child in self.root[0]:
if 'node' in child.attrib.values():
for element in child:
if 'title' in element.tag:
current_name = element.text
elif 'ellipse' in element.tag:
if element.attrib['fill'] == "none":
poisoned = False
else:
poisoned = True
nodes.append(
Node(current_name,
poisoned,
(float(element.attrib['cx']),
float(element.attrib['cy']) * -1)))
return self.__create_dico(nodes)
def get_arrows(self):
"""Return all edges in the svg file"""
arrows = list()
for child in self.root[0]:
if 'edge' in child.attrib.values():
current_points_line = list()
current_points_sting = list()
for element in child:
if 'title' in element.tag:
current_name = tuple(element.text.split("->"))
elif 'path' in element.tag:
element.attrib['d'] = element.attrib['d'].replace('C', ' ')
coord_lines = element.attrib['d'].split(' ')
coord_lines[0] = coord_lines[0].replace('M', '')
coord_lines = coord_lines[::3]
for points in coord_lines:
points = points.split(',')
for point in points:
current_points_line.append(point)
elif 'polygon' in element.tag:
current_points_sting = element.attrib['points'].replace(" ", ",").split(",")
self.__formalize_number(current_points_line, current_points_sting)
arrows.append(
Arrow(current_name,
current_points_line,
current_points_sting))
return arrows
def __formalize_number(self, line, sting):
"""Convert negative number for avoid weird result on render
Arguments:
line {List} -- list of point for line
sting {List} -- list of point for sting
"""
for i, value in enumerate(line):
if float(value) < 0:
line[i] = self.window_size - (-1 * float(value))
for i, value in enumerate(sting):
if float(value) < 0:
sting[i] = self.window_size - (-1 * float(value))
def __create_dico(self, nodes):
"""Convert the nodes list to a dictionary for improve the
complexity of the program
Arguments:
nodes {List} -- The list to convert
Returns:
Dict -- The dictionary
"""
dic = dict()
for node in nodes:
dic[node.id_node] = node
return dic
def __select_file(self, file):
"""Select the file to parse data in other word select
the graph for play if None select a random file
Arguments:
file {string} -- the name of the file
"""
files = os.listdir(GRAPH_PATH)
selected = str()
if not file:
selected = GRAPH_PATH + random.choice(files)
else:
assert file in files
selected = GRAPH_PATH + file
self.path = selected
| Remyb98/chomp-sur-graphes | src/entity/parser.py | parser.py | py | 4,631 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "node.Node",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "node.Arro... |
35605543653 | import numpy as np
import init_lattice as lat
import MH_algorithm as MH
import Wolff_algorithm as W
import autocorrelation_functions as acf
import importlib
importlib.reload(MH)
importlib.reload(W)
importlib.reload(lat)
importlib.reload(acf)
# Produces data of internal energy autocorrelation against sweeps and the autocorrelation time for use in the report
# Initialise temperature
T = 2
# Temporary data storage
MH_autocorr_temp = []
MH_sweeps_tau_f_temp = []
Wolff_autocorr_temp = []
Wolff_sweeps_tau_f_temp = []
#Repeat and average
for i in range(5):
print(i)
# Reset lattice
lattice = lat.make_lattice(25,1)
# Start by burning iterations to equilibrium
burn = W.Wolff_evolve_and_compute_E(lattice, T**-1, 1, 1000)[0]
# Evolve with Wolff
Es, sweeps_Wolff = W.Wolff_evolve_and_compute_E(lattice, T**-1, 1, 1000)
# Now find autocorrelation
Wolff_autocorr_temp.append(acf.compute_autocorrelation(Es))
print('Wolff done')
# Repeat with MH
# Reset lattice
lattice = lat.make_lattice(25,1)
# Start by burning iterations to equilibrium
burn = MH.evolve_and_compute_E(lattice, T**-1, 1, 0, 100000)[0]
# Evolve the lattice with MH
Es, sweeps_MH = MH.evolve_and_compute_E(lattice,T**-1, 1, 0, 100000)
# Now find autocorrelation
MH_autocorr_temp.append(acf.compute_autocorrelation(Es))
print('MH done')
# Take Averages
MH_autocorr = np.mean(MH_autocorr_temp, axis = 0)
MH_sweeps_tau_f = sweeps_MH[acf.estimate_correlation_time(Es)]
Wolff_autocorr = np.mean(Wolff_autocorr_temp, axis = 0)
Wolff_sweeps_tau_f = sweeps_Wolff[acf.estimate_correlation_time(Es)]
# Save data
np.save('MH_autocorr_evolution_sweeps_E.npy', sweeps_MH)
np.save('MH_autocorr_evolution_autocorr_E.npy', MH_autocorr)
np.save('MH_autocorr_evolution_sweeps_tau_f_E.npy', MH_sweeps_tau_f)
np.save('Wolff_autocorr_evolution_sweeps_E.npy', sweeps_Wolff)
np.save('Wolff_autocorr_evolution_autocorr_E.npy', Wolff_autocorr)
np.save('Wolff_auto_corr_evolution_sweeps_tau_f_E.npy', Wolff_sweeps_tau_f) | Part-II-Computational-Physics/cluster-algorithms-for-monte-carlo-jbd29 | figure_12_E.py | figure_12_E.py | py | 2,040 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "importlib.reload",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
... |
22042359096 | #Developed By: Tonumoy Mukherjee
import os
from scipy.io import wavfile
import scipy
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from keras import optimizers
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
import pickle
from keras.callbacks import ModelCheckpoint
from cfg import Config
import random
import theano
from keras.utils import plot_model
#import pdb
from mpl_toolkits.axes_grid1 import make_axes_locatable
def check_data():
if os.path.isfile(config.p_path):
print('Loading exixting data for {} model' .format(config.mode))
with open(config.p_path, 'rb') as handle:
tmp = pickle.load(handle)
return tmp
else:
return None
#%% Feature Extraction
def build_rand_feat():
tmp = check_data()
if tmp:
return tmp.data[0], tmp.data[1]
X = []
y = []
_min, _max = float('inf'), -float('inf')
for _ in tqdm(range(n_samples)):
rand_class = np.random.choice(class_dist.index, p=prob_dist)
file = np.random.choice(df[df.label==rand_class].index)
rate, wav = wavfile.read('clean-train/'+file)
label = df.at[file, 'label']
rand_index = np.random.randint(0, wav.shape[0]-config.step)
sample = wav[rand_index:rand_index+config.step]
X_sample = mfcc(sample, rate,
numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
_min = min(np.amin(X_sample), _min)
_max = max(np.amax(X_sample), _max)
X.append(X_sample)
y.append(classes.index(label))
config.min = _min
config.max = _max
X, y = np.array(X), np.array(y)
X = (X - _min) / (_max - _min)
if config.mode == 'conv':
X = X.reshape(X.shape[0], X.shape[1], X.shape[2],1)
elif config.mode == 'time':
X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
y = to_categorical(y, num_classes=2)
config.data = (X, y)
with open(config.p_path, 'wb') as handle:
pickle.dump(config, handle, protocol=2)
return X,y
#%% CNN Model
def get_conv_model():
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
#pdb.set_trace()
model.add(Conv2D(32, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1),
padding='same', input_shape=input_shape))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
#model.add(Dense(32, activation='relu'))
#model.add(Dense(16, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.summary()
#adam = optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-9, amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
#keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
return model
#%% LSTM Model
def get_recurrent_model():
#shape of data for RNN is (n, time, features)
model = Sequential()
model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(64, activation='relu')))
model.add(TimeDistributed(Dense(32, activation='relu')))
model.add(TimeDistributed(Dense(16, activation='relu')))
model.add(TimeDistributed(Dense(8, activation='relu')))
model.add(Flatten())
model.add(Dense(2, activation='softmax'))
model.summary()
#sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#adam = optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-9, amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
# model.contrib.layers.l2_regularizer(
# scale=1 ,
# scope=None
#)
return model
#%% Data Management & Model Selection
df = pd.read_csv('Quake_mod.csv')
df.set_index('fname', inplace=True)
for f in df.index:
rate, signal = wavfile.read('clean-train/'+f)
signal =signal[0:int(0.2*rate)] #first 0.2 sec of signal
df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))
class_dist = df.groupby(['label'])['length'].mean()
n_samples = 2 * int(df['length'].sum()/0.1) #10th of a second
prob_dist = class_dist/class_dist.sum()
choices = np.random.choice(class_dist.index, p=prob_dist)
fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08,fontsize='large', fontweight='bold')
ax.pie(class_dist, labels=class_dist.index,autopct='%2.2f%%',
shadow=False, startangle=90)
ax.axis('equal')
plt.show()
config = Config(mode='conv')
if config.mode == 'conv':
X, y = build_rand_feat()
y_flat = np.argmax(y, axis=1)
input_shape = (X.shape[1], X.shape[2], 1)
model = get_conv_model()
elif config.mode == 'time':
X, y = build_rand_feat()
y_flat = np.argmax(y, axis=1)
input_shape = (X.shape[1], X.shape[2])
model = get_recurrent_model()
#%% Training
class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
checkpoint = ModelCheckpoint(config.model_path, monitor='val_acc', verbose=1, mode='max',
save_best_only=True, save_weights_only=False, period=1)
model.fit(X, y, epochs=100, batch_size=32, shuffle=True,
class_weight=class_weight, validation_split=0.1,
callbacks=[checkpoint])
model.save(config.model_path)
plot_model(model, to_file='convolutional_neural_network.png')
#%%
#def plot_filters(layer,X,y):
##
##
## filters = layer.W.get_value()
# filters, biases = layer.get_weights()
# fig = plt.figure()
# for j in range(len (filters)):
# ax = fig.add_subplot(y,X,j+1)
# ax.matshow(filters[j][0], cmap = cm.binary)
##
# plt.xticks(np.array([]))
# plt.yticks(np.array([]))
# plt.tight_layout()
# return plt
##
#plot_filters(model.layers[0],4,4) #first convolution layer filters
##
##%%
#for layer in model.layers:
# # check for convolutional layer
# if 'conv' not in layer.name:
# continue
# # get filter weights
# filters, biases = layer.get_weights()
# print(layer.name, filters.shape)
#%% Apda Cde Img Resize Nearest Neighbour
def my_resize(arr, f):
newarr = np.ones((arr.shape[0]*f, arr.shape[1]*f, arr.shape[2], arr.shape[3]))
for k1 in range(arr.shape[2]):
for k2 in range(arr.shape[3]):
temp = arr[:, :, k1, k2]
temp = (temp-np.min(temp))/(np.max(temp)-np.min(temp))
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
newarr[i*f:(i+1)*f, j*f:(j+1)*f, k1, k2]=temp[i, j]
return newarr
def plot_filter(arr, f, padd):
up_arr = my_resize(arr, f)
newarr = np.ones((arr.shape[2]*(up_arr.shape[0]+padd), arr.shape[3]*(up_arr.shape[1]+padd)))
for i in range(arr.shape[2]):
for j in range(arr.shape[3]):
newarr[i*up_arr.shape[0]+i*padd:(i+1)*up_arr.shape[0]+i*padd, j*up_arr.shape[0]+j*padd:(j+1)*up_arr.shape[0]+j*padd]= \
up_arr[:,:,i, j]
return newarr
#%% Filter output plots CNN
fig1, (ax1,ax2,ax3,ax4) = plt.subplots(nrows=4 , ncols=1)
ax1.set_title("Layer 1 - 16 Filters")
#ax1.set_xlabel("X-label for axis 1"
filters, bias = model.layers[0].get_weights() #1st layer 16 filters
#filters = filters.reshape(3, 3, 4,4)
#title_obj = plt.title('16 Filters of Layer - 1') #get the title property handler
#plt.getp(title_obj, 'text') #print out the properties of title
out = plot_filter(filters, 8, 1)
ax1.imshow(out, cmap=cm.gray)
filters, bias = model.layers[1].get_weights() #2nd layer 32 filters
out = random.sample(list(plot_filter(filters, 8, 1)),32)
ax2.imshow(out, cmap=cm.gray)
ax2.set_title("Layer 2 - 16 X 32 Filters")
filters, bias = model.layers[2].get_weights() #3rd layer 64 filters
out = random.sample(list(plot_filter(filters, 8, 1)),64)
ax3.imshow(out, cmap=cm.gray)
ax3.set_title("Layer 3 - 32 X 64 Filters")
filters, bias = model.layers[3].get_weights() #4thlayer 128 filters
out = random.sample(list(plot_filter(filters, 8, 1)),128)
ax4.imshow(out, cmap=cm.gray)
ax4.set_title("Layer 4 - 64 X 128 Filters")
#%%
fig2, axs = plt.subplots(nrows=2 , ncols=5)
axs[0,0].imshow(X[1,:,:,0]) #Positive Class I/P
axs[0,0].set_title("Positive Class I/P")
axs[1,0].imshow(X[0,:,:,0]) #Negative Class I/P
axs[1,0].set_title("Negative Class I/P")
axs[0,1].imshow(X[5,:,:,0]) #Positive Class I/P
axs[0,1].set_title("Positive Class I/P")
axs[1,1].imshow(X[6,:,:,0]) #Negative Class I/P
axs[1,1].set_title("Negative Class I/P")
axs[0,2].imshow(X[8,:,:,0]) #Positive Class I/P
axs[0,2].set_title("Positive Class I/P")
axs[1,2].imshow(X[9,:,:,0]) #Negative Class I/P
axs[1,2].set_title("Negative Class I/P")
axs[0,3].imshow(X[20,:,:,0]) #Positive Class I/P
axs[0,3].set_title("Positive Class I/P")
axs[1,3].imshow(X[21,:,:,0]) #Negative Class I/P
axs[1,3].set_title("Negative Class I/P")
axs[0,4].imshow(X[24,:,:,0]) #Positive Class I/P
axs[0,4].set_title("Positive Class I/P")
axs[1,4].imshow(X[25,:,:,0]) #Negative Class I/P
axs[1,4].set_title("Negative Class I/P")
#%%
#from keras import backend as K
#def get_activations(model, layer_idx, X_batch):
# get_activations = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer_idx].output,])
# activations = get_activations([X_batch,0])
# return activations
# visualizing intermediate layers
#output_layer = model.layers[0].get_output()
#output_fn = theano.function([model.layers[0].get_input()], output_layer)
#
## the input image
#
#input_image=X[1,:,:,0]
#print(input_image.shape)
#
#plt.imshow(input_image[0,:,:,0],cmap ='gray')
#plt.imshow(input_image[0,0,:,0])
#
#
#output_image = output_fn(input_image)
#print(output_image.shape)
#
## Rearrange dimension so we can plot the result
#output_image = np.rollaxis(np.rollaxis(output_image, 3, 1), 3, 1)
#print(output_image.shape)
fig3, axs = plt.subplots(nrows=3 , ncols=5)
filters, bias = model.layers[3].get_weights()
filt1 = filters[:,:,0,0] # 1st filter
filt2 = filters[:,:,0,1] # 2nd filter
filt3 = filters[:,:,0,11] # 3rd filter
filt4 = filters[:,:,0,13] # 4th filter
filt5 = filters[:,:,0,14] # 5th filter
inp1 = X[8,:,:,0] # random input
fst_conv = scipy.signal.convolve2d(inp1, filt1, mode='same', boundary='fill', fillvalue=0) #first filter convolution
fst_conv[fst_conv<0] = 0 #relu
scnd_conv = scipy.signal.convolve2d(inp1, filt2, mode='same', boundary='fill', fillvalue=0) #second filter convolution
scnd_conv[scnd_conv<0] = 0 #relu
thrd_conv = scipy.signal.convolve2d(inp1, filt3, mode='same', boundary='fill', fillvalue=0) #third filter convolution
thrd_conv[thrd_conv<0] = 0 #relu
frth_conv = scipy.signal.convolve2d(inp1, filt4, mode='same', boundary='fill', fillvalue=0) #fourth filter convolution
frth_conv[frth_conv<0] = 0 #relu
ffth_conv = scipy.signal.convolve2d(inp1, filt5, mode='same', boundary='fill', fillvalue=0) #fifth filter convolution
ffth_conv[ffth_conv<0] = 0 #relu
axs[0,0].imshow(filt1, cmap =cm.gray)
axs[0,0].set_title("Layer 1, Filter 1")
axs[0,1].imshow(filt2, cmap =cm.gray)
axs[0,1].set_title("Layer 1, Filter 2")
axs[0,2].imshow(filt3, cmap =cm.gray)
axs[0,2].set_title("Layer 1, Filter 3")
axs[0,3].imshow(filt4, cmap =cm.gray)
axs[0,3].set_title("Layer 1, Filter 4")
axs[0,4].imshow(filt5, cmap =cm.gray)
axs[0,4].set_title("Layer 1, Filter 5")
axs[1,0].imshow(inp1, cmap =cm.gray)
axs[1,1].imshow(inp1, cmap =cm.gray)
axs[1,2].imshow(inp1, cmap =cm.gray)
axs[1,2].set_title("Identical Positive Input to the filters")
axs[1,3].imshow(inp1, cmap =cm.gray)
im5 = axs[1,4].imshow(inp1, cmap =cm.gray)
divider = make_axes_locatable(axs[1,4])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im5, cax=cax, orientation='vertical')
axs[2,0].imshow(fst_conv, cmap =cm.gray)
axs[2,0].set_title("Layer 1, Filter 1 Activation")
axs[2,1].imshow(scnd_conv, cmap =cm.gray)
axs[2,1].set_title("Layer 1, Filter 2 Activation")
axs[2,2].imshow(thrd_conv, cmap =cm.gray)
axs[2,2].set_title("Layer 1, Filter 3 Activation")
axs[2,3].imshow(frth_conv, cmap =cm.gray)
axs[2,3].set_title("Layer 1, Filter 4 Activation")
axs[2,4].imshow(ffth_conv, cmap =cm.gray)
axs[2,4].set_title("Layer 1, Filter 5 Activation")
#plt.imshow(conv, cmap = cm.gray) # activations
| Tonumoy/MFCCNet-A-Network-for-Earthquake-Early-Warning-Applications-using-Speech-Recognition-Techniques | model.py | model.py | py | 13,362 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.isfile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number"... |
38903193024 | import argparse
import csv
class MergeDataset:
def __call__(self, positive_handle, negative_handle, out_handle, delimiter=",", quote_character='"'):
csv_writer = csv.writer(out_handle, delimiter=delimiter, quotechar=quote_character)
# Write positive
for r in positive_handle:
csv_writer.writerow([r.strip("\n"), 1])
# Write negative
for r in negative_handle:
csv_writer.writerow([r.strip("\n"), 0])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("positivefile",
help="The positive file to merge")
parser.add_argument("negativefile",
help="The negativefile file to merge")
parser.add_argument("outfile",
help="The output file")
args = parser.parse_args()
with open(args.positivefile, "r", encoding="latin") as p:
with open(args.negativefile, "r", encoding="latin") as n:
with open(args.outfile, "w", encoding="latin") as o:
MergeDataset()(p, n, o)
| elangovana/sentimentanalysis-chainer-sagemaker | custom_chainer/datasetmovies/MergeDataset.py | MergeDataset.py | py | 1,092 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.writer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
}
] |
72779091069 | from typing import List
import hikari
async def alert(event: hikari.GuildMessageCreateEvent, command: str, config, *args) -> None:
guild: hikari.GatewayGuild = event.get_guild()
roles: List[hikari.Role] = guild.get_roles().values()
for role in roles:
if role.mention == args[0] and role.name not in config['excluded_roles']:
for member in guild.get_members().values():
if role in member.get_roles():
await member.user.send(' '.join(args[1:]))
| Angry-Maid/DiscordAlertBot | commands/alert.py | alert.py | py | 514 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "hikari.GuildMessageCreateEvent",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "hikari.GatewayGuild",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": ... |
37377572966 | import os
import gym
import joblib
import cv2
import numpy as np
import tensorflow as tf
from collections import deque
from argparse import ArgumentParser
from gym import spaces
from tensorflow.python.training.moving_averages import assign_moving_average
cv2.ocl.setUseOpenCL(False)
try:
import const
except:
from . import const
const.DEBUG = 1
# DEBUG_PRINT函数,去除print时只需要将const.DEBUG=0
def DEBUG_PRINT(*kwargs):
if const.DEBUG:
print(*kwargs)
def common_arg_parser():
argparser = ArgumentParser()
argparser.add_argument(
'--num_timesteps',
type=float,
default=1e8,
dest='total_steps_num',
help='the total steps for training')
argparser.add_argument(
'--params-file',
metavar='params_file',
default='dqn_parameters.json',
help='path to parameters file.Default=dqn_parameters.json')
argparser.add_argument(
'--save-path',
default="trained_models/",
metavar="save_path",
help="directory to save/load trained model. Default= ./trained_models/")
argparser.add_argument(
"--load-path",
default=None,
metavar='load_path',
help="directory to load trained model. Default= ./trained_models/carla-dqn-model.ckpt")
argparser.add_argument(
'--images-to-disk',
action='store_true',
dest='save_images_to_disk',
help='save images (and Lidar data if active) to disk')
argparser.add_argument(
'--gpu-id',
type=int,
default=0,
metavar="GPU_ID",
help='GPU device ID to use. Default:0')
argparser.add_argument(
'--play',
default=False,
action='store_true',
help='play the trained model. Default:False')
return argparser
class NoopResetEnv(gym.Wrapper):
'''
在reset后随机走若干steps, 以保证每次reset 返回的observation不一样
'''
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
'''
reset 后 agent 必须执行某个step
'''
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
'''
skip 若干 frames, 并挑选这些frames中max_observation 和 total_reward 返回
'''
def __init__(self, env, skip=4, use_image_only_observation=True):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
if use_image_only_observation:
self._obs_image_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
else:
self._obs_image_buffer = np.zeros((2,)+env.observation_space.spaces[0].shape, dtype=np.uint8)
self._obs_measurement_buffer = np.zeros(env.observation_space.spaces[1].shape, dtype=np.float32)
self._skip = skip
self._use_image_only_obs = use_image_only_observation
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
if self._use_image_only_obs:
self._obs_image_buffer[0] = obs
else:
self._obs_image_buffer[0] = obs[0]
if i == self._skip - 1:
if self._use_image_only_obs:
self._obs_image_buffer[1] = obs
else:
self._obs_image_buffer[1] = obs[0]
self._obs_measurement_buffer = obs[1]
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_image_buffer.max(axis=0)
if self._use_image_only_obs:
observation = max_frame
else:
observation = (max_frame, self._obs_measurement_buffer)
return observation, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def reset_env(self, **kwargs):
return self.env.reset_env(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
'''
将reward 统一裁剪为 -1, 0, +1,
'''
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
'''
裁剪 frames(images), 范围,存储格式,大小形状
'''
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.grayscale = grayscale
if self.grayscale:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 3), dtype=np.uint8)
def observation(self, frame):
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
'''
不建议使用, 因为返回 float32 类型的observation
'''
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class NormalizedEnv(gym.ObservationWrapper):
'''
observation 归一化, 返回 float32 类型 observation
'''
def __init__(self, env=None):
gym.ObservationWrapper.__init__(self, env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def observation(self, observation):
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
obs = (observation - unbiased_mean) / (unbiased_std + 1e-8)
return obs
def make_atari(env_id, timelimit=True):
# XXX(john): remove timelimit argument after gym is upgraded to allow double wrapping
env = gym.make(env_id)
if not timelimit:
env = env.env
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_carla(env, episode_life=False, clip_rewards=False, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
# if 'FIRE' in env.unwrapped.get_action_meanings():
# env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def batch_norm(x, train, eps=1e-03, decay=0.99, affine=True, name=None):
'''
:param x: input tensor
:param train: True/False, whether train or not
:param eps: epsilon cofficient used in divsion
:param decay:
:param affine:
:param name:
:return:
'''
with tf.variable_scope(name, default_name='BatchNorm2d', reuse=tf.AUTO_REUSE):
params_shape = [x.shape[-1]]
moving_mean = tf.get_variable('mean', shape=params_shape, initializer=tf.zeros_initializer, trainable=False)
moving_variance = tf.get_variable('variance', shape=params_shape, initializer=tf.ones_initializer, trainable=False)
def mean_var_with_update():
axises = list(np.arange(len(x.shape) - 1))
mean, variance = tf.nn.moments(x, axes=axises, name='moments')
with tf.control_dependencies([assign_moving_average(moving_mean, mean, decay),
assign_moving_average(moving_variance, variance, decay)]):
return tf.identity(mean), tf.identity(variance)
mean, variance = tf.cond(train, mean_var_with_update, lambda: (moving_mean, moving_variance))
if affine:
beta = tf.get_variable('beta', params_shape, initializer=tf.zeros_initializer)
gamma = tf.get_variable('gamma', params_shape, initializer=tf.ones_initializer)
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, eps)
print("bn beta name : ", beta.name)
print("bn gamma name : ", gamma.name)
else:
x = tf.nn.batch_normalization(x, mean, variance, None, None, eps)
return x
def save_variables(save_path, variables=None, sess=None):
"""
保存模型参数
:param save_path: the path to the model file
:param variables: the trainable variables in the graph
:param sess: the session of the graph
:return: None
"""
sess = sess or tf.get_default_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
"""
加载模型参数
:param load_path: the path to the model file
:param variables: the trainable variables in the graph
:param sess: the session of the graph
:return: None
"""
sess = sess or tf.get_default_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
def get_vars(scope):
'''
获取命名空间scope内的变量
:param scope:
:return:
'''
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
'''
返回命名空间scope内变量的个数
:param scope:
:return:
'''
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
) | fangchuan/carla-DRL | utils/common.py | common.py | py | 16,371 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.ocl.setUseOpenCL",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.ocl",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "const.DEBUG",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "const.DEBUG",
... |
7326203114 | import hls4ml
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tarfile
import shutil
PARSE = False
data = []
data_path = 'data_pickles/data6.pkl'
saved_dir = os.getcwd()
if PARSE:
df = pd.read_pickle(data_path)
os.chdir('/eos/home-n/nghielme/')
ids = df['ID'].tolist()
for dir in os.listdir('.'):
if dir.startswith('enet-results-run'):
os.chdir(dir)
else:
continue
for model in os.listdir('.'):
datum = {}
if model.endswith('.tar.gz') and model[8:-7] not in ids:
with tarfile.open(model) as tar:
subdir_and_files = [
tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.startswith('hls')
]
tar.extractall(members=subdir_and_files)
else:
continue
model = model[8:-7]
parsed = hls4ml.report.vivado_report.parse_vivado_report(model + '_FIFO_OPT')
shutil.rmtree(model + '_FIFO_OPT')
model_info = model.split('_')
datum['ID'] = model
datum['Run'] = dir.split('-')[-1]
datum['Filters'] = int(model_info[1][1:])
datum['Clock'] = int(model_info[2][3:])
datum['ReuseFactor'] = int(model_info[3][2:])
datum['Model'] = 'Clock: ' + str(datum['Clock']) + ' \n RF: ' + str(datum['ReuseFactor'])
datum['Quantization'] = int(model_info[4][1:])
datum['Precision'] = model_info[7].replace('-', ',')
try:
datum['LUTs%'] = int(round(parsed['ImplementationReport']['TotLUTs%']))
datum['FFs%'] = int(round(parsed['ImplementationReport']['FFs%']))
datum['RAM36Bs%'] = int(round(parsed['ImplementationReport']['RAMB36s%']))
datum['RAM18s%'] = int(round(parsed['ImplementationReport']['RAMB18s%']))
datum['DSPs%'] = int(round(parsed['ImplementationReport']['DSPs%']))
datum['WNS'] = parsed['TimingReport']['WNS']
except KeyError:
datum['LUTs%'] = 'NA'
datum['FFs%'] = 'NA'
datum['RAM36Bs%'] = 'NA'
datum['RAM18s%'] = 'NA'
datum['DSPs%'] = 'NA'
datum['WNS'] = 'NA'
datum['MaxLatency'] = parsed['CosimReport']['LatencyMax']
data.append(datum)
os.chdir('..')
os.chdir(saved_dir)
df1 = pd.DataFrame(data)
list_df = [df, df1]
res = df.concat(list_df)
res.to_pickle(data_path)
else:
df = pd.read_pickle(data_path)
df_na = df[df['LUTs%'] == 'NA']
df_na.to_csv('NA_models.csv')
df = df[df['LUTs%'] != 'NA']
df['Max Latency [ms]'] = df['MaxLatency'] * 1e-5
df['10 x WNS [ns]'] = df['WNS'] * 10
df['Latency Overclock [ms]'] = df['MaxLatency'] * (10 - df['WNS']) * 1e-6
# df.to_csv('dataframe.csv')
ap_fixed_16_6_data = df[df['Precision'] == '16,6']
ap_fixed_8_4_data = df[df['Precision'] == '8,4']
ap_fixed_8_4_data = ap_fixed_8_4_data.sort_values(by=['Clock', 'ReuseFactor'], ascending=True)
ap_fixed_16_6_data = ap_fixed_16_6_data.sort_values(by=['Clock', 'ReuseFactor'], ascending=True)
def print_plot(data, title):
def pointplot_with_outliers(*args, **kwargs):
local_data = kwargs.pop('data')
gt100ms = local_data.copy()
gt100ms.loc[gt100ms['Max Latency [ms]'] >= 100, 'Max Latency [ms]'] = 100
gt100ms[['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%', '10 x WNS [ns]', 'Latency Overclock [ms]']] = -10
lt100ms = local_data.copy()
lt100ms.loc[lt100ms['Max Latency [ms]'] >= 100, 'Max Latency [ms]'] = -10
gt100ms = gt100ms.melt(id_vars=['Model', 'ReuseFactor', 'Clock', 'Filters', 'Quantization'],
value_vars=['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%',
'Max Latency [ms]', '10 x WNS [ns]', 'Latency Overclock [ms]'])
lt100ms = lt100ms.melt(id_vars=['Model', 'ReuseFactor', 'Clock', 'Filters', 'Quantization'],
value_vars=['LUTs%', 'FFs%', 'RAM36Bs%', 'RAM18s%', 'DSPs%',
'Max Latency [ms]', '10 x WNS [ns]', 'Latency Overclock [ms]'])
palette = kwargs['palette']
if len(gt100ms) > 0:
kwargs['palette'] = 'dark:brown'
sns.pointplot(**kwargs, data=gt100ms, markers='x')
kwargs['palette'] = palette
sns.pointplot(**kwargs, data=lt100ms)
sns.set_theme()
g = sns.FacetGrid(data, col='Filters', row='Quantization', sharex=False, sharey=False, aspect=3.2,
ylim=(0, 110))
g.map_dataframe(pointplot_with_outliers, join=False, x='Model', y='value', hue='variable', palette='tab10')
g.add_legend()
g.set_xticklabels(rotation=45)
g.fig.suptitle(title)
plt.show()
print_plot(ap_fixed_8_4_data, 'Default Quantization: ap_fixed<8,4>')
print_plot(ap_fixed_16_6_data, 'Default Quantization: ap_fixed<16,6>')
| nicologhielmetti/enet-script | analyze_results.py | analyze_results.py | py | 5,105 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_pickle",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number":... |
33902526132 | import asyncio
import ssl
from itertools import zip_longest
import click
from aiohttp import TCPConnector
from aiohttp.http import HeadersParser
from hls_get.downloader import HLSDownloader
async def download(links, path, names, coros, headers, timeout, clean_up, verify):
headers_parser = HeadersParser()
header_lines = [b'', *(line.encode('latin-1') for line in headers), b'']
parsed_headers, raw_headers = headers_parser.parse_headers(header_lines)
kwargs = dict()
if not verify:
kwargs['connector'] = TCPConnector(verify_ssl=False)
for link, name in zip_longest(links, names):
async with HLSDownloader(
link, path, name, coros, timeout,
headers=parsed_headers,
clean_up=clean_up,
**kwargs
) as downloader:
await downloader.download(link)
downloader.on_success()
@click.command(
help='Download m3u8 links '
'(like "http://www.example.domain/path/to/index.m3u8#Save name" '
' etc.) asynchronously, and merge into mp4 files.'
)
@click.argument('links', nargs=-1, required=True)
@click.option('-P', '--path', default='.', help='Save path')
@click.option('-N', '--names', multiple=True, help='Save name')
@click.option('-C', '--coros', default=5, help='Max coroutines')
@click.option('-H', '--headers', multiple=True, help='Headers parameters like curl\'s')
@click.option('-X', '--timeout', default=0, help='timeout in seconds')
@click.option('-c', '--clean-up', default=True, help='Clean up the cache directory when completed', is_flag=True)
@click.option('--verify', default=True, help='Verify certificate', is_flag=True)
@click.option('-D', '--delay', default=3, help='delay seconds before retrying')
@click.option('-R', '--retry-times', default=10, help='Max retry times')
def main(*args, delay=3, retry_times=10, **kwargs):
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
loop = asyncio.get_event_loop()
orig_handler = loop.get_exception_handler()
def ignore_ssl_error(loop, context):
if context.get('message') in {'SSL error in data received',
'Fatal error on transport'}:
# validate we have the right exception, transport and protocol
exception = context.get('exception')
if (isinstance(exception, ssl.SSLError) and
exception.reason == 'KRB5_S_INIT'):
if loop.get_debug():
asyncio.log.logger.debug('Ignoring SSL KRB5_S_INIT error')
return
if orig_handler is not None:
orig_handler(loop, context)
else:
loop.default_exception_handler(context)
loop.set_exception_handler(ignore_ssl_error)
loop.run_until_complete(download(*args, **kwargs))
if __name__ == '__main__':
main()
| SoulMelody/hls-get | hls_get/cli.py | cli.py | py | 2,929 | python | en | code | 39 | github-code | 6 | [
{
"api_name": "aiohttp.http.HeadersParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "aiohttp.TCPConnector",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "itertools.zip_longest",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": ... |
3795696476 | # -*- coding: utf_8 -*-
import sys
import time
import json
import re
import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_argument('--headless')
options.add_argument("--disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument('--log-level=OFF')
options.add_argument('--no-sandbox')
options.add_argument('--disable-application-cache')
options.add_argument('--disable-gpu')
options.add_argument('--start-maximized')
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--incognito")
options.add_argument("--verbose")
options.add_argument('--disable-browser-side-navigation')
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
s_dt = input()
e_dt = input()
print("Scraping Date: " + s_dt + " ~ " + e_dt)
driver = webdriver.Chrome(ChromeDriverManager().install(), options = options)
start = datetime.datetime.strptime(s_dt, "%Y-%m-%d")
end = datetime.datetime.strptime(e_dt, "%Y-%m-%d")
date_generated = [start + datetime.timedelta(days=x) for x in range(1, (end-start).days+2)]
start_flag = False
for date in date_generated:
drange = date.strftime("%Y%m%d")
main_url = "https://info.jfx.co.jp/jfxphpapl/mnavi/mnavi_SwapPoint.php?stdate=P" + drange
# print(main_url)
driver.get(main_url)
iframe = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, "//iframe[@name='SWAPSCREEN']")))
f1 = driver.find_element(By.XPATH, "//td[@class='f1']")
dt = f1.text
dt.replace("<","")
dt.replace(">","")
dt = dt.strip()
real_dt = dt
dt = ''.join([n for n in dt if n.isdigit()])
# print(dt)
if start_flag == False and dt != start.strftime("%Y%m%d"):
continue
start_flag = True
driver.switch_to.frame(iframe)
# Getting individual cities url
soup = BeautifulSoup(driver.page_source, 'html.parser')
trs = soup.findAll("tr", {"bgcolor" : "white"})
print("===================================================================================")
for tr in trs:
tds = tr.findAll('td')
currency = tds[0].getText()
buy = tds[4].getText()
sell = tds[5].getText()
print(real_dt + " " + currency + " " + buy + " " + sell)
| 1neoneo3/scrape | scraping1.py | scraping1.py | py | 2,819 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 43,
"usage_type": "name"
},
{... |
31569984020 | # normal libraries
from inspect import signature # used in the method eval of the class
import numpy as np
import scipy.stats # functions of statistics
# other files
from corai_error import Error_type_setter
from scipy.integrate import simps
# my libraries
np.random.seed(124)
# section ######################################################################
# #############################################################################
# some information
# -------------------------------------------------------------------------------------------------------
# list of the possible kernels:
# fct_top_hat
# fct_plain
# fct_truncnorm
# fct_biweight
#
#
# the functions are correct, they scale and shift the way it is supposed.
# However they are written in the following way : f_t(t_i) = K( t_i - t )
# example of kernels:
# list_of_kernels =
# [Kernel(fct_top_hat, name="wide top hat", a=-450, b=450),
# Kernel(fct_top_hat, name="normal top hat", a=-200, b=200),
# Kernel(fct_truncnorm, name="wide truncnorm", a=-500, b=500, sigma=350),
# Kernel(fct_truncnorm, name="normal truncnorm", a=-350, b=350, sigma=250)]
# -------------------------------------------------------------------------------------------------------
# the functions only work for positive time. If one input negative times, it messes up the orientation.
# section ######################################################################
# #############################################################################
# class
class Kernel:
# kernel is a functor, used for weighting some computations.
# the evaluation gives back a list of np.array
# the function should hand in the list of np.arrays non scaled.
# the parameters of the function (to be called) are gathered before:
# the weights do not change inside the estimation process.
# the name is for identification in plots
def __init__(self, fct_kernel, name=' no name ', **kwargs):
self.fct_kernel = fct_kernel
self.name = name
self.__dict__.update(kwargs)
def __repr__(self):
return f"Function is {repr(self._fct_kernel)} and name {self.name}."
def __call__(self, T_t, eval_point, T_max, debug=False):
# getting the length over each dimensions for the kernel.
shape_T_t = [len(T_t[i]) for i in range(len(T_t))] # recall each dim has different nb of jumps
# ans is the kernel evaluated on the jumps
ans = self._fct_kernel(T_t=T_t, eval_point=eval_point, shape_T_t=shape_T_t,
**{k: self.__dict__[k] for k in self.__dict__ if
k in signature(self._fct_kernel).parameters})
# ans is a list of np arrays. It is normalized such that it is a kernel.
# then I want to scale every vector.
# The total integral should be T_max, so I multiply by T_max
# If it isn't fct plain, then I have to scale.
if self._fct_kernel.__name__ != 'fct_plain':
# I want to rescale the results for the kernels that are not covering seen part. For that reason,
# I compute the integral of the kernel, and scale accordingly.
tt_integral = [np.linspace(0, T_max, int(5E5))] # in a list to respect the format list of list of T_t.
yy = self._fct_kernel(T_t=tt_integral, eval_point=eval_point, shape_T_t=[1],
**{k: self.__dict__[k] for k in self.__dict__ if
k in signature(self._fct_kernel).parameters})
integral = simps(yy[0], tt_integral[0])
# yy[0] bc function gives back a list of arrays.
for i in range(len(shape_T_t)):
ans[i] = ans[i] / integral * T_max
# *= do not work correctly since the vectors are not the same type (int/float).
# I also divide by the sum, the vector is normalized, however,
# possibly we're on the edge and we need to take that into account.
if debug:
print(f"inside kernel debug, "
f"that's my integral : "
f"{np.sum(ans[0][:-1]) * T_max / (len(ans[0]) - 1)}. "
f"Name : {self.fct_kernel.__name__}.")
return ans
# section ######################################################################
# #############################################################################
# getters setters
@property
def fct_kernel(self):
return self._fct_kernel
@fct_kernel.setter
def fct_kernel(self, new_fct_kernel):
self._fct_kernel = new_fct_kernel
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
if isinstance(new_name, str):
self._name = new_name
else:
raise Error_type_setter(f'Argument is not an string.')
# section ######################################################################
# #############################################################################
# kernels' functions
def fct_top_hat(T_t, shape_T_t, eval_point, a=-200, b=200):
output = []
for i in range(len(shape_T_t)):
vector = np.array(T_t[i])
# -1 if x < 0, 0 if x==0, 1 if x > 0.
output.append(1 / (2 * (b - a)) *
(np.sign(vector - eval_point - a) +
np.sign(b - vector + eval_point))
)
return output
def fct_plain(T_t, shape_T_t, eval_point):
# no scaling parameter, would be full to use scaling on plain.
return [np.full(shape_T_t[i], 1) for i in range(len(shape_T_t))] # full of 1.
def fct_truncnorm(T_t, shape_T_t, eval_point, a=-300, b=300, sigma=200):
output = []
for i in range(len(shape_T_t)):
output.append(scipy.stats.truncnorm.pdf(np.array(T_t[i]), a / sigma, b / sigma,
loc=eval_point, scale=sigma))
return output
def fct_truncnorm_test(T_t, shape_T_t, eval_point, a=-300, b=300, sigma=200):
output = []
i = 0 # for output[i] after, but there shouldn't be any problem.
for i in range(len(shape_T_t)):
output.append(2 * scipy.stats.truncnorm.pdf(np.array(T_t[i]), a / sigma, b / sigma,
loc=eval_point, scale=sigma))
output[i][T_t[i] < eval_point] = 0
return output
def fct_biweight(T_t, shape_T_t, eval_point, a=-300, b=300):
# if important, I can generalize biweight with function beta.
# Thus creating like 4 kernels with one function ( BETA(1), BETA(2)...)
assert a == -b, "The kernel only accepts symmetrical bounds."
output = []
for i in range(len(shape_T_t)):
xx = (np.array(T_t[i]) - (a + b) / 2 - eval_point) * 2 / (b - a)
# the correct order is eval_point - T_t,
# bc we evaluate at eval_point but translated by T_t,
# if kernel not symmetric a != b, then we also need to translate by the mid of them.
xx[(xx < -1) | (xx > 1)] = 1
output.append(15 / 16 * np.power(1 - xx * xx, 2) * 2 / (b - a))
return output
def fct_epa(T_t, shape_T_t, eval_point, a=-300, b=300):
assert a == -b, "The kernel only accepts symmetrical bounds."
output = []
for i in range(len(shape_T_t)):
xx = (np.array(T_t[i]) - (a + b) / 2 - eval_point) * 2 / (b - a)
# the correct order is eval_point - T_t,
# bc we evaluate at eval_point but translated by T_t,
# if kernel not symmetric a != b, then we also need to translate by the mid of them.
xx[(xx < -1) | (xx > 1)] = 1
output.append(3 / 4 * (1 - xx * xx) * 2 / (b - a))
return output
| Code-Cornelius/ITiDeEP | src/hawkes/kernel.py | kernel.py | py | 7,949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "inspect.signature",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.linspace"... |
75163153466 | import werkzeug
def test_CVE_2019_14806():
"""
CVE-2019-14806
high severity
Vulnerable versions: < 0.15.3
Patched version: 0.15.3
https://github.com/advisories/GHSA-gq9m-qvpx-68hc
Pallets Werkzeug before 0.15.3, when used with Docker,
has insufficient debugger PIN randomness because
Docker containers share the same machine id.
"""
werkzeug_version = tuple(map(int, werkzeug.__version__.split('.')))
secure_version = (0, 15, 3)
assert werkzeug_version >= secure_version
| e-ruiz/big-data | 01-NoSQL/atividade-04/src/tests/test_security.py | test_security.py | py | 533 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "werkzeug.__version__.split",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "werkzeug.__version__",
"line_number": 15,
"usage_type": "attribute"
}
] |
17007174174 | from scrapy import Spider
from scrapy.selector import Selector
from stack.items import StackItem
with open(r'C:\Users\amarciniak\AppData\Local\Programs\Python\Python35-32\Scripts\stack\stack\spiders\links.txt') as f:
linkList = f.read().splitlines()
class StackSpider(Spider):
name = "stack"
allowed_domains = ["realcanadiansuperstore.ca"]
start_urls = linkList
def parse(self, response):
name = Selector(response)
calories = Selector(response)
item = StackItem()
item['ItemName'] = name.xpath('//h1/text()').extract()[1].strip(';\n\t ')
itemTempCal =calories.xpath('//*[@id="nutrition"]/div/div[1]/div/div[1]/div[4]/span[2]/text()').extract()
item['Length']= len(itemTempCal)
tempLength = len(itemTempCal)
item['Calories'] = ('').join(itemTempCal).strip(';\n\t ')
yield item
| AdamMarciniak/SuperCrawler2 | stack/stack/spiders/stack_spider.py | stack_spider.py | py | 973 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "scrapy.selector.Selector",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scrapy.selector.Selector",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "stack... |
72031135549 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/1/3 21:23
# @Author : mafei0728
# @Version:V 0.1
# @File : bar.py
# @desc :
# 1)准备数据
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
movie_name = ['雷神3:诸神黄昏', '正义联盟', '寻梦环游记']
first_day = [10587.6, 10062.5, 1275.7]
first_weekend = [36224.9, 34479.6, 11830]
x = range(len(movie_name))
# 2)创建画布
plt.figure(figsize=(20, 8), dpi=100)
# 3)绘制柱状图
plt.bar(x, first_day, width=0.2, label="首日票房")
plt.bar([i + 0.2 for i in x], first_weekend, width=0.2, label="首周票房")
# 显示图例
plt.legend()
# 修改x轴刻度显示
plt.xticks([i + 0.1 for i in x], movie_name)
# 4)显示图像
plt.show()
| mafei0728/pythonProject | mateplotlibDemo/day03/bar.py | bar.py | py | 810 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
35292064506 | import pandas as pd
import datetime
import pickle
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
import random
# Codes des compagnies aériennes et l'équivalent du code interne après numérisation de la feature.
carrier_dict = {'AA':0, 'AS':1, 'B6':2, 'DL':3, 'EV':4, 'F9':5, 'HA':6, 'NK':7, 'OO':8, 'UA':9, 'VX':10,'WN':11}
# Distance entre les destinations
tripDistances=pd.DataFrame()
# codes numérique et codes textuelles des aéroports
airport_codes=pd.DataFrame()
# New Year day, Martin Luther King Jr. Day, Presidents' Day, Memorial Day
# Independence Day, Labor Day, Columbus Day, Veterans Day,
# Thanksgiving, Christmas Day
holidays = [datetime.date(2018, 1, 1),datetime.date(2019, 1, 1), datetime.date(2020, 1, 1),
datetime.date(2018, 1, 15),datetime.date(2019, 1, 21), datetime.date(2020, 1, 20),
datetime.date(2018, 2, 19), datetime.date(2019, 2, 18), datetime.date(2020, 2, 17),
datetime.date(2018, 5, 28), datetime.date(2019, 5, 27), datetime.date(2020, 5, 25),
datetime.date(2018, 7, 4), datetime.date(2019, 7, 4), datetime.date(2020, 7, 4),
datetime.date(2018, 9, 3), datetime.date(2019, 9, 2), datetime.date(2020, 9, 7),
datetime.date(2018,10, 8), datetime.date(2019,10, 14), datetime.date(2020,10, 12),
datetime.date(2018, 11, 11), datetime.date(2019, 11, 11), datetime.date(2020, 11, 11),
datetime.date(2018, 11, 22), datetime.date(2019, 11, 28), datetime.date(2020, 11, 26),
datetime.date(2018, 12, 25), datetime.date(2019, 12, 25), datetime.date(2020, 12, 25)]
# Notre modèle de prédiction sauvegardé dans un fichier
predictionModel = SGDRegressor()
encoder = OneHotEncoder()
scaler = StandardScaler()
error_info = ''
def init(model_file='data/flights_delays_model.pkl', trip_distance_file='data/tripDistance.pkl', airport_code_file='data/airportCodesDF.pkl', encoder_file='data/categ_featuresEncoder.pkl', scaler_file='data/numfeaturesScaler.pkl') :
global predictionModel, tripDistances, airport_codes,encoder, scaler
predictionModel = joblib.load(model_file)
pkl_file = open(trip_distance_file, 'rb')
tripDistances = pickle.load(pkl_file)
pkl_file = open(airport_code_file, 'rb')
airport_codes = pickle.load(pkl_file)
encoder = joblib.load(encoder_file)
scaler = joblib.load(scaler_file)
# Retourne le numéro de semaine correspondant à la date
def getWeekNum(day, month,year) :
global error_info
try :
fl_date = datetime.date(year, month, day)
return fl_date.isocalendar()[1]
except Exception as err:
error_info += 'Invalid date entered (' + str(day) + '/' + str(month) + '/' + str(year) + ') :' + str(err) + '. '
raise(err)
# Retourne le jour de la semaine (1 = lundi, ...)
def getWeekDay(day, month,year) :
global error_info
try :
return datetime.date(year, month, day).weekday() + 1
except Exception as err:
error_info += 'Invalid date entered (' + str(day) + '/' + str(month) + '/' + str(year) + ') :' + str(err) + '. '
raise(err)
# retourne le code numérique correspondant au code de la compagnies
def getCarrierCodeNum(unique_carrier_code):
global error_info
if unique_carrier_code in carrier_dict :
return carrier_dict[unique_carrier_code]
else :
error_info += 'Cannot find carrier code (' + unique_carrier_code + '). '
raise ValueError('Bad carrier code')
# retourne la distance de vols entre 2 aéroports
def getTripDistance(origin_code, destination_code):
global error_info
try:
distance = np.array(float(tripDistances[(tripDistances.ORIGIN == origin_code) &
(tripDistances.DEST == destination_code)].DISTANCE.drop_duplicates()))
return distance
except Exception as err:
error_info += 'Route was not found in the data. Please try a different nearby city or a new route.'
raise(err)
# Retourne le code numérique de l'aéoport d'origine (si true) ou destination si false.
def getAirportCodeNum(airport_code, origin=True):
global error_info
try :
if origin :
return int(airport_codes[airport_codes.AIRPORT_CODE == airport_code].ORIGIN_CODE)
else :
return int(airport_codes[airport_codes.AIRPORT_CODE == airport_code].DEST_CODE)
except Exception as err:
error_info += 'No airport found with code ' + str(airport_code) + '. '
raise(err)
# Retourne le nombre de jour à proximité d'un jour férié
def getNumDaysToHoliday(day, month, year):
if year not in [2018, 2019, 2020] :
error_info += 'No data found for the year ' + str(year) + '. '
raise ValueError('Bad year')
c_date = datetime.date(year, month, day)
return np.min(np.abs(np.array(c_date) - np.array(holidays))).days
# Utilisation de notre modèle pour prédire le retard éventuel.
def delay_prediction(originCode, destCode, carrier, day, month, year, dep_hour) :
global error_info
error_info=''
try :
origin_code_num = getAirportCodeNum(originCode, True)
dest_code_num = getAirportCodeNum(destCode, False)
carrier_code_num = carrier_dict[carrier]
weekday = getWeekDay(day, month, year)
week_num = getWeekNum(day, month, year)
hdays = getNumDaysToHoliday(day, month, year)
distance = getTripDistance(originCode, destCode)
numerical_values = np.c_[distance, hdays]
# Scale the features
numerical_values_scaled = scaler.transform(numerical_values)
categorical_values = np.zeros(8)
categorical_values[0] = int(month)
categorical_values[1] = int(day)
categorical_values[2] = int(weekday)
categorical_values[3] = int(week_num)
categorical_values[4] = int(dep_hour)
categorical_values[5] = int(carrier_code_num)
categorical_values[6] = int(origin_code_num)
categorical_values[7] = int(dest_code_num)
categorical_values_encoded = encoder.transform([categorical_values]).toarray()
travel = np.c_[numerical_values_scaled, categorical_values_encoded]
pred_delay = predictionModel.predict(travel)
return int(pred_delay[0]),error_info
except Exception as err:
print(error_info)
print ('Prediction error.', err)
return None, error_info
def test() :
tcarrier = ['AA', 'AS', 'DL', 'HA', 'UA']
tday = [1,10, 6, 9, 23, 30, 26, 12, 6, 9]
tmonth = [1,2, 3, 4, 5, 6, 7, 8, 9, 10,11,12]
tcode = ['BOS', 'JFK', 'SEA', 'SAN', 'DCA']
tdep_hour = [1, 2, 4, 7, 9, 12, 10, 15, 14, 17, 19, 20, 21, 22, 23]
for i in range(1000) :
origcode = random.choice(tcode)
destcode = random.choice(tcode)
carrier = random.choice(tcarrier)
day = random.choice(tday)
month = random.choice(tmonth)
dep_hour = random.choice(tdep_hour)
d = delay_prediction(origcode, destcode, carrier, day, month, 2018, dep_hour)
if d is not None :
if d > 5 :
print(origcode, destcode,carrier,day, month, dep_hour)
print("delay", d)
print("----------")
| makboulhoussen/flightdelay | web-interface/webdelay/delayapi/flightDelayPred.py | flightDelayPred.py | py | 7,291 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"... |
14159077384 | # coding: utf-8
from __future__ import unicode_literals
from django.db import models
from .utils import get_models_from_file
class DynamicModelManager(models.Manager):
def __init__(self, model, instance=None):
super(DynamicModelManager, self).__init__()
self.model = model
self.instance = instance
def get_queryset(self):
if self.instance is None:
return super(DynamicModelManager, self).get_queryset()
_filter = {self.instance._meta.pk.name: self.instance.pk}
return super(DynamicModelManager, self).get_queryset().filter(**_filter)
class DynamicModelDescriptor(object):
def __init__(self, model):
self.model = model
def __get__(self, instance):
if instance is None:
return DynamicModelManager(self.model)
return DynamicModelManager(self.model, instance)
class DynamicModel(object):
registry = {}
def contribute_to_class(self, cls, name):
self.manager_name = name
models.signals.class_prepared.connect(self.finalize, sender=cls)
def finalize(self, sender, **kwargs):
models_dict = get_models_from_file()
for model in models_dict:
dynamic_model = self.create_dynamic_model(model)
descriptor = DynamicModelDescriptor(dynamic_model)
setattr(sender, self.manager_name, descriptor)
def create_dynamic_model(self, model=None):
"""
Create a dynamic model from dict data.
"""
if not model:
return None
attrs = self.get_dynamic_model_fields(model)
# byte string looks sad
attrs.update(Meta=type(b'Meta', (), self.get_meta_fields(model)))
name = b'{}DynamicModel'.format(model['name'].title())
dynamic_model = type(name, (models.Model,), attrs)
self.__class__.registry[name] = dynamic_model
return dynamic_model
def __contains__(self, module_name):
return module_name in self.__class__.registry
def get_dynamic_model(self, module_name):
return self.__class__.registry.get(module_name, None)
def get_dynamic_model_fields(self, model=None):
fields = {
'id': models.AutoField(primary_key=True),
'__module__': self.__module__,
'__unicode__': lambda x: u'#{} - {}'.format(x.id, model['name'])
}
fields.update(model['fields'])
return fields
def get_meta_fields(self, model=None):
return {
'ordering': ('-id',),
'verbose_name': unicode(model['verbose_name'] if model else 'Name'),
'verbose_name_plural': unicode(model['verbose_name'] if model else 'Names'),
}
class Model(models.Model):
models = DynamicModel()
| ToxicWar/travail-de-tests | testtask/models.py | models.py | py | 2,767 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Manager",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.signals.class_prepared.connect",
"line_number": 36,
"usage_type": "call"
... |
44501822840 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, FloatField, IntegerField, FileField, validators
class DishForm(FlaskForm):
name = StringField('Name', [
validators.DataRequired(),
validators.Length(min=2, max=100)
])
description = TextAreaField('Description', [
validators.Optional(),
validators.Length(max=500)
])
price = FloatField('Price', [
validators.DataRequired(),
validators.NumberRange(min=0)
])
image = FileField('Image', [
validators.Optional()
])
category_id = IntegerField('Category ID', [
validators.DataRequired()
])
| stroud91/DietCrusherProject | app/forms/dishes.py | dishes.py | py | 668 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.DataRequired",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
9185141020 | import os
from absl import flags
FLAGS = flags.FLAGS
def get_executable_path(py_binary_name):
"""Returns the executable path of a py_binary.
This returns the executable path of a py_binary that is in another Bazel
target's data dependencies.
On Linux/macOS, the path and __file__ has the same root directory.
On Windows, bazel builds an .exe file and we need to use the MANIFEST file
the location the actual binary.
Args:
py_binary_name: string, the name of a py_binary that is in another Bazel
target's data dependencies.
Raises:
RuntimeError: Raised when it cannot locate the executable path.
"""
if os.name == 'nt':
py_binary_name += '.exe'
manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST')
workspace_name = os.environ['TEST_WORKSPACE']
manifest_entry = '{}/{}'.format(workspace_name, py_binary_name)
with open(manifest_file, 'r') as manifest_fd:
for line in manifest_fd:
tokens = line.strip().split(' ')
if len(tokens) != 2:
continue
if manifest_entry == tokens[0]:
return tokens[1]
raise RuntimeError(
'Cannot locate executable path for {}, MANIFEST file: {}.'.format(
py_binary_name, manifest_file))
else:
# NOTE: __file__ may be .py or .pyc, depending on how the module was
# loaded and executed.
path = __file__
# Use the package name to find the root directory: every dot is
# a directory, plus one for ourselves.
for _ in range(__name__.count('.') + 1):
path = os.path.dirname(path)
root_directory = path
return os.path.join(root_directory, py_binary_name)
| bazelbuild/bazel | third_party/py/abseil/absl/testing/_bazelize_command.py | _bazelize_command.py | py | 1,658 | python | en | code | 21,632 | github-code | 6 | [
{
"api_name": "absl.flags.FLAGS",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "absl.flags",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.name",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_... |
70911317947 | # -*- coding: utf-8 -*-
import scrapy
class AmazonBooksSpiderSpider(scrapy.Spider):
name = 'amazon_books_spider'
# allowed_domains = ['amazon.com']
start_urls = ['https://www.amazon.com/s?i=stripbooks&bbn=283155&rh=n%3A283155%2Cp_n_publication_date%3A1250226011%2Cp_n_feature_browse-bin%3A618073011&s=review-count-rank&dc&fst=as%3Aoff&qid=1588545134&rnid=618072011&ref=sr_pg_2']
def parse(self, response):
print(response)
all_books = response.xpath('//div[@class="sg-col-20-of-24 s-result-item s-asin sg-col-0-of-12 sg-col-28-of-32 sg-col-16-of-20 sg-col sg-col-32-of-36 sg-col-12-of-16 sg-col-24-of-28"]')
for book in all_books:
title = book.xpath('.//h2//span/text()').extract_first()
author = book.xpath('.//a[@class="a-size-base a-link-normal"]/text()').extract_first()
rating = book.xpath('.//span[@class="a-icon-alt"]/text()').extract_first()
vote = book.xpath('.//a[@class="a-link-normal"]/span/text()').extract_first()
kindle_price = book.xpath('.//span[@class="a-offscreen"]/text()').extract_first()
yield {
'title': title,
'author': author,
'rating': rating,
'vote': vote,
'kindle_price': kindle_price
}
| ArRosid/Scrapy-Project | scrapy_project/spiders/amazon_books_spider.py | amazon_books_spider.py | py | 1,322 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 5,
"usage_type": "attribute"
}
] |
16474430323 | from rest_framework import serializers
from .models import Quizzes, Question, Answer,Score
class QuizSerializer(serializers.ModelSerializer):
class Meta:
model = Quizzes
fields = [
'title','id'
]
class ScoreSerializer(serializers.ModelSerializer):
user = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Score
fields = [
'quiz',
'score',
'user',
]
class AnswerSerializer(serializers.ModelSerializer):
class Meta:
model = Answer
fields = [
'id',
'answer_text',
'is_right',
]
class RandomQuestionSerializer(serializers.ModelSerializer):
answer = AnswerSerializer(many=True, read_only=True)
class Meta:
model = Question
fields = [
'title','answers',
]
class QuestionSerializer(serializers.ModelSerializer):
answers = AnswerSerializer(many=True, read_only=True)
# quiz = QuizSerializer(read_only=True)
class Meta:
model = Question
fields = [
'quiz','title','answers',
]
class QuestionCreateSerializer(serializers.ModelSerializer):
answers = AnswerSerializer(many=True)
class Meta:
model = Question
fields = [
'title','answers',
]
def create(self, validated_data):
answers_data = validated_data.pop('answers')
question = Question.objects.create(**validated_data)
# for answer_data in answers_data:
# Answer.objects.create(question=question, **answer_data)
answers.set(answers_data)
return question
class QuizCreateSerializer(serializers.ModelSerializer):
question = QuestionCreateSerializer(many=True)
class Meta:
model = Quizzes
fields = [
'title','question',
]
def create(self, validated_data):
questions_data = validated_data.pop('question')
print(questions_data)
quiz = Quizzes.objects.create(**validated_data)
for question_data in questions_data:
Question.objects.create(quiz=quiz, **question_data)
return quiz
| Rinz-Code/Fasalu-Rahman-Portfolio | server/quiz/serializers.py | serializers.py | py | 2,218 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Quizzes",
"line_number": 8,
"usage_type": "name"
},
{... |
29497895962 | from utils.flask.app import app
from utils.db import Book
from flask import request, jsonify
import json
@app.route('/updatespitslot', methods=['GET', 'POST'])
def upload_spitslotinfo():
data = json.loads(request.get_data(as_text=True))
if data['key'] != 'updatespitslot' or 'stu_uuid' not in data.keys() or 'info' not in data.keys():
return jsonify(
RetCode=1,
Message='failed because mismatching info'
)
stu_uuid, spit_info, book = data['stu_uuid'], data['info'], Book()
book.insert_spitslot(stu_uuid, spit_info)
app.logger.info(f"{stu_uuid} upload spit_info: {spit_info}")
return jsonify(
RetCode=0,
Message='上传吐槽信息成功!'
)
@app.route('/recentspitslot', methods=['GET', 'POST'])
def get_spitslotinfo():
data = json.loads(request.get_data(as_text=True))
book = Book()
data = book.get_recent_spitslot(spit_num=20)
return jsonify(
RetCode=0,
data=data,
Message='fetch recent spitslot successfully..!'
) | Emanual20/StuinfoDisplayProject | server/utils/router/spitslot.py | spitslot.py | py | 1,062 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.get_data",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"lin... |
43899986443 | import os
import test
import shutil
import unittest
from xml.dom import minidom
from xmp import XMP
class XMPTestCase(unittest.TestCase):
"""Tests for `xmp.py`."""
def test_decode_tag_size(self):
"""decode_tag_size - Read section size from byte pair"""
self.assertEqual(XMP.decode_tag_size(b'\x00\xff'), 255)
self.assertEqual(XMP.decode_tag_size(b'\xff\x00'), 65280)
self.assertEqual(XMP.decode_tag_size(b'\x00\x00'), 0)
self.assertEqual(XMP.decode_tag_size(b'\xab\xcd'), 43981)
def test_encode_tag_size(self):
"""encode_tag_size - Convert section size to byte pair"""
self.assertEqual(XMP.encode_tag_size(255), b'\x00\xff')
self.assertEqual(XMP.encode_tag_size(65280), b'\xff\x00')
self.assertEqual(XMP.encode_tag_size(0), b'\x00\x00')
self.assertEqual(XMP.encode_tag_size(43981), b'\xab\xcd')
def test_get_xmp(self):
"""get_xmp - Retrieve existing XMP data from file"""
self.assertEqual(XMP.get_xmp(test.path('img/test-no-XMP.jpg')), '')
self.assertTrue(len(XMP.get_xmp(test.path('img/test-XMP.jpg'))) > 0)
def test_set_xmp(self):
"""set_xmp - Write XMP to file"""
shutil.copy(test.path('img/test-no-XMP.jpg'), test.path('img/test-no-xmp-temp.jpg'))
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
XMP.set_xmp(test.path('img/test-no-xmp-temp.jpg'), xmp_raw)
self.assertTrue(len(XMP.get_xmp(test.path('img/test-no-xmp-temp.jpg'))) > 0)
os.remove(test.path('img/test-no-xmp-temp.jpg'))
shutil.copy(test.path('img/test-XMP.jpg'), test.path('img/test-xmp-temp.jpg'))
self.assertTrue(len(XMP.get_xmp(test.path('img/test-xmp-temp.jpg'))) > 0)
XMP.set_xmp(test.path('img/test-xmp-temp.jpg'), XMP.XMP_IDENTIFIER)
self.assertTrue(XMP.get_xmp(test.path('img/test-xmp-temp.jpg')) == XMP.XMP_IDENTIFIER)
os.remove(test.path('img/test-xmp-temp.jpg'))
def test_xmp_to_minidom(self):
"""xmp_to_minidom - Convert raw XMP data to minidom object"""
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
xmp_minidom = XMP.xmp_to_minidom(xmp_raw)
self.assertIsInstance(xmp_minidom, minidom.Document)
xmp_minidom = XMP.xmp_to_minidom(b'')
self.assertIsInstance(xmp_minidom, minidom.Document)
def test_minidom_to_xmp(self):
"""minidom_to_xmp - Convert minidom object into raw XMP data"""
xmp_raw = XMP.get_xmp(test.path('img/test-XMP.jpg'))
xmp_minidom = XMP.xmp_to_minidom(xmp_raw)
xmp_raw = XMP.minidom_to_xmp(xmp_minidom)
self.assertTrue(XMP.XMP_IDENTIFIER in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_BEGIN in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_END in xmp_raw)
xmp_minidom = XMP.xmp_to_minidom(b'')
xmp_raw = XMP.minidom_to_xmp(xmp_minidom)
self.assertTrue(XMP.XMP_IDENTIFIER in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_BEGIN in xmp_raw)
self.assertTrue(XMP.XMP_PACKET_END in xmp_raw)
def test_add_panorama_xmp(self):
"""add_panorama_xmp - Add panorama marker to file XMP"""
shutil.copy(test.path('img/test-no-XMP.jpg'), test.path('img/test-no-xmp-temp.jpg'))
XMP.add_panorama_xmp(test.path('img/test-no-xmp-temp.jpg'))
self.assertTrue(b'GPano' in XMP.get_xmp(test.path('img/test-no-xmp-temp.jpg')))
os.remove(test.path('img/test-no-xmp-temp.jpg'))
if __name__ == '__main__':
unittest.main() | ntieman/blender-facebook-360 | test/test_xmp.py | test_xmp.py | py | 3,506 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "xmp.XMP.decode_tag_size",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "xmp.XMP",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "xmp.XMP.decode_... |
24577754108 | import random
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import json
URL = "https://www.luogu.com.cn/training/list"
options = Options()
options.add_argument("--headless") # 无头模式
options.set_preference("permissions.default.image", 2) # 无图模式
profile = FirefoxProfile()
profile.set_preference("permissions.default.frame", 3) # 禁用加载 iframe 的功能 (bilibili嵌套)
options.profile = profile
driver = webdriver.Firefox(options=options)
driver.get(URL)
print("[LOG] 加载索引")
TITLE_XPATH_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TDNUM]/span[2]/a'
TDID_XPATH_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TDNUM]/span[1]'
title_elements = list()
titles = list()
tdid_elements = list()
tdids = list()
for i in range(1, 41):
ele1 = driver.find_element(By.XPATH, TITLE_XPATH_TEMPLATE.replace("TDNUM", str(i)));
ele2 = driver.find_element(By.XPATH, TDID_XPATH_TEMPLATE.replace("TDNUM", str(i)));
title_elements.append(ele1)
tdid_elements.append(ele2)
for title_element in title_elements:
titles.append(title_element.text)
for tdid_element in tdid_elements:
tdids.append(tdid_element.text)
print("[LOG] 成功加载索引")
# print(titles)
# print(tdids)
TID_TEMPLATE = '//*[@id="app"]/div[2]/main/div/div[2]/div/div[1]/div[2]/div[TNUM]/span[2]'
cnt = 0
plancfg = list()
descriptions = list()
for tdid in tdids:
print("[LOG] 加载编号: " + tdid)
cnt += 1
tids = list()
driver.get("https://www.luogu.com.cn/training/" + tdid)
eleone = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/main/div/div[2]/section[2]/div/div[2]')
descriptions.append(eleone.text)
tab2 = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/main/div/div[1]/div/ul/li[2]/span')
tab2.click()
totalnum_ele = driver.find_element(By.XPATH, '//*[@id="app"]/div[2]/div[1]/div[2]/div[2]/div[1]/div/div[1]/span[2]')
for i in range(1, int(totalnum_ele.text)):
tidone = driver.find_element(By.XPATH, TID_TEMPLATE.replace("TNUM", str(i)))
tid = tidone.text
tids.append("LG" + tid) # 适配 XJYOJ
totalinf = dict()
totalinf = {"_id": cnt, "title": titles[cnt - 1], "requireNids": [], "pids": tids}
plancfg.append(totalinf)
markdown_description = ""
for i,j in zip(descriptions, titles):
markdown_description += "\n"
markdown_description += "## "
markdown_description += j
markdown_description += "\n"
markdown_description += i
jsoncfg = json.dumps(plancfg)
with open('cfg.json', 'w') as file1:
file1.write(jsoncfg)
with open('description.md', 'w') as file2:
file2.write(markdown_description)
with codecs.open('cfg.json', 'r', encoding='unicode_escape') as f:
content = f.read()
with codecs.open('cfg.json', 'w', encoding='utf-8') as f:
f.write(content)
with open('description.md', 'r') as f:
content = f.read()
modified_content = content.replace('\n', ' \n')
with open('description.md', 'w') as f:
f.write(modified_content)
driver.quit()
| david-ajax/LGSpider-HydroOJ | main.py | main.py | py | 3,329 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "selenium.webdriver.firefox.options.Options",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.firefox.firefox_profile.FirefoxProfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_numbe... |
41385226539 | #!/usr/bin/env python
# coding: utf-8
# # Design of a Multi-Zone VAV System (the Shorter Way)
# ---
# In this notebook the example from the previous notebook **Design of a Multi-Zone VAV System (the Long Way)** is repeated, but now the `VAVSystem` class will be used, which automates the design procedure of a multi-zone VAV system. This class resides in the module `hvac.air_conditioning.vav_system.design`. This class can be used for a multi-zone VAV system with cooling and/or heating, having a preheat coil, a cooling coil, and reheat coils at the entrance of the zones. For winter conditions the air is assumed to be totally dry (i.e. only sensible loads are considered).
# In[1]:
from deps import load_packages
load_packages()
# In[2]:
import jupyter_addons as ja
ja.set_css()
# In[3]:
from hvac import Quantity
from hvac.fluids import HumidAir
from hvac.air_conditioning.vav_system.design import Zone, Season, VAVSystem
from hvac.charts import PsychrometricChart, StatePoint
# In[4]:
Q_ = Quantity
# ## Create Zones with Design Data
# The design data of a zone is bundled in a `Zone` data class. First of all, a zone must have a name. The design data concerning the summer peak design day and the design data concerning the winter peak design day are grouped into two separate instances of the `Season` class. The design data are the sensible and latent heat load of the zone and the desired state of the zone air. The `Season` instance with the design data for the summer peak design day is passed through the `summer` parameter of the `Zone` class constructor. The `Season` instance with the design data of the winter peak design day is passed through the `winter` parameter. Should the VAV system only be used for summer cooling, then the `winter` parameter can be simply omitted.
# ### Zone A
# In[5]:
zone_A = Zone(
name='zone A',
summer=Season(
Q_sen=Q_(224_844, 'Btu / hr'),
Q_lat=Q_(56_000, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(50, 'pct'))
),
winter=Season(
Q_sen=Q_(-143_000, 'Btu / hr'),
Q_lat=Q_(0.0, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(0, 'pct'))
)
)
# ### Zone B
# In[6]:
zone_B = Zone(
name='zone B',
summer=Season(
Q_sen=Q_(103_308, 'Btu / hr'),
Q_lat=Q_(20_000, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(50, 'pct'))
),
winter=Season(
Q_sen=Q_(49_092, 'Btu / hr'),
Q_lat=Q_(0.0, 'Btu / hr'),
zone_air=HumidAir(Tdb=Q_(75, 'degF'), RH=Q_(0, 'pct'))
)
)
# ## Create VAV System
# Besides the design data of the zones, the global design data about the outdoor air in summer and winter and the design volume flow rate of outdoor air ventilation must be specified.
# **Outdoor Air Condition on Summer and Winter Design Day**
# In[7]:
outdoor_air_summer = HumidAir(Tdb=Q_(97, 'degF'), Twb=Q_(76, 'degF'))
outdoor_air_winter = HumidAir(Tdb=Q_(7, 'degF'), RH=Q_(0, 'pct'))
# **Design Volume Flow Rate of Outdoor Air Ventilation**
# In[8]:
V_vent = Q_(2400, 'ft ** 3 / min')
# **Instantiate the `VAVSystem` Class with the Design Data**
# In[9]:
vav_system = VAVSystem(
zones=[zone_A, zone_B],
outdoor_air_summer=outdoor_air_summer,
outdoor_air_winter=outdoor_air_winter,
V_vent=V_vent
)
# ## COOLING DESIGN DAY
# After instantiation of the `VAVSystem` class, call the method `design_summer`. This method can take a number of keyword arguments:
# - the maximum allowed temperature difference between the supply air temperature and the zone air temperature in order to enable proper mixing of the supply air with the zone air: `dT_supply`
# - the pressure of the supply air fan: `supply_fan_pressure`
# - the efficiency of the supply air fan: `supply_fan_efficiency`
# - heat gain of the supply duct: `supply_duct_heat_gain`
# - the pressure of the return air fan: `return_fan_pressure`
# - the efficiency of the return air fan: `return_fan_efficiency`
# - heat gain of the return duct: `return_duct_gain`
#
# These arguments are not mandatory and can be omitted if they are not known. The supply fan and return fan can only be specified after the volume flow rate of supply and return air have first been determined. As such, the first time the notebook would be executed without values for `supply_fan_pressure`, `supply_fan_efficiency`,...
# In[10]:
summer_results = vav_system.design_summer(
dT_supply=Q_(20, 'delta_degF'),
supply_fan_pressure=Q_(3, 'inch_H2O_60F'),
supply_fan_efficiency=Q_(60, 'pct')
)
# The method `design_summer` returns a dictionary with the results as shown below. These results are all `Quantity` objects.
#
# ```
# results = {
# 'cooling coil load': self.summer.cooling_coil.Q,
# 'sensible cooling coil load': self.summer.cooling_coil.Q_sen,
# 'latent cooling coil load': self.summer.cooling_coil.Q_lat,
# 'supply air volume flow rate': self.summer.V_supply,
# 'return air volume flow rate': self.summer.V_return,
# 'system supply air temperature': self.summer.supply_air.Tdb,
# 'system return air temperature': self.summer.return_air.Tdb
# }
# return results
# ```
#
# To quickly show these results in a notebook you may use the (static) method `show_results_markdown` of the `VAVSystem` instance. For this you need to pass the returned results from `design_summer` together with a dictionary `units` containing the units in which you want the results to be displayed and the number of decimals behind the decimal point, as is demonstrated below.
# In[11]:
ja.display_list(
vav_system.show_results_markdown(
summer_results,
units={
'Q': ('Btu / hr', 0),
'V': ('ft ** 3 / min', 0),
'K': ('degF', 1)
}
)
)
# ### Psychrometric Chart
# The data attributes of the `summer` (and `winter`) attribute of the `VAVSystem` class are all accesible. The code below shows the `__init__` method of the `Summer` subclass of the `VAVSystem` class with all its data attributes. The names of the data attributes should speak for themselves.
#
# ```
# def __init__(self, outdoor_air: HumidAir, V_vent: Quantity, system: 'VAVSystem'):
# self.outdoor_air = outdoor_air
# self.m_vent = V_vent * outdoor_air.rho
# self.system = system # reference to the instance of the `VAVSystem` parent class
# self.T_supply: Quantity = Q_(float('nan'), 'degC')
# self.supply_air: Optional[HumidAir] = None
# self.m_supply: Quantity = Q_(float('nan'), 'kg /s')
# self.V_supply: Quantity = Q_(float('nan'), 'kg /s')
# self.T_cold: Quantity = Q_(float('nan'), 'degC')
# self.cooled_air: Optional[HumidAir] = None
# self.m_return: Quantity = Q_(float('nan'), 'kg /s')
# self.V_return: Quantity = Q_(float('nan'), 'kg /s')
# self.return_air: Optional[HumidAir] = None
# self.recirculated_air: Optional[HumidAir] = None
# self.mixed_air: Optional[HumidAir] = None
# self.cooling_coil: Optional[AirConditioningProcess] = None
# self.m_supply_part_load: Quantity = Q_(float('nan'), 'kg /s')
# self.V_supply_part_load: Quantity = Q_(float('nan'), 'kg /s')
# ```
# Taking the appropriate data attributes, it is possible to draw the pyschrometric chart and plot the air conditioning processes in the VAV system.
# In[12]:
chart = PsychrometricChart(fig_size=(8, 6))
chart.plot_process(
'mixing_chamber',
StatePoint(vav_system.summer.outdoor_air.Tdb, vav_system.summer.outdoor_air.W),
StatePoint(vav_system.summer.return_air.Tdb, vav_system.summer.return_air.W),
StatePoint(vav_system.summer.mixed_air.Tdb, vav_system.summer.mixed_air.W)
)
chart.plot_process(
'cooling coil',
StatePoint(vav_system.summer.mixed_air.Tdb, vav_system.summer.mixed_air.W),
StatePoint(vav_system.summer.cooled_air.Tdb, vav_system.summer.cooled_air.W)
)
# chart.plot_process(
# 'supply fan',
# StatePoint(vav_system.summer.cooled_air.Tdb, vav_system.summer.cooled_air.W),
# StatePoint(vav_system.summer.supply_air.Tdb, vav_system.summer.supply_air.W)
# )
chart.plot_process(
'zones',
StatePoint(vav_system.summer.supply_air.Tdb, vav_system.summer.supply_air.W),
StatePoint(vav_system.summer.return_air.Tdb, vav_system.summer.return_air.W)
)
chart.show()
# ### Zone Info
# The zones, instances of the `Zone` class, are kept in a list inside the `VAVSystem` class. A `Zone` object has two members `summer` and `winter` that refer to an instance of the `Season` dataclass that contains the design data for the zone. From the implementation of the `Season` dataclass, it can be seen which data attributes are available. Again the names of the data attributes should speak for themselves.
#
# ```
# @dataclass
# class Season:
# Q_sen: Quantity
# Q_lat: Quantity
# zone_air: HumidAir
# m_exhaust: Quantity = Q_(0.0, 'kg / s')
# m_supply: Optional[Quantity] = field(init=False, default=Q_(float('nan'), 'kg / s'))
# supply_air: Optional[HumidAir] = field(init=False, default=None)
# return_air: Optional[HumidAir] = field(init=False, default=None)
#
# @property
# def m_return(self) -> Quantity:
# return self.m_supply - self.m_exhaust
#
# @property
# def V_supply(self) -> Quantity:
# return self.m_supply * self.supply_air.v
#
#
# @dataclass
# class Zone:
# name: str
# summer: Optional[Season] = None
# winter: Optional[Season] = None
# reheat_coil: Optional[AirConditioningProcess] = field(init=False, default=None)
# ```
#
#
# > **Notes**<br>
# >- Attribute `m_exhaust` may refer to local exhaust of air in a zone.<br>
# >- To get at the resulting air state (in particular air humidity) of a zone, the `return_air` attribute should be used, as the `zone_air` attribute is used to specify the desired zone air state when instantiating the zone.
# In[13]:
ja.display_list([
f"return air at {zone.name}: <b>{zone.summer.return_air.Tdb.to('degF'):~P.1f} TDB, "
f"{zone.summer.return_air.RH.to('pct'):~P.0f} RH</b>, "
f"supply air volume flow rate: <b>{zone.summer.V_supply.to('ft ** 3 / min'):~P.0f}</b>"
for zone in vav_system.zones
])
# ## HEATING DESIGN DAY
# In[14]:
winter_results = vav_system.design_winter(
T_supply_max=Q_(105, 'degF'),
supply_fan_pressure=Q_(3.0, 'inch_H2O_60F'),
supply_fan_efficiency=Q_(60.0, 'pct')
)
# In[15]:
ja.display_list(
vav_system.show_results_markdown(
winter_results,
units={
'Q': ('Btu / hr', 0),
'V': ('ft ** 3 / min', 0),
'K': ('degF', 1)
}
)
)
# In[16]:
ja.display_list([
f"{zone.name}: supply air temperature = <b>{zone.winter.supply_air.Tdb.to('degF'):~P.1f}</b>, "
f"reheat load = <b>{zone.reheat_coil.Q_sen.to('Btu / hr'):~P.0f}</b>, "
f"supply air volume flow rate = <b>{zone.winter.V_supply.to('ft ** 3 / min'):~P.0f}</b>"
for zone in vav_system.zones
])
# In[ ]:
| TomLXXVI/Air-Conditioning | _build/jupyter_execute/vav_multizone_design_p2.py | vav_multizone_design_p2.py | py | 11,004 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "deps.load_packages",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jupyter_addons.set_css",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "hvac.Quantity",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "hvac.air_cond... |
40014308279 | from collections import deque
class Cell:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Node:
def __init__(self, pt: Cell, dist: int):
self.pt = pt
self.dist = dist
def is_valid(r, c, tr, tc):
return (r >= 0) and (r < tr) and (c >= 0) and (c < tc)
def shortest_path(maze, src, dest, r, c):
if maze[src.x][src.y] != 0 or maze[dest.x][dest.y] != 0:
return -1
visited = [[False for i in range(c)] for j in range(r)]
visited[src.x][src.y] = True
q = deque()
s = Node(src, 0)
q.append(s)
while q:
current = q.popleft()
pt = current.pt
if pt.x == dest.x and pt.y == dest.y:
return current.dist
for i in [[1, 0], [-1, 0], [0, -1], [0, 1]]:
row, col = pt.x + i[0], pt.y + i[1]
if is_valid(row, col, r, c) and maze[row][col] == 0 and not visited[row][col]:
visited[row][col] = True
neighbor = Node(Cell(row, col), current.dist + 1)
q.append(neighbor)
return -1
def main():
maze = [[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]
source = Cell(0, 0)
dest = Cell(3, 3)
dist = shortest_path(maze, source, dest, len(maze), len(maze[0]))
if dist != -1:
print("Shortest Path:", dist)
else:
print("No path exists")
main()
| asmitak11/sample-project | main.py | main.py | py | 1,391 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 26,
"usage_type": "call"
}
] |
4086714077 | import random
import typing as t
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from langchain.embeddings import HuggingFaceInstructEmbeddings
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from bunkatopics.datamodel import BourdieuDimension, ContinuumDimension, Document, Term
from bunkatopics.functions.topic_document import get_top_documents
from bunkatopics.functions.topic_gen_representation import get_clean_topic_all
from bunkatopics.functions.topics_modeling import get_topics
from bunkatopics.visualisation.explainer import plot_specific_terms
from bunkatopics.visualisation.visu_utils import wrap_by_word
pd.options.mode.chained_assignment = None
def get_continuum(
embedding_model: HuggingFaceInstructEmbeddings,
docs: t.List[Document],
cont_name: str = "emotion",
left_words: list = ["hate", "pain"],
right_words: list = ["love", "good"],
scale: bool = False,
) -> t.List[Document]:
df_docs = pd.DataFrame.from_records([doc.dict() for doc in docs])
df_emb = df_docs[["doc_id", "embedding"]]
df_emb = df_emb.set_index("doc_id")
df_emb = pd.DataFrame(list(df_emb["embedding"]))
df_emb.index = df_docs["doc_id"]
continuum = ContinuumDimension(
id=cont_name, left_words=left_words, right_words=right_words
)
# Compute the extremity embeddings
left_embedding = embedding_model.embed_documents(continuum.left_words)
right_embedding = embedding_model.embed_documents(continuum.right_words)
left_embedding = pd.DataFrame(left_embedding).mean().values.reshape(1, -1)
right_embedding = pd.DataFrame(right_embedding).mean().values.reshape(1, -1)
# Make the difference to get the continnum
continuum_embedding = left_embedding - right_embedding
df_continuum = pd.DataFrame(continuum_embedding)
df_continuum.index = ["distance"]
# Compute the Cosine Similarity
full_emb = pd.concat([df_emb, df_continuum])
df_bert = pd.DataFrame(cosine_similarity(full_emb))
df_bert.index = full_emb.index
df_bert.columns = full_emb.index
df_bert = df_bert.iloc[-1:,].T
df_bert = df_bert.sort_values("distance", ascending=False).reset_index()
df_bert = df_bert[1:]
df_bert = df_bert.rename(columns={"index": "doc_id"})
final_df = pd.merge(df_bert, df_docs[["doc_id", "content"]], on="doc_id")
if scale:
scaler = MinMaxScaler(feature_range=(-1, 1))
final_df[["distance"]] = scaler.fit_transform(final_df[["distance"]])
final_df = final_df.set_index("doc_id")
final_df = final_df[["distance"]]
distance_dict = final_df.to_dict("index")
new_docs = docs.copy()
for doc in new_docs:
res = BourdieuDimension(
continuum=continuum, distance=distance_dict.get(doc.doc_id)["distance"]
)
doc.bourdieu_dimensions.append(res)
return new_docs
def plot_unique_dimension(
docs: t.List[Document],
id: str = id,
left: list = ["aggressivity"],
right: list = ["peacefullness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
left = " ".join(left)
right = " ".join(right)
distances = [
x.distance
for doc in docs
for x in doc.bourdieu_dimensions
if x.continuum.id == id
]
doc_id = [x.doc_id for x in docs]
content = [x.content for x in docs]
df_distances = pd.DataFrame(
{"doc_id": doc_id, "distances": distances, "content": content}
)
name = "<" + right + "-" + left + ">"
df_fig = df_distances.rename(columns={"distances": name})
df_fig["content"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
fig = px.box(
df_fig,
y=name,
points="all",
hover_data=["content"],
height=height,
width=width,
template="plotly_white",
)
fig.add_shape(
dict(
type="line",
x0=df_fig[name].min(), # Set the minimum x-coordinate of the line
x1=df_fig[name].max(), # Set the maximum x-coordinate of the line
y0=0,
y1=0,
line=dict(color="red", width=4),
)
)
if explainer:
plot_specific_terms(
docs=docs,
left_words=left,
right_words=right,
id=id,
ngrams=explainer_ngrams,
quantile=0.80,
top_n=20,
)
return fig
def visualize_bourdieu_one_dimension(
docs: t.List[Document],
embedding_model,
left: str = ["aggressivity"],
right: str = ["peacefullness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
id = str(random.randint(0, 10000))
new_docs = get_continuum(
embedding_model=embedding_model,
docs=docs,
cont_name=id,
left_words=left,
right_words=right,
scale=False,
)
fig = plot_unique_dimension(
new_docs,
id=id,
left=left,
right=right,
height=height,
width=width,
explainer=explainer,
explainer_ngrams=explainer_ngrams,
)
return fig
def visualize_bourdieu(
embedding_model,
generative_model,
docs: t.List[Document],
terms: t.List[Term],
x_left_words: t.List[str] = ["war"],
x_right_words: t.List[str] = ["peace"],
y_top_words: t.List[str] = ["men"],
y_bottom_words: t.List[str] = ["women"],
height: int = 1500,
width: int = 1500,
clustering: bool = True,
topic_gen_name: bool = False,
topic_n_clusters: int = 5,
topic_terms: int = 2,
topic_ngrams: list = [1, 2],
display_percent: bool = True,
use_doc_gen_topic: bool = False,
gen_topic_language: str = "english",
label_size_ratio_label: int = 50,
topic_top_terms_overall: int = 500,
manual_axis_name: dict = None,
radius_size: float = 0.3,
convex_hull: bool = True,
):
# Reset
for doc in docs:
doc.bourdieu_dimensions = []
# Compute Continuums
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont1",
left_words=x_left_words,
right_words=x_right_words,
)
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont2",
left_words=y_top_words,
right_words=y_bottom_words,
)
df_names = [
{
"names": [y.continuum.id for y in x.bourdieu_dimensions],
"left_words": [y.continuum.left_words for y in x.bourdieu_dimensions],
"right_words": [y.continuum.right_words for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_names = pd.DataFrame(df_names)
df_names = df_names.explode(["names", "left_words", "right_words"])
df_names["left_words"] = df_names["left_words"].apply(lambda x: "-".join(x))
df_names["right_words"] = df_names["right_words"].apply(lambda x: "-".join(x))
df_names = df_names.drop_duplicates()
df_names = df_names.set_index("names")
dict_bourdieu = df_names.to_dict(orient="index")
df_bourdieu = [
{
"doc_id": x.doc_id,
"coordinates": [y.distance for y in x.bourdieu_dimensions],
"names": [y.continuum.id for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_bourdieu = pd.DataFrame(df_bourdieu)
df_bourdieu = df_bourdieu.explode(["coordinates", "names"])
# Filter with only the top and bottom data to avoid getting results too far form the continnuums
df_content = [{"doc_id": x.doc_id, "content": x.content} for x in new_docs]
df_content = pd.DataFrame(df_content)
df_fig = df_bourdieu[["doc_id", "coordinates", "names"]]
df_fig = df_fig.pivot(index="doc_id", columns="names", values="coordinates")
df_fig = df_fig.reset_index()
# Remove the data inside the radius of 1/3 of max because central data does not mean mucj
df_fig["cont1"] = df_fig["cont1"].astype(float)
df_fig["cont2"] = df_fig["cont2"].astype(float)
import numpy as np
x_values = df_fig["cont1"].values
y_values = df_fig["cont2"].values
distances = np.sqrt(x_values**2 + y_values**2)
circle_radius = max(df_fig.cont1) * radius_size
df_fig["distances"] = distances
df_fig["outside"] = "0"
df_fig["outside"][df_fig["distances"] >= circle_radius] = "1"
outside_ids = list(df_fig["doc_id"][df_fig["outside"] == "1"])
df_fig = df_fig[df_fig["doc_id"].isin(outside_ids)]
df_fig = pd.merge(df_content, df_fig, on="doc_id")
df_fig["Text"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
x_axis_name = list(dict_bourdieu.keys())[0]
y_axis_name = list(dict_bourdieu.keys())[1]
x_left_words = dict_bourdieu[x_axis_name]["left_words"]
x_right_words = dict_bourdieu[x_axis_name]["right_words"]
y_top_words = dict_bourdieu[y_axis_name]["left_words"]
y_bottom_words = dict_bourdieu[y_axis_name]["right_words"]
fig = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
# Set the axis to the max value to get a square
max_val = max(
abs(min(df_fig[y_axis_name])),
abs(max(df_fig[y_axis_name])),
abs(max(df_fig[x_axis_name])),
abs(min(df_fig[x_axis_name])),
)
# Add axis lines for x=0 and y=0
fig.add_shape(
type="line",
x0=0,
x1=0,
# y0=-max_val,
# y1=max_val,
y0=min(df_fig[y_axis_name]),
y1=max(df_fig[y_axis_name]),
line=dict(color="white", width=3), # Customize line color and width
)
fig.add_shape(
type="line",
x0=min(df_fig[x_axis_name]),
x1=max(df_fig[x_axis_name]),
# x0=-max_val,
# x1=max_val,
y0=0,
y1=0,
line=dict(color="white", width=3), # Customize line color and width
)
fig.update_layout(
font_size=25,
width=width,
height=height,
margin=dict(
t=width / 50,
b=width / 50,
r=width / 50,
l=width / 50,
),
# title=dict(font=dict(size=width / 40)),
)
fig.update_layout(showlegend=False)
"""
histogram2d_contour = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
fig.add_trace(histogram2d_contour.data[0])
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
"""
"""
fig.update_xaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
"""
if manual_axis_name is None:
y_top_name = y_top_words
y_bottom_name = y_bottom_words
x_left_name = x_left_words
x_right_name = x_right_words
else:
y_top_name = manual_axis_name["y_top_name"]
y_bottom_name = manual_axis_name["y_bottom_name"]
x_left_name = manual_axis_name["x_left_name"]
x_right_name = manual_axis_name["x_right_name"]
fig.update_layout(
annotations=[
dict(
x=0,
# y=max_val,
y=max(df_fig[y_axis_name]),
xref="x",
yref="y",
text=y_top_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=0,
y=min(df_fig[y_axis_name]),
# y=-max_val,
xref="x",
yref="y",
text=y_bottom_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=max(df_fig[x_axis_name]),
# x=max_val,
y=0,
xref="x",
yref="y",
text=x_left_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=min(df_fig[x_axis_name]),
# x=-max_val,
y=0,
xref="x",
yref="y",
text=x_right_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
]
)
if clustering:
df_bourdieu_pivot = df_bourdieu.pivot(
index="doc_id", columns="names", values="coordinates"
)
df_bourdieu_pivot = df_bourdieu_pivot.reset_index()
df_bourdieu_pivot.columns = ["doc_id", "x", "y"]
df_bourdieu_pivot = df_bourdieu_pivot.set_index("doc_id")
dict_doc = df_bourdieu_pivot[["x", "y"]].to_dict("index")
for doc in new_docs:
doc.x = dict_doc.get(doc.doc_id)["x"]
doc.y = dict_doc.get(doc.doc_id)["y"]
new_docs = [doc for doc in new_docs if doc.doc_id in outside_ids]
bourdieu_topics = get_topics(
docs=new_docs,
terms=terms,
n_clusters=topic_n_clusters,
ngrams=topic_ngrams,
name_lenght=topic_terms,
top_terms_overall=topic_top_terms_overall,
)
if topic_gen_name:
# Get top documents for the generative AI query
new_docs = get_top_documents(new_docs, bourdieu_topics, ranking_terms=20)
bourdieu_topics = get_clean_topic_all(
generative_model,
language=gen_topic_language,
topics=bourdieu_topics,
docs=new_docs,
use_doc=use_doc_gen_topic,
)
label_size_ratio_clusters = 100
topics_x = [x.x_centroid for x in bourdieu_topics]
topics_y = [x.y_centroid for x in bourdieu_topics]
topic_names = [x.name for x in bourdieu_topics]
topics_name_plotly = [wrap_by_word(x, 7) for x in topic_names]
# Display Topics
for x, y, label in zip(topics_x, topics_y, topics_name_plotly):
fig.add_annotation(
x=x,
y=y,
text=label,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_clusters,
color="red",
),
bordercolor="#c7c7c7",
borderwidth=width / 1000,
borderpad=width / 500,
bgcolor="white",
opacity=1,
)
if convex_hull:
try:
for topic in bourdieu_topics:
# Create a Scatter plot with the convex hull coordinates
trace = go.Scatter(
x=topic.convex_hull.x_coordinates,
y=topic.convex_hull.y_coordinates, # Assuming y=0 for simplicity
mode="lines",
name="Convex Hull",
line=dict(color="grey"),
showlegend=False,
)
fig.add_trace(trace)
except:
pass
if display_percent:
# Calculate the percentage for every box
df_fig_percent = df_fig[df_fig["doc_id"].isin(outside_ids)]
label_size_ratio_percent = 20
opacity = 0.4
case1_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] < 0)
]
)
total_count = len(df_fig_percent)
case1_percentage = str(round((case1_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case1_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case2_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] > 0)
]
)
case2_percentage = str(round((case2_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case2_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case3_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] < 0)
]
)
case3_percentage = str(round((case3_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case3_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case4_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] > 0)
]
)
case4_percentage = str(round((case4_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case4_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
# Update the x-axis and y-axis labels
fig.update_xaxes(
title_text="",
scaleanchor="y",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
title_text="",
scaleanchor="x",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
return fig, df_bourdieu
| charlesdedampierre/BunkaTopics | bunkatopics/visualisation/bourdieu.py | bourdieu.py | py | 20,127 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "pandas.options",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "langchain.embeddings.HuggingFaceInstructEmbeddings",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "attribute"
},
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.