seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
36049807319 | import traceback
from asyncio import sleep
from logging import getLogger
from typing import Dict, List
from aiohttp import ClientSession
from common.enums import EmailResult
from email_client.integrations.web.abstract import AbstractWebClient
logger = getLogger(__name__)
class WebClient(AbstractWebClient):
    """HTTP client that reports e-mail job results to the web service.

    POSTs the statuses payload to ``web_url`` and retries with an
    exponential back-off when the request fails or returns a non-200
    status.
    """

    def __init__(
        self,
        web_url: str,
        session: ClientSession,
        retry_count: int = 3,
        retry_backoff: int = 3,
    ):
        # Endpoint that receives the job-status report.
        self._web_url = web_url
        # Shared aiohttp session; owned by the caller, never closed here.
        self._session = session
        # Number of retries after the initial attempt.
        self._retry_count = retry_count
        # Base of the exponential back-off in seconds: backoff ** attempt.
        self._retry_backoff = retry_backoff

    async def report_job_status(self, statuses: Dict[EmailResult, List[Dict]]):
        """POST ``statuses`` to the web service, retrying on failure.

        Mapping keys are serialised via ``EmailResult.value``.  Gives up
        with a critical log entry once all retries are exhausted.
        """
        payload = {status.value: value for status, value in statuses.items()}
        for i in range(self._retry_count + 1):
            if i > 0:
                # Exponential back-off before every retry (not the first try).
                await sleep(self._retry_backoff ** i)
            try:
                response = await self._session.post(self._web_url, json=payload)
                if response.status == 200:
                    return
                # NOTE(review): non-200 responses are retried silently, and
                # aiohttp/asyncio timeout errors may not subclass the builtin
                # TimeoutError on Python < 3.11, so they would fall into the
                # generic handler below -- confirm the intended log levels.
            except (TimeoutError, ConnectionError):
                logger.warning(
                    f"Error with connection when trying to send job updates: "
                    f"{traceback.format_exc()}"
                )
            except Exception:
                logger.error(
                    f"Unexpected error when trying to send job updates: "
                    f"{traceback.format_exc()}"
                )
        logger.critical(f"Could not contact web to report job updates: {statuses}")
| MFrackowiak/sc_r_mailmarketing | email_client/integrations/web/client.py | client.py | py | 1,584 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "email_client.integrations.web.abstract.AbstractWebClient",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 19,
"usage_type": "name... |
15227915962 | '''
Tag Frequency Analysis
Author: Audrey Yang
Date: October 23, 2022
'''
import nltk
from nltk.corpus import brown

# Brown "news" words tagged with the coarse universal tagset and with the
# full Brown tagset, respectively.
tagged_words_uni = brown.tagged_words(categories="news", tagset="universal")
tagged_words_brown = brown.tagged_words(categories="news")

# Frequency of every Brown-tagset tag.
ttable = nltk.FreqDist()
for (word, tag) in tagged_words_brown:
    ttable[tag] += 1

# Print the 12 most frequent Brown tags in descending order.
# FreqDist.most_common handles ties correctly; the previous count->tag
# dict silently dropped any tag that shared a frequency with another.
print("Top 12 Tags in the Brown Tagset:")
for rank, (tag, count) in enumerate(ttable.most_common(12), start=1):
    print(rank, tag, count)

print("\nMost Frequent Words for each Tag in Universal Tagset (Top Ten)")


def _print_top_words(target_tag, label, leading_newline=True):
    """Print the ten most frequent word/tag pairs for one universal tag."""
    table = nltk.FreqDist()
    for (word, word_tag) in tagged_words_uni:
        if word_tag == target_tag:
            table[word + "/" + word_tag] += 1
    prefix = "\n" if leading_newline else ""
    print(prefix + "Most Frequent " + label + ":")
    for (word_tag, count) in table.most_common()[:10]:
        print(count, word_tag)


# One section per universal tag, in the original output order.
_print_top_words('NOUN', 'NOUNS', leading_newline=False)
_print_top_words('VERB', 'VERBS')
_print_top_words('ADP', 'ADPOSITIONS')
_print_top_words('.', 'PUNCTUATIONS')
_print_top_words('DET', 'DETERMINERS')
_print_top_words('ADJ', 'ADJECTIVES')
_print_top_words('ADV', 'ADVERBS')
_print_top_words('CONJ', 'CONJUNCTIONS')
_print_top_words('PRON', 'PRONOUNS')
_print_top_words('PRT', 'PARTICLES')
_print_top_words('NUM', 'NUMBERS')

# "Other" words (foreign words, typos, abbreviations) are counted here;
# the trailing loop below prints other_table's top ten.
other_table = nltk.FreqDist()
for (word, tag) in tagged_words_uni:
    if tag == 'X':
        other_table[word + "/" + tag] += 1
print("\nMost Frequent OTHER WORDS:")
for (word_tag, count) in other_table.most_common()[:10]:
print(count, word_tag) | vagorsol/computational-linguistics | Assignment 4/tag_analysis.py | tag_analysis.py | py | 4,188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.brown.tagged_words",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.brown",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.brown.tagged_words",
"line_number": 11,
"usage_type": "call"
},
{
"a... |
34604241513 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from math import sqrt
import cv2
import imutils
import random
from nltk import flatten
def get_neighborhood(data, features, y1, y2, image, dictionary):
    """Filter, layer by layer, the data points lying on the scanned object.

    For every unique ``pos_z`` layer, keeps only the points whose (x, y)
    falls in one of two rectangles (left half of x up to ``y1``, right
    half up to ``y2``), appends the surviving feature values to
    ``dictionary`` and shows a scatter of the layer over ``image[i]``.

    NOTE(review): this function communicates through ``globals()`` --
    each column in ``features`` becomes a module-level numpy array, and
    per-layer slices get a ``_layer`` suffix.  ``features`` must contain
    at least ``pos_x``, ``pos_y``, ``pos_z`` and ``Label`` for the code
    below to work -- confirm with callers.
    """
    feature_in_layer = []
    for name in features:
        # Materialise every dataframe column as a global numpy array.
        globals()[name] = np.array(data[name])
        feature_in_layer.append(name + '_layer')
    value_z = np.unique(pos_z)
    len_layer = len(value_z)
    min_x, max_x, avg_x = np.min(pos_x), np.max(pos_x), np.average(pos_x)
    min_y = np.min(pos_y)
    for i in range(len_layer):
        print('Layer:', i)
        # All feature extracted from i_th layer
        # ...i...
        for name in feature_in_layer:
            # Slice each feature ("name" minus the "_layer" suffix) down
            # to the points belonging to the current z layer.
            globals()[name] = globals()[name[:-6]][pos_z == value_z[i]]
        indecies_points_on_object = []
        for idx, (x_val, y_val) in enumerate(zip(pos_x_layer, pos_y_layer)):
            # Left half of the part is bounded by y1, right half by y2.
            if min_x < x_val < avg_x and min_y < y_val < y1:
                indecies_points_on_object.append(idx)
            elif avg_x < x_val < max_x and min_y < y_val < y2:
                indecies_points_on_object.append(idx)
        # print(len(indecies_points_on_object))
        for name in feature_in_layer:
            globals()[name] = globals()[name][indecies_points_on_object]
            dictionary[name[:-6]].append(globals()[name].tolist())
        # Points with Label == 1 (pores) are drawn in red over the layer.
        pore_x = pos_x_layer[Label_layer == 1]
        pore_y = pos_y_layer[Label_layer == 1]
        plt.imshow(image[i], cmap='gray')
        plt.scatter(pos_x_layer, pos_y_layer, s=4, c='b')
        plt.scatter(pore_x, pore_y, s=4, c='r')
        plt.show()
    return dictionary
def preprocess(flag):
    """Load one sample's pyrometer data and camera images, then keep only
    the data points lying on the object.

    Args:
        flag: sample identifier, one of 'Sample 4', 'Sample 5',
            'Sample 6' or 'Sample 8'.

    Returns:
        dict mapping each CSV column name to a flat list of the values
        that survived the on-object filtering.

    Raises:
        ValueError: if ``flag`` is not a known sample name.
    """
    # Per-sample configuration: image folder, CSV path, rotation angle
    # applied to the images, and the two y cut-offs used by the filter.
    configs = {
        'Sample 4': ('New images/Sample 4', 'New Datasets - Pixel/Pyrometer Data 4.csv', 10, 750, 1250),
        'Sample 5': ('New images/Sample 5', 'New Datasets - Pixel/Pyrometer Data 5.csv', 100, 700, 1150),
        'Sample 6': ('New images/Sample 6', 'New Datasets - Pixel/Pyrometer Data 6.csv', -85, 700, 1000),
        'Sample 8': ('New images/Sample 8', 'New Datasets - Pixel/Pyrometer Data 8.csv', 105, 650, 1100),
    }
    if flag not in configs:
        # Previously an unknown flag crashed later with a NameError on
        # 'data'; fail fast with a clear message instead.
        raise ValueError(f"Unknown sample flag: {flag!r}")
    path_img, csv_path, angle, y1, y2 = configs[flag]
    data = pd.read_csv(csv_path)

    # Load every image of the sample, rotated to a common orientation.
    images = []
    for filename in os.listdir(path_img):
        img = cv2.imread(os.path.join(path_img, filename), cv2.IMREAD_GRAYSCALE)
        rot = imutils.rotate_bound(img, angle=angle)
        images.append(rot)

    head_name = list(data.head())
    # One output list per CSV column.
    dictionary = {h: [] for h in head_name}
    dic = get_neighborhood(data, head_name, y1, y2, images, dictionary)
    # get_neighborhood appends one list per layer; flatten to one list.
    for h in head_name:
        dic[h] = list(flatten(dic[h]))
    return dic
# Process a single sample and persist the filtered data points as CSV.
flag = 'Sample 4'
dic = preprocess(flag)
df = pd.DataFrame.from_dict(dic)
print(df)
df.to_csv (r'DataPoints On Object/DataPoints Sample 4.csv', index = False, header=True)
| elahesalari/Porosity_Prediction_DED_Additive_Manufacturing | Data_Processing/Get points on pbject.py | Get points on pbject.py | py | 3,264 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 20,... |
36364117589 | import os
import re
import openpyxl.drawing.image
from openpyxl import *
from openpyxl.drawing.image import Image
import OpenEXR
class ExcelCreater:
    """Collect metadata from EXR plate-scan sequences into a spreadsheet.

    Walks an input directory of EXR sequences, reads the header of the
    first and last frame of every sequence with OpenEXR, and writes one
    worksheet row per sequence via openpyxl.
    """

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.title = 'Shot'
        self._input_path = None
        self._output_path = None
        # {directory: frame file names sorted by frame number}
        self.files_dict = {}
        # Full path of the first / last frame of every sequence.
        self.first_file_list = []
        self.last_file_list = []
        self.start_meta = None
        self.last_meta = None
        # One metadata dict per sequence, in first_file_list order.
        self.exr_meta_list = []
        # Kept for the (currently disabled) thumbnail embedding feature.
        self.thumbnail_path = r"/home/west/HJ_root/ihj/production/temp/20221018_plate_scan_thumbnail"
        self.img_file_list = []

    @property
    def input_path(self):
        return self._input_path

    @input_path.setter
    def input_path(self, value):
        if value is None or value == "":
            raise ValueError("Input path is missing.")
        self._input_path = value

    @property
    def output_path(self):
        return self._output_path

    @output_path.setter
    def output_path(self, value):
        self._output_path = value

    def get_all_files(self):
        """Collect every file under input_path, grouped by directory and
        sorted numerically on the trailing frame number."""
        self.files_dict = {}
        for root, _dirs, files in os.walk(self.input_path):
            if files:
                # Sort on the last digit group in the name (the frame number).
                files.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
                self.files_dict[root] = files
        if len(self.files_dict.values()) == 0:
            raise Exception("No files found in the directory.")
        return self.files_dict

    def get_first_and_last_file(self):
        """Record the first and last frame path of every sequence."""
        self.first_file_list = []
        self.last_file_list = []
        for root, files in self.files_dict.items():
            if len(files) > 0:
                self.first_file_list.append(root + "/" + files[0])
                self.last_file_list.append(root + "/" + files[-1])
        return self.first_file_list, self.last_file_list

    def get_meta(self):
        """Read EXR headers and build exr_meta_list, one entry per sequence."""
        for first_path, last_path in zip(self.first_file_list, self.last_file_list):
            self.start_meta = OpenEXR.InputFile(first_path).header()
            # BUG FIX: the "last" header was previously read from the
            # *first* frame again, so timecode_out always equalled
            # timecode_in.  Read it from the actual last frame.
            self.last_meta = OpenEXR.InputFile(last_path).header()
            # path / base name / frame digits / extension
            file_data = re.match(r"(.*/)([^/]+)\.(\d+)\.(\w+)$", first_path)
            # Resolution: dataWindow bounds are inclusive, hence +1 on each.
            res = re.findall(r'\d+\d+', str(self.start_meta.get("dataWindow")))
            resolutions = list(map(lambda x: str(int(x) + 1), res))
            # NOTE(review): the start/end frame numbers and the rate are all
            # parsed from the framesPerSecond header string; confirm this
            # matches the camera vendor's header layout.
            frames = re.findall(r'\d+\.\d+|\d+', str(self.start_meta.get("framesPerSecond")))
            self.exr_meta_list.append(
                {
                    "scan_path": file_data.group(1),
                    "scan_name": file_data.group(2),
                    "clip_name": self.start_meta.get("interim.clip.cameraClipName"),
                    "pad": '%0' + str(len(file_data.group(3))) + 'd',
                    "ext": file_data.group(4),
                    "resolutions": ' x '.join(resolutions),
                    "start_frame": int(frames[1]),
                    "and_frame": int(frames[0]),
                    "duration": int(frames[0]) - int(frames[1]) + 1,
                    "timecode_in": self.start_meta.get("arriraw/timeCode"),
                    "timecode_out": self.last_meta.get("arriraw/timeCode"),
                    "framerate": float(frames[2]),
                    "date": self.start_meta.get("capDate"),
                }
            )

    def execl_form(self):
        """Write the header row.  (Method name kept for compatibility.)"""
        header_list = [
            'check', 'thumbnail', 'roll', 'seq_name', 'shot_name', 'version', 'type',
            # BUG FIX: header label typo 'resoultion' -> 'resolution'.
            'scan_path', 'scan_name', 'clip_name', 'pad', 'ext', 'resolution',
            'start_frame', 'end_frame', 'duration', 'retime_duration', 'retime_percent', 'retime_start_frame',
            'timecode_in', 'timecode_out', 'just_in', 'just_out', 'framerate', 'date', 'clip_tag'
        ]
        for i, title in enumerate(header_list):
            self.ws.cell(row=1, column=i + 1, value=title)

    def excel_create(self):
        """Fill one worksheet row per sequence and save the workbook."""
        self.execl_form()
        self.get_meta()
        for row, meta in enumerate(self.exr_meta_list, start=2):
            self.ws.cell(row=row, column=8, value=meta.get("scan_path"))
            self.ws.cell(row=row, column=9, value=meta.get("scan_name"))
            self.ws.cell(row=row, column=10, value=meta.get("clip_name"))
            self.ws.cell(row=row, column=11, value=meta.get("pad"))
            self.ws.cell(row=row, column=12, value=meta.get("ext"))
            self.ws.cell(row=row, column=13, value=meta.get("resolutions"))
            self.ws.cell(row=row, column=14, value=meta.get("start_frame"))
            self.ws.cell(row=row, column=15, value=meta.get("and_frame"))
            self.ws.cell(row=row, column=16, value=meta.get("duration"))
            self.ws.cell(row=row, column=20, value=meta.get("timecode_in"))
            self.ws.cell(row=row, column=21, value=meta.get("timecode_out"))
            self.ws.cell(row=row, column=24, value=meta.get("framerate"))
            self.ws.cell(row=row, column=25, value=meta.get("date"))
        self.excel_save()

    def excel_save(self):
        """Save the workbook under output_path without overwriting
        previous exports (a numeric suffix is appended if needed)."""
        file_name = os.path.basename(self.input_path)
        # BUG FIX: openpyxl always writes xlsx data, so the previous
        # '.csv' extension produced files that were not valid CSV.
        new_file_name = file_name + '.xlsx'
        save_path = os.path.join(self.output_path, new_file_name)
        count = 1
        while os.path.exists(save_path):
            new_file_name = f"{file_name}_{count}.xlsx"
            save_path = os.path.join(self.output_path, new_file_name)
            count += 1
        self.wb.save(save_path)
def main():
    """Example driver: scan one plate directory and export its metadata."""
    ec = ExcelCreater()
    # setter test info
    ec.input_path = r"/home/west/HJ_root/ihj/production/scan/20221018_plate_scan"
    ec.output_path = r"/home/west/HJ_root/ihj/production/excel"
    ec.get_all_files()
    ec.get_first_and_last_file()
    # NOTE(review): excel_create() returns None, so this prints "mackNone".
    print(f"mack{ec.excel_create()}")


if __name__ == '__main__':
    main()
| shotgrid-starter/crazy_fork | dev_hj/python/execl_created/excel_creater_mix.py | excel_creater_mix.py | py | 8,542 | python | en | code | null | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "OpenEXR.InputFile",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "OpenEXR.InputFile",
"line_nu... |
5257479378 | from bs4 import BeautifulSoup
from lxml import etree
from collections import defaultdict
skipping_th = ["<th>Strecke</th>", "<th>Zeit</th>", "<th>Punkte</th>", "<th>Details</th>", "<th>Stadt</th>", "<th>Monat</th>"]
def chunks(lst, n):
    """Split *lst* into consecutive pieces of at most *n* items (n >= 1)."""
    size = max(1, n)
    return [lst[start:start + size] for start in range(0, len(lst), size)]
def find_between(s, first, last):
    """Return the substring of *s* between the first occurrence of *first*
    and the following occurrence of *last*; '' if either is missing."""
    begin = s.find(first)
    if begin == -1:
        return ""
    begin += len(first)
    finish = s.find(last, begin)
    if finish == -1:
        return ""
    return s[begin:finish]
def change_list_item(lst, old_item):
    """Replace, in place, every element containing *old_item* with the
    text between its <td> and </td> markers; return the same list."""
    for position, element in enumerate(lst):
        if old_item not in element:
            continue
        lst[position] = find_between(s=element, first="<td>", last="</td>")
    return lst
def extract_infos(raw_html):
    """Parse *raw_html* and return every <tr> element after the fifth."""
    parsed = BeautifulSoup(raw_html, 'html.parser')
    return list(parsed.find_all("tr"))[5:]
def beautify_results(raw_html):
    """Turn the raw results table into items of
    {course: [{swim_name: {time, location, date}}]}.

    Rows are collected under the short-course key 'sc' until a first-cell
    value repeats, after which rows go under long course 'lc' --
    presumably the page lists short-course results first; confirm with
    the source page layout.
    """
    results_dict = defaultdict(list)
    raw_results = extract_infos(raw_html=raw_html)
    course = "sc"
    tmp_list = []
    for items in raw_results:
        tmp_dict = {}
        # Drop the whitespace-only children bs4 keeps between cells.
        new_content = [i for i in items.contents if i != "\n"]
        # Skip header rows (their first cell is one of the known <th>s).
        if str(new_content[0]) in skipping_th:
            continue
        # A repeated first cell marks the switch to long-course results.
        if str(new_content[0]) in tmp_list:
            course = "lc"
        # key = find_between(s=str(new_content[0]), first="<td>", last="</td>")
        tmp_dict[find_between(s=str(new_content[0]), first="<td>", last="</td>")] = {"time": find_between(s=str(new_content[1]), first="<td>", last="</td>"),
                                                                                     "location": find_between(s=str(new_content[2]), first="<td>", last="</td>"),
                                                                                     "date": find_between(s=str(new_content[3]), first="<td>", last="</td>")}
        results_dict[course].append(tmp_dict)
        tmp_list.append(str(new_content[0]))
    return results_dict.items()
| ESBUINBOO/swimMember | backend/app/bs4_handler/bs4_handler.py | bs4_handler.py | py | 1,917 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 38,
"usage_type": "call"
}
] |
23972756418 | #!/usr/bin/env python
# coding: utf-8
from selenium.webdriver import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import time
import pymysql
import json
import re
import threading
from selenium.webdriver import ActionChains
def get_track(distance=258):
    """Build a human-like slider trajectory covering *distance* pixels.

    Accelerates for the first 4/5 of the distance and decelerates for the
    rest, sampling every 0.2 s; returns the list of per-step moves,
    rounded to whole pixels.

    Args:
        distance: total distance to travel in pixels.  Defaults to 258,
            the slider width this scraper targets.  (The old comment said
            the distance was passed in, but it was hard-coded -- now it
            really is a parameter, with a backward-compatible default.)
    """
    track = []
    # Current displacement.
    current = 0
    # Switch from acceleration to deceleration at 4/5 of the distance.
    mid = distance * 4 / 5
    # Sampling interval in seconds.
    t = 0.2
    # Initial velocity.
    v = 0
    while current < distance:
        if current < mid:
            # Accelerate.
            a = 3
        else:
            # Decelerate.
            a = -2
        v0 = v
        # Velocity after one interval.
        v = v0 + a * t
        # Distance covered during this interval.
        move = v0 * t + 1 / 2 * a * t * t
        current += move
        track.append(round(move))
    return track
def get_data(urls):
    """Scrape job postings from the given Zhilian URLs and persist each
    record to MySQL and to a local JSON-lines file.

    Each URL is opened in its own headless Chrome; a slider captcha is
    solved best-effort before the page fields are read.

    NOTE(review): assumes the salary text always contains '-' and uses
    the units 万 (10k) / 千 (1k); a posting without a range would raise
    and be swallowed by the outer except.
    """
    chromeOptions = Options()
    # The options below help avoid the site's anti-selenium detection
    # ======= headless mode is enabled
    chromeOptions.add_argument('--disable-dev-shm-usage')
    chromeOptions.add_argument('--no-sandbox')
    chromeOptions.add_argument('--headless')
    chromeOptions.add_experimental_option("excludeSwitches", ["enable-automation"])
    chromeOptions.add_experimental_option('useAutomationExtension', False)
    count = 0
    db = pymysql.connect("119.3.184.238","guest","guest","jobs")  # open the database connection (ip / user / password / database name)
    for i in urls:
        i = i.replace('\n','')
        count += 1
        print("====正在处理第%d条数据===="%count)
        print(i)
        try:
            web = Chrome(options=chromeOptions)
            # Hide navigator.webdriver so the page cannot detect automation.
            web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                "source": """
                Object.defineProperty(navigator, 'webdriver', {
                  get: () => undefined
                })
                """
            })
            wait = WebDriverWait(web,3)  # explicit-wait timeout (seconds)
            web.get(i)
            time.sleep(0.5)
            try:
                # Best-effort slider captcha; any failure is ignored.
                action = ActionChains(web)
                source=web.find_element_by_xpath("//*[@id='nc_1_n1z']")  # the element that has to be dragged
                action.click_and_hold(source).perform()
                tracks = get_track()
                for x in tracks:
                    action.move_by_offset(xoffset=x,yoffset=0).perform()
                time.sleep(0.5)
                action.release().perform()
                time.sleep(0.1)
            except:
                pass
            # Fetch the page fields.
            job_title = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//h3[@class="summary-plane__title"]')))
            job_company_name = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class="company"]/a')))
            job_company_url = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//div[@class="company"]/a')))
            job_location = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//ul[@class="summary-plane__info"]/li/a')))
            job_salary = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//span[@class="summary-plane__salary"]')))
            job_release_data = wait.until(EC.presence_of_all_elements_located((By.XPATH,'//span[@class="summary-plane__time"]')))
            # for a,b,c,d,e,f,g in zip(job_title,job_url,job_company_name,job_company_url,job_location,job_salary,job_release_data):
            # Salary string; normalised below to plain integer bounds.
            # NOTE(review): 'f' is reused as a file handle further down.
            f = job_salary[0].text
            max_salary = 0
            min_salary = 0
            if '万' in f[:f.index('-')]:  # the minimum salary unit is 万 (10k)
                f = f.replace('万','0千')
                max_salary = re.findall(r"\d+",f,)[1]+'000'
                min_salary = re.findall(r"\d+",f,)[0]+'000'
                if '.' in f[:f.index('-')]:  # the minimum salary is a decimal
                    f = f.replace('.','',1)
                    f = f.replace('0千','千',1)
                    min_salary = re.findall(r"\d+",f,)[0]+'000'
                if '.' in f[f.index('-'):]:  # the maximum salary is a decimal
                    f = f.replace('.','',1)
                    f = f.replace('0千','千',1)
                    max_salary = re.findall(r"\d+",f,)[0]+'000'
            elif '万' in f[f.index('-'):]:  # only the maximum salary unit is 万 (10k)
                f = f.replace('万','0千')
                max_salary = re.findall(r"\d+",f,)[1]+'000'
                min_salary = re.findall(r"\d+",f,)[0]+'000'
                if '.' in f[:f.index('-')]:
                    f = f.replace('.','',1)  # the salary is a decimal
                    min_salary = re.findall(r"\d+",f,)[0]+'00'
                if '.' in f[f.index('-'):]:
                    f = f.replace('.','',1)
                    f = f.replace('0千','千',1)
                    max_salary = re.findall(r"\d+",f,)[1]+'000'
            else:  # both salary units are 千 (1k)
                max_salary = re.findall(r"\d+",f,)[1]+'000'
                min_salary = re.findall(r"\d+",f,)[0]+'000'
                if '.' in f[:f.index('-')]:
                    a = f.replace('.','',1)
                    min_salary = re.findall(r"\d+",a,)[0]+'00'
                if '.' in f[f.index('-'):]:
                    a = f[f.index('-'):].replace('.','',1)
                    max_salary = re.findall(r"\d+",a,)[0]+'00'
            # Release date such as "7月14日"; fall back to a fixed date.
            g = job_release_data[0].text
            try:
                text = re.findall(r"\d+月\d+日",g,)[0]
                g_1 = re.findall(r"\d+月",g,)[0]
                g_2 = re.findall(r"\d+日",g,)[0]
                # NOTE(review): g_1/g_2 keep the 月/日 characters, so g ends
                # up like "2020-7月-14日" -- confirm this is intended.
                g = '2020'+'-'+g_1+'-'+g_2
            except Exception as e:
                print(e)
                g = '2020-7-14'
            # NOTE(review): 'dict' shadows the builtin of the same name.
            dict = {
                "job_sourse":"4",
                "job_title":job_title[0].text,
                "job_url":i,
                "job_company_name":job_company_name[0].text,
                "job_company_url":job_company_url[0].get_attribute('href'),
                "job_location":job_location[0].text,
                "job_salary":f,
                "job_max_salary":max_salary,
                "job_min_salary":min_salary,
                "job_release_data":g,
                "job_collect_data":"2020-7-15"
            }
            cursor = db.cursor()  # save to MySQL
            table = 'jobs'
            keys = ','.join(dict.keys())
            values = ','.join(['%s'] * len(dict))
            sql = 'insert into {table}({keys}) VALUES({values})'.format(table=table, keys=keys, values=values)
            try:
                if cursor.execute(sql, tuple(dict.values())):
                    print('insert successful')
                    db.commit()
            except Exception as e:
                print("insert failed!", e)
                db.rollback()
            with open('/root/Python/zhilian_data_0714.json', 'a+',encoding='utf-8') as f:  # also keep a local copy
                dict = json.dumps(dict,ensure_ascii=False)
                f.write(dict+'\n')
                f.close()
            web.close()
        except :
            print(i+'哎呀,这个页面获取失败了!')
            web.close()
    db.close()
if __name__ == '__main__':
    f = open("/root/Python/urls_data_0714.json","r")
    url_list = f.readlines()
    # Split the URL list into six slices, one per worker thread.
    # NOTE(review): 'i' is unused and the slice size 1119 is hard-coded;
    # confirm it still matches len(url_list) / 6 for the input file.
    i = int(len(url_list))/6
    url_list1 = url_list[:1119]
    url_list2 = url_list[1119:2*1119]
    url_list3 = url_list[2*1119:3*1119]
    url_list4 = url_list[3*1119:4*1119]
    url_list5 = url_list[4*1119:5*1119]
    url_list6 = url_list[5*1119:6*1119]
    # Create the threads
    # one thread per slice, each running get_data over its share
    th1 = threading.Thread(target=get_data, args=(url_list1,))
    th2 = threading.Thread(target=get_data, args=(url_list2,))
    th3 = threading.Thread(target=get_data, args=(url_list3,))
    th4 = threading.Thread(target=get_data, args=(url_list4,))
    th5 = threading.Thread(target=get_data, args=(url_list5,))
    th6 = threading.Thread(target=get_data, args=(url_list6,))
    # Start the threads
    th1.start()
    th2.start()
    th3.start()
    th4.start()
    th5.start()
    th6.start()
    # Join so the main thread outlives every worker
    th1.join()
    th2.join()
    th3.join()
    th4.join()
    th5.join()
    th6.join()
| shandongbd/lagou-boss-zhilian-data | zhilian_yun.py | zhilian_yun.py | py | 8,531 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 60,
"usage_type": "call"
},
{
... |
24722163523 | #!/usr/bin/env python3
# wykys 2020
# databáze konverzací pro generování korpusu
from sqlalchemy import create_engine, text, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import as_declarative, declared_attr, declarative_base
from sqlalchemy import Column, Integer, Unicode, ForeignKey, DateTime
from sqlalchemy.pool import StaticPool, SingletonThreadPool
from .database_models import BaseModel, Chat, Participant, Message, Link
from .database_reduce import DbReduce
class Database(object):
    """Conversation database used while generating the chat corpus.

    Wraps a single SQLAlchemy session over an in-memory SQLite engine and
    exposes thin aliases (query, add, commit, ...) plus the ORM models.
    """

    def __init__(self, echo=True):
        # Remembered so delete_all() can rebuild an identical engine.
        self.echo = echo
        engine = create_engine(
            #'sqlite:///test.db',
            'sqlite:///:memory:',
            echo=echo,
            # StaticPool + check_same_thread=False keep the single
            # in-memory database reachable from any thread.
            connect_args={'check_same_thread': False, 'timeout': 1000},
            poolclass=StaticPool
        )
        BaseModel.metadata.create_all(engine)
        session_factory = sessionmaker(
            bind=engine,
            autoflush=True
        )
        self.session = session_factory()
        self.conn = engine.connect()
        # Convenience aliases so callers can use db.query / db.add / ...
        self.execute = self.conn.execute
        self.text = text
        self.func = func
        self.query = self.session.query
        self.add = self.session.add
        self.flush = self.session.flush
        self.commit = self.session.commit
        self.delete = self.session.delete
        # Re-exported model classes.
        self.Chat = Chat
        self.Link = Link
        self.Message = Message
        self.Participant = Participant

    def get_chats(self):
        """Query over all Chat rows."""
        return self.query(Chat)

    def get_participants(self):
        """Query over all Participant rows."""
        return self.query(Participant)

    def get_messages(self):
        """Query over all Message rows."""
        return self.query(Message)

    def delete_all(self):
        # Re-running __init__ discards the old in-memory engine and
        # session, and with them every stored row.
        self.__init__(self.echo)

    def sql(self, cmd):
        """Execute a raw SQL string and return the result."""
        return self.execute(text(cmd))

    def reduce(self):
        # Delegates corpus reduction to DbReduce (side effects only).
        DbReduce(self)
# Module-level singleton shared by the rest of the package.
db = Database(echo=False)
| wykys/chateen | src/chateen/database/database.py | database.py | py | 1,911 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.pool.StaticPool",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "database_models.BaseModel.metadata.create_all",
"line_number": 27,
"usage_type": "cal... |
814974039 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
# 3D axes the spiral is drawn into; 'line' is updated frame by frame.
fig = plt.figure()
ax=fig.add_subplot(projection='3d')
line, = ax.plot([], [], [])
ax.set_xlim(-1,1)
ax.set_ylim(-1,1)
ax.set_zlim(0,3)
# Sampling: n_points samples over tf seconds of an f-Hz rotation.
n_points = 1001
tf = 3
f = 1
# Frames per second so the whole animation plays in tf seconds.
fps = int(np.ceil(n_points/tf))
t = np.linspace(0,tf,n_points)
frame_amount = int(n_points)
# Milliseconds between frames, derived from the fps above.
frame_speed = int(np.ceil(1000/fps))
print(fps)
# Circle of radius 0.5 traced at f revolutions per second; z grows with t.
X = 0.5*np.cos(2*np.pi*f*t)
Y = 0.5*np.sin(2*np.pi*f*t)
def animate(num):
    """Reveal the first *num* samples of the spiral on each frame."""
    xs, ys, zs = X[:num], Y[:num], t[:num]
    line.set_xdata(xs)
    line.set_ydata(ys)
    line.set_3d_properties(zs)
    return (line,)
# Drive the animation: one frame per sample, timed to play in real time.
ani=animation.FuncAnimation(fig, animate,
    frames=frame_amount,interval=frame_speed,repeat=False,blit=True)
# ani.save('spiral.mp4', fps=fps, dpi=120)
plt.show()
| mattwilliams06/AppliedControlSystems2 | animated_spiral.py | animated_spiral.py | py | 835 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.ceil",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
... |
10139864507 | import bpy
import os
import random
from os.path import isfile
output_fname_tpl = os.getenv('FRAME_FNAME','frame%06d')
def frame_done(n):
    """Return True when frame *n* has already been rendered to a PNG."""
    rendered_png = (output_fname_tpl % n) + ".png"
    return isfile(rendered_png)
def frames_todo():
    """List the scene's frame numbers that still lack a rendered PNG."""
    all_frames = range(bpy.context.scene.frame_start, bpy.context.scene.frame_end+1)
    return [n for n in all_frames if not frame_done(n)]
def render(frame):
    """Render a single *frame* to its templated PNG path."""
    bpy.context.scene.frame_set(frame)
    bpy.context.scene.render.filepath = output_fname_tpl % (frame)
    bpy.ops.render.render(write_still=True)
# Render on the GPU through CUDA.
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.context.scene.cycles.device = 'GPU'
random.seed()
frames = frames_todo()
while len(frames):
    # Start at a random position so multiple workers sharing the output
    # directory are unlikely to pick the same frame.
    start = random.randrange(0, len(frames))
    # BUG FIX: the rotation was frames[start:] + frames[0:start-1],
    # which dropped one frame for start >= 1 and, for start == 0,
    # appended a near-duplicate of the whole list.
    queue = frames[start:] + frames[:start]
    for n in queue:
        # Another worker finished this frame meanwhile: rescan the todo list.
        if frame_done(n):
            break
        render(n)
    frames = frames_todo()
| emanaev/render-test | blender/script.py | script.py | py | 869 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bpy.context",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "bpy.context.scene.frame_set",... |
17497506686 | import json
import random
def calculate_prf(n_golds, n_preds, n_inters):
    """Percentage precision / recall / F1 from raw counts, 2-dp rounded."""
    if n_preds != 0:
        p = (n_inters * 1.0 / n_preds) * 100
    else:
        p = 0
    if n_golds != 0:
        r = (n_inters * 1.0 / n_golds) * 100
    else:
        r = 0
    f = (2.0 * p * r / (p + r)) if (p + r) != 0 else 0
    return round(p, 2), round(r, 2), round(f, 2)
def eval_dif_len_ner(pred_file, max_len):
    """Evaluate NER predictions bucketed by entity-span length.

    Args:
        pred_file: JSON file with a list of items, each holding
            ``gold_data.spans`` and ``pred_data.spans`` as [start, end]
            pairs (span length = end - start).
        max_len: number of length buckets; spans with length >= max_len
            are ignored for the per-length statistics (but still count
            toward the overall gold total used for the ratios).

    Returns:
        (dif_len_ratio, dif_len_prf) where dif_len_ratio[i] is the share
        of all gold spans having length i, and dif_len_prf[i] is the
        (precision, recall, f1) tuple for that length.
    """
    with open(pred_file, 'r') as f:
        data = json.load(f)

    n_gold_total = 0
    gold_counts = [0] * max_len
    pred_counts = [0] * max_len
    inter_counts = [0] * max_len
    for d in data:
        gold_spans = [tuple(s) for s in d["gold_data"]["spans"]]
        pred_spans = [tuple(s) for s in d["pred_data"]["spans"]]
        # True positives: spans predicted exactly as in the gold set.
        inter_spans = set(gold_spans) & set(pred_spans)
        n_gold_total += len(gold_spans)
        # Bucket gold / predicted / matched spans by span length
        # (previously three copy-pasted loops).
        for counts, spans in ((gold_counts, gold_spans),
                              (pred_counts, pred_spans),
                              (inter_counts, inter_spans)):
            for s in spans:
                length = s[1] - s[0]
                if length < max_len:
                    counts[length] += 1

    dif_len_prf = [
        calculate_prf(gold_counts[i], pred_counts[i], inter_counts[i])
        for i in range(max_len)
    ]
    # Guard against empty data (previously a ZeroDivisionError).
    if n_gold_total == 0:
        dif_len_ratio = [0.0] * max_len
    else:
        dif_len_ratio = [g / n_gold_total for g in gold_counts]
    return dif_len_ratio, dif_len_prf
if __name__ == "__main__":
    # Datasets this evaluation layout covers:
    # "scierc", "conll04", # entity-relation dataset
    # "ace04", "ace05", "genia", "nne", # nested NER dataset
    # "conll03", # English flat NER dataset
    # "weibo", "resume", "ontonote4", "msra",
    # "people_daily", "ecommerce" # Chinese flat NER dataset
    data_dir = f"/NAS2020/Workspaces/NLPGroup/juncw/database/NER/nested_ner/__results"
    # pred_file = f"{data_dir}/ace05/span_bert_gcn/" \
    #             f"41_61 2021-11-02 22:24:29/test_41_37310.json" # ace05
    pred_file = f"{data_dir}/ace05/span_bert_gcn/" \
                f"41_82 2021-11-07 14:00:47/test_37_33670.json" # ace05
    max_len = 10
    dif_len_ratio, dif_len_prf = eval_dif_len_ner(pred_file, max_len)
    # One line per span length: gold-span share plus P/R/F, separated
    # with '&' for pasting into a LaTeX table.
    for i in range(max_len):
        ratio = round(100 * dif_len_ratio[i], 2)
        p, r, f = dif_len_prf[i]
        print(f"len = {i+1}, ratio prf: {ratio} & {p} & {r} & {f}")
| JorgenWan/NestedNER | scripts/eval_dif_len_ner.py | eval_dif_len_ner.py | py | 2,516 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
}
] |
40567890811 | import numpy as np
import torch
def calculate_tp(pred_boxes, pred_scores, gt_boxes, gt_difficult, iou_thresh = 0.5):
    """
    Calculate tp/fp for all predicted bboxes of one class of one image.
    Among different bboxes matched to the same gt, the one with the highest
    score gets tp = 1; the others get tp = 0.
    Args:
        pred_boxes: Tensor[N, 4], (x0, y0, x1, y1) of all predicted boxes of this class in this image
        pred_scores: Tensor[N, 1], scores of all predicted boxes of this class in this image
        gt_boxes: Tensor[M, 4], (x0, y0, x1, y1) of all ground-truth boxes of this class in this image
        gt_difficult: Tensor[M, 1], whether each ground-truth box is flagged "difficult"
        iou_thresh: IoU threshold for a prediction to count as a match
    Returns:
        gt_num: number of non-difficult ground-truth boxes of this class in this image
        tp_list: per-prediction flags marking which predicted boxes are true positives
        confidence_score: scores of the predicted boxes (aligned with tp_list)
    """
    if gt_boxes.numel() == 0:
        return 0, [], []
    # No predictions at all: all gts are missed, tp list stays empty.
    if pred_boxes.numel() == 0:
        return len(gt_boxes), [], []
    # Otherwise compute the IoU between every predicted box and every gt.
    ious = pred_boxes.new_zeros((len(gt_boxes), len(pred_boxes)))
    for i in range(len(gt_boxes)):
        gb = gt_boxes[i]
        area_pb = (pred_boxes[:, 2] - pred_boxes[:, 0]) * (pred_boxes[:, 3] - pred_boxes[:, 1])
        area_gb = (gb[2] - gb[0]) * (gb[3] - gb[1])
        xx1 = pred_boxes[:, 0].clamp(min = gb[0].item())  # [N-1,]
        yy1 = pred_boxes[:, 1].clamp(min = gb[1].item())
        xx2 = pred_boxes[:, 2].clamp(max = gb[2].item())
        yy2 = pred_boxes[:, 3].clamp(max = gb[3].item())
        inter = (xx2 - xx1).clamp(min = 0) * (yy2 - yy1).clamp(min = 0)  # [N-1,]
        ious[i] = inter / (area_pb + area_gb - inter)
    # Each prediction is matched to the gt with which it has the largest IoU.
    max_ious, max_ious_idx = ious.max(dim = 0)
    not_difficult_gt_mask = gt_difficult == 0
    gt_num = not_difficult_gt_mask.sum().item()
    if gt_num == 0:
        return 0, [], []
    # Keep only predictions matched to non-difficult gts: predictions matched
    # to a "difficult" gt must not enter the P-R computation. If dropping the
    # IoU rows of difficult gts leaves a prediction's max IoU unchanged, that
    # prediction was not matched to a difficult gt and is kept.
    not_difficult_pb_mask = (ious[not_difficult_gt_mask].max(dim = 0)[0] == max_ious)
    max_ious, max_ious_idx = max_ious[not_difficult_pb_mask], max_ious_idx[not_difficult_pb_mask]
    if max_ious_idx.numel() == 0:
        return gt_num, [], []
    confidence_score = pred_scores.view(-1)[not_difficult_pb_mask]
    tp_list = torch.zeros_like(max_ious)
    # For each gt matched above the threshold, mark only the highest-scoring
    # matching prediction as a true positive.
    for i in max_ious_idx[max_ious > iou_thresh].unique():
        gt_mask = (max_ious > iou_thresh) * (max_ious_idx == i)
        idx = (confidence_score * gt_mask.float()).argmax()
        tp_list[idx] = 1
    return gt_num, tp_list.tolist(), confidence_score.tolist()
def calculate_pr(gt_num, tp_list, confidence_score):
    """Compute the full precision/recall curve for one class.

    Predictions are ranked by descending confidence; each prefix of the
    ranking yields one (recall, precision) pair.

    Args:
        gt_num (int): number of ground-truth boxes of this class.
        tp_list (list | ndarray): 1/0 flags marking true-positive predictions.
        confidence_score (list | ndarray): scores aligned with ``tp_list``.

    Returns:
        (recall, precision) as two Python lists; ([0], [0]) when there is
        nothing to evaluate.
    """
    if gt_num == 0:
        return [0], [0]
    if isinstance(tp_list, (tuple, list)):
        tp_list = np.array(tp_list)
    if isinstance(confidence_score, (tuple, list)):
        confidence_score = np.array(confidence_score)
    assert len(tp_list) == len(confidence_score), "len(tp_list) and len(confidence_score) should be same"
    if len(tp_list) == 0:
        return [0], [0]
    # Rank predictions from most to least confident.
    order = np.argsort(-confidence_score)
    cum_tp = np.cumsum(tp_list[order])
    recall = cum_tp / gt_num
    precision = cum_tp / (np.arange(len(cum_tp)) + 1)
    return recall.tolist(), precision.tolist()
def voc_ap(rec, prec, use_07_metric = False):
    """Compute VOC average precision from a precision/recall curve.

    With ``use_07_metric`` the VOC2007 11-point interpolation is used;
    otherwise the area under the monotone precision envelope is integrated
    (the VOC2010+ "correct" AP).
    """
    if isinstance(rec, (tuple, list)):
        rec = np.array(rec)
    if isinstance(prec, (tuple, list)):
        prec = np.array(prec)
    if use_07_metric:
        # 11-point interpolation: average the max precision at
        # recall >= t for t in {0, 0.1, ..., 1.0}.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            above = rec >= t
            p = np.max(prec[above]) if np.sum(above) else 0
            ap = ap + p / 11.
        return ap
    # Pad with sentinels so the envelope and the diffs are well defined.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing (right-to-left max).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Integrate only where recall actually changes.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
{
"api_name": "torch.zeros_like",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_nu... |
2923880165 | import pandas as pd
import time
import sys
import numpy as np
import os
import torch
import torchvision
from numpy import sqrt
import math
from torch import nn
import latticeglass
from args import args
from resmade import MADE
from utils import (
clear_checkpoint,
clear_log,
get_last_checkpoint_step,
ignore_param,
init_out_dir,
my_log,
print_args,
EarlyStopping,
mysorting,
)
import itertools
import plotly.express as px
import math
from transformer import Transformer
from torch.nn import functional as F
def main():
    """Train an autoregressive Transformer on lattice-glass patch configurations.

    Loads equilibrium configurations, carves out 5x5x5 patches (fixed boundary,
    3x3x3 mobile interior), reorders sites (boundary first, then center
    outward), and trains the model by maximum likelihood with teacher forcing.
    Relies heavily on the module-level ``args`` namespace and on checkpoint /
    logging helpers from ``utils``.
    """
    start_time = time.time()
    init_out_dir()
    if args.clear_checkpoint:
        clear_checkpoint()
    last_step = get_last_checkpoint_step()
    if last_step >= 0:
        my_log('\nCheckpoint found: {}\n'.format(last_step))
    else:
        clear_log()
    print_args()
    with open('{}.myargs'.format(args.out_filename), 'w') as f:
        print_args(f.write)
    args.L=int(round(np.power(args.n,1/3)))
    args.patch_size = int(args.L*0.5)
    boundary_size = 5**3 - 3**3
    ntokens = 5 # size of vocabulary
    max_output_length = 29 #extra end of word token
    net = Transformer(ntokens, max_output_length, 2**9).to(args.device)
    net.to(args.device)
    my_log('{}\n'.format(net))
    params = list(net.parameters())
    params = list(filter(lambda p: p.requires_grad, params))
    nparams = int(sum([np.prod(p.shape) for p in params]))
    my_log('Total number of trainable parameters: {}'.format(nparams))
    named_params = list(net.named_parameters())
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(params, lr=args.lr)
    elif args.optimizer == 'sgdm':
        optimizer = torch.optim.SGD(params, lr=args.lr, momentum=0.9)
    elif args.optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(params, lr=args.lr, alpha=0.99)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(params, lr=args.lr, betas=(0.9, 0.999))
    elif args.optimizer == 'adam0.5':
        optimizer = torch.optim.Adam(params, lr=args.lr, betas=(0.5, 0.999))
    else:
        raise ValueError('Unknown optimizer: {}'.format(args.optimizer))
    if args.lr_schedule:
        #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        #    #optimizer, factor=0.5, patience=500, threshold=1e-4, min_lr=1e-5)
        #    optimizer, factor=0.92, patience=500, threshold=1e-4, min_lr=1e-5)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 10000, 0)
    if args.early_stopping:
        my_log('INFO: Initializing early stopping')
        early_stopping = EarlyStopping(patience=5000)
    if last_step >= 0:
        state = torch.load('{}_save/{}.state'.format(args.out_filename, last_step),map_location=torch.device(args.device))
        ignore_param(state['net'], net)
        net.load_state_dict(state['net'])
        if state.get('optimizer'):
            optimizer.load_state_dict(state['optimizer'])
        if args.lr_schedule and state.get('scheduler'):
            scheduler.load_state_dict(state['scheduler'])
    init_time = time.time() - start_time
    my_log('init_time = {:.3f}'.format(init_time))
    my_log('\n*** Patch size {} from a box of size {}'.format(args.patch_size,args.L))
    nn_list_full = latticeglass.make_nn(args.L)
    nn_list_patch = latticeglass.make_nn(args.patch_size)
    #***********************************************************************************************************
    #****************************************** IMPORT DATA FILES  ********************************************
    with torch.no_grad():
        my_log('%%%% Loading the full sample')
        rawfilename = 'LG_eq_configs_T_{}.txt'.format(args.T)
        full_sample = latticeglass.read_sample(rawfilename)
        # Now I measure the Energy of the full sample (cached on disk).
        samplefilename = 'Eequi_T{}_L{}_ps{}.npy'.format(args.T,args.L,args.patch_size)
        if os.path.isfile(samplefilename):
            E_equi = np.load(samplefilename)
        else:
            my_log('Calculating the energy')
            full_sample_energy = torch.Tensor(np.asarray(latticeglass.energy(full_sample, nn_list_full, args.q)))
            E_equi =full_sample_energy.mean()/(args.L**3)
            np.save(samplefilename,E_equi)
        my_log('*** Equilibrium E/N={}'.format(E_equi))
        # pass to numpy which is faster to operate in parallel
        full_sample = full_sample.to(dtype=int).detach().cpu().numpy()
        # and then the energy of the patches
        # ***[!]*** The patches are NOT defined in PBC, so to get their bulk energy I have to exclude the boundaries
        # to do so I define a mask which is 0 on the boundaries
        mask_patch= torch.ones(size=(1,args.patch_size,args.patch_size,args.patch_size))
        mask_patch[:,0,:,:]=0
        mask_patch[:,args.patch_size-1,:,:]=0
        mask_patch[:,:,0,:]=0
        mask_patch[:,:,args.patch_size-1,:]=0
        mask_patch[:,:,:,0]=0
        mask_patch[:,:,:,args.patch_size-1]=0
        onedmask = torch.zeros(size=(1,args.patch_size**3))
        # Flatten the 3D mask into the 1D site ordering (x fastest).
        for x in range(args.patch_size):
            for y in range(args.patch_size):
                for z in range(args.patch_size):
                    s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                    onedmask[:, s] = mask_patch[:, x, y, z]
        # save the mask
        samplefilename = 'onedmask_T{}_L{}_ps{}.npy'.format(args.T,args.L,args.patch_size)
        np.save(samplefilename,onedmask)
        # I want to impose a different ordering for the variables in the patches that has the following requisites:
        # 1) first I put the fixed boundaries
        # 2) the first (mobile) spin is the center of the patch
        # 3) then we have the center of the faces (1boundary connection)
        # 4) then the edges (touching 2 boundaries)
        # 5) finally the corners
        # I can achieve this similarly to what I did in the coloring for the ordering based on the graph
        # So I create an array of all the IDs in the order that I want
        order_of_sites=[]
        # (1)
        for x in [0,args.patch_size-1]:
            for y in range(args.patch_size):
                for z in range(args.patch_size):
                    s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                    order_of_sites.append(s)
        for x in range(args.patch_size):
            for y in [0,args.patch_size-1]:
                for z in range(args.patch_size):
                    s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                    order_of_sites.append(s)
        for x in range(args.patch_size):
            for y in range(args.patch_size):
                for z in [0,args.patch_size-1]:
                    s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                    order_of_sites.append(s)
        # (2)  NOTE(review): hard-coded coordinates below assume patch_size == 5
        x = 2
        y = 2
        z = 2
        s = x + args.patch_size*y + args.patch_size*args.patch_size*z
        order_of_sites.append(s)
        # (3)
        y = 2
        z = 2
        for x in [1,3]:
            s = x + args.patch_size*y + args.patch_size*args.patch_size*z
            order_of_sites.append(s)
        x = 2
        z = 2
        for y in [1,3]:
            s = x + args.patch_size*y + args.patch_size*args.patch_size*z
            order_of_sites.append(s)
        x = 2
        y = 2
        for z in [1,3]:
            s = x + args.patch_size*y + args.patch_size*args.patch_size*z
            order_of_sites.append(s)
        # (4)
        x = 2
        for y in [1,3]:
            for z in [1,3]:
                s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                order_of_sites.append(s)
        y = 2
        for x in [1,3]:
            for z in [1,3]:
                s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                order_of_sites.append(s)
        z = 2
        for x in [1,3]:
            for y in [1,3]:
                s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                order_of_sites.append(s)
        # (5)
        for x in [1,3]:
            for y in [1,3]:
                for z in [1,3]:
                    s = x + args.patch_size*y + args.patch_size*args.patch_size*z
                    order_of_sites.append(s)
        # remove repetitions
        order_of_sites = list(dict.fromkeys(order_of_sites))
        my_log('## I ordered the sites in the following way:\n{}'.format(order_of_sites))
        # I also need the inverse order which will be used to revert back the generated samples and compute their energy
        reverse_order = np.arange(args.patch_size**3)
        reverse_order = [x for _,x in sorted(zip(order_of_sites,reverse_order), key=lambda pair: pair[0])]
        my_log('storing info')
        samplefilename = 'ordering_T{}_L{}_ps{}.npy'.format(args.T,args.L,args.patch_size)
        np.save(samplefilename,order_of_sites)
        train_sample = full_sample[args.batch_size:]
        val_sample = full_sample[:args.batch_size]#.to(args.device)
        my_log('Preparing validation sample...')
        # prepare the validation sample once
        val_patches = torch.zeros(size=(args.batch_size, args.patch_size**3))
        # ** (1) select patch center
        centers = np.random.choice(np.arange(len(val_sample[0])),size=(args.batch_size,1)).astype(int)
        # ** (2) select how to transform the patch
        know_transformations = 13
        transformation = np.random.choice(np.arange(know_transformations),size=(args.batch_size,1)).astype(int)
        csamp = np.concatenate((centers,transformation,val_sample),axis=-1)
        # ** (3) extract the patch and transform it
        f = lambda x: latticeglass.patch_from_config_not_ordered(x[2:],x[0],args.L,False, True, x[1])
        val_patches = torch.Tensor(np.apply_along_axis(f, 1, csamp).squeeze(axis=1))
        patch_energy = torch.Tensor(np.asarray(latticeglass.patch_energy(val_patches, onedmask.cpu().detach().numpy(), nn_list_patch, args.q)))
        my_log('The validation patches have E/N={}'.format(patch_energy.mean()/3**3))
        # ** (4) sort the validation sample
        for sample_i in range(val_patches.shape[0]):
            val_patches[sample_i] = mysorting(order_of_sites,val_patches[sample_i])
        # ** (5) measure the density (that I am planning to conserve)
        zeros= (val_patches == 0).sum(dim=-1).unsqueeze(dim=-1)
        ones = (val_patches == 1).sum(dim=-1).unsqueeze(dim=-1)
        twos = (val_patches == 2).sum(dim=-1).unsqueeze(dim=-1)
        Npatch = 5**3
        Nall = args.batch_size*Npatch
        #val_sample_wrho = torch.concat([zeros/Npatch,ones/Npatch,twos/Npatch,torch.Tensor(val_patches)],dim=-1)
        val_sample_wrho = torch.concat([zeros/Npatch,ones/Npatch,twos/Npatch,val_patches],dim=-1)
        # ** (6) pass the sample to device for faster processing
        val_sample_wrho = val_sample_wrho.to(device=args.device)
        # create start and end tokens
        start_token = torch.zeros(size=zeros.shape).to(device=args.device)
        end_token = start_token + 1
    criterion = nn.CrossEntropyLoss()
    my_log('Training...')
    newsample_resorted = torch.zeros(size=(args.batch_size,5**3))
    oldsample_resorted = torch.zeros(size=(args.batch_size,5**3))
    sample_time = 0
    train_time = 0
    start_time = time.time()
    # **********************************************
    # **************** TRAINING LOOP
    # **********************************************
    for step in range(last_step + 1, args.max_step + 1):
        # Use the annealing rate same as Wu et al. to decrease T
        beta = 1/args.T * (1 - args.beta_anneal**step)
        # **** Maximum likelihood
        # NOTE(review): if args.ARtype != 'maxlike', `sample_start_time`, `data`
        # and `target` are never defined and the loop body below would raise
        # a NameError — confirm 'maxlike' is the only supported mode here.
        if args.ARtype == 'maxlike':
            sample_start_time = time.time()
            # **** Batch preparation
            # * (1) extract random configs
            indices = np.random.choice(np.arange(train_sample.shape[0]),size=(args.batch_size))
            sample = train_sample[indices][:]
            # * (2) select patch center
            centers = np.random.choice(np.arange(len(sample[0])),size=(args.batch_size,1)).astype(int)
            # * (3) select how to transform the patch
            transformation = np.random.choice(np.arange(know_transformations),size=(args.batch_size,1)).astype(int)
            csamp = np.concatenate((centers,transformation,sample),axis=-1)
            # * (4) extract the patch and transform it
            f = lambda x: latticeglass.patch_from_config_not_ordered(x[2:],x[0],args.L,False, True, x[1])
            patches = torch.Tensor(np.apply_along_axis(f, 1, csamp).squeeze(axis=1))
            # * (5) sort the training sample
            for sample_i in range(args.batch_size):
                patches[sample_i] = mysorting(order_of_sites,patches[sample_i])
            # * (6) measure the density (that I am planning to conserve)
            zeros= (patches == 0).sum(dim=-1).unsqueeze(dim=-1).to(device=args.device)
            ones = (patches == 1).sum(dim=-1).unsqueeze(dim=-1).to(device=args.device)
            twos = (patches == 2).sum(dim=-1).unsqueeze(dim=-1).to(device=args.device)
            patches = patches.to(device=args.device)
            sample_wrho = torch.concat([start_token,zeros/Npatch,ones/Npatch,twos/Npatch,patches],dim=-1).to(device=args.device)
            # Teacher forcing: use end and start tokens
            # (first 98 sites = fixed boundary context; rest = interior target;
            # spin values are shifted by +2 to make room for the two tokens)
            data = patches[:,:98]
            #data = torch.concat([data+2,end_token],dim=-1).to(device=args.device,dtype=torch.long)
            data = data.to(device=args.device,dtype=torch.long)+2
            target = patches[:,98:]
            target = torch.concat([start_token,target+2,end_token],dim=-1).to(device=args.device,dtype=torch.long)
        if args.print_step and step % args.print_step == 0 :
            # generate a new sample to evaluate the energy that the network can generate and evalaute the acceptance
            with torch.no_grad():
                net.eval()
                new_sample = net.predict(data)[:,1:-1]
                # print('\n')
                # print('\n')
                # print(data)
                # print(data.shape)
                # print(target)
                # print(target.shape)
                # print(new_sample)
                # print(new_sample.shape)
                logits = net(data, target[:,:-1])
                p = F.softmax(logits, dim = 1)
                this_sample_p = torch.gather(p, 1, new_sample.unsqueeze(dim=1)).squeeze(dim=1)
                entropy = -torch.log(this_sample_p).sum(dim=-1)
                # * fix the start of word token
                new_sample = new_sample-2
                new_sample[new_sample<0] = 0
                # check how many samples conserved the density
                ok_samples=0
                for i in range(args.batch_size):
                    if (target[i]==2).sum()==(new_sample[i]==0).sum():
                        if (target[i]==3).sum()==(new_sample[i]==1).sum():
                            if (target[i]==4).sum()==(new_sample[i]==2).sum():
                                ok_samples+=1
                # I have to resort the samples to compute energy
                for s_i in range(args.batch_size):
                    newsample_resorted[s_i] = mysorting(reverse_order, torch.concat([patches[s_i,:98],new_sample[s_i]],dim=-1))
                    oldsample_resorted[s_i] = mysorting(reverse_order, patches[s_i])
                new_energy = torch.Tensor(np.asarray(latticeglass.patch_energy(newsample_resorted, onedmask.cpu().detach().numpy(), nn_list_patch, args.q)))
                old_energy = torch.Tensor(np.asarray(latticeglass.patch_energy(oldsample_resorted, onedmask.cpu().detach().numpy(), nn_list_patch, args.q)))
                zeros = (new_sample == 0).sum()
                ones = (new_sample == 1).sum()
                twos = (new_sample == 2).sum()
                N = args.batch_size*(3**3)
                my_log('\naverage density of zeros={:.3g}\tones={:.3g}\ttwos={:.3g}\n{}/{} samples conserved the density'.format(zeros/N,ones/N,twos/N,ok_samples,args.batch_size))
                net.train()
        logits = net(data, target[:,:-1])
        loss = criterion(logits, target[:,1:]) #exclude from loss the start of word token
        # and measure the validation loss
        #val_loss = -net.log_prob(val_sample_wrho).mean()
        val_loss = 0*loss
        sample_time += time.time() - sample_start_time
        train_start_time = time.time()
        # ************
        # Zero the gradient
        optimizer.zero_grad()
        # If required add regularization
        # NOTE(review): the regularization branches add to `loss_reinforce`,
        # which is never defined — these branches would raise NameError.
        if args.regularization == 'l1':
            l1_norm = sum(p.sum() for p in net.parameters())
            loss_reinforce += args.lambdal1 * l1_norm
        if args.regularization == 'l2':
            l2_norm = sum(p.pow(2.0).sum() for p in net.parameters())
            loss_reinforce += args.lambdal2 * l2_norm
        # Backpropagate
        loss.backward()
        if args.clip_grad:
            nn.utils.clip_grad_norm_(params, args.clip_grad)
        optimizer.step()
        # If I am doing maxlike, I will start scheduler and early stop from epoch=50
        # while if I am doing var, I will start only when beta~equi
        #if args.lr_schedule and args.ARtype == 'maxlike' and step>50 :
        #    scheduler.step(loss.mean())
        #elif args.lr_schedule and round(beta,3) == args.beta:
        #    scheduler.step(loss.mean())
        if args.lr_schedule:
            scheduler.step()
        # Notice that early stopping is done over the validation loss
        if args.early_stopping and args.ARtype == 'maxlike' and step>50 :
            early_stopping(val_loss)
            if early_stopping.early_stop:
                my_log('Exiting for early stopping trigger')
                break
        elif args.early_stopping and round(beta,3) == args.beta:
            early_stopping(val_loss)
            if early_stopping.early_stop:
                my_log('Exiting for early stopping trigger')
                break
        train_time += time.time() - train_start_time
        if args.print_step and step % args.print_step == 0:
            entropy_mean = float(entropy.mean())/((args.patch_size-2)**3)
            energy_mean = float(new_energy.mean())/((args.patch_size-2)**3)
            old_energy_mean = float(old_energy.mean())/((args.patch_size-2)**3)
            free_energy = energy_mean - entropy_mean/ args.beta
            if step > 0:
                sample_time /= args.print_step
                train_time /= args.print_step
            used_time = time.time() - start_time
            my_log(
                'step = {}, F = {:.4g}, S/N = {:.4g}, E(generated)/N = {:.5g}, E(ref)/N = {:.5g}, E(equi)/N = {:.5g}, lr = {:.3g}, beta = {:.3g}, T={:.3g}, sample_time = {:.3f}, loss = {:.3g}, val_loss = {:.4g}, train_time = {:.3f}, used_time = {:.3f}'
                .format(
                    step,
                    free_energy,
                    entropy_mean,
                    energy_mean,
                    old_energy_mean,
                    E_equi,
                    optimizer.param_groups[0]['lr'],
                    beta,
                    1./max(0.000001,beta),
                    sample_time,
                    loss,
                    val_loss,
                    train_time,
                    used_time,
                ))
            sample_time = 0
            train_time = 0
            if args.save_sample:
                state = {
                    'sample': sample,
                    'log_prob': log_prob,
                    'energy': energy,
                    'loss': loss,
                }
                torch.save(state, '{}_save/{}.sample'.format(
                    args.out_filename, step))
        if (args.out_filename and args.save_step
                and step % args.save_step == 0):
            state = {
                'net': net.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            if args.lr_schedule:
                state['scheduler'] = scheduler.state_dict()
            torch.save(state, '{}_save/{}.state'.format(
                args.out_filename, step))
        if (args.out_filename and args.visual_step
                and step % args.visual_step == 0):
            if args.print_sample and 0:
                energy_np = np.asarray(new_energy)
                energy_count = np.stack(
                    np.unique(energy_np, return_counts=True)).T
                my_log(
                    '\nsample\n{}\nlog_prob\n{}\nenergy\n{}\nloss\n{}\nenergy_count\n{}\n'
                    .format(
                        sample[:args.print_sample, :],
                        log_prob[:args.print_sample],
                        energy_np,
                        loss[:args.print_sample],
                        energy_count,
                    ))
            if args.print_grad:
                my_log('grad max_abs min_abs mean std')
                for name, param in named_params:
                    if param.grad is not None:
                        grad = param.grad
                        grad_abs = torch.abs(grad)
                        my_log('{} {:.3g} {:.3g} {:.3g} {:.3g}'.format(
                            name,
                            torch.max(grad_abs).item(),
                            torch.min(grad_abs).item(),
                            torch.mean(grad).item(),
                            torch.std(grad).item(),
                        ))
                    else:
                        my_log('{} None'.format(name))
                my_log('')
if __name__ == '__main__':  # script entry point
    main()
| SCiarella/patchy_transformer | main.py | main.py | py | 21,956 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "utils.init_out_dir",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "args.args.clear_checkpoint",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "args.arg... |
73130978664 | from tempfile import mkdtemp
import multiprocessing
import environ
# ENVIRON
# ==============================================================================
env = environ.Env()
DEBUG = env.bool("DEBUG")
FORWARDED_ALLOW_IPS = env.str("GUNICORN_FORWARDED_ALLOW_IPS")
PROXY_ALLOW_IPS = env.str("GUNICORN_PROXY_ALLOW_IPS")
# Config
# ==============================================================================
config = "./gunicorn.conf.py" # path to this settings file
wsgi_app = "config.wsgi:application" # WSGI app path, python_module:variable
# Debugging
# ==============================================================================
# reload = False # reload workers when code changes
reload = True if DEBUG else False
reload_engine = "auto" # engine used to track code changes
reload_extra_files = [] # extra files that also trigger a reload
spew = False # dump every line the server executes (use with care)
check_config = False # validate the config and exit (exit code, OK=0, fail=1)
print_config = False # print the config
# Logging
# ==============================================================================
# accesslog = None # access log file path, "-" = stdout
accesslog = "-" if DEBUG else "/var/log/gunicorn.access.log"
# errorlog = "-" # error log file path, "-" = stdout
errorlog = "-" if DEBUG else "/var/log/gunicorn.error.log"
capture_output = False # redirect stdout + stderr into the errorlog file
# loglevel = "info" # error-log verbosity, debug < info < warning < error < critical
loglevel = "debug" if DEBUG else "info"
disable_redirect_access_to_syslog = False # disable redirecting the access log to syslog
# In production, log the client address from X-Forwarded-For instead of the proxy's.
access_log_format = (
    '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
    if DEBUG
    else '%({x-forwarded-for}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
)
logger_class = "gunicorn.glogging.Logger" # logger class used for logging
logconfig = None # path to a python logging module config file
logconfig_dict = {} # dict for the python logging module, takes precedence over logconfig
syslog = False # send logs to syslog
syslog_addr = "udp://localhost:514" # address syslog entries are sent to
syslog_prefix = None # program-name parameter used in syslog, gunicorn.<prefix>
syslog_facility = "user" # syslog facility name
enable_stdio_inheritance = False # enable stdio inheritance
statsd_host = None # host:port of the StatsD server
dogstatsd_tags = "" # comma-separated list of tags attached to StatsD metrics
statsd_prefix = "" # prefix prepended when sending metrics to StatsD
# Process Naming
# ==============================================================================
proc_name = None # gunicorn process name; falls back to default_proc_name when None
default_proc_name = "gunicorn" # default gunicorn process name
# SSL
# ==============================================================================
keyfile = None # path to the SSL key file
certfile = None # path to the SSL certificate file
do_handshake_on_connect = False if DEBUG else True
ssl_version = "TLSv1_2" # SSL/TLS version to use
cert_reqs = 0 # whether a client certificate is required (see stdlib ssl module)
ca_certs = None # path to the CA certificates file
suppress_ragged_eofs = True # suppress ragged EOFs (see stdlib ssl module)
# do_handshake_on_connect = False # whether to perform the SSL handshake on socket connect
ciphers = None # cipher suite; None uses the ssl module's default cipher list
# Security
# ==============================================================================
limit_request_line = 1024 # max size of an HTTP request line, in bytes
limit_request_fields = 100 # max number of HTTP request header fields
limit_request_field_size = 1024 # max allowed size of an HTTP request header field
# Server Mechanics
# ==============================================================================
# chdir = "/app" # chdir here before loading the app; set in the Dockerfile.
daemon = False # run the gunicorn process in the background
preload_app = False # preload app code before forking the worker processes
secure_scheme_headers = {
    "X-FORWARDED-PROTOCOL": "ssl",
    "X-FORWARDED-PROTO": "https",
    "X-FORWARDED-SSL": "on",
} # dict of headers/values the front-end proxy uses to mark HTTPS requests
# forwarded_allow_ips = "127.0.0.1" # front-end IPs allowed to send secure headers (comma separated, * allowed; None uses the FORWARDED_ALLOW_IPS env var)
# proxy_allow_ips = "127.0.0.1" # front-end IPs allowed to send proxy requests (comma separated, * allowed)
proxy_allow_ips = "*" if DEBUG else PROXY_ALLOW_IPS
forwarded_allow_ips = "*" if DEBUG else FORWARDED_ALLOW_IPS
proxy_protocol = False # enable PROXY protocol detection
# user = 1005 # run the worker processes as this user
user = None
# group = 205 # run the worker processes as this group
group = None
umask = 0 # file-mode bitmask for files written by gunicorn
initgroups = False # if True, set the worker's group access list from the configured user/group
# worker_tmp_dir = None # directory for worker temporary files
worker_tmp_dir = mkdtemp(prefix="gunicorn_")
sendfile = None # enable sendfile(); None falls back to the SENDFILE env var
reuse_port = False # allow binding multiple listen sockets to the same port
raw_env = [] # extra environment variables, ["foo=bar"]
pidfile = None # PID file name
tmp_upload_dir = None # directory for temporary request data
pythonpath = None # python path
paste = None # load a PasteDeploy config file
raw_paste_global_conf = [] # PasteDeploy global conf variables, ['foo=bar']
strip_header_spaces = False # strip spaces in headers; use is discouraged
# Server Socket
# ==============================================================================
# bind = None # ["127.0.0.8000"] when None; ["0.0.0.0:$PORT"] when a PORT env var is set
bind = ["0.0.0.0:8000"]
backlog = 1024 # max number of pending connections
# Worker Processes
# ==============================================================================
# workers = 1 # number of request-handling worker processes, typically 2-4 x $(NUM_CORES)
workers = multiprocessing.cpu_count() * 2 + 1
if DEBUG:
    workers = 1
# worker_class = 'sync' # worker type to use
worker_class = "sync" if DEBUG else "gevent"
# threads = 1 # gthread only: threads per worker, typically 2-4 x $(NUM_CORES); gthread is not used here.
worker_connections = 1000 # eventlet/gevent only: max simultaneous clients
max_requests = 10000 # max requests per worker before it is reloaded; 0 disables worker reload
max_requests_jitter = 1000 # each worker's max_requests is jittered by randint(0, max_requests_jitter)
timeout = 30 # workers silent for this many seconds are killed and restarted
graceful_timeout = 30 # workers still alive this many seconds after a graceful restart are force-killed
keepalive = 3 # non-sync only: keep connections open this many seconds waiting for requests
# Server Hooks
# ==============================================================================
def on_starting(server):
    """
    Called just before the master process is initialized.
    """
def on_reload(server):
    """
    Called when workers are reloaded via SIGHUP.
    """
def when_ready(server):
    """
    Called just after the server is started.
    """
def pre_fork(server, worker):
    """
    Called just before a worker is forked.
    """
def post_fork(server, worker):
    """
    Called just after a worker has been forked.
    """
def post_worker_init(worker):
    """
    Called just after a worker has finished its __init__.
    """
def worker_int(worker):
    """
    Called just after a worker exits on SIGINT or SIGQUIT.
    """
def worker_abort(worker):
    """
    Called when a worker receives SIGABRT, typically on timeout.
    """
def pre_exec(server):
    """
    Called just before a new master process is forked.
    """
def pre_request(worker, req):
    """
    Called just before a worker processes a request.
    """
    worker.log.debug("%s %s", req.method, req.path)
def post_request(worker, req, environ, resp):
    """
    Called just after a worker has processed a request.
    """
    # NOTE(review): the `environ` parameter shadows the module-level
    # `environ` import; harmless here but worth renaming if the body grows.
def child_exit(server, worker):
    """
    Called in the master process just after a worker exits.
    """
def worker_exit(server, worker):
    """
    Called in the worker process just after a worker exits.
    """
def nworkers_changed(server, new_value, old_value):
    """
    Called just after num_workers has changed.
    """
def on_exit(server):
    """
    Called just before gunicorn exits.
    """
| by-Exist/django-skeleton | backend/gunicorn.conf.py | gunicorn.conf.py | py | 8,837 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "environ.Env",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 135,
"usage_type": "call"
}
] |
16662100384 | from typing import Optional
from pythongame.core.ability_effects import register_ability_effect, AbilityWasUsedSuccessfully, AbilityResult
from pythongame.core.buff_effects import get_buff_effect, AbstractBuffEffect, register_buff_effect, \
StatModifyingBuffEffect
from pythongame.core.common import AbilityType, Millis, BuffType, UiIconSprite, SoundId, PeriodicTimer, HeroUpgrade, \
HeroStat
from pythongame.core.game_data import register_ability_data, AbilityData, register_ui_icon_sprite_path, \
register_buff_text, ABILITIES
from pythongame.core.game_state import GameState, WorldEntity, NonPlayerCharacter, Event, BuffEventOutcome, \
PlayerUsedAbilityEvent, PlayerLostHealthEvent
from pythongame.core.hero_upgrades import register_hero_upgrade_effect
from pythongame.core.visual_effects import VisualCircle
# Identifiers and tuning constants for the stealth ability.
ABILITY_TYPE = AbilityType.STEALTH
BUFF_STEALTH = BuffType.STEALTHING
BUFF_POST_STEALTH = BuffType.AFTER_STEALTHING
DURATION_STEALTH = Millis(15000)  # how long the invisibility lasts
DURATION_POST_STEALTH = Millis(2500)  # duration of the post-stealth buff
SPEED_DECREASE = 0.3  # movement-speed penalty while stealthed
DODGE_CHANCE_BONUS = 0.05  # dodge-chance bonus granted by stealth
def _apply_ability(game_state: GameState) -> AbilityResult:
    """Activate stealth: cancel all active buffs, then apply the stealth buff."""
    player_state = game_state.player_state
    player_state.force_cancel_all_buffs()
    stealth_buff = get_buff_effect(BUFF_STEALTH)
    player_state.gain_buff_effect(stealth_buff, DURATION_STEALTH)
    return AbilityWasUsedSuccessfully()
class Stealthing(StatModifyingBuffEffect):
    """Buff active while the hero is stealthed: reduced movement speed, bonus
    dodge chance, and invisibility to enemies. Broken early by taking damage
    or using any other ability."""
    def __init__(self):
        super().__init__(BUFF_STEALTH,
                         {HeroStat.MOVEMENT_SPEED: -SPEED_DECREASE, HeroStat.DODGE_CHANCE: DODGE_CHANCE_BONUS})
    def apply_start_effect(self, game_state: GameState, buffed_entity: WorldEntity, buffed_npc: NonPlayerCharacter):
        # Apply the stat modifiers (super), then make the player invisible.
        super().apply_start_effect(game_state, buffed_entity, buffed_npc)
        game_state.player_state.is_invisible = True
    def apply_end_effect(self, game_state: GameState, buffed_entity: WorldEntity, buffed_npc: NonPlayerCharacter):
        # Revert the stat modifiers (super), then restore visibility.
        super().apply_end_effect(game_state, buffed_entity, buffed_npc)
        game_state.player_state.is_invisible = False
    def buff_handle_event(self, event: Event) -> Optional[BuffEventOutcome]:
        # Using any other ability, or losing health, cancels stealth early.
        used_ability = isinstance(event, PlayerUsedAbilityEvent) and event.ability != AbilityType.STEALTH
        player_lost_health = isinstance(event, PlayerLostHealthEvent)
        if used_ability or player_lost_health:
            return BuffEventOutcome.cancel_effect()
class AfterStealthing(AbstractBuffEffect):
    """Short-lived buff after stealth ends: periodically shows a circle visual
    around the hero and, on expiry, removes the dodge-chance bonus."""
    def __init__(self):
        self.timer = PeriodicTimer(Millis(160))  # cadence of the circle visual
    def apply_middle_effect(self, game_state: GameState, buffed_entity: WorldEntity, buffed_npc: NonPlayerCharacter,
                            time_passed: Millis):
        if self.timer.update_and_check_if_ready(time_passed):
            visual_effect = VisualCircle(
                (250, 150, 250), buffed_entity.get_center_position(), 18, 25, Millis(220), 1, buffed_entity)
            game_state.visual_effects.append(visual_effect)
    def apply_end_effect(self, game_state: GameState, buffed_entity: WorldEntity, buffed_npc: NonPlayerCharacter):
        # Removes the post-stealth dodge bonus.
        # NOTE(review): the matching grant of this bonus is not visible in this
        # file — confirm where BUFF_POST_STEALTH is applied/granted.
        game_state.player_state.modify_stat(HeroStat.DODGE_CHANCE, -DODGE_CHANCE_BONUS)
    def get_buff_type(self):
        return BUFF_POST_STEALTH
def _upgrade_mana_cost(_game_state: GameState):
    # Hero-upgrade effect: lowers the stealth ability's mana cost (from 25) to 20.
    ABILITIES[ABILITY_TYPE].mana_cost = 20
def register_stealth_ability():
    """Register the Stealth ability: effect callback, ability data, icon, both buffs, and the mana-cost upgrade hook."""
    ui_icon_sprite = UiIconSprite.ABILITY_STEALTH
    register_ability_effect(ABILITY_TYPE, _apply_ability)
    # Description is rendered in the UI; dodge bonus shown as a percent, duration in seconds.
    description = "Become invisible to enemies. After effect ends, gain +" + \
                  "{:.0f}".format(DODGE_CHANCE_BONUS * 100) + "% dodge chance for " + \
                  "{:.1f}".format(DURATION_POST_STEALTH / 1000) + "s"
    mana_cost = 25
    cooldown = Millis(6000)
    ability_data = AbilityData("Stealth", ui_icon_sprite, mana_cost, cooldown, description, SoundId.ABILITY_STEALTH)
    register_ability_data(ABILITY_TYPE, ability_data)
    register_ui_icon_sprite_path(ui_icon_sprite, "resources/graphics/sneak_icon.png")
    register_buff_effect(BUFF_STEALTH, Stealthing)
    register_buff_text(BUFF_STEALTH, "Stealthed")
    register_buff_effect(BUFF_POST_STEALTH, AfterStealthing)
    register_buff_text(BUFF_POST_STEALTH, "Element of surprise")
    register_hero_upgrade_effect(HeroUpgrade.ABILITY_STEALTH_MANA_COST, _upgrade_mana_cost)
| gx-55/pygame-for-5725 | pythongame/game_data/abilities/ability_stealth.py | ability_stealth.py | py | 4,293 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pythongame.core.common.AbilityType.STEALTH",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pythongame.core.common.AbilityType",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pythongame.core.common.BuffType.STEALTHING",
"line_number"... |
17262201723 | import requests
import json
class Geolocation:
    """Look up the latitude/longitude of an IP address via geolocation-db.com.

    The service answers with JSONP (``callback({...})``), so the payload must
    be unwrapped from the callback parentheses before JSON parsing.
    """

    def __init__(self, ip_address):
        """Build the JSONP endpoint URL for the given IP address (str)."""
        self.url = f'https://geolocation-db.com/jsonp/{ip_address}'

    def get(self):
        """Fetch the geolocation record and return (latitude, longitude).

        Returns:
            tuple: (latitude, longitude) as reported by the service; either
            value may be None when the service has no data for the address.

        Raises:
            requests.RequestException: on network failure, timeout or HTTP error.
            json.JSONDecodeError: if the response body is not valid JSONP.
        """
        # Timeout so a dead host cannot hang the caller forever.
        r = requests.get(self.url, timeout=10)
        r.raise_for_status()
        body = r.content.decode()
        # Unwrap the JSONP callback: take everything between the first '('
        # and the *last* ')' — robust even if the payload contains parentheses.
        payload = body[body.index("(") + 1: body.rindex(")")]
        result = json.loads(payload)
        return result.get('latitude'), result.get('longitude')
#print("latitude: ", ip_latitude)
#print("longitude: ", ip_longitude)
| bucs110a0spring22/final-exam-fujikashimada | IPGeolocationAPI.py | IPGeolocationAPI.py | py | 782 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
}
] |
6217946633 | """
Export an object that will handle all the communication with the Walabot.
"""
import json
import select
import socket
import time
from threading import Thread

from tinydb import Query

from DBHandler import DBHandler as TinyDB
from config import DB_PATH, UTF_FORMAT, ROOM_FIELD, NUMBER_OF_PEOPLE_FIELD, ROOMS_DATA_TABLE, MAX_PEOPLE_FIELD
CONNECTION_CLOSE_MESSAGE = ''
BACKLOG = 5
MAX_MESSAGE_SIZE = 1024
CONNECTION_SOCKET_INDEX = 0
ADDRESS_INDEX = 1
SELECT_TIMEOUT = 5
class FreeRoomsServer:
    """
    The server that will listen to Walabot connections and store the Walabot data in the db.

    :ivar server_address: The server address containing the ip, port as a tuple.
    :type server_address: C{tuple} of C{str}, C{int}
    :ivar server_socket: The socket that will listen to any new connections.
    :type server_socket: C{socket.socket}
    :ivar free_rooms_db: The db to insert all the values gotten from the Walabot app
    :type free_rooms_db: L{tinydb.TinyDB}
    :ivar room: The Query object for doing queries in the TinyDB object.
    :type room: L{tinydb.Query}
    :ivar connections: The list of (socket, address) tuples for connected clients.
    :type connections: C{list}
    """

    def __init__(self, ip, port):
        """
        Instantiate the server socket and open the rooms database.

        :param ip: The ip the server will listen to.
        :type ip: C{str}
        :param port: The port that the server will listen to.
        :type port: C{int}
        """
        self.server_address = ip, port
        self.server_socket = socket.socket()
        self.server_socket.bind(self.server_address)
        self.server_socket.listen(BACKLOG)
        self.free_rooms_db = TinyDB(DB_PATH, default_table=ROOMS_DATA_TABLE)
        self.room = Query()
        self.connections = []

    def start(self):
        """
        Start the server: one thread accepting new client connections and one
        thread servicing all connected clients.
        """
        print("Start server functionality, listen on address: {0}".format(self.server_address))
        connections_thread = Thread(target=self.accept_connections)
        connections_thread.start()
        print("Start handling clients.")
        handle_clients_thread = Thread(target=self.handle_clients)
        handle_clients_thread.start()

    def accept_connections(self):
        """
        Accept connections forever and append each (socket, address) tuple to
        the shared connections list.
        """
        while True:
            print("Waiting for new connections.")
            client_socket, address = self.server_socket.accept()
            print("Got connection from -> {0}".format(address))
            print("Putting connection in the shared queue.")
            self.connections.append((client_socket, address))

    def handle_connection_close(self, client_connection, address):
        """
        Handle the situation that the given client has closed the connection:
        close our side of the socket and drop it from the connections list.

        :param client_connection: The client's socket connected to our server.
        :type client_connection: C{socket.socket}
        :param address: The address of the client
        :type address: C{tuple} of C{str}, C{int}
        """
        print("Connection was closed by the client {0}.".format(address))
        client_connection.close()
        # Removing the connection tuple that was just closed from the connections list.
        self.connections.remove((client_connection, address))
        print("Client {0} was removed from the connections list.".format(address))

    def handle_client(self, client_connection, address):
        """
        Read one message from the client socket and act on it: an empty read
        means the peer closed; otherwise the payload is a JSON room record that
        is inserted into (or updated in) the rooms db.

        :param client_connection: The socket connected to the client
        :type client_connection: C{socket.socket}
        :param address: The address of the connected client.
        :type address: C{tuple} of C{str}, C{int}
        """
        try:
            msg = client_connection.recv(MAX_MESSAGE_SIZE).decode(UTF_FORMAT)
            # An empty msg means that the client closed the connection.
            if msg == CONNECTION_CLOSE_MESSAGE:
                self.handle_connection_close(client_connection, address)
            # Otherwise we got a message from the Walabot and we need to handle it.
            else:
                data = json.loads(msg)
                print("Got {0} from client {1}".format(data, address))
                # If there isn't a row for this room in the db, insert one.
                if not self.free_rooms_db.search(self.room.name == data[ROOM_FIELD]):
                    print("Inserting new row.")
                    self.free_rooms_db.insert(data)
                # Update the existing row with the new data.
                else:
                    print("Updating room with new data, number of people is: {0}".format(data[NUMBER_OF_PEOPLE_FIELD]))
                    self.free_rooms_db.update({NUMBER_OF_PEOPLE_FIELD: data[NUMBER_OF_PEOPLE_FIELD],
                                               MAX_PEOPLE_FIELD: data[MAX_PEOPLE_FIELD]},
                                              self.room.name == data[ROOM_FIELD])
        except socket.error:
            self.handle_connection_close(client_connection, address)

    def handle_clients(self):
        """
        Service all clients: wait (via select) until a socket is readable,
        then handle it. Sleeps briefly when no clients are connected so the
        loop does not busy-spin at 100% CPU.
        """
        while True:
            # Getting all the connections that are ready for reading.
            client_connections = [connection_tuple[CONNECTION_SOCKET_INDEX] for connection_tuple in self.connections]
            if client_connections:
                rlist, wlist, exlist = select.select(client_connections, [], [], SELECT_TIMEOUT)
                for connection in rlist:
                    # Getting the corresponding address of the connection socket.
                    address = [connection_tuple[ADDRESS_INDEX] for connection_tuple in self.connections
                               if connection_tuple[CONNECTION_SOCKET_INDEX] == connection]
                    # Taking the first index because it's a list with one item.
                    self.handle_client(connection, address[0])
            else:
                # BUGFIX: with no clients the original loop spun without
                # blocking, pegging a CPU core. Sleep briefly instead.
                time.sleep(0.1)
| Walabot-Projects/Walabot-MeetingRoom | server/FreeRoomsServer.py | FreeRoomsServer.py | py | 6,324 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "DBHandler.DBHandler",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "config.DB_PATH",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "config.ROOMS_DAT... |
8393586286 | import json
import re
from collections import Counter
import matplotlib.pyplot as plt
import pandas as pd
import requests
from nltk.corpus import stopwords
# Exported Twitch chat log; must contain a "user_msg" column with the message text.
df = pd.read_csv("./misc/chanlog.csv")
def fetch_emotes(url: str, id: str) -> dict:
    """GET ``url + id`` and return the decoded JSON payload.

    Args:
        url: API endpoint prefix (trailing slash included).
        id: channel/room identifier appended to the URL.

    Returns:
        dict: parsed JSON response, or an empty dict when the request fails.
    """
    # BUGFIX: the original bound the response JSON to a local named ``json``,
    # shadowing the json module and raising UnboundLocalError on the error path.
    try:
        resp = requests.get(f"{url}{id}", timeout=10)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.RequestException as e:
        print(e)
        return {}
# Local list of Twitch global emote codes, one per line.
global_emotes = []
with open("global_emotes.txt", "r") as f:
    for line in f.readlines():
        line = line.replace("\n", "")
        global_emotes.append(line)

# Channel emotes for Twitch channel id 207813352.
# https://twitchemotes.com/apidocs
data = fetch_emotes(url="https://api.twitchemotes.com/api/v4/channels/", id="207813352")
channel_emotes = [emote["code"] for emote in data["emotes"]]

# FrankerFaceZ room emotes for the channel.
# https://www.frankerfacez.com/developers
data = fetch_emotes(url="https://api.frankerfacez.com/v1/room/", id="hasanabi")
_set = str(data["room"]["set"])
frankerz_emotes = [emote["name"] for emote in data["sets"][_set]["emoticons"]]

# BetterTTV channel and shared emotes.
# https://github.com/pajbot/pajbot/issues/495
data = fetch_emotes(url="https://api.betterttv.net/3/cached/users/twitch/", id="207813352")
betterttv_emotes = [emote["code"] for emote in data["channelEmotes"]]
betterttv_emotes += [emote["code"] for emote in data["sharedEmotes"]]

emotes = global_emotes + channel_emotes + frankerz_emotes + betterttv_emotes

STOPWORDS = set(stopwords.words('english'))

# Tokenise every chat message, dropping emote codes (case-sensitive match)
# and English stopwords (lower-cased match).
words = []
for msg in df["user_msg"]:
    msg = str(msg)
    msg = msg.split()
    for word in msg:
        if word not in emotes:
            word = word.lower()
            if word not in STOPWORDS:
                words.append(word)

# Plot the ten most common remaining words as a horizontal bar chart.
words = dict(Counter(words).most_common(10))
x = list(words.keys())
y = list(words.values())
plt.barh(x, y)
plt.xlabel("Word count")
plt.ylabel("Word")
plt.title("Top 10 words used")
plt.tight_layout()
plt.savefig("top_words.png")
| smehlhoff/twitch-chat-election | top_words.py | top_words.py | py | 1,876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.sto... |
17579834132 | from __future__ import annotations
import os
from pathlib import Path
from typing import Any
ALLOWED_HOSTS: list[str] = []

BASE_DIR = Path(__file__).resolve().parent

# DEBUG is enabled only when the DEBUG env var is exactly the string "True".
DEBUG_ENV = os.environ.get("DEBUG")
DEBUG = DEBUG_ENV == "True"

# Test-suite settings: use a throwaway in-memory SQLite db unless debugging.
DATABASE_NAME = ":memory:" if not DEBUG else BASE_DIR / "db.sqlite3"

DATABASES: dict[str, dict[str, Any]] = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": DATABASE_NAME,
    },
}

INSTALLED_APPS = [
    # Third Party
    "django_view_decorator",
    # Contrib
    "django.contrib.auth",
    "django.contrib.sessions",
    "django.contrib.contenttypes",
    "django.contrib.staticfiles",
    # Local
    "test_app",
    "app_config_test",
]

MIDDLEWARE: list[str] = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
]

ROOT_URLCONF = "tests.urls"

# Test-only key; a hard-coded SECRET_KEY must never be used in production.
SECRET_KEY = "NOTASECRET"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "DIRS": [BASE_DIR / "templates" / "django"],
        "OPTIONS": {"context_processors": []},
    },
]

USE_TZ = True

STATICFILES_DIRS = [
    BASE_DIR / "static",
]

STATIC_URL = "/static/"
| valberg/django-view-decorator | tests/settings.py | settings.py | py | 1,235 | python | en | code | 45 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_num... |
7548657447 | from lina import lina
import argparse
import os
def reverse(dispersed_dir, output_file, original_size):
    """Reassemble a file from the bit-wise shards produced by disperse().

    Args:
        dispersed_dir: directory holding the shard files (data-00000000, ...).
        output_file: path the reconstructed file is written to.
        original_size: size in bytes of the original file, used to trim the
            zero-padding that disperse() appended to each shard.
    """
    filenames = os.listdir(dispersed_dir)
    # Shards are zero-padded (data-00000000, ...) so a lexical sort restores order.
    filenames.sort()
    dispersed_data_list = []
    for filename in filenames:
        # Context manager closes the shard even if decoding raises.
        with open(dispersed_dir + "/" + filename, "rb") as f:
            dispersed_data_list.append("".join(lina.message_to_binary(f.read())))
    i = 0
    bits = []  # collect bits in a list; joining once avoids quadratic str concat
    while True:
        j = i % len(filenames)    # which shard the next bit comes from
        k = i // len(filenames)   # bit offset inside that shard
        if i / 8 > original_size:  # stop once original_size bytes are restored
            break
        bits.append(dispersed_data_list[j][k])
        i += 1
    result = "".join(bits)
    with open(output_file, "wb") as f:
        f.write(lina.split(result))
def disperse(filepath, output_dir):
    """Split a file bit-wise into 8 shard files, round-robin by bit position.

    Bit b of the input goes to shard ``b % 8``; each shard is zero-padded to a
    whole number of bytes and written as ``output_dir/data-0000000N``.

    Args:
        filepath: path of the file to disperse.
        output_dir: target directory (created here); defaults to ./output
            when None.
    """
    output_file_length = 8
    if output_dir is None:
        output_dir = "./output"
    os.mkdir(output_dir)
    with open(filepath, "rb") as f:
        binary = "".join(lina.message_to_binary(f.read()))
    # Slicing with step 8 distributes the bits round-robin in one O(n) pass,
    # equivalent to appending binary[b] to results[b % 8] one bit at a time.
    results = [binary[i::output_file_length] for i in range(output_file_length)]
    for i in range(len(results)):
        # Pad each shard to a byte boundary.
        # NOTE(review): when the shard length is already a multiple of 8 this
        # appends a full extra zero byte; reverse() trims by original_size so
        # it appears harmless — confirm before changing.
        results[i] += "0" * (8 - (len(results[i]) % 8))
    for i in range(output_file_length):
        results[i] = lina.split(results[i])
        with open(output_dir + "/data-" + str(i).zfill(8), "wb") as f:
            f.write(results[i])
def main():
    """CLI entry point: dispatch to disperse() or reverse() based on the mode argument."""
    parser = argparse.ArgumentParser(description="mist-dispersion is a file disperser")
    parser.add_argument("mode", help="disperse or reverse")
    parser.add_argument("-f", "--file")
    parser.add_argument("-d", "--dir")
    parser.add_argument("-o", "--output")
    parser.add_argument("-s", "--size")
    args = parser.parse_args()
    d = args.dir
    if args.mode == "disperse":
        disperse(args.file, d)
    elif args.mode == "reverse":
        # reverse requires both the output path and the original file size.
        if args.output is None or args.size is None:  # "is None", not "== None"
            parser.print_help()
        else:
            reverse(d, args.output, int(args.size))
if __name__ == "__main__":
main()
| TakutoYoshikai/mist-dispersion | mist_dispersion/mist_dispersion.py | mist_dispersion.py | py | 2,069 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "lina.lina.message_to_binary",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lina.lina",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "lina.lina.split",
... |
21201257329 | # -*- coding: utf-8 -*-
from libs.base_strategy import Strategy
from collections import deque
import time
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
class MyStrategy(Strategy):
    """Market-making strategy driven by the buy/sell execution-volume imbalance."""

    # ----------------------------------------------------------------------------
    # Called once at startup
    # ----------------------------------------------------------------------------
    def initialize(self):
        self._ask = self._bid = 0
        self._last_evented_time = time.time()
        self._ordered_id_list = deque(maxlen=100)       # time-bucketed dicts of placed order ids
        self._ordered_id_list.append( {} )
        self._buy_volume_list = deque(maxlen=1000)      # rolling buckets for buy volume
        self._buy_volume_list.append(0)
        self._sell_volume_list = deque(maxlen=1000)     # rolling buckets for sell volume
        self._sell_volume_list.append(0)
        self.__price_list = deque({},maxlen=100)        # samples used to validate the indicator
        self.__index_rate = deque({},maxlen=10000)      # indicator values
        self.__index_delta = deque({},maxlen=10000)     # price move observed after each indicator value
        self.__last_minutes = 0

    # ----------------------------------------------------------------------------
    # Called from the websocket on_message handler
    # ----------------------------------------------------------------------------
    def executions(self,recept_data):
        start = time.time()

        # First-time initialisation of best ask/bid.
        if self._ask == 0 : self._ask = recept_data[0]['price']
        if self._bid == 0 : self._bid = recept_data[0]['price']

        # Process the execution history: track last traded prices and bucket volumes.
        for i in recept_data:
            if i['side']=='BUY' :
                self._ask = int(i['price'])
                self._buy_volume_list[-1] += i['size']
            else:
                self._bid = int(i['price'])
                self._sell_volume_list[-1] += i['size']

        # If more than `interval` seconds passed since the last order, raise the event.
        if( self._last_evented_time + self._strategy_config['interval'] < start and (not self.order_signal_event.is_set()) ):
            self._last_evented_time = start
            self.order_signal_event.set()

    # ----------------------------------------------------------------------------
    # Called whenever self.order_signal_event fires
    # ----------------------------------------------------------------------------
    def realtime_logic(self):
        # Indicator: difference between sqrt(buy volume) and sqrt(sell volume).
        buy_volume = sum(self._buy_volume_list)
        sell_volume = sum(self._sell_volume_list)
        vol_rate = math.sqrt(buy_volume) - math.sqrt(sell_volume)

        # Store (indicator, price, time) to validate the indicator later.
        self.__price_list.append( {'price':self.ltp, 'rate':vol_rate, 'time':time.time()} )

        # Operational status log.
        self._logger.info( ' Vol{:+3.1f} LTP:{:.0f} Profit:{:>+7.0f} Pos:{:>7.3f} API:{:>3} Delay:{:>4.0f}ms({:>4.0f}ms) {}'.format(
                vol_rate, self.ltp, self.current_profit, self.current_pos, self.api_count,
                self.server_latency, self.server_latency_rate, "" if self.server_health == "NORMAL" else " "+self.server_health ))

        # Do not enter while the volume imbalance is below the threshold.
        if math.fabs(vol_rate) < self._strategy_config['volume_th'] :
            return False

        id = ''
        if vol_rate > 0 :
            # Shrink the order size as the current position approaches max_lot.
            size = math.tanh(self._strategy_config['lotsize'] * (self._strategy_config['max_lot'] - max(0,self.current_pos)) / self._strategy_config['max_lot'])
            if size > 0.01 :
                responce = self._limit_buy( price=self._bid-self._strategy_config['depth'], size=size )
                if responce and "JRF" in str(responce) : id = responce['child_order_acceptance_id']

        if vol_rate < 0 :
            # Shrink the order size as the current position approaches max_lot.
            size = math.tanh(self._strategy_config['lotsize'] * (self._strategy_config['max_lot'] + min(0,self.current_pos)) / self._strategy_config['max_lot'])
            if size > 0.01 :
                responce = self._limit_sell( price=self._ask+self._strategy_config['depth'], size=size )
                if responce and "JRF" in str(responce) : id = responce['child_order_acceptance_id']

        # If an order was placed, remember its id in the current time bucket.
        if id!='' : self._ordered_id_list[-1][id]=1

        return (id!='')

    # ----------------------------------------------------------------------------
    # Called every 1 second regardless of server_health; loss-cut checks go here.
    # Return True when a trade happened -> triggers the emergency_wait timer.
    # ----------------------------------------------------------------------------
    def loss_cut_check(self):
        while len(self._ordered_id_list)>self._strategy_config['cancel_time'] :   # while more than cancel_time buckets are queued,
            id_dict = self._ordered_id_list.popleft()                             # pop the oldest bucket and,
            if id_dict!={} :                                                      # if it holds any orders,
                for id,val in id_dict.items() :                                   # cancel every id in the dict
                    self._cancel_childorder( id )                                 # one by one

        # Shift the time-bucket queues.
        self._ordered_id_list.append( {} )
        self._buy_volume_list.append( 0 )
        self._sell_volume_list.append( 0 )
        while len(self._buy_volume_list)>self._strategy_config['volume_period'] :   # drop buckets older than
            info = self._buy_volume_list.popleft()                                  # volume_period
        while len(self._sell_volume_list)>self._strategy_config['volume_period'] :  # drop buckets older than
            info = self._sell_volume_list.popleft()                                 # volume_period

        # Indicator validation (inspect the price move scatter_seconds later).
        for i in range(len(self.__price_list)):
            info = self.__price_list.popleft()                                       # take the oldest sample;
            if info['time']>time.time()-self._strategy_config['scatter_seconds'] :   # if not yet scatter_seconds old,
                self.__price_list.appendleft( info )                                 # put it back and
                break;                                                               # stop scanning
            self.__index_rate.append(info['rate'])                                   # otherwise record the indicator and
            self.__index_delta.append(self.ltp-info['price'])                        # the price move since the sample
        while len(self.__index_rate)>self._strategy_config['scatter_buff_len'] :     # cap the buffer: drop
            info = self.__index_rate.popleft()                                       # entries beyond the limit
        while len(self.__index_delta)>self._strategy_config['scatter_buff_len'] :    # cap the buffer: drop
            info = self.__index_delta.popleft()                                      # entries beyond the limit

        # Every scatter_plot_interval minutes, plot the scatter and post it to discord.
        if len(self.__index_rate)>2 and self.__last_minutes != int(time.time()/self._strategy_config['scatter_plot_interval']/60) :
            self.__last_minutes = int(time.time()/self._strategy_config['scatter_plot_interval']/60)
            self.plot_scatter(np.array(self.__index_rate), np.array(self.__index_delta))

        return False

    # Scatter plot and correlation coefficient ---------------------------------------------------------------
    # Reference URL : https://note.mu/ycrypthon/n/n324c550f2830
    def plot_scatter(self, x, returns, normalize=True):
        """
        :param np.ndarray x: indicator values
        :param np.ndarray returns: observed returns
        :param bool normalize: whether to standardise x before plotting
        """
        # Image file is created inside the log folder.
        image_file = self._parent._parameters._strategy['log_folder']+str(type(self))[17:-13]+'_rate_delta.png'

        assert(len(x) == len(returns))

        # Standardise the indicator.
        x = (x - x.mean()) / x.std() if normalize else x

        # Scatter plot.
        plt.plot(x, returns, 'x')

        # Regression line.
        reg = np.polyfit(x, returns, 1)
        plt.plot(x, np.poly1d(reg)(x), color='c', linewidth=2)

        # Per-interval mean values.
        plt.plot(*_steps(x, returns), drawstyle='steps-mid', color='r', linewidth=2)

        # Correlation coefficient (information coefficient).
        ic = np.corrcoef(x, returns)[0, 1]
        plt.title(f'IC={ic:.3f}, y={reg[0]:.3f}x{reg[1]:+.3f}')
        plt.grid()
        plt.savefig(image_file)
        plt.close()

        # Post to discord (image attached).
        self._send_discord( '指標&リターンの相関のグラフ {}samples'.format(len(self.__index_rate)), image_file )
def _steps(x, y):
int_x = np.round(x)
ret_x = np.unique(int_x)
ret_y = []
for xa in ret_x:
ret_y.append(np.average(y[int_x == xa]))
return ret_x, np.array(ret_y)
| PP-lib/BFS | BFS-X/strategy/mm_volume2.py | mm_volume2.py | py | 9,867 | python | ja | code | 2 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "libs.base_strategy.Strategy",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.deque... |
11604375733 | import configparser
COMMENT_PREFIX = ";#*"
COMMENT_KEY = "__COMMENTS__"
class TABConfigParser(configparser.RawConfigParser):
    """
    This class overrides the Python built-in ConfigParser, because
    TA builder needs to support:
    1. Read/write .conf files with comments (comments are stored in the
       section dict under synthetic ``__COMMENTS__<n>`` keys)
    2. Additional comment prefixes such as ``*``
    3. Multiline values continued with a trailing ``\\``
    """

    def _read(self, fp, fpname):
        """
        Override the built-in _read() method to read comments
        """
        from configparser import DEFAULTSECT, ParsingError

        cursect = None  # None, or a dictionary
        optname = None
        lineno = 0
        e = None  # None, or an exception
        comment_index = 0
        self.top_comments = []            # comments seen before the first stanza
        self.fields_outside_stanza = []   # Splunk allows key=value lines before any [stanza]
        add_space_to_next_line = False
        while True:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            line = line.strip(" ")
            # comment or blank line?
            if line.strip() == "" or line[0] in COMMENT_PREFIX:
                # save the lineno & comments; synthetic key keeps file order
                if cursect:
                    name = f"{COMMENT_KEY}{comment_index}"
                    comment_index += 1
                    cursect[name] = line
                else:
                    self.top_comments.append(line)
                continue
            if line.split(None, 1)[0].lower() == "rem" and line[0] in "rR":
                # no leading whitespace
                continue
            # continuation line?
            # support multiline with \
            if add_space_to_next_line:
                line = " " + line
            if line.strip().endswith("\\"):
                line = line.rstrip("\\ ")
                add_space_to_next_line = True
            else:
                add_space_to_next_line = False
            if line[0].isspace() and cursect is not None and optname:
                value = line.strip()
                if value:
                    cursect[optname].append(value)
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group("header")
                    if sectname in self._sections:
                        cursect = self._sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self._defaults
                    else:
                        cursect = self._dict()
                        cursect["__name__"] = sectname
                        self._sections[sectname] = cursect
                        self._proxies[sectname] = configparser.SectionProxy(
                            self, sectname
                        )
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    # disable the exception since splunk allows the field outside stanzas
                    # raise MissingSectionHeaderError(fpname, lineno, line)
                    self.fields_outside_stanza.append(line)
                # an option line?
                else:
                    mo = self._optcre.match(line)
                    if mo:
                        optname, vi, optval = mo.group("option", "vi", "value")
                        optname = self.optionxform(optname.rstrip())
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            if vi in ("=", ":") and ";" in optval:
                                # ';' is a comment delimiter only if it follows
                                # a spacing character
                                pos = optval.find(";")
                                if pos != -1 and optval[pos - 1].isspace():
                                    optval = optval[:pos]
                            optval = optval.strip()
                            # allow empty values
                            if optval == '""':
                                optval = ""
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred. set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, repr(line))
        # if any parsing errors occurred, raise an exception
        if e:
            raise e

        # join the multi-line values collected while reading
        all_sections = [self._defaults]
        all_sections.extend(list(self._sections.values()))
        for options in all_sections:
            for name, val in list(options.items()):
                if isinstance(val, list):
                    options[name] = "\n".join(val)

    def write(self, fp):
        """
        Override the write() method to write comments
        """
        DEFAULTSECT = "DEFAULT"
        # Emit top-of-file comments and loose fields first, preserving order.
        if hasattr(self, "top_comments"):
            for comment in self.top_comments:
                fp.write(comment)
        if hasattr(self, "fields_outside_stanza"):
            for field in self.fields_outside_stanza:
                fp.write(field)

        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in list(self._defaults.items()):
                fp.write("{} = {}\n".format(key, str(value).replace("\n", "\n\t")))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in list(self._sections[section].items()):
                if key == "__name__":
                    continue
                if key.startswith(COMMENT_KEY):
                    # only write the non empty line
                    if len(value.strip()) > 0:
                        fp.write(value)
                    # should continue as long as it is a comment line
                    continue
                if (value is not None) or (self._optcre == self.OPTCRE):
                    key = " = ".join((key, str(value).replace("\n", "\n\t")))
                fp.write("%s\n" % (key))
            # write the separator line for stanza
            fp.write("\n")

    def optionxform(self, optionstr):
        # Preserve option-name case (the base class lowercases by default).
        return optionstr

    def items(self, section):
        """
        Override the items() method to filter out the comments
        """
        items = configparser.RawConfigParser.items(self, section)
        res = []
        for k, v in items:
            if k.startswith(COMMENT_KEY):
                continue
            res.append((k, v))
        return res

    def options(self, section):
        # Like the base options(), but with stored comment keys filtered out.
        options = configparser.RawConfigParser.options(self, section)
        res = []
        for opt in options:
            if opt.startswith(COMMENT_KEY):
                continue
            res.append(opt)
        return res

    def item_dict(self):
        # Return {section: {key: value}} with comments and bookkeeping keys removed.
        res = {}
        sections = dict(self._sections)
        for section, key_values in list(sections.items()):
            kv = {}
            for k, v in list(key_values.items()):
                if (
                    not isinstance(k, str)
                    or k.startswith(COMMENT_KEY)
                    or k == "__name__"
                ):
                    continue
                kv[k] = v
            res[section] = kv
        return res
| splunk/addonfactory-splunk-conf-parser-lib | addonfactory_splunk_conf_parser_lib.py | addonfactory_splunk_conf_parser_lib.py | py | 7,985 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "configparser.RawConfigParser",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "configparser.DEFAULTSECT",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "configparser.SectionProxy",
"line_number": 83,
"usage_type": "call"
},
{
... |
29963289578 | """demo1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from .views import *
from django.conf.urls import include
from django.contrib.auth.views import LoginView, LogoutView
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the crowdfunding app. View callables come from
# ``from .views import *`` above; media files are served via the static()
# helper appended at the end (development-only pattern).
urlpatterns = [
    path('api-auth/', include('rest_framework.urls')),
    path('', home),
    path('login', loginUserView),
    path('logout', LogoutView.as_view(template_name='logout.html')),
    path('Register', CreateUserView, name='register'),
    path('profile', profile, name='profile'),
    path('editprofile', editprofile, name='editprofile'),
    path('deleteprofile', deleteprofile, name='deleteprofile'),
    path('createProject', createProject, name='createProject'),
    path('myProjects', myProjects, name='myProjects'),
    path('allProjects', allProjects, name='allProjects'),
    path('viewprojectInvalid', viewprojectInvalid, name='viewprojectInvalid'),
    path('viewProjects', viewProjects, name='projects'),
    path('viewProjects/<projectTitle>', viewProjects, name='projects'),
    path('donateProject/<title>',
         donateProject, name='donateProject'),
    path('rateProject/<title>/<int:val>', rateProject, name='rateProject'),
    path('reportProject/<title>', reportProject, name='reportProject'),
    path('cancelProject/<title>', cancelProject, name='cancelProject'),
    path('addimages', addimages, name='addimages'),
    path('addtags', addtags, name='addtags'),
    path('search',search, name='search'),
    path('verify/<str:username>/<dates>', verify, name='verify'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| yahiaelpronc/Django_Project | Project/crowdFund/urls.py | urls.py | py | 2,261 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.url... |
31474181781 | #!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Create : 2020-03-31 19:05
# Last modified: 2020-04-09 14:18
# Filename : company_kbp.py
# Description : 企业清洗库转移到企业知识库,处理:产业/产业领域标签添加、企业标签schema添加
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
from pyArango.connection import Connection as ArangoConnection
from pyArango.theExceptions import AQLFetchError
import pymysql
from dateutil import parser
import datetime
import json
import logging
import re
import copy
import requests
import os
# Module-wide logging setup; config.ini is expected one directory above this file.
logging.basicConfig(level=logging.INFO,
        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)

dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
class RelationPipeline(object):
    def __init__(self):
        """Read config.ini, open the ArangoDB connection, and preload the MySQL schemas."""
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        self.arango_con = ArangoConnection(arangoURL=self.config.get("arango","arango_url"),username= self.config.get("arango","user"),password=self.config.get("arango","passwd"))
        self.arango_db = self.arango_con[self.config.get("arango","db")]
        self.kb_conference = self.arango_db[self.config.get("arango","kb_conference")]
        self.conference_url = self.config.get("url","conference_classifier")
        self._init_division_schema() # init division_schema from mysql
        self._init_industry_schema()
        self._init_conference_tag_schema()
        self.count_graph_update = 0 # number of relation rows updated in arango
        self.total = 0 # total number of relations to add for the processed date
    def _init_conference_tag_schema(self):
        """Load conference tag categories from MySQL into self.conference_tags_schema (keyed by tag id)."""
        self.conference_tags_schema = {}
        sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
                        user = self.config.get("mysql","user") ,
                        passwd = self.config.get("mysql","passwd"),
                        port = self.config.getint("mysql","port") ,
                        db = self.config.get("mysql","db"),
                        charset = "utf8" )
        sql_cur = sql_conn.cursor()
        # Query the tag info related to conferences.
        # NOTE(review): the configured query stores "eq" as a placeholder that is
        # substituted with "=" here — presumably because "=" cannot appear raw in
        # the ini value; confirm against config.ini.
        sql_state = self.config.get("mysql","conference_tags_query").replace("eq","=")
        sql_cur.execute(sql_state)
        datas = sql_cur.fetchall()
        for data in datas:
            tag_name, tag_type, tag_id = data
            tag = {
                "type":tag_type,
                "name":tag_name,
                "id": tag_id
            }
            self.conference_tags_schema[tag_id] = tag
        sql_cur.close()
        sql_conn.close()
    def _init_division_schema(self):
        '''
        Load administrative-division entities from the MySQL res_division table
        into self.division_schema, keyed by division name.
        '''
        self.division_schema = {}
        sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
                        user = self.config.get("mysql","user") ,
                        passwd = self.config.get("mysql","passwd"),
                        port = self.config.getint("mysql","port") ,
                        db = self.config.get("mysql","db"),
                        charset = "utf8" )
        sql_cur = sql_conn.cursor()
        # Initialise the administrative-division relation schema.
        sql_query_industry = "select name, id, level, parent_id from {}".format(self.config.get("mysql","res_division"))
        sql_cur.execute(sql_query_industry)
        divisions = sql_cur.fetchall()
        for division in divisions:
            division_name, division_id, division_level, division_parent_id = division
            # level/parent_id are fetched but not stored here (only name-keyed lookup is needed).
            self.division_schema[division_name] = {
                "relation_type":"concept_relation/100004",
                "object_name":division_name,
                "object_type": "division",
                "object_id": division_id
            }
        sql_cur.close()
        sql_conn.close()
        logger.info("MYSQL division schema 加载完成")
def _init_industry_schema(self):
'''
init loading industry schema at mysql res_industry table
'''
self.industry_schema = {}
sql_conn = pymysql.connect( host = self.config.get("mysql","host") ,
user = self.config.get("mysql","user") ,
passwd = self.config.get("mysql","passwd"),
port = self.config.getint("mysql","port") ,
db = self.config.get("mysql","db"),
charset = "utf8" )
sql_cur = sql_conn.cursor()
# 初始化产业/产业领域 schema
sql_query_industry = "select name, id, parent_id from {}".format(self.config.get("mysql","res_industry"))
sql_cur.execute(sql_query_industry)
labels = sql_cur.fetchall()
for industry in labels:
industry_name, industry_id, parent_id = industry
self.industry_schema[industry_id] = {
"relation_type":"concept_relation/100011",
"object_name":industry_name,
"object_type": "industry",
"object_id": industry_id,
"object_parent_id": parent_id
}
sql_cur.close()
sql_conn.close()
logger.info("MYSQL industry schema 加载完成")
def get_related_industry_tags(self, industry_id):
'''
根据子领域名称递归返回领域及所有父领域标签
'''
relations = []
# 过滤招商领域与图谱定义不一致的
if not industry_id in self.industry_schema:
return relations
relations.append(self.industry_schema[industry_id])
parent_id = self.industry_schema[industry_id]["object_parent_id"]
while (parent_id):
node = self.industry_schema[parent_id]
relations.append(node)
parent_id = node["object_parent_id"]
return relations
def query_datas(self, process_date):
if process_date == "yesterday":
process_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
elif process_date == "today":
process_date = datetime.today().strftime("%Y-%m-%d")
elif len(process_date.split("-")) == 3:
process_date = process_date
else:
raise Exception("无效参数")
self.process_date = process_date
iso_date_str = process_date + 'T00:00:00+08:00'
iso_date = parser.parse(iso_date_str)
aql = "FOR conference IN {} FILTER conference.create_time >= '{}' SORT kb_conference.create_time return conference".format(
self.config.get("arango","kb_conference"), iso_date)
#aql = "FOR conference IN {} FILTER conference._key=='5ea0f3c9bf45745dcf5d38b5' return conference".format(self.config.get("arango","kb_conference"))
try:
res = self.arango_db.fetch_list(aql)
except AQLFetchError as e:
'''没有查到数据时,fetch_list会抛出异常'''
res = []
logger.warn("Arango会议库没有查到数据",e)
self.total = len(res)
logger.info("[{}],会议知识库查到待处理数据[{}]个".format(process_date, self.total))
return res
def process_division_rel(self, properties):
div_rel = []
province = properties["province"]
city = properties["city"]
area = properties["area"]
if province and province in self.division_schema.keys():
if province in ["北京市","上海市","重庆市","天津市"]:
province = province.replace("市","")
div_rel.append(self.division_schema[province])
if city and city in self.division_schema.keys():
if city in ["北京","上海","重庆","天津"]:
city = city + "市"
div_rel.append(self.division_schema[city])
if area and area in self.division_schema.keys():
div_rel.append(self.division_schema[area])
return div_rel
def process_industry_rel(self,properties):
'''
会议分类标签ID化添加
'''
industry_rel = []
conference_tag = []
industry_field_tags = []
post_data = {
"conference_list": [ {
"title": properties["name"],
"content": properties["desc"]
} ],
}
try:
res = requests.post(self.conference_url, data=json.dumps(post_data))
if res.status_code == 200:
classify_res = res.json().get("body")[0]
# 验证是否返回对应字段的分类值
if "domain" in classify_res and classify_res["domain"]:
industry_field_tags.append(classify_res["domain"])
if "industry" in classify_res and classify_res["industry"]:
industry_field_tags.append(classify_res["industry"])
if "type" in classify_res and classify_res["type"]:
type_id = self.conference_tags_schema[classify_res["type"]]
conference_tag.append(type_id)
except Exception as e:
logging.error("获取会议分类结果失败,会议=[{}],接口=[{}]".format(properties["name"], self.conference_url),e)
#logger.info("会议分类结果=[{}]".format(industry_field_tags))
for industry_id in industry_field_tags:
for industry_node in self.get_related_industry_tags(industry_id):
if industry_node not in industry_rel:
industry_rel.append(industry_node)
return industry_rel, conference_tag
def process_relations(self, properties):
'''
添加关系:行政区域、产业类别、渠道信息
'''
relations = []
division_rel = self.process_division_rel(properties)
relations.extend(division_rel)
industry_rel, conference_tag = self.process_industry_rel(properties)
relations.extend(industry_rel)
return relations, conference_tag
    def process(self, scan_date):
        """Fetch conferences for *scan_date* and write their relations/tags back to Arango.

        :param scan_date: "yesterday", "today" or "YYYY-MM-DD" (see query_datas).
        """
        datas = self.query_datas(scan_date)
        count = 0
        # Update each conference document in the Arango knowledge base.
        for data in datas:
            #logger.info("处理会议关系,会议名=[{}]".format(data["name"]))
            conference_key = data["_key"]
            #if data["properties"]['city'] not in ["北京","上海","重庆","天津"]:
            #    continue
            relations, conference_tag = self.process_relations(data["properties"])
            # Open question from the original author: delete conferences that
            # received no classification? (left disabled)
            # if not relations:
            #     try:
            #         doc = self.kb_conference[conference_key]
            #         doc.delete()
            #         self.count_graph_update += 1
            #         logger.info("会议被移除,会议ID=[{}],原因:该会议不属于任何产业领域".format(conference_key))
            #         continue
            #     except Exception as e:
            #         logger.error("会议数据移除失败,会议ID=[{}]".format(conference_key))
            try:
                doc = self.kb_conference[conference_key]
                doc["relations"] = relations
                doc["tags"] = conference_tag
                doc["update_time"] = datetime.datetime.today()
                doc.save()
                #print('relations:',doc['relations'])
                self.count_graph_update += 1
            except Exception as e:
                # NOTE(review): the exception detail `e` is not logged -- only
                # the conference name; consider exc_info for diagnosis.
                logger.error("会议标签、关系添加失败,会议名=[{}]".format(data["name"]))
            count += 1
            # Progress log every 500 conferences and at the end of the batch.
            if count % 500 == 0 or count == self.total:
                logger.info("前[{}]个会议标签、关系添加完成".format(count))
        logger.info("日期[{}]清洗库共找到会议{}个,arango会议库更新关系{}个".format(
            self.process_date, self.total, self.count_graph_update))
if __name__ == "__main__":
    # Earliest available date in the source data: 2019-06-03.
    pipeline = RelationPipeline()
    scan_date = sys.argv[1] if len(sys.argv) > 1 else "yesterday"
    pipeline.process(scan_date)
| RogerJTX/KbpPipeline_ExpertSystem | conference/conference_relation.py | conference_relation.py | py | 12,777 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.dirna... |
73706222505 | from django.urls import path
from .views import addVehiculo, registroView, loginView, listarVehiculo, logoutView
from . import views
# Alternative: keep only `from . import views` above and reference the handlers
# as views.<name> inside urlpatterns.
# URL routes for the vehiculo app. The route names below are referenced by
# {% url %} tags and redirects elsewhere in the project -- keep them stable.
urlpatterns = [
    path('add/', addVehiculo, name='addVehiculo'),
    path('listar/', listarVehiculo, name='listar'),
    path('registro/', registroView, name='registro'),
    path('login/', loginView, name='login'),
    path('logout/', logoutView, name='logout'),
]
| daus2020/dj_cars | vehiculo/urls.py | urls.py | py | 761 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.addVehiculo",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.listarVeh... |
43162877389 | # 用卷积神经网络训练cifar10数据集:
# 搭建一个一层卷积 两层全连接的网络:
# 使用6个5*5的卷积核,过一个步长为2且大小为2*2的池化核,过128个神经元的全连接层,
# 因label是10分类,过10个神经元的全连接层。
# 1) 5*5 conv, filters=6 2)2*2 pool, strides=2 3)Dense 128 4)Dense 10
# C:(核:6*5*5, 步长:1, 填充:same)
# B:(Yes)
# A:(relu)
# P:(max, 核:2*2, 步长:2, 填充:same)
# D:(0.2)
# Flatten
# Dense(神经元:128, 激活:relu, Dropout:0.2)
# Dense(神经元:10, 激活:softmax) #使输出符合概率分布
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Dense, Flatten
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
import os
# Load the CIFAR-10 dataset and scale pixel values into [0, 1].
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# 定义网络结构
class Baseline(Model):
    """One-conv CNN for CIFAR-10: CBAPD block, then Flatten -> Dense(128) -> Dense(10)."""
    def __init__(self):
        super(Baseline, self).__init__()
        # NOTE(review): these attribute names feed the checkpoint variable
        # paths used by ./checkpoint/cifar.ckpt -- renaming them would likely
        # invalidate existing checkpoints; confirm before refactoring.
        self.c = Conv2D(filters=6, kernel_size=(5, 5), padding='same')   # Convolution
        self.b = BatchNormalization()                                    # Batch norm
        self.a = Activation('relu')                                      # Activation
        self.p = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')  # Pooling
        self.d = Dropout(0.2)                                            # Dropout
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.fd = Dropout(0.2)
        self.d2 = Dense(10, activation='softmax')  # softmax yields a probability distribution
    def call(self, x):
        # Forward pass: CBAPD, then the fully-connected head.
        x = self.c(x)
        x = self.b(x)
        x = self.a(x)
        x = self.p(x)
        x = self.d(x)
        x = self.flatten(x)
        x = self.d1(x)
        x = self.fd(x)
        y = self.d2(x)
        return y
# Instantiate the network.
model = Baseline()
# Optimizer, loss and metric. from_logits=False because the model already
# applies softmax in its final layer.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
# Resume from checkpoint if one exists (".index" marks a saved checkpoint).
checkpoint_save_path = './checkpoint/cifar.ckpt'
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------------Loading Model-------------------')
    model.load_weights(checkpoint_save_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)
# Train, validating on the test split every epoch.
history = model.fit(x_train, y_train, batch_size=32, epochs=5,
                    validation_data=(x_test, y_test),
                    validation_freq=1,
                    callbacks=[cp_callback])
model.summary()
# Dump every trainable variable (name, shape, values) to a text file for inspection.
with open('./weights.txt', 'w') as weights_file:
    for variable in model.trainable_weights:
        parts = (str(variable.name), str(variable.shape), str(variable.numpy()))
        weights_file.write('\n'.join(parts) + '\n')
# Visualize the training curves recorded by model.fit().
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.subplot(1, 2, 1)
plt.plot(acc, label='Train ACC')
plt.plot(val_acc, label='Test ACC')
plt.title('Train and Test ACC')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label='Train loss')
# BUG FIX: the validation-loss curve previously plotted `val_acc`.
plt.plot(val_loss, label='Test loss')
plt.title('Train and Test loss')
plt.legend()
plt.show()
| Demonya/tensorflow_basic | P5/P5.10:卷积神经网络搭建示例.py | P5.10:卷积神经网络搭建示例.py | py | 3,454 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.Model",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 33,
"usage_type": "call"
},
{
... |
9186533792 | """Contains pathfinding and maze generation algorithms"""
# Decides whether the program should use the C++ extension modules
from src.pathfinding.cpp_or_py import use_square_h
if use_square_h:
from src.pathfinding.cpp.modules import Square
else:
from src.pathfinding.py.square import Square
from lib.timer import sleep
from threading import Lock
from dataclasses import dataclass, field
from queue import PriorityQueue
from time import perf_counter_ns
import random
@dataclass(slots=True)
class AlgoState:
    """Stores the state of the algorithms, whether they are finished or not.

    Shared between the GUI thread and the daemon algo thread; all mutable
    state is read/written through methods that take ``lock``.
    """
    # Possible phases
    PHASE_ALGO: int = field(init=False)
    PHASE_MAZE: int = field(init=False)
    # Possible algorithms
    ALGO_DIJKSTRA: int = field(init=False)
    ALGO_A_STAR: int = field(init=False)
    ALGO_BI_DIJKSTRA: int = field(init=False)
    ALGO_BEST_PATH: int = field(init=False)
    ALGO_RECURSIVE_MAZE: int = field(init=False)
    # The current phase and current/last algorithm.
    _phase: int = field(init=False)
    _algo: int = field(init=False)
    _finished: bool = False  # Combination with _algo preserves which algo last ran
    # Special variables
    _unique_int: int = 0  # Incremented by each _generate_unique_int() call
    NONE: int = _unique_int  # Value is 0 which returns false when casted to bool
    lock: Lock = Lock()
    # Run options (set via run_options before run())
    _start: Square = None
    _mid: Square = None
    _end: Square = None
    _ignore_square: Square = None
    # Control the speed of algorithms
    _DEFAULT_BEST_PATH_DELAY_MS: int = 3
    _best_path_delay_ms: int = field(init=False)
    _DEFAULT_RECURSIVE_MAZE_DELAY_US: int = 250
    _recursive_maze_delay_us: int = field(init=False)
    # Timer for algorithms (totals/avg/max/min over timed sections)
    timer_total: float = 0
    timer_avg: float = None
    timer_max: float = float("-inf")
    timer_min: float = float("inf")
    timer_count: int = 0
    _timer_start_time: float = None
    def __post_init__(self):
        """Initialize variables with their unique values."""
        self.PHASE_ALGO = self._generate_unique_int()
        self.PHASE_MAZE = self._generate_unique_int()
        self.ALGO_DIJKSTRA = self._generate_unique_int()
        self.ALGO_A_STAR = self._generate_unique_int()
        self.ALGO_BI_DIJKSTRA = self._generate_unique_int()
        self.ALGO_BEST_PATH = self._generate_unique_int()
        self.ALGO_RECURSIVE_MAZE = self._generate_unique_int()
        self.reset()
    def start_loop(self) -> None:
        """Starts the algo loop. Place on a daemon thread."""
        self._algo_loop()
    def run_options(self, start, mid, end, ignore_square) -> None:
        """Set the options that will be performed on run"""
        with self.lock:
            self._start = start
            self._mid = mid
            self._end = end
            self._ignore_square = ignore_square
    def run(self, phase, algo) -> None:
        """Start an algorithm using PHASE and ALGO, NULL where applicable."""
        self._set_phase(phase)
        self._set_algo(algo)
        self._set_finished(False)
    def check_phase(self) -> int:
        """Checks the phase"""
        with self.lock:
            return self._phase
    def check_algo(self) -> int:
        """Checks the algo"""
        with self.lock:
            return self._algo
    def check_finished(self) -> bool:
        """Checks if algo is finished"""
        with self.lock:
            return self._finished
    def reset(self) -> None:
        """Resets options to their default values"""
        with self.lock:
            self._phase = self.NONE
            self._algo = self.NONE
            self._start = None
            self._mid = None
            self._end = None
            self._ignore_square = None
            self._finished = False
            self._best_path_delay_ms = self._DEFAULT_BEST_PATH_DELAY_MS
            self._recursive_maze_delay_us = self._DEFAULT_RECURSIVE_MAZE_DELAY_US
    def set_best_path_delay(self, ms: int) -> None:
        """Change the delay for the next best path"""
        with self.lock:
            self._best_path_delay_ms = ms
    def set_recursive_maze_delay(self, us: int) -> None:
        """Change the delay for the next recursive maze"""
        with self.lock:
            self._recursive_maze_delay_us = us
    def _set_phase(self, phase: int) -> None:
        """Change the phase. Use PHASE constants."""
        with self.lock:
            self._phase = phase
    def _set_algo(self, algo: int) -> None:
        """Change the algo. Use ALGO constants."""
        with self.lock:
            self._algo = algo
    def _set_finished(self, x: bool) -> None:
        """Set finshed to true or false"""
        with self.lock:
            self._finished = x
    def _algo_loop(self) -> None:
        """This loop is placed on a daemon thread and watches for updates.

        Busy-polls phase/algo; when a run is requested it dispatches to the
        matching algorithm, then marks the run finished and clears the phase.
        """
        while True:
            # Check if algo
            if self.check_phase() == self.PHASE_ALGO and not self.check_finished():
                previous_algo = self.check_algo()
                # Without a mid square, run the selected single-target algo.
                if not self._mid:
                    if self.check_algo() == self.ALGO_DIJKSTRA:
                        dijkstra(self, self._start, self._end, self._ignore_square, draw_best_path=True)
                    elif self.check_algo() == self.ALGO_A_STAR:
                        a_star(self, self._start, self._end, self._ignore_square, draw_best_path=True)
                    elif self.check_algo() == self.ALGO_BI_DIJKSTRA:
                        bi_dijkstra(self, self._start, self._end, self._ignore_square, draw_best_path=True)
                else:
                    start_mid_end(self, self._start, self._mid, self._end)
                self.set_best_path_delay(self._DEFAULT_BEST_PATH_DELAY_MS)  # Set to 0 with no vis
                self._set_algo(previous_algo)  # Preserves more info (best-path overwrote it)
                self._set_finished(True)
                self._set_phase(self.NONE)
            # Check if maze
            elif self.check_phase() == self.PHASE_MAZE and not self.check_finished():
                if self.check_algo() == self.ALGO_RECURSIVE_MAZE:
                    recursive_maze(self)
                    self.set_recursive_maze_delay(self._DEFAULT_RECURSIVE_MAZE_DELAY_US)
                self._set_finished(True)
                self._set_phase(self.NONE)
    def _timer_start(self) -> None:
        """Start timer for algo. Not for general use."""
        self._timer_start_time = perf_counter_ns()
    def _timer_end(self, count=True) -> None:
        """End timer for algo. Not for general use.

        count=False accumulates time without counting an iteration (used for
        setup sections).
        """
        end = perf_counter_ns()
        total = (end - self._timer_start_time) / 10**9  # Time in seconds
        self.timer_total += total
        if count:
            self.timer_count += 1
        if self.timer_count:
            self.timer_avg = self.timer_total / self.timer_count
        if total:  # Make it obvious for 0 max values
            self.timer_max = max(self.timer_max, total)
        if total:  # 0 min values are trivial
            self.timer_min = min(self.timer_min, total)
    def _timer_reset(self) -> None:
        """Resets timer. Not for general use."""
        self.timer_total: float = 0
        self.timer_avg: float = None
        self.timer_max: float = float("-inf")
        self.timer_min: float = float("inf")
        self.timer_count: int = 0
        self._timer_start_time: float = None
    def _generate_unique_int(self) -> int:
        """Assign unique int on every call"""
        self._unique_int += 1
        return self._unique_int
    def thread_lock(self) -> None:
        """For use in custom context manager for consistency with C++"""
        self.lock.acquire()
    def thread_unlock(self) -> None:
        """For use in custom context manager for consistency with C++"""
        self.lock.release()
def dijkstra(algo: AlgoState, start: Square, end: Square, ignore_square: Square, draw_best_path: bool) -> dict:
    """Dijkstra's algorithm on the square grid (unit edge weights).

    Returns the came_from map; optionally draws the best path when the end
    square is reached. ignore_square is excluded from open/closed colouring
    (used when dragging a square around).
    """
    # Clear previous and start timer here to include setup of algo into timer
    algo._timer_reset()
    algo._timer_start()
    # Priority queue ordered by (g_score, insertion order); the insertion
    # counter breaks ties so Square objects are never compared.
    open_set = PriorityQueue()
    queue_pos = 0
    open_set.put((0, queue_pos, start))
    # Best-known distance from start for every square in the grid.
    g_score = {Square.get_square(row, col): float("inf") for row in range(Square.get_num_rows()) for col in range(Square.get_num_cols())}
    g_score[start] = 0
    # Keeps track of next square for every square in graph. A linked list basically.
    came_from = {}
    # End timer here to start it again in loop
    algo._timer_end(count=False)
    # Continues until every square has been checked or best path found
    while not open_set.empty():
        # Time increments for each square being checked
        algo._timer_start()
        # Gets the square currently being checked
        curr_square: Square = open_set.get()[2]
        # Terminates if found the best path
        if curr_square == end:
            if draw_best_path:
                _best_path(algo, came_from, end)
            return came_from
        # Relax every traversable neighbour of the current square
        nei: Square
        for nei in curr_square.get_neighbours():
            # Ignore walls
            if nei.is_wall():
                continue
            # Only check square if not already checked.
            temp_g_score = g_score[curr_square] + 1
            if temp_g_score < g_score[nei]:
                came_from[nei] = curr_square
                g_score[nei] = temp_g_score
                queue_pos += 1
                open_set.put((g_score[nei], queue_pos, nei))
                # Set nei to open under certain conditions
                if not nei.is_closed() and nei != end and nei != ignore_square:
                    with algo.lock:
                        nei.set_open()
        # Sets square to closed after finished checking
        if curr_square != start and curr_square != ignore_square:
            with algo.lock:
                curr_square.set_closed()
        # End timer to increment count
        algo._timer_end()
    return came_from
def a_star(algo: AlgoState, start: Square, end: Square, ignore_square: Square, draw_best_path: bool) -> dict:
    """A* search on the square grid using the Manhattan-distance heuristic.

    Same contract as dijkstra(): returns came_from and optionally draws the
    best path on reaching end.
    """
    # Clear previous and start timer here to include setup of algo into timer
    algo._timer_reset()
    algo._timer_start()
    # Priority queue ordered by (f_score, insertion order); the counter breaks
    # ties so Square objects are never compared.
    open_set = PriorityQueue()
    queue_pos = 0
    open_set.put((0, queue_pos, start))
    # g = cost from start; f = g + heuristic estimate to end.
    g_score = {Square.get_square(row, col): float("inf") for row in range(Square.get_num_rows()) for col in range(Square.get_num_cols())}
    g_score[start] = 0
    f_score = {Square.get_square(row, col): float("inf") for row in range(Square.get_num_rows()) for col in range(Square.get_num_cols())}
    f_score[start] = _heuristic(start.get_pos(), end.get_pos())
    # Keeps track of next square for every square in graph. A linked list basically.
    came_from = {}
    # End timer here to start it again in loop
    algo._timer_end(count=False)
    # Continues until every square has been checked or best path found
    while not open_set.empty():
        # Time increments for each square being checked
        algo._timer_start()
        # Gets the square currently being checked
        curr_square: Square = open_set.get()[2]
        # Terminates if found the best path
        if curr_square == end:
            if draw_best_path:
                _best_path(algo, came_from, end)
            return came_from
        # Relax every traversable neighbour of the current square
        nei: Square
        for nei in curr_square.get_neighbours():
            # Ignore walls
            if nei.is_wall():
                continue
            # Only check square if not already checked.
            temp_g_score = g_score[curr_square] + 1
            if temp_g_score < g_score[nei]:
                came_from[nei] = curr_square
                g_score[nei] = temp_g_score
                f_score[nei] = temp_g_score + _heuristic(nei.get_pos(), end.get_pos())
                queue_pos += 1
                open_set.put((f_score[nei], queue_pos, nei))
                # Set nei to open under certain conditions
                if not nei.is_closed() and nei != end and nei != ignore_square:
                    with algo.lock:
                        nei.set_open()
        # Sets square to closed after finished checking
        if curr_square != start and curr_square != ignore_square:
            with algo.lock:
                curr_square.set_closed()
        # End timer to increment count
        algo._timer_end()
    return came_from
def _heuristic(pos1: tuple, pos2: tuple) -> int:
"""Used by A* to prioritize traveling towards next square"""
x1, y1 = pos1
x2, y2 = pos2
return abs(x1 - x2) + abs(y1 - y2)
def bi_dijkstra(algo: AlgoState, start: Square, end: Square, ignore_square: Square, draw_best_path: bool) -> dict | Square:
    """Bi-directional Dijkstra: two swarms expand from start and end at once.

    Returns ``(came_from, first_swarm_meet_square, second_swarm_meet_square)``;
    the meet squares are None when the swarms never touched (no path).
    """
    # Clear previous and start timer here to include setup of algo into timer
    algo._timer_reset()
    algo._timer_start()
    # Priority queue of (g_score, insertion order, square, swarm tag); the
    # counter breaks ties so Square objects are never compared.
    open_set = PriorityQueue()
    queue_pos = 0
    FIRST_SWARM = "FIRST_SWARM"
    open_set.put((0, queue_pos, start, FIRST_SWARM))
    queue_pos += 1
    SECOND_SWARM = "SECOND_SWARM"
    open_set.put((0, queue_pos, end, SECOND_SWARM))
    # Best-known distance (from either seed) for every square in the grid.
    g_score = {Square.get_square(row, col): float("inf") for row in range(Square.get_num_rows()) for col in range(Square.get_num_cols())}
    g_score[start] = 0
    g_score[end] = 0
    # Keeps track of next square for every square in graph. A linked list basically.
    came_from = {}
    # Track the squares opened by each swarm so a meeting can be detected.
    first_swarm = set()
    second_swarm = set()
    first_swarm_meet_square: Square = None
    second_swarm_meet_square: Square = None
    # End timer here to start it again in loop
    algo._timer_end(count=False)
    # Continues until every square has been checked or best path found
    while not open_set.empty():
        # Terminates if the swarms meet each other
        if first_swarm_meet_square or second_swarm_meet_square:
            # Allow access the meet squares using the known start and end squares
            if draw_best_path:
                _best_path_bi_dijkstra(algo, came_from, first_swarm_meet_square, second_swarm_meet_square)
            return came_from, first_swarm_meet_square, second_swarm_meet_square
        # Time increments for each square being checked
        algo._timer_start()
        # Gets the square currently being checked.
        temp = open_set.get()
        curr_square: Square = temp[2]
        swarm = temp[3]
        # Relax every traversable neighbour for whichever swarm owns this square.
        for nei in curr_square.get_neighbours():
            # Ignore walls
            if nei.is_wall():
                continue
            # Only check square if not already checked.
            temp_g_score = g_score[curr_square] + 1
            if temp_g_score < g_score[nei]:
                came_from[nei] = curr_square
                g_score[nei] = temp_g_score
                queue_pos += 1
                open_set.put((g_score[nei], queue_pos, nei, swarm))
                # Set nei to open under certain conditions
                if not nei.is_closed() and nei != ignore_square:
                    if swarm == FIRST_SWARM and nei != end:
                        first_swarm.add(nei)
                        with algo.lock:
                            nei.set_open()
                    elif swarm == SECOND_SWARM and nei != start:
                        second_swarm.add(nei)
                        with algo.lock:
                            nei.set_open()
                # A neighbour already reached by the other swarm means the
                # two searches have met -- record the meeting edge.
                elif swarm == FIRST_SWARM and nei in second_swarm:
                    first_swarm_meet_square = curr_square
                    second_swarm_meet_square = nei
                    break
                elif swarm == SECOND_SWARM and nei in first_swarm:
                    first_swarm_meet_square = nei
                    second_swarm_meet_square = curr_square
                    break
        # Sets square to closed after finished checking
        if curr_square != start and curr_square != end and curr_square != ignore_square:
            with algo.lock:
                curr_square.set_closed()
        # End timer to increment count
        algo._timer_end()
    return came_from, first_swarm_meet_square, second_swarm_meet_square
def _best_path_bi_dijkstra(algo: AlgoState, came_from: dict,
                           first_swarm_meet_square: Square, second_swarm_meet_square: Square) -> None:
    """Used by bi_dijkstra to draw best path for both swarms.

    Traces from each meet square back to its swarm's seed; the second half is
    walked in reverse so the path animates continuously start -> end.
    """
    _best_path(algo, came_from, first_swarm_meet_square)
    # Best path skips these two naturally so need to set them here.
    with algo.lock:
        first_swarm_meet_square.set_path()
        second_swarm_meet_square.set_path()
    _best_path(algo, came_from, second_swarm_meet_square, reverse=True)
def _best_path(algo: AlgoState, came_from: dict, curr_square: Square, reverse: bool = False) -> None:
    """Reconstruct and animate the best path ending at *curr_square*.

    The came_from links run end -> start, so the collected path is walked
    backwards by default; reverse=True walks it forwards (used for the
    second swarm of bi-dijkstra). The endpoints themselves are skipped.
    """
    # Record that the best-path phase is running.
    algo._set_algo(algo.ALGO_BEST_PATH)
    # Puts square path into list so it's easier to traverse in either direction and choose start and end points
    path: list = []
    while curr_square in came_from:
        curr_square = came_from[curr_square]
        path.append(curr_square)
    # Need to traverse in reverse depending on what part of algo
    square: Square
    if reverse:
        # path[:-1] drops the seed square at the far end.
        for square in path[:-1]:
            sleep(algo._best_path_delay_ms, unit="ms")
            with algo.lock:
                square.set_path()
    else:
        # Walk from just before the seed back toward curr_square.
        for square in path[len(path) - 2 :: -1]:
            sleep(algo._best_path_delay_ms, unit="ms")
            with algo.lock:
                square.set_path()
def start_mid_end(algo: AlgoState, start: Square, mid: Square, end: Square) -> None:
    """Run the selected algorithm twice (start->mid, then mid->end) and draw both paths."""
    # Dispatch on the currently selected algorithm.
    if algo.check_algo() == algo.ALGO_DIJKSTRA:
        start_to_mid = dijkstra(algo, start, mid, end, draw_best_path=False)
        mid_to_end = dijkstra(algo, mid, end, start, draw_best_path=False)
        # Fixes squares disappearing when dragging
        with algo.lock:
            start.set_start()
            mid.set_mid()
            end.set_end()
        _best_path(algo, start_to_mid, mid)
        _best_path(algo, mid_to_end, end)
    elif algo.check_algo() == algo.ALGO_A_STAR:
        start_to_mid = a_star(algo, start, mid, end, draw_best_path=False)
        mid_to_end = a_star(algo, mid, end, start, draw_best_path=False)
        # Fixes squares disappearing when dragging
        with algo.lock:
            start.set_start()
            mid.set_mid()
            end.set_end()
        _best_path(algo, start_to_mid, mid)
        _best_path(algo, mid_to_end, end)
    elif algo.check_algo() == algo.ALGO_BI_DIJKSTRA:
        # Each bi-dijkstra call returns (came_from, meet_a, meet_b).
        temp = bi_dijkstra(algo, start, mid, end, draw_best_path=False)
        start_to_mid = temp[0]
        first_swarm_meet_square = temp[1]
        second_swarm_meet_square = temp[2]
        temp = bi_dijkstra(algo, mid, end, start, draw_best_path=False)
        mid_to_end = temp[0]
        third_swarm_meet_square = temp[1]
        fourth_swarm_meet_square = temp[2]
        # Fixes squares disappearing when dragging
        with algo.lock:
            start.set_start()
            mid.set_mid()
            end.set_end()
        _best_path_bi_dijkstra(algo, start_to_mid, first_swarm_meet_square, second_swarm_meet_square)
        _best_path_bi_dijkstra(algo, mid_to_end, third_swarm_meet_square, fourth_swarm_meet_square)
def recursive_maze(algo: AlgoState, chamber: tuple = None,
                   division_limit: int = 3, num_gaps: int = 3) -> None:
    """Creates maze using recursive division.

    :param chamber: (left, top, width, height) of the region to divide;
        None means the whole grid (top-level call).
    :param division_limit: stop dividing once both dimensions fall below this.
    :param num_gaps: how many of the four wall segments get a gap punched in.
    """
    # Only perform these on first call
    if not chamber:
        algo._timer_reset()
    # Start timer here to include setup in timer
    algo._timer_start()
    # Creates chambers to divide into
    if chamber is None:
        chamber_width: int = Square.get_num_rows()
        chamber_height: int = Square.get_num_cols()
        chamber_left: int = 0
        chamber_top: int = 0
    else:
        chamber_width: int = chamber[2]
        chamber_height: int = chamber[3]
        chamber_left: int = chamber[0]
        chamber_top: int = chamber[1]
    # Midpoints where the dividing walls will be drawn
    x_divide = int(chamber_width / 2)
    y_divide = int(chamber_height / 2)
    # End timer here to resume in loop
    algo._timer_end(count=False)
    # Draws vertical maze line within chamber
    if chamber_width >= division_limit:
        for y in range(chamber_height):
            algo._timer_start()
            square: Square = Square.get_square(chamber_left + x_divide, chamber_top + y)
            with algo.lock:
                square.set_wall()
            sleep(algo._recursive_maze_delay_us, unit="us")
            algo._timer_end()
    # Draws horizontal maze line within chamber
    if chamber_height >= division_limit:
        for x in range(chamber_width):
            algo._timer_start()
            square: Square = Square.get_square(chamber_left + x, chamber_top + y_divide)
            with algo.lock:
                square.set_wall()
            sleep(algo._recursive_maze_delay_us, unit="us")
            algo._timer_end()
    # Start timer again
    algo._timer_start()
    # Terminates if below division limit
    if chamber_width < division_limit and chamber_height < division_limit:
        return
    # The four sub-chambers produced by the two dividing walls
    top_left: tuple = (chamber_left, chamber_top, x_divide, y_divide)
    top_right: tuple = (
        chamber_left + x_divide + 1,
        chamber_top,
        chamber_width - x_divide - 1,
        y_divide,
    )
    bottom_left: tuple = (
        chamber_left,
        chamber_top + y_divide + 1,
        x_divide,
        chamber_height - y_divide - 1,
    )
    bottom_right: tuple = (
        chamber_left + x_divide + 1,
        chamber_top + y_divide + 1,
        chamber_width - x_divide - 1,
        chamber_height - y_divide - 1,
    )
    # Combines all chambers into one object
    chambers: tuple = (top_left, top_right, bottom_left, bottom_right)
    # The four wall segments (left/right of the cross, above/below it)
    left: tuple = (chamber_left, chamber_top + y_divide, x_divide, 1)
    right: tuple = (
        chamber_left + x_divide + 1,
        chamber_top + y_divide,
        chamber_width - x_divide - 1,
        1,
    )
    top: tuple = (chamber_left + x_divide, chamber_top, 1, y_divide)
    bottom: tuple = (
        chamber_left + x_divide,
        chamber_top + y_divide + 1,
        1,
        chamber_height - y_divide - 1,
    )
    # Combines walls into one object
    walls: tuple = (left, right, top, bottom)
    # Positions where future walls will land; gaps are nudged off them
    gaps_to_offset: list = [x for x in range(num_gaps - 1, Square.get_num_rows(), num_gaps)]
    # End timer here to resume in loop
    algo._timer_end(count=False)
    # Punch a gap into num_gaps randomly chosen wall segments
    for wall in _get_random_sample(walls, num_gaps):
        # Continue timer here
        algo._timer_start()
        if wall[3] == 1:
            # Horizontal segment: pick a random column along it.
            x = _get_randrange(wall[0], wall[0] + wall[2])
            y = wall[1]
            if x in gaps_to_offset and y in gaps_to_offset:
                if wall[2] == x_divide:
                    x -= 1
                else:
                    x += 1
            if x >= Square.get_num_rows():
                x = Square.get_num_rows() - 1
        else:
            # Vertical segment: pick a random row along it.
            x = wall[0]
            y = _get_randrange(wall[1], wall[1] + wall[3])
            if y in gaps_to_offset and x in gaps_to_offset:
                if wall[3] == y_divide:
                    y -= 1
                else:
                    y += 1
            if y >= Square.get_num_rows():
                y = Square.get_num_rows() - 1
        square: Square = Square.get_square(x, y)
        with algo.lock:
            square.reset()
        algo._timer_end()
    # Recursively divides chambers
    for chamber in chambers:
        recursive_maze(algo, chamber)
def _get_random_sample(population: tuple, k: int) -> list:
"""Returns a k length list of unique elements from population"""
return random.sample(population, k)
def _get_randrange(start: int, stop: int) -> int:
"""Return a random int within a range"""
return random.randrange(start, stop)
| ShanaryS/algorithm-visualizer | src/pathfinding/py/algorithms.py | algorithms.py | py | 24,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.pathfinding.cpp_or_py.use_square_h",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 24,
"usage_type": "call"
},
{
"api_nam... |
28309754653 | #!/usr/bin/env python
"""
Springboard compiler
:author: Athanasios Anastasiou
:date: Mar 2022
"""
import os
import pyparsing
import click
import urllib
class SpringboardError(Exception):
    """Base class for all Springboard compiler errors."""
class SymbolRedefined(SpringboardError):
    """Raised when a program tries to redefine an already-defined symbol."""
class SymbolUndefined(SpringboardError):
    """Raised when a program references a symbol that was never defined."""
class CircularDependency(SpringboardError):
    """Raised when imported files form a cycle."""
class CircularDefinition(SpringboardError):
    """Raised when a symbol's definition (directly or indirectly) refers to itself."""
class SpringboardProgram:
def __init__(self):
self._parser = self.get_parser()
self._symbol_defs = {"+": "+", "-": "-",
">": ">", "<": "<",
"[": "[", "]": "]",
",": ",", ".": "."}
self._code_info = None
    @property
    def imports(self):
        """Import paths parsed from the program's import section."""
        return self._code_info[0]["imports"]
    @property
    def symbol_defs(self):
        """Mapping of symbol -> code, including primitives and imported symbols."""
        return self._symbol_defs
    @property
    def code(self):
        """The parsed code section of the program."""
        return self._code_info[0]["code"]
def from_string(self, a_program, previous_imports=[]):
"""
Initialises a Springboard program given a string.
:param a_program: A string that conforms to Springboard's grammar
:type a_program: str
:param previous_imports: A list of all imports in the main namespace to avoid circular references.
:type previous_imports: list
:returns: A Springboard program initialised with all its three sections.
:rtype: SpringboardProgram
:raises CircularDependency: If a file being imported imports a file that refers to the file being imported.
:raises SymbolRedefined: Self explanatory.
"""
self._code_info = self._parser.parseString(a_program)
# If there are imports, prepopulate the symbol definition table
cwd = os.getcwd()
p_imports=previous_imports + []
for an_import in self.imports:
# Break on circular references
if an_import not in p_imports:
p_imports.append(an_import)
else:
raise CircularDependency(f"Circular dependency involving {an_import} and {','.join(previous_imports)}.")
# Change the current working directoy to enable relative imports
import_path, import_file = os.path.split(an_import)
if len(import_path) > 0:
os.chdir(import_path)
u = SpringboardProgram().from_file(import_file, p_imports)
os.chdir(cwd)
self._symbol_defs.update(u.symbol_defs)
# Append the locally defined symbols
for a_symbol_def in self._code_info[0]["symbol_defs"]:
if a_symbol_def["symbol"] not in self._symbol_defs:
self._symbol_defs[a_symbol_def["symbol"]] = a_symbol_def["code"]
else:
raise SymbolRedefined(f"Attempt to redefine symbol {a_symbol_def['symbol']}, from {self._symbol_defs[a_symbol_def['symbol']]} to {a_symbol_def['code']}.")
return self
def from_file(self, a_file, previous_imports=[]):
"""
Initialises a Springboard program given a file name.
:param a_file: The filename of a text file that contains Springboard code.
:type a_file: str
:param previous_imports: See `Springboard.from_string`
:type previous_imports: list
:returns: A SpringboardProgram object with all its three sections populated.
:rtype: SpringboardProgram
"""
p_imports = previous_imports + []
with open(a_file, "rt") as fd:
data = fd.read()
return self.from_string(data, p_imports)
def compile(self, a_program=None, symbols_compiled=[]):
"""
Compiles a Springboard program to brainfuck.
:param a_program: A string containing Springboard code.
:type a_program: str
:param symbols_compiled: The set of symbols whose definition requires compilation of a given symbol.
:type symbols_compiled: list[str]
:returns: A string that contains purely brainfuck code (i.e. composed entirely of the brainfuck grammar's symbols).
:rtype: str
:raises CircularDefinition: If a symbol being defined requires the compilation of a symbol being defined.
:raises SymbolUndefined: If a symbol being defined refers to a symbol that is not defined anywhere.
"""
source_code = a_program
if a_program is None:
source_code = list(self.code)
compiled_code = ""
# While there are symbols, keep substituting them
for a_symbol in source_code:
if a_symbol in symbols_compiled:
raise CircularDefinition(f"Circular definition involving {a_symbol} and {','.join(symbols_compiled)}.")
if a_symbol not in self._symbol_defs:
raise SymbolUndefined(f"Symbol {a_symbol} is undefined.")
symbol_code = self._symbol_defs[a_symbol]
if type(symbol_code) is not str:
self.symbol_defs[a_symbol] = "".join(self.compile(symbol_code, symbols_compiled + [a_symbol]))
compiled_code = compiled_code + self.symbol_defs[a_symbol]
return compiled_code
@staticmethod
def get_parser():
"""
Parses Springboard's grammar.
springboard_program := imports_section defs_section code_section
imports_section := import_statement*
import_statement := import \".*?\"
defs_section := def_statement*
def_statement := : symbol_identifier code_section ;
symbol_identifier := [a-zA-Z0-9_]+
code_section := (basic_code_block | loop_code_block)*
basic_code_block := "<"|">"|"+"|"-"|"."|","
loop_code_block := basic_code_block | ("[" (basic_code_block | loop_code_block)* "]")
"""
symbol_id = pyparsing.Regex("[a-zA-Z0-9_]+")
code_section = pyparsing.ZeroOrMore(pyparsing.Regex("[+\-\.,<>\[\]]") ^ symbol_id)
def_statement = pyparsing.Group(pyparsing.Suppress(":") + symbol_id("symbol") + code_section("code") + pyparsing.Suppress(";"))
defs_section = pyparsing.ZeroOrMore(def_statement)
import_statement = pyparsing.Suppress("import") + pyparsing.QuotedString("\"")
imports_section = pyparsing.ZeroOrMore(import_statement)
sb_program = pyparsing.Group(imports_section("imports") + defs_section("symbol_defs") + code_section("code"))
sb_program.ignore(pyparsing.Literal("#") + pyparsing.rest_of_line())
return sb_program
@click.command()
@click.argument("input_file", type=click.File(mode="r"))
@click.argument("output_file", type=click.File(mode="w"))
@click.option("-b", "--base-url",
              type=click.STRING,
              default="https://aanastasiou.github.io/brainfuck-visualizer/?bf=",
              help="Sets the base URL towards a try-it-online service.")
@click.option("--url/--no-url",
              default=False,
              help="If set, returns the program encoded in URL form, ready to "
                   "be included in a link")
def sbc(input_file, output_file, base_url, url):
    """
    Springboard compiler.

    The springboard compiler accepts two arguments:\n
    - input_file\n
    - output_file\n

    Both can be stdin/stdout, by using "-".

    Two options are provided to control posting to a try-it-online URL:\n
    - --base-url\n
    - --url

    Examples:\n
    - echo "+>+[-<+>]"|./sbc.py - -\n
    - echo "+>+[-<+>]"|./sbc.py - - --url

    The default base URL is: https://aanastasiou.github.io/brainfuck-visualizer/?bf=
    """
    try:
        # Generate unoptimised code (contains successive <> or +-)
        code = ''.join(SpringboardProgram().from_string(input_file.read()).compile())
        # Each parse action collapses a run of pointer moves (r1) or cell
        # increments (r2) to its net effect, e.g. "><>>" -> ">>".
        # TODO: HIGH, Sort the parse actions in the following rules
        r1 = pyparsing.Regex("[<>][<>]+").set_parse_action(lambda s, l, t: (">" if str(t).count(">") >= str(t).count("<") else "<") * abs(str(t).count(">") - str(t).count("<")))
        r2 = pyparsing.Regex("[\+\-][\+\-]+").set_parse_action(lambda s, l, t: ("+" if str(t).count("+") >= str(t).count("-") else "-") * abs(str(t).count("+") - str(t).count("-")))
        # Optimise the code by simplifying continuous segments of <> or +- characters
        optimised_code = r2.transform_string(r1.transform_string(code))
        if url:
            # Percent-encode the program so it survives inside a query string.
            optimised_code = f"{base_url}{urllib.parse.quote(optimised_code)}"
        output_file.write(f"{optimised_code}\n")
    except SpringboardError as e:
        # All compiler errors are reported to the user instead of tracebacks.
        click.echo(f"{e}")


if __name__ == "__main__":
    sbc()
| aanastasiou/springboard | sbc.py | sbc.py | py | 8,586 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 83... |
27024295019 | import openai
# Point the OpenAI SDK at a local OpenAI-compatible server (localhost:1234)
# instead of api.openai.com.
openai.api_base = 'http://localhost:1234/v1'
# The local endpoint performs no authentication, so the key stays empty.
openai.api_key = ''
# 'Llama2 Chat' prompt format:
prefix = "[INST]"
suffix = "[/INST]"
def get_completion(prompt, temperature=0.0):
    """Return the model's reply to *prompt*.

    The prompt is wrapped in the Llama2-chat instruction tags before being
    sent; the content of the first returned choice is handed back.
    """
    message = {"role": "user", "content": f"{prefix}{prompt}{suffix}"}
    answer = openai.ChatCompletion.create(
        model="local model",
        temperature=temperature,
        messages=[message],
    )
    return answer.choices[0].message["content"]
| lmstudio-ai/examples | Poor-Man's_Vector-Database/chat.py | chat.py | py | 497 | python | en | code | 139 | github-code | 36 | [
{
"api_name": "openai.api_base",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "o... |
from dotenv import load_dotenv
load_dotenv(override=True)
import os
# OAuth settings are taken from the environment (.env overrides process env).
appOAuthServer=os.getenv('OAUTH_SERVER')
appOAuthCredential=os.getenv('OAUTH_CRED')
appOAuthRedirectUrl=os.getenv('OAUTH_REDIRECT_URL')
import hvac
# Vault client used both to drive the OAuth flow and to store credentials.
vault=hvac.Client(
    url=os.getenv('VAULT_ADDR'),
    token=os.getenv('VAULT_TOKEN')
)
import uuid
from flask import Flask,redirect,request,session
app = Flask(__name__)
# Random per-process secret key: sessions do not survive a restart.
app.secret_key=str(uuid.uuid4())
from flask_session import Session
# Server-side sessions stored on the filesystem.
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
import requests
@app.route('/')
def index():
    """Show an Authorize button, or the Webex display name once authorized."""
    # Previously stored OAuth credentials for this app, if any, live in Vault.
    creds=vault.read(f'oauthapp/creds/{appOAuthCredential}')
    if creds is None:
        # Not yet authorized: have Vault build the authorization-code URL and
        # remember a random state value for CSRF checking in /auth.
        state=str(uuid.uuid4())
        session['state']=state
        response=vault.write(f'oauthapp/auth-code-url', server=appOAuthServer,
                             redirect_url=appOAuthRedirectUrl,
                             scopes='spark:people_read',
                             state=state)
        url=response['data']['url']
        href=f"location.href='{url}'"
        return f'<p>Unauthorized! <button onclick="{href}">Authorize</button></p>'
    # Authorized: call the Webex "people/me" API with the stored access token.
    access_token=creds['data']['access_token']
    response=requests.get('https://webexapis.com/v1/people/me', headers={'Authorization': f'Bearer {access_token}'})
    displayName=response.json()['displayName']
    return f'<p>Authorized! User display name: {displayName}</p>'
@app.route('/auth')
def auth():
    """OAuth redirect endpoint: validate state, store the code via Vault."""
    code=request.args.get('code')
    state=request.args.get('state')
    # CSRF protection: state must match the value saved in the session by /.
    if not state == session.get('state'):
        return 'Mismatched state'
    # Vault exchanges the authorization code for tokens and stores them.
    response=vault.write(f'oauthapp/creds/{appOAuthCredential}',
                         server=appOAuthServer,
                         redirect_url=appOAuthRedirectUrl,
                         code=code)
    return redirect('/')
{
"api_name": "dotenv.load_dotenv",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,... |
72721076263 | from django.conf.urls import url, include
from rest_framework import routers
from .views import (UserViewSet, GroupViewSet, GenderList, GenderDetail, CountryList, CountryDetail, LanguageList,
LanguageDetail, CredentialList, CredentialDetail, PersonList, PersonDetail, CategoryList,
CategoryDetail, ArticleList, ArticleDetail, UserProfileList, UserProfileDetail, MyUserProfile)
# DRF router auto-generates list/detail routes for the registered viewsets.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups', GroupViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): django.conf.urls.url is deprecated (removed in Django 4.0);
# left unchanged here to stay compatible with the project's Django version.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^genders/$', GenderList.as_view()),
    url(r'^genders/(?P<pk>[0-9]+)/$', GenderDetail.as_view()),
    url(r'^countries/$', CountryList.as_view()),
    url(r'^countries/(?P<pk>[0-9]+)/$', CountryDetail.as_view()),
    url(r'^languages/$', LanguageList.as_view()),
    url(r'^languages/(?P<pk>[0-9]+)/$', LanguageDetail.as_view()),
    url(r'^credentials/$', CredentialList.as_view()),
    url(r'^credentials/(?P<pk>[0-9]+)/$', CredentialDetail.as_view()),
    url(r'^persons/$', PersonList.as_view()),
    url(r'^persons/(?P<pk>[0-9]+)/$', PersonDetail.as_view()),
    url(r'^categories/$', CategoryList.as_view()),
    url(r'^categories/(?P<pk>[0-9]+)/$', CategoryDetail.as_view()),
    url(r'^articles/$', ArticleList.as_view()),
    url(r'^articles/(?P<pk>[0-9]+)/$', ArticleDetail.as_view()),
    url(r'^userprofiles/$', UserProfileList.as_view()),
    url(r'^userprofiles/(?P<pk>[0-9]+)/$', UserProfileDetail.as_view()),
    url(r'^myuserprofile/$', MyUserProfile.as_view()),
    #url(r'^articlecomments/$', ArticleCommentList.as_view()),
    #url(r'^articlecomments/(?P<pk>[0-9]+)/$', ArticleCommentDetail.as_view()),
    #url(r'^threads/$', ThreadList.as_view()),
    #url(r'^threads/(?P<pk>[0-9]+)/$', ThreadDetail.as_view()),
    #url(r'^threadcomments/$', ThreadCommentList.as_view()),
    #url(r'^threadcomments/(?P<pk>[0-9]+)/$', ThreadCommentDetail.as_view()),
]
| fortena/GakktuServer | gakktu/urls.py | urls.py | py | 2,163 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "views.UserViewSet",
"line_number": 8,
"usage_type": "argument"
},
{
"ap... |
5407159052 | from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the school app: dashboard, listing pages, add forms,
# search, and a single-neighbourhood detail view keyed by pk.
urlpatterns=[
    path('' , views.dashboard , name='dashboard'),
    path('teachers', views.teachers, name ='teachers'),
    path('students', views.students, name='students'),
    path('staff', views.staff, name='staff' ),
    path('add_student', views.add_student, name='add_student'),
    path('add_staff', views.add_staff, name='add_staff'),
    path('add_teacher', views.add_teacher, name='add_teacher'),
    path('search/', views.search_results, name='search_results'),
    path("neighbourhood/<str:pk>/", views.single_neighbourhood, name='single_neighbourhood'),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
75051245544 | from argparse import ArgumentParser
from json import dump, dumps, load, loads
from logging import INFO, basicConfig, getLogger
from subprocess import run
from emoji import emojize
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import (CallbackContext, CallbackQueryHandler,
CommandHandler, PicklePersistence, Updater)
from structures.parking import Parking as Parking
from structures.stats import Stats as Stats
def start(update: Update, context: CallbackContext) -> None:
    """Greet a new user and show the current parking-state keyboard."""
    # Whitelist off -> everyone is allowed; whitelist on -> known users only.
    if not config['whitelist'] or str(update.effective_user.id) in users:
        manage_user(update, context)
        markup = make_keyboard(context, str(update.effective_user.id))
        greeting = (r'Вас приветствует *Парковочный бот Logrocon*\!' +
                    '\nВыберете место кнопками ниже')
        update.effective_message.reply_text(greeting, parse_mode='MarkdownV2')
        parking = context.bot_data['parking']
        update.effective_message.reply_text(
            parking.state_text, reply_markup=markup)
        log_event(update, 'Отправил start')
def stop(update: Update, context: CallbackContext) -> None:
    """Unsubscribe a user: say goodbye and drop them from the user list."""
    # Whitelist on and user unknown -> ignore the command entirely.
    if config['whitelist'] and str(update.effective_user.id) not in users:
        return
    try:
        log_event(update, 'Отправил stop')
        farewell = ('Вы перестали получать уведомления. Вы можете в любой момент' +
                    ' вернутся к их получению командой /start.')
        update.effective_message.reply_text(farewell)
        manage_user(update, context, False)
    except KeyError:
        # The user was removed earlier: a repeated /stop.
        log_event(update, 'Отправил stop повторно')
def parking_handler(update: Update, context: CallbackContext) -> None:
    """Handler for place selection buttons.

    The callback data carries the place number; toggling a place raises
    ValueError when the transition is not allowed (place taken by someone
    else), which is reported back to the user.
    """
    if (config['whitelist'] and str(update.effective_user.id) in users
            or not config['whitelist']):
        manage_user(update, context)
        number = update.callback_query.data
        try:
            parking = context.bot_data['parking']
            stats = context.bot_data['stats']
            for place in parking.places:
                if place.number == number:
                    stats.count(place)
                    place.toggle_state(str(update.effective_user.id))
                    state = place.state
            # NOTE(review): if the callback number matches no place, `state`
            # (and then `action_text`) stays unbound and raises NameError -
            # presumably the keyboard guarantees a valid number; confirm.
            if state == 'reserved':
                action_text = 'зарезервировал'
            elif state == 'occupied':
                action_text = 'занял'
            elif state == 'free':
                action_text = 'освободил'
            update.callback_query.answer(f'Вы {action_text}и место {number}')
            action = (f'*{users[str(update.effective_user.id)]}* ' +
                      f'{action_text} место *{number}*')
            # Broadcast the new parking state to every known user.
            update_state(update, context, action)
            log_event(update, action)
        except ValueError:
            update.callback_query.answer(f'Место {number} не свободно!')
            log_event(update, f'Нажал на несвободное место {number}')
def cancel_handler(update: Update, context: CallbackContext) -> None:
    """Handler for cancel reserve button.

    Callback data has the form "cancel.<number>"; cancel_reserve raises
    ValueError when the reservation cannot be cancelled (e.g. stale keyboard).
    """
    if (config['whitelist'] and str(update.effective_user.id) in users
            or not config['whitelist']):
        manage_user(update, context)
        # Strip the "cancel." prefix to obtain the place number.
        number = update.callback_query.data.split('.')[1]
        parking = context.bot_data['parking']
        try:
            for place in parking.places:
                if place.number == number:
                    place.cancel_reserve(str(update.effective_user.id))
            update.callback_query.answer(f'Вы отменили резерв места {number}')
            action = (f'*{users[str(update.effective_user.id)]}* ' +
                      f'отменил резерв места *{number}*')
            # Broadcast the new parking state to every known user.
            update_state(update, context, action)
            log_event(update, action)
        except ValueError:
            update.callback_query.answer(
                'Используйте клавиатуру из последнего сообщения!')
            log_event(update, 'Пытался отменить резерв на старой клавиатуре')
def clear_handler(update: Update, context: CallbackContext) -> None:
    """Handler for clear parking button.

    Frees every place at once; parking.clear() raises ValueError when the
    action is not applicable (e.g. pressed on a stale keyboard).
    """
    if (config['whitelist'] and str(update.effective_user.id) in users
            or not config['whitelist']):
        manage_user(update, context)
        try:
            parking = context.bot_data['parking']
            places = parking.clear()
            if places:
                # Count every freed place into the usage statistics.
                stats = context.bot_data['stats']
                for place in places:
                    stats.count(place)
            update.callback_query.answer('Вы выбрали очистку парковки')
            action = (f'*{users[str(update.effective_user.id)]}* ' +
                      'очистил парковочное пространство')
            # Broadcast the new parking state to every known user.
            update_state(update, context, action)
            log_event(update, action)
        except ValueError:
            update.callback_query.answer(
                'Используйте клавиатуру из последнего сообщения!')
            log_event(update, 'Пытался очистить парковку на старой клавиатуре')
def statistics_handler(update: Update, context: CallbackContext) -> None:
    """Send usage statistics to the requesting user only."""
    # Whitelist off -> everyone is allowed; whitelist on -> known users only.
    if not config['whitelist'] or str(update.effective_user.id) in users:
        manage_user(update, context)
        update.callback_query.answer('Вы запросили статистику')
        stats_text = context.bot_data['stats'].message_text
        update_state(update, context, stats_text, True)
        log_event(update, 'Запросил статистику')
def update_state(update: Update, context: CallbackContext, info: str,
                 personal=False) -> None:
    """Sends personal or bulk messages to users.

    Args:
        update: for identifying users and getting bot for bulk send.
        context: for getting bot and user data.
        info: info string for info message (MarkdownV2).
        personal (optional): should this message be personal only.
            Defaults to False.
    """
    parking = context.bot_data['parking']
    if personal:
        # Reply only to the requesting chat.
        markup = make_keyboard(context, str(update.effective_user.id))
        update.effective_message.reply_text(info, parse_mode='MarkdownV2')
        update.effective_message.reply_text(parking.state_text,
                                            reply_markup=markup)
    else:
        # Broadcast: each user gets a keyboard personalised to them.
        for user in users:
            markup = make_keyboard(context, user)
            update.effective_message.bot.send_message(
                text=info, chat_id=user, parse_mode='MarkdownV2')
            update.effective_message.bot.send_message(
                text=parking.state_text, chat_id=user, reply_markup=markup)
            log_event(update, f'Отправили уведомление {users[user]}')
def make_keyboard(context: CallbackContext,
                  user_id: str) -> InlineKeyboardMarkup:
    """Making of personalized keyboards.

    One row per place; the viewing user additionally gets a cancel button
    next to a place they reserved. A clear-parking button appears only when
    the parking is not completely free; statistics is always available.
    """
    keyboard = []
    parking = context.bot_data['parking']
    for place in parking.state:
        # Each state entry unpacks as (sign, state, number, occupant-id).
        place_sign, state, number, occupant = place
        if occupant is not None:
            person = users[str(occupant)]
        else:
            person = 'место свободно'
        caption = ' '.join([place_sign, number, person])
        place_button = InlineKeyboardButton(caption, callback_data=number)
        cancel_button = InlineKeyboardButton(' '.join(
            [emojize(':right_arrow_curving_left:'), 'Отменить резерв']),
            callback_data=''.join(['cancel.', number]))
        clear_button = InlineKeyboardButton(' '.join(
            [emojize(':FREE_button:'), 'Очистить парковку']),
            callback_data='clear')
        statistics_button = InlineKeyboardButton(' '.join(
            [emojize(':bar_chart:'), 'Статистика']),
            callback_data='statistics')
        if state == 'reserved' and occupant == user_id:
            keyrow = []
            keyrow.append(place_button)
            keyrow.append(cancel_button)
            keyboard.append(keyrow)
        else:
            keyboard.append([place_button])
    # NOTE(review): clear_button/statistics_button used below are the ones
    # built on the LAST loop iteration - works, but fails with NameError if
    # parking.state is ever empty; confirm that cannot happen.
    if not parking.is_free:
        keyrow = []
        keyrow.append(clear_button)
        keyrow.append(statistics_button)
        keyboard.append(keyrow)
    else:
        keyboard.append([statistics_button])
    return InlineKeyboardMarkup(keyboard)
def manage_user(update: Update, context: CallbackContext, check=True) -> None:
    """Add/refresh a user record, or remove it.

    Args:
        check (optional): True registers/refreshes the user; False removes
            them. Defaults to True.
    """
    user_id = str(update.effective_user.id)
    if check:
        if update.effective_user.full_name is None:
            username = update.effective_user.username
        else:
            username = update.effective_user.full_name
        # Replace for telegram markdown v2
        # NOTE(review): the MarkdownV2-special characters are stripped, not
        # escaped, so the stored display name loses those characters.
        for ch in ['_', '*', '[', ']', '(', ')', '~', '`', '>',
                   '#', '+', '-', '=', '|', '{', '}', '.', '!']:
            username = username.replace(ch, '')
        if user_id not in users or users[user_id] != username:
            # Persist the change and keep the statistics name table in sync.
            users[user_id] = username
            save_json(config['users_file'], users)
            stats = context.bot_data['stats']
            stats.update_users(users)
            log_event(update, 'Добавили пользователя')
    elif not check:
        # Removal path (raises KeyError for unknown users; stop() handles it).
        users.pop(user_id)
        save_json(config['users_file'], users)
        log_event(update, 'Удалили пользователя')
def log_event(update: Update, action: str) -> None:
    """Write one audit line: the user's stored name (or telegram handle) and the action."""
    user_key = str(update.effective_user.id)
    try:
        who = str(users[user_key])
    except KeyError:
        # Unknown (e.g. just-removed) user: fall back to the telegram handle.
        who = update.effective_user.username
    stripped_action = action.replace('*', '')
    logger.log(INFO, f'{who} - {stripped_action}')
def load_json(filename: str) -> dict:
    """Load a JSON file, exiting the program with a message if it is missing.

    Args:
        filename: path to the JSON file.

    Returns:
        The parsed JSON content.
    """
    try:
        with open(filename) as file:
            data = load(file)
        return data
    except FileNotFoundError:
        # Bug fix: the f-string had lost its placeholder, so the message
        # never said which file was missing.
        exit(f'File "{filename}" does not exist')
def save_json(filename: str, data: dict) -> None:
    """Serialize *data* to *filename* as indented, key-sorted JSON."""
    serialized = dumps(data, indent=4, sort_keys=True)
    with open(filename, 'w') as out_file:
        out_file.write(serialized)
def get_config() -> dict:
    """Parse the command line and load the bot configuration file."""
    arg_parser = ArgumentParser(
        prog='Logrocon Parking Bot v.3')
    arg_parser.add_argument('-c', '--config', default='config.json', metavar='C',
                            help='config file name')
    parsed = vars(arg_parser.parse_args())
    return load_json(parsed['config'])
def toggle_whitelist(update: Update, context: CallbackContext) -> None:
    """Owner-only command: flip whitelist mode on/off."""
    if update.effective_user.id != config['owner_id']:
        # Non-owners should not even know the command exists - just log it.
        log_event(update, 'Отправил whitelist, хотя не должен о ней знать')
        return
    config['whitelist'] = not config['whitelist']
    update.effective_message.reply_text('Whitelist mode: ' +
                                        f'{config["whitelist"]}')
    log_event(update, 'Переключил режим whitelist на ' +
              f'{config["whitelist"]}')
def get_logs(update: Update, context: CallbackContext) -> None:
    """Getting logs by messages from bot by bot owner.

    Optional argument: number of trailing log lines to send (defaults to
    config['logging']['log_length']).
    """
    if update.effective_user.id == config['owner_id']:
        if not context.args:
            length = config['logging']['log_length']
            log_event(update, f'Отправил logs без аргументов, берем {length}')
        else:
            length = context.args[0]
            log_event(update, f'Отправил logs с аргументом {length}')
        # Read only the tail of the log file via the external `tail` tool.
        result = run(['tail', '-n', length, config['logging']['log_file']],
                     capture_output=True, universal_newlines=True)
        log = result.stdout
        # Send in 4096-char chunks - presumably Telegram's message size
        # limit; confirm against the Bot API docs.
        if len(log) > 4096:
            for x in range(0, len(log), 4096):
                update.effective_message.reply_text(log[x:x+4096])
        else:
            update.effective_message.reply_text(log)
    else:
        log_event(update, 'Отправил logs, хотя не должен о ней знать')
def get_stats(update: Update, context: CallbackContext) -> None:
    """Owner-only command: dump the statistics as pretty-printed JSON."""
    if update.effective_user.id != config['owner_id']:
        # Non-owners should not even know the command exists - just log it.
        log_event(update, 'Отправил get_stats, хотя не должен о ней знать')
        return
    stats_json = dumps(context.bot_data['stats'].as_dict, indent=4)
    update.effective_message.reply_text(stats_json)
    log_event(update, 'Экспортировал статистику в json')
def set_stats(update: Update, context: CallbackContext) -> None:
    """Owner-only command: replace the statistics from a JSON argument.

    The JSON document is passed as the command's arguments, e.g.
    /set_stats {"key": ...}; the words are re-joined before parsing.
    """
    if update.effective_user.id != config['owner_id']:
        # Non-owners should not even know the command exists - just log it.
        log_event(update, 'Отправил set_stats, хотя не должен о ней знать')
        return
    if not context.args:
        update.effective_message.reply_text('Отсутствуют аргументы')
        log_event(update, 'Отправил set_stats без аргументов')
        return
    # Idiom fix: join the argument words once instead of quadratic += loop.
    context.bot_data['stats'].as_dict = loads(' '.join(context.args))
    update.effective_message.reply_text('Статистика импортирована')
    log_event(update, 'Импортировал статистику из json')
config = get_config()
"""dict: all config options."""
users = load_json(config['users_file'])
"""dict: bot users."""
# Set logging
log_format = '%(asctime)s %(levelname)s %(name)s %(message)s'
basicConfig(filename=config['logging']['log_file'],
            format=log_format, level=INFO)
logger = getLogger(__name__)
# Registration order matters: the 'cancel.*', 'clear' and 'statistics'
# patterns must come before the catch-all CallbackQueryHandler, which
# treats any remaining callback data as a place number.
handlers = [CommandHandler('start', start),
            CommandHandler('stop', stop),
            CallbackQueryHandler(cancel_handler, pattern='cancel.*'),
            CallbackQueryHandler(clear_handler, pattern='clear'),
            CallbackQueryHandler(statistics_handler, pattern='statistics'),
            CallbackQueryHandler(parking_handler),
            CommandHandler('whitelist', toggle_whitelist),
            CommandHandler('logs', get_logs, pass_args=True),
            CommandHandler('get_stats', get_stats),
            CommandHandler('set_stats', set_stats, pass_args=True)]
def main():
    """Build the Updater with pickle persistence, seed bot_data and poll."""
    updater = Updater(token=config['token'], persistence=PicklePersistence(
        filename=config['data_file'], store_chat_data=False,
        store_user_data=False, on_flush=False))
    dispatcher = updater.dispatcher
    # Restore persisted state, or create fresh Stats/Parking on first run.
    dispatcher.bot_data['stats'] = dispatcher.bot_data.get('stats',
                                                           Stats(users))
    dispatcher.bot_data['parking'] = dispatcher.bot_data.get(
        'parking', Parking(config['places']))
    # Create new parking if places in config changed
    if ([x[2] for x in dispatcher.bot_data['parking'].state]
            != config['places']):
        dispatcher.bot_data['parking'] = Parking(config['places'])
    for handler in handlers:
        dispatcher.add_handler(handler)
    updater.start_polling(drop_pending_updates=True)
    updater.idle()


if __name__ == '__main__':
    main()
| GarikFirst/PyParkingBot | parking_bot.py | parking_bot.py | py | 15,861 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "telegram.Update",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "telegram.ext.CallbackContext",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "telegram... |
72607798505 | #!/usr/bin/env python3
import rospy
import datetime
import sys, os
from rosgraph_msgs.msg import Log
rospy.init_node('NECST_logger')
###config
# Root directory under which per-day folders (YYYY/M/D) are created.
save_to = '/home/amigos/log'
# Script whose messages should be captured; optional first CLI argument.
try:
    file_name = sys.argv[1]
except IndexError:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Missing argument -> prompt interactively in main.
    file_name = ''
###
def save_file_conf():
    """Build today's log directory path and the log file name.

    Returns:
        tuple: (directory "<save_to>/YYYY/M/D", "<script base name>.txt").
    """
    today = datetime.date.today()
    _save_to = os.path.join(save_to, str(today.year), str(today.month),
                            str(today.day))
    _file_name = file_name.split('.')[0] + '.txt'
    # Fix: the original used `if exists: pass / else: ...` - invert the
    # condition instead of an empty branch. exist_ok also guards the race
    # where the directory appears between the check and makedirs.
    if not os.path.exists(_save_to):
        os.makedirs(_save_to, exist_ok=True)
        print('Log is save to {}'.format(_save_to))
    return _save_to, _file_name
def save(req):
    """Append one /rosout_agg record to today's log file.

    Args:
        req (rosgraph_msgs.msg.Log): incoming log message.
    """
    ret = save_file_conf()
    savefile = os.path.join(ret[0], ret[1])
    # Only keep messages originating from the script being tracked.
    if not req.file == file_name:return
    if '#' in list(req.msg):
        # Messages of the form "<func>#<args>" are split for readability.
        args = req.msg.split('#')[1]
        f_name = req.msg.split('#')[0]
        log = '[{}] : ({}) : {}{}'.format(datetime.datetime.fromtimestamp(req.header.stamp.to_time()), req.file,f_name, args)
        print(log)
    else:
        log = '[{}] : ({}) : {}'.format(datetime.datetime.fromtimestamp(req.header.stamp.to_time()), req.file,req.msg)
        print(log)
    # Append (not overwrite) so one file accumulates the whole day.
    f = open(savefile,'a')
    f.write(log+'\n')
    f.close()
if __name__ == '__main__':
    # Prompt interactively when no script name was given on the command line.
    if file_name == '':
        # Bug fix: corrected the "Pleae" typo in the user-facing prompt.
        file_name = input('Please input script name [ex : ROS_controller.py]')
    # Subscribe to the aggregated ROS log topic; save() filters and persists.
    sub = rospy.Subscriber('rosout_agg', Log, save, queue_size=100)
    print('*** Logger Start {} ***'.format(file_name))
    rospy.spin()
| nanten2/necst-ros | scripts/record/ROS_save_logger.py | ROS_save_logger.py | py | 1,525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.date",
... |
37915846775 | # %%
from pathlib import Path
import csv
import matplotlib.pyplot as plt
from datetime import datetime
path = Path("weather_data/en_climate_daily_ON_6158355_2023_P1D.csv")
lines = path.read_text().splitlines()
reader = csv.reader(lines)
# Consume the header row so the loop below sees data rows only.
header_row = next(reader)
# Extract the percipitation.
# Column 4 is the date, column 11 the total precipitation - presumably
# matching the Environment Canada daily CSV layout; verify via header_row.
dates, percips = [], []
for row in reader:
    current_date = datetime.strptime(row[4], "%Y-%m-%d")
    percip = float(row[11])
    if percip != 0:
        dates.append(current_date)
        percips.append(percip)
# Plot the Total Precipitation (mm).
# The sum of the total rainfall and the water equivalent of the total snowfall in millimetres (mm), observed at the location during a specified time interval.
plt.style.use("seaborn-v0_8")
fig, ax = plt.subplots(figsize=(10,6))
ax.scatter(dates, percips, color='green')
# Format plot.
ax.set_title("Daily Total Perciptations (mm), From January 1, 2023 to August 16, 2023", fontsize=16)
ax.set_xlabel("Dates (YYYY-MM-DD)")
# Slant the date labels so they do not overlap.
fig.autofmt_xdate()
plt.ylabel('Total Precipitation (mm)')
plt.tight_layout()
plt.show()
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",... |
26612458702 | import logging
from django.conf import settings
class ShowDatabaseQueries(logging.Filter):
    """Logging filter that passes records only while settings.DATABASE_DEBUG is truthy."""
    def filter(self, record):
        # Gate SQL query logging on a dedicated settings flag, not DEBUG.
        return settings.DATABASE_DEBUG
# Django LOGGING configuration: everything goes to stderr (StreamHandler);
# SQL queries are emitted only when DATABASE_DEBUG is on, and errors are
# mailed to the admins in production (DEBUG=False).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
        # Custom filter defined above: enables django.db.backends output.
        'require_database_debug_true': {
            '()': ShowDatabaseQueries,
        }
    },
    'formatters': {
        'django.server': {
            '()': 'django.utils.log.ServerFormatter',
            'format': '[{server_time}] {message}',
            'style': '{',
        }
    },
    'handlers': {
        'docker': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'database': {
            'level': 'DEBUG',
            'filters': ['require_database_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'django.server': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'django.server',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        # Root logger: everything not claimed by a more specific logger.
        '': {
            'handlers': ['console',],
            'level': 'DEBUG',
        },
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        },
        'django.server': {
            'handlers': ['django.server'],
            'level': 'INFO',
            'propagate': False,
        },
        # SQL statement logging, gated by the DATABASE_DEBUG filter above.
        'django.db.backends': {
            'level': 'DEBUG',
            'handlers': ['database'],
            'propagate': False,
        }
    },
}
| codecraft63/django-base | settings/system/logging.py | logging.py | py | 2,031 | python | hi | code | 0 | github-code | 36 | [
{
"api_name": "logging.Filter",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.DATABASE_DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 7,
"usage_type": "name"
}
] |
18336730125 | import json
from functions import get_access_token, get_animal_types, get_animals_by_type, \
get_animals_dataset, print_babies_adults, print_animal_types, \
digit_check, string_check, string_check_1, add_animal, info_about_pet
with open('data.json') as infile:
dataset = json.load(infile)
print("Welcome to the Pet Project!")
while True:
print( "Enter your option:\n",
"Enter 1 if you want to see all possible types of animals\n",
"Enter 2 if you want to delete adopted animal\n",
"Enter 3 if you want to add new animal to dataset\n",
"Enter 4 if you want to update data from the website\n",
"Enter 5 if you want to exit the programm\n")
option1 = digit_check(5, 1)
if option1 == 1:
while True:
print_animal_types(dataset)
print(" Enter 1 if you want to adopt animal of some type\n",
" Enter 2 if you want to exit to the main menu")
option2 = digit_check(2, 1)
if option2 == 1:
type_animal = string_check(dataset, "Enter, animal of what type you would like to adopt: ")
print_babies_adults(dataset, type_animal)
name_adopt = string_check(dataset[type_animal], "Enter name of animal which characteristics you would like to see: ")
info_about_pet(dataset, type_animal, name_adopt)
answer = string_check_1("Yes", "No", "Do you confirm that you want to adopt this pet? Enter Yes or No: ")
if answer == "Yes":
del dataset[type_animal][name_adopt]
if option2 == 2:
break
if option1 == 2:
while True:
print(" Enter 1 if you want to delete pet from the dataset\n",
" Enter 2 if you want to exit to the main menu\n")
option3 = digit_check(2, 1)
if option3 == 1:
print_animal_types(dataset)
type_delete = string_check(dataset, "Enter, animal of what type you would like to delete from the dataset: ")
print_babies_adults(dataset, type_delete)
name_delete = string_check(dataset[type_delete], "Enter name of animal which you want to delete: ")
del dataset[type_delete][name_delete]
if option3 == 2:
break
if option1 == 3:
while True:
print(" Enter 1 if you want to add pet to the dataset\n",
" Enter 2 if you want to exit to the main menu\n")
option4 = digit_check(2, 1)
if option4 == 1:
print_animal_types(dataset)
type_add = string_check(dataset, "Enter, animal of what type you would like to add to the dataset: ")
name_add = add_animal(dataset, type_add)
info_about_pet(dataset, type_add, name_add)
print("\n")
else:
break
if option1 == 4:
token = get_access_token()
types = get_animal_types(token)
dataset = get_animals_dataset(types, token)
print("Data was successfully updated! \n")
if option1 == 5:
break
with open('data.json', 'w') as outfile:
json.dump(dataset, outfile)
| evnng/Animal-Project | main_programm.py | main_programm.py | py | 3,472 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "functions.digit_check",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "functions.print_animal_types",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "functions... |
38037219571 | import logging
import re
class ColorUtil:
@staticmethod
def ShadeColor(hexColor, percent):
try:
hexNum = int(hexColor[1:], 16)
t = 0 if percent < 0 else 255
p = percent * -1 if percent < 0 else percent
R = hexNum >> 16
G = hexNum >> 8 & 0x00FF
B = hexNum & 0x0000FF
# print(hexColor, percent)
# print(R, G, B)
Rhex = hex(round((t - R) * p) + R)[2:].zfill(2)
Ghex = hex(round((t - G) * p) + G)[2:].zfill(2)
BHex = hex(round((t - B) * p) + B)[2:].zfill(2)
# print(Rhex, Ghex, BHex)
hexString = '#' + Rhex + Ghex + BHex
# print(hexString)
# print()
return hexString
except Exception as e:
logging.error(logging.exception("Error creating shade of color"))
# https://stackoverflow.com/questions/20275524/how-to-check-if-a-string-is-an-rgb-hex-string
@staticmethod
def IsValidHexColor(hexColor):
hexstring = re.compile(r'#[a-fA-F0-9]{3}(?:[a-fA-F0-9]{3})?$')
return bool(hexstring.match(hexColor))
| bhavesh-jadav/Power-BI-Theme-Generator | src/main/python/Util.py | Util.py | py | 1,158 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "logging.error",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 38,
"usage_type": "call"
}
] |
18216820693 | import pygame
class Bullet(pygame.sprite.Sprite):
def __init__(self, gun):
pygame.sprite.Sprite.__init__(self)
self.image=pygame.Surface((10,10))
self.rect=self.image.get_rect()
self.image.fill((255,0,0))
self.rect.centerx=gun.rect.centerx
self.rect.centery=gun.rect.centery
def update(self):
self.rect.y-=10
self.score=0
| Vladimirk229/My-projects | pythongame/pythongame/bullet.py | bullet.py | py | 410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
3458590367 | import numpy as np
from typing import List
#本质:元素的左上角元素都相同
class Solution:
def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:
if not matrix or not matrix[0]:
return True
m, n = len(matrix), len(matrix[0])
for i in range(m):
for j in range(n):
if 0 <= i - 1 < m and j + 1 < n:
if matrix[i-1][j] != matrix[i][j+1]:
return False
return True
if __name__ == '__main__':
matrix = [[1, 2, 3, 4], [5, 1, 2, 3], [9, 5, 1, 2]]
matrix = [[1,2],[2,2]]
matrix = [[41, 45], [81, 41], [73, 81], [47, 73], [0, 47], [79, 76]]
print(Solution().isToeplitzMatrix(matrix)) | pi408637535/Algorithm | com/study/algorithm/daily/766. Toeplitz Matrix.py | 766. Toeplitz Matrix.py | py | 728 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
}
] |
15492848684 | """pip/setuptools packaging
Based off https://github.com/pypa/sampleproject/blob/master/setup.py
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, remove
import shutil
from malboxes._version import __version__
here = path.abspath(path.dirname(__file__))
_tempfiles = []
def _prepare():
"""Preparing files for package"""
# Here are files I want packaged but also in top-level directory as it is mostly
# unrelated to actual build
# pip will install data_files in an odd location so I copy them in package at
# build time
root_data_files = ['LICENSE', 'README.adoc', 'TODO.adoc']
for f in root_data_files:
_tempfiles.append(shutil.copy(path.join(here, f),
path.join(here, 'malboxes')))
# docs
shutil.copytree(path.join(here, 'docs'), path.join(here, 'malboxes/docs'),
ignore=shutil.ignore_patterns('presentation'))
def _teardown():
"""Removing temporary files"""
for f in _tempfiles:
remove(path.join(here, f))
shutil.rmtree(path.join(here, 'malboxes/docs'))
# Get the long description from the README file
# TODO process README to make it pure plaintext
with open(path.join(here, 'README.adoc'), encoding='utf-8') as f:
long_description = f.read()
_prepare()
setup(
name='malboxes',
version=__version__,
description='Build Malware VMs (boxes) or whole environments based on '
'templates. Useful for analysts, sandboxes or honeypots. '
'Leverages devops workflow with Vagrant and Packer.',
long_description=long_description,
url='https://github.com/gosecure/malboxes',
author='Malboxes Team',
author_email='obilodeau@gosecure.ca',
license='GPLv3+',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Information Technology',
'Topic :: Security',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 or later '
'(GPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.6',
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='virtual-machine malware reverse-engineering vagrant packer',
# Once we have more code we'll migrate to a package and use find_packages()
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['appdirs', 'Jinja2', 'jsmin', 'boto3'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
include_package_data = True,
zip_safe = False,
# install malboxes executable
entry_points={
'console_scripts': [
'malboxes=malboxes:main',
],
},
)
_teardown()
| GoSecure/malboxes | setup.py | setup.py | py | 4,016 | python | en | code | 1,015 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_numbe... |
3381408882 | from rest_framework import serializers
from .models import PetModel, FA
from datetime import date,datetime,timedelta
from django.utils import timezone
class PetSerial(serializers.ModelSerializer):
class Meta:
model=PetModel
fields=(
"Category",
"Name",
"Sex",
"BirthDt",
"City",
"Departement",
"Statut",
"OK_CHAT",
"OK_CHIEN",
"OK_ENFANT",
"FA"
)
def create(self, validated_data):
return PetModel.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.Category=validated_data.get("Category", instance.Category)
instance.Name=validated_data.get("Name", instance.Name)
instance.Sex=validated_data.get("Sex", instance.Sex)
instance.BirthDt=validated_data.get("BirthDt", instance.BirthDt)
instance.City=validated_data.get("City", instance.City)
instance.Departement=validated_data.get("Departement", instance.Departement)
instance.Statut=validated_data.get("Statut", instance.Statut)
instance.OK_CHAT=validated_data.get("OK_CHAT", instance.OK_CHAT)
instance.OK_CHIEN=validated_data.get("OK_CHIEN", instance.OK_CHIEN)
instance.OK_ENFANT=validated_data.get("OK_ENFANT", instance.OK_ENFANT)
instance.save()
return instance | tapaloeil/PetAdmin | Pet/serializers.py | serializers.py | py | 1,218 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.PetModel",
"line_number": 8,
"usage_type": "name"
},
... |
43823710793 | import json
from collections import Counter
from template import *
relation_ground_truth_count = json.load(open("../../data/relation_count.json"))
for relation in relation_list[:1]:
result_data = []
with open(f"./data/alias/query_result/{relation}.json", "r") as f:
for line in f:
json_obj = json.loads(line)
result_data.append(json_obj)
vote_data = {}
with open(f"./data/alias/query_result_vote/{relation}.json", "w") as out:
for query in result_data:
if query['input']['query_id'] in vote_data:
vote_data[query['input']['query_id']]["answer"].append(query['output'])
else:
vote_data[query['input']['query_id']] = {"sentence": query['input']['query']['prompt'], "head": query['input']['head'], "label": query['input'][
'label'], "answer": [query['output']]}
tp, fp, tn, fn = 0, 0, 0, 0
for query_id, query_data in vote_data.items():
answer_list = query_data['answer']
result = [a for a in answer_list]
vote_answer = max(result, key=answer_list.count)
copy_vote_data = query_data.copy()
copy_vote_data['tail'] = vote_answer
copy_vote_data['vote_answer'] = vote_answer
copy_vote_data['head'] = query_data['head']
if vote_answer == "yes" and copy_vote_data['label'] == "tp":
tp += 1
copy_vote_data['result'] = "true_p"
if vote_answer == "yes" and copy_vote_data['label'] == "fp":
fp += 1
copy_vote_data['result'] = "false_p"
if vote_answer == "no" or vote_answer == "unknown" and copy_vote_data['label'] == "tp":
fn += 1
copy_vote_data['result'] = "false_n"
if vote_answer == "no" or vote_answer == "unknown" and copy_vote_data['label'] == "fp":
tn += 1
copy_vote_data['result'] = "true_n"
out.write(json.dumps(copy_vote_data) + "\n")
print(f"{relation} tp:{tp} fp:{fp} tn:{tn} fn:{fn}, recall:{tp / relation_ground_truth_count[relation]}")
| bigdante/nell162 | backup/verification/chatgpt_gen_yes_no/check_gen.py | check_gen.py | py | 2,169 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
}
] |
15169034619 | """
This module contains functions for creating transformer models that operate on grid-like inputs.
Functions:
- grid_transformer: returns a callable transformer model constructor that can operate on grid-like inputs.
"""
from typing import Optional, Tuple, Callable, Union
import core_tools.ops as K
import tensorflow as tf
from tensorflow.keras.layers import Dense
from core_tools.core import rgb, get_extractor, log_shape, Extractor
from grid_transformer.constants import TILE
from grid_transformer.utils import get_map_fn
# from tensorflow.python.keras.layers import LayerNormalization, Lambda, Conv2D, Layer,Embedding
def Tokenizer(
batch_no: Optional[int] = None,
col: int = 3,
row: int = 3,
channel: Optional[str] = None,
extractor_input: Union[int, Tuple[int, int, int]] = 224,
extractor: Union[str, Callable] = "ef",
output_size: int = 10,
pooling: Optional[Union[str, int, Callable]] = "flat2d",
map_fn: Union[str, Callable] = "batch",
return_extractor_input: bool = False,
show_shape: Union[bool, Callable] = False,
**kwargs
) -> Callable:
"""
Applies a grid transformation to an image.
Parameters
----------
extractor_input : Union[int, Tuple[int, int, int]]
Shape of the image extractor.
batch_no : Optional[int]
Number of batches to be used.
col : int
Number of columns in the grid.
row : int
Number of rows in the grid.
no : int
Number of attention heads.
extractor : Union[str, Callable]
Extractor to be used for extracting features from the image.
output_size : int
Size of the output of the transformer model.
pos_emd : str
Type of positional encoding to be used.
last : Union[str, Callable]
Last layer of the transformer model.
pooling : Optional[Union[str, int, Callable]]
Pooling function to be used.
model : Optional[tf.keras.Model]
Transformer model to be used.
map_fn : Union[str, Callable]
Function to map the extracted features to the transformer model.
channel : Optional[str]
How channels are handled.
last_index : Optional[int]
Index of the last layer.
return_extractor_input : bool
If True, returns the input of the extractor.
return_attention_scores : bool
If True, returns the attention scores.
**kwargs :
Additional keyword arguments to be passed to the transformer model.
Returns
-------
Callable
Grid Transformer constructor.
"""
extractor_input = (extractor_input, extractor_input, 3) if isinstance(extractor_input,
int) else extractor_input
batch_no = batch_no
extractor = extractor
output_size = output_size
pooling = pooling
channel = channel
map_fn = get_map_fn(map_fn)
# def input_show(*args, **kwargs):
# if hasattr(args[0][0], "numpy"):
# ims(args[0][0].transpose((2, 1, 0)))
# @log_shape(show_shape, "Tokenizer", in_fn=input_show)
@log_shape(show_shape, "Tokenizer")
def apply(x: tf.Tensor) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
nonlocal batch_no
nonlocal extractor
nonlocal pooling
nonlocal channel
shape = tf.shape(x)
if channel == TILE:
x = tf.transpose(x[..., None], [0, 3, 1, 2, 4])
x = rgb((1, 1, 1, 1, 3))(x)
else:
x = tf.reshape(x, tf.concat([shape[:-1], [int(x.shape[-1] / 3), 3]], axis=0))
x = tf.transpose(x, [0, 3, 1, 2, 4])
if batch_no is None:
batch_no = int(x.shape[1] / (row * col))
if row > 1 or col > 1:
x = K.create_image_grid2(x, row=row, col=col)
x = x[:, :, :extractor_input[0], :extractor_input[1]]
if batch_no > 1:
extractor = map_fn(get_extractor(data=extractor_input, batch=False, model=extractor))
else:
x = x[:, 0]
if callable(extractor) and pooling is None:
y = log_shape(show_shape, "Extractor")(extractor)(x)
if y.shape[-1] != output_size:
y = log_shape(show_shape, "IT: Extractor pooling")(Dense(output_size))(y)
elif extractor:
y = Extractor(
data=tuple(x.shape),
model=extractor,
projection=output_size,
pooling=pooling,
show_shape=show_shape,
)(x)
else:
y = x
if return_extractor_input:
return y, x
return y
return apply
| jakubkwiatkowski/compositional_transformer | grid_transformer/tokenizer.py | tokenizer.py | py | 4,891 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line... |
11693199631 | from allauth.account.forms import SignupForm
from django import forms as d_forms
from django.contrib.auth import forms, get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.contrib.auth.models import Group
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class CustomUserCreationForm(UserCreationForm):
email = d_forms.EmailField(required=True, help_text="Required")
error_message = UserCreationForm.error_messages.update(
{"duplicate_username": _("This username has already been taken.")}
)
class Meta(UserCreationForm.Meta):
model = User
def clean_email(self):
"""
Verify email is available.
"""
email = self.cleaned_data.get("email")
qs = User.objects.filter(email=email)
if qs.exists():
raise forms.ValidationError(
"Email is either invalid or already taken"
)
return email
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages["duplicate_username"])
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["email"].required = True
self.fields["name"].required = True
# Django allauth authentication
class AcademySignUpForm(SignupForm):
# specify a choice field that matches choice fields on our model
type = d_forms.ChoiceField(
choices=[("STUDENT", "Student"), ("INSTRUCTOR", "Instructor")]
)
def custom_signup(self, request, user):
# set the user type from the form response
user.type = self.cleaned_data["type"]
user_group = Group.objects.get(name="Instructor")
if user.type == User.Types.INSTRUCTOR:
user_group.user_set.add(user)
user.save()
| decorouz/elearningforfarmers | users/forms.py | forms.py | py | 2,176 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailField",
"line_number": 13,
"usage_type": "... |
70175969705 | import datetime
from pandas_datareader import data as pdr
import queue
import threading
from pprint import pprint
def pullHistoricalData(holdings, benchmarks, start_date, end_date):
def assign_data_to_ticker(holdings, benchmarks, data):
for ticker in list(holdings.keys()):
holdings[ticker]['historicalData'] = data[ticker]['historicalData']
holdings[ticker]['ninetyDayData'] = data[ticker]['ninetyDayData']
for ticker in list(benchmarks.keys()):
benchmarks[ticker]['historicalData'] = data[ticker]['historicalData']
benchmarks[ticker]['ninetyDayData'] = data[ticker]['ninetyDayData']
return holdings, benchmarks
def worker(num):
def ensure_90_days(ticker, ninety_day_start, start_date, end_date):
#print("About to make call with ninety start: " + str(ninety_day_start))
try:
yahoo_data = pdr.get_data_yahoo(ticker, ninety_day_start, end_date)
#print("Received yahoo_data for " + str(ticker))
except:
print("ensure_90_days has failed for: " + str(ticker))
difference = 90 - len(list(yahoo_data['Close'][:start_date]))
#print("The current difference is: " + str(difference))
if difference > 0:
ninety_day_start = ninety_day_start - datetime.timedelta(days=difference)
#print("About to call function with new ninety start date of: " + str(ninety_day_start))
yahoo_data = ensure_90_days(ticker, ninety_day_start, start_date, end_date)
return yahoo_data
while True:
#print("Queue length: " + str(q.qsize()))
item = q.get()
if item is None:
break
ticker, ninety_day_start, start_date, end_date = item
start = datetime.datetime.strptime(start_date, '%Y-%m-%d')
ninety_day_start = start - datetime.timedelta(days=129)
if True:
if ticker not in done:
#print("ticker: " + ticker)
#print("done: " + str(done))
try:
yahoo_data = ensure_90_days(ticker, ninety_day_start, start_date, end_date)
except:
print("FAILURE POINT A")
try:
data[ticker] = {}
data[ticker]['ninetyDayData'] = list(yahoo_data['Close'][:start_date])
data[ticker]['historicalData'] = list(yahoo_data['Close'][start_date:])
#print("Completed " + str(item))
done.add(ticker)
#print("appending " + ticker + "to done")
except:
print("FAILURE POINT B")
else:
print("Already completed: " + str(ticker) + ". Rejecting.")
#print(yahoo_data['Close'][:start_date])
q.task_done()
requests = []
start = datetime.datetime.strptime(start_date, '%Y-%m-%d')
ninety_day_start = start - datetime.timedelta(days=129)
for ticker in holdings.keys():
requests.append((ticker, ninety_day_start, start_date, end_date))
for ticker in benchmarks.keys():
if ticker not in holdings.keys():
requests.append((ticker, ninety_day_start, start_date, end_date))
#print("requests: ")
#pprint(requests)
q = queue.Queue()
done = set()
data = {}
threads = []
thread_count = len(requests)
#print("Creating " + str(thread_count) + " threads...")
for i in range(thread_count):
t = threading.Thread(target=worker, args=(i,))
t.start()
threads.append(t)
#print("i: " + str(i))
for item in requests:
#print("item: " + str(item))
q.put(item)
q.join()
#print("q has joined.")
for i in range(len(threads)):
q.put(None)
for t in threads:
t.join
return assign_data_to_ticker(holdings, benchmarks, data)
def printDates():
filename = 'files/AMN.csv'
file = open(filename, 'r')
dates = []
for line in file:
lineArray = line.split(',')
dates.append(lineArray[0])
print(dates)
def getDates():
filename = 'files/AMN.csv'
file = open(filename, 'r')
dates = []
for line in file:
lineArray = line.split(',')
dates.append(lineArray[0])
return dates
def pullImpliedVols(connection, startDate, endDate):
dummyArray = [1, 2, 3]
| lamothe-hub/gsif-portfolio-risk-platform | gsif/dashboards/calculations/dataAccess.py | dataAccess.py | py | 4,579 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas_datareader.data.get_data_yahoo",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.data",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 30,
"usage_type": "call"
},
{
"a... |
34477179356 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 23 15:48:44 2023
@author: shikh
"""
import streamlit as st
import main_gp as gp
st.set_option('deprecation.showPyplotGlobalUse', False)
#gp.GaussianEngine(1,0.014,230,1,"C")
#gp.plot()
st.title("Air dispersion simulation")
st.text("Using Gaussian plume model")
def main():
st.title("Stack properties:")
slider_label1 = "Stack height (meter)?"
stack_height = st.slider(slider_label1, 0, 10, 1)
st.text_label1=("Emission rate (gram/second) ?")
emission_rate = st.text_input(st.text_label1,0.001)
st.title("Wind properties:")
slider_label2 = "Wind direction ?"
wind_direction = st.slider(slider_label2, 0, 360, 60)
st.text_label3=("wind speed (meter/second) ?")
wind_speed = st.text_input(st.text_label3,1)
options = ['A','B','C','D','E','F']
stability_class = st.selectbox("Select a stability class", options)
#gp.GaussianEngine(stack_height,emission_rate,wind_direction,wind_speed,"C")
st.title("---------------Model result---------------")
result=gp.GaussianEngine(stack_height,float(emission_rate),wind_direction,float(wind_speed),stability_class)
gp.plot(result,wind_direction)
if __name__=="__main__":
main()
| shikhar58/Air-dispersion-model | run_file.py | run_file.py | py | 1,280 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_option",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.text",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.title",... |
73631480103 | import argparse
import random
import sys
from game import *
class OpeningBook():
"""
Very, very simple opening book to give the bot some direction.
"""
def __init__(self, game):
self.game = game
def findMove(self):
"""
. . .
/ \
. |wG1| .
\ /
. . .
1. wG1
"""
if self.game.turnNumber == 1:
return Move(self.game.currentPlayer.pieces['G1'], Point.NONE, Point(0,0,0))
"""
. . . .
/ \ / \
. |bG1|w__| .
\ / \ /
. . . .
1. w__, 2. bG1 -w__
"""
if self.game.turnNumber == 2:
return Move(self.game.currentPlayer.pieces['G1'], Point.NONE, Point(-1,-1,0))
"""
. . . .
/ \
. . |wQ | .
/ \ / \ /
. |b__|wG1| .
\ / \ /
. . . .
1. wG1, 2. b__ -wG1, 3. wQ wG1/
"""
if self.game.turnNumber == 3:
return Move(self.game.currentPlayer.pieces['Q'], Point.NONE, Point(1,0,0))
class MoveSearch():
WIN_SCORE = 2**15 - 1
def __init__(self, game):
self.game = game
self.horizonDepth = 1
self.bestMove = None
self.heuristic = Heuristic(game)
def findMove(self):
self.minimax(self.horizonDepth, float('-infinity'), float('infinity'))
return self.bestMove
def minimax(self, depth, alpha, beta):
"""
Basic Alpha-Beta min-maxing
"""
val = self.checkWinScore()
if not val == None:
return val
if depth <= 0:
val = self.evaluate()
return val
validMoves = self.game.getValidMoves()
for move in validMoves:
self.game.makeMove(move)
val = -self.minimax(depth - 1, -beta, -alpha)
self.game.unmakeMove(move)
if val >= beta: # our opponent won't let us get to this move, it's too good
return beta
if val > alpha: # yea! a better move that we can get to
alpha = val
if depth == self.horizonDepth:
self.bestMove = move
return alpha
def checkWinScore(self):
winner = self.game.getWinner()
if winner == Game.WINNER_NONE:
return None
signFlip = 1
if self.game.currentPlayer.color == Player.BLACK:
signFlip = -1
if winner == Game.WINNER_DRAW:
return signFlip * MoveSearch.CONTEMPT_FACTOR
return signFlip * (MoveSearch.WIN_SCORE + depth) * winner
def evaluate(self):
signFlip = 1
if self.game.currentPlayer.color == Player.BLACK:
signFlip = -1
return signFlip * self.heuristic.evaluate()
class Heuristic():
"""
Positive => WHITE is winning!
"""
def __init__(self, game):
self.game = game
def evaluate(self):
return random.randrange(-10, 10)
class Randy():
""" A bot that will play random moves in Hive """
def __init__(self, args):
self.args = self._parseArgs(args)
self.game = Game('' , '', self.args['times'], self.args['moves'], self.args['expansions'])
self.player = self.game.currentPlayer
self.bestMove = None
def _parseArgs(self, args):
parser = argparse.ArgumentParser(prog="randy", argument_default='')
parser.add_argument(args[0], default='')
parser.add_argument('--times', default='30000,0,0') # game time, white used, black used (ms)
parser.add_argument('--moves', default='') # 1. wS1, 2. bG1 -wS1, 3. wQ wS1/, ...
parser.add_argument('--expansions', default='') # LMD
args = parser.parse_args(args)
return vars(args)
def run(self):
#self.game.printBoard()
self.bestMove = OpeningBook(self.game).findMove()
if not self.bestMove:
self.bestMove = MoveSearch(self.game).findMove()
self.printMove()
def printMove(self):
if not self.bestMove:
sys.stdout.write('pass')
else:
sys.stdout.write(self.game.getMoveNotation(self.bestMove))
if __name__ == "__main__":
Randy(sys.argv).run()
| tylerxprice/hive-framework | randy.py | randy.py | py | 4,060 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "random.randrange",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "sys.stdo... |
42090960900 | from pathlib import Path
import mmcv
from mmcls.apis import inference_model
from mmdet.apis import inference_detector
from mmseg.apis import inference_segmentor
from mmrazor.apis import init_mmcls_model, init_mmdet_model, init_mmseg_model
def _sync_bn2bn(config: mmcv.Config) -> None:
def dfs(cfg_dict) -> None:
if isinstance(cfg_dict, dict):
for k, v in cfg_dict.items():
if k == 'norm_cfg':
if v['type'] == 'SyncBN':
v['type'] = 'BN'
dfs(v)
dfs(config._cfg_dict)
def test_init_mmcls_model() -> None:
from mmcls.datasets import ImageNet
config_file = 'configs/nas/spos/spos_subnet_shufflenetv2_8xb128_in1k.py'
config = mmcv.Config.fromfile(config_file)
config.model = None
# Replace SyncBN with BN to inference on CPU
_sync_bn2bn(config)
mutable_file = 'configs/nas/spos/SPOS_SHUFFLENETV2_330M_IN1k_PAPER.yaml'
model = init_mmcls_model(
config,
device='cpu',
cfg_options={'algorithm.mutable_cfg': mutable_file})
model.CLASSES = ImageNet.CLASSES
assert not hasattr(model, 'architecture')
assert hasattr(model, 'backbone')
assert hasattr(model, 'neck')
assert hasattr(model, 'head')
img = mmcv.imread(Path(__file__).parent.parent / 'data/color.jpg', 'color')
result = inference_model(model, img)
assert isinstance(result, dict)
assert result.get('pred_label') is not None
assert result.get('pred_score') is not None
assert result.get('pred_class') is not None
def test_init_mmdet_model() -> None:
config_file = \
'configs/nas/detnas/detnas_subnet_frcnn_shufflenetv2_fpn_1x_coco.py'
config = mmcv.Config.fromfile(config_file)
config.model = None
# Replace SyncBN with BN to inference on CPU
_sync_bn2bn(config)
mutable_file = \
'configs/nas/detnas/DETNAS_FRCNN_SHUFFLENETV2_340M_COCO_MMRAZOR.yaml'
model = init_mmdet_model(
config,
device='cpu',
cfg_options={'algorithm.mutable_cfg': mutable_file})
assert not hasattr(model, 'architecture')
img = mmcv.imread(Path(__file__).parent.parent / 'data/color.jpg', 'color')
result = inference_detector(model, img)
assert isinstance(result, list)
def test_init_mmseg_model() -> None:
config_file = 'configs/distill/cwd/' \
'cwd_cls_head_pspnet_r101_d8_pspnet_r18_d8_512x1024_cityscapes_80k.py'
config = mmcv.Config.fromfile(config_file)
config.model = None
# Replace SyncBN with BN to inference on CPU
_sync_bn2bn(config)
# Enable test time augmentation
config.data.test.pipeline[1].flip = True
model = init_mmseg_model(config, device='cpu')
assert not hasattr(model, 'architecture')
assert hasattr(model, 'backbone')
assert hasattr(model, 'decode_head')
assert hasattr(model, 'auxiliary_head')
img = mmcv.imread(Path(__file__).parent.parent / 'data/color.jpg', 'color')
result = inference_segmentor(model, img)
assert result[0].shape == (300, 400)
| Gumpest/AvatarKD | tests/test_apis/test_inference.py | test_inference.py | py | 3,065 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "mmcv.Config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mmcv.Config.fromfile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "mmcv.Config",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "mmrazor.apis.i... |
29557419686 | import pytest
from brownie import chain, RewardsManager, reverts
from math import floor
from utils.config import network_name
from os.path import exists
import json
rewards_period = 3600 * 24 * 7
rewards_amount = 5_000 * 10**18
def test_acceptance(
    ldo_token,
    stranger,
    rewards_contract,
    helpers,
    accounts,
    ldo_holder,
):
    """Acceptance test against a mainnet-deployed RewardsManager.

    Simulates two monthly funding cycles: each month the manager is funded
    with four weekly reward amounts and must distribute them week by week,
    reverting on premature or unfunded period starts. Skipped when no
    deployment file exists for the current network.
    """
    deployment_file_path = f"deployed-{network_name().replace('fork', 'main')}.json"
    if not exists(deployment_file_path):
        pytest.skip(f"no RewardsManager deployed on {network_name()}")

    # Fast-forward a week so any currently running reward period can finish.
    chain.sleep(3600 * 24 * 7)
    chain.mine()

    # Fix: read the deployment file with a context manager; the original
    # open()/close() pair leaked the handle if json.load or
    # RewardsManager.at() raised.
    with open(deployment_file_path, "r") as f:
        deployment_data = json.load(f)
    rewards_manager = RewardsManager.at(
        deployment_data["curveRewardsManager"]["baseAddress"]
    )

    # (A dead `balance_before` assignment that was overwritten before first
    # use has been removed here.)
    reward_data = rewards_contract.reward_data(ldo_token)

    # Ensure the manager is the reward distributor on the rewards contract.
    if reward_data[0] != rewards_manager:
        current_distributor = accounts.at(reward_data[0], {"force": True})
        rewards_contract.set_reward_distributor(
            ldo_token, rewards_manager, {"from": current_distributor}
        )

    reward_data = rewards_contract.reward_data(ldo_token)
    assert reward_data[0] == rewards_manager
    assert reward_data[1] == rewards_manager.period_finish()

    for month in range(2):
        # Starting a period without funds must revert.
        with reverts("manager: low balance"):
            rewards_manager.start_next_rewards_period({"from": stranger})

        # Fund the manager with a month's worth of rewards (4 weekly amounts).
        ldo_token.transfer(rewards_manager, 4 * rewards_amount, {"from": ldo_holder})

        balance_before = ldo_token.balanceOf(rewards_contract)
        reward_data = rewards_contract.reward_data(ldo_token)
        assert reward_data[1] == rewards_manager.period_finish()

        assert rewards_manager.is_curve_rewards_period_finished() == True
        tx = rewards_manager.start_next_rewards_period({"from": stranger})
        assert rewards_manager.is_curve_rewards_period_finished() == False

        # Starting the first of 4 weekly periods pushes the finish 3 weeks out.
        reward_data = rewards_contract.reward_data(ldo_token)
        assert reward_data[1] + rewards_period * 3 == rewards_manager.period_finish()

        helpers.assert_single_event_named(
            "NewRewardsPeriodStarted", tx, {"amount": rewards_amount}
        )
        helpers.assert_single_event_named(
            "WeeklyRewardsAmountUpdated", tx, {"newWeeklyRewardsAmount": rewards_amount}
        )

        # Mid-period restarts must revert, and funds must have arrived.
        chain.sleep(rewards_period - 10)
        chain.mine()
        assert rewards_manager.is_curve_rewards_period_finished() == False
        with reverts("manager: rewards period not finished"):
            rewards_manager.start_next_rewards_period({"from": stranger})
        assert ldo_token.balanceOf(rewards_contract) == balance_before + rewards_amount

        chain.sleep(10)
        chain.mine()

        # The remaining three weekly periods of the month.
        for week in range(3):
            reward_data = rewards_contract.reward_data(ldo_token)
            assert (
                reward_data[1] + rewards_period * (3 - week)
                == rewards_manager.period_finish()
            )
            balance_before = ldo_token.balanceOf(rewards_contract)
            assert rewards_manager.is_curve_rewards_period_finished() == True
            tx = rewards_manager.start_next_rewards_period({"from": stranger})
            assert rewards_manager.is_curve_rewards_period_finished() == False
            reward_data = rewards_contract.reward_data(ldo_token)
            assert (
                reward_data[1] + rewards_period * (3 - week - 1)
                == rewards_manager.period_finish()
            )
            helpers.assert_single_event_named(
                "NewRewardsPeriodStarted", tx, {"amount": rewards_amount}
            )
            chain.sleep(rewards_period - 10)
            chain.mine()
            assert rewards_manager.is_curve_rewards_period_finished() == False
            with reverts("manager: rewards period not finished"):
                rewards_manager.start_next_rewards_period({"from": stranger})
            assert (
                ldo_token.balanceOf(rewards_contract) == balance_before + rewards_amount
            )
            chain.sleep(10)
            chain.mine()
| lidofinance/curve-rewards-manager | tests/test_acceptance.py | test_acceptance.py | py | 4,186 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.config.network_name",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.config.net... |
16048365956 | # -*- coding: UTF-8 -*-
"""
Name: gui_app.py
Porpose: bootstrap for Vidtuber app.
Compatibility: Python3, wxPython Phoenix
Author: Gianluca Pernigotto <jeanlucperni@gmail.com>
Copyleft - 2023 Gianluca Pernigotto <jeanlucperni@gmail.com>
license: GPL3
Rev: March.17.2023
Code checker: flake8, pylint
This file is part of Vidtuber.
Vidtuber is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Vidtuber is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Vidtuber. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from shutil import which, rmtree
import builtins
import wx
try:
from wx.svg import SVGimage
except ModuleNotFoundError:
pass
from vidtuber.vdms_sys.argparser import arguments
from vidtuber.vdms_sys.configurator import DataSource
from vidtuber.vdms_sys import app_const as appC
from vidtuber.vdms_utils.utils import del_filecontents
# add translation macro to builtin similar to what gettext does
builtins.__dict__['_'] = wx.GetTranslation
class Vidtuber(wx.App):
    """
    bootstrap the wxPython GUI toolkit before
    starting the main_frame.
    """

    def __init__(self, redirect=True, filename=None, **kwargs):
        """
        - redirect=False will send print statements to a console
          window (in use)
        - redirect=True will be sent to a little textbox window.
        - filename=None Redirect sys.stdout and sys.stderr
          to a popup window.
        - filename='path/to/file.txt' Redirect sys.stdout
          and sys.stderr to file
        See main() function below to settings it.
        """
        self.locale = None
        self.appset = {'DISPLAY_SIZE': None,
                       'IS_DARK_THEME': None,
                       'GETLANG': None,
                       # short name for the locale
                       'SUPP_LANGs': ['it_IT', 'en_US', 'ru_RU'],
                       # supported langs for online help (user guide)
                       }
        self.data = DataSource(kwargs)  # instance data
        self.appset.update(self.data.get_fileconf())  # data system
        self.iconset = None
        wx.App.__init__(self, redirect, filename)  # constructor
        wx.SystemOptions.SetOption("osx.openfiledialog.always-show-types", "1")
    # -------------------------------------------------------------------

    def OnInit(self):
        """Bootstrap interface."""
        if self.appset.get('ERROR'):
            wx.MessageBox(f"FATAL: {self.appset['ERROR']}\n\nSorry, unable "
                          f"to continue...", 'Vidtuber - ERROR', wx.ICON_STOP)
            return False
        self.appset['DISPLAY_SIZE'] = wx.GetDisplaySize()  # get monitor res
        if hasattr(wx.SystemSettings, 'GetAppearance'):
            appear = wx.SystemSettings.GetAppearance()
            self.appset['IS_DARK_THEME'] = appear.IsDark()
        self.iconset = self.data.icons_set(self.appset['icontheme'][0])

        # locale
        wx.Locale.AddCatalogLookupPathPrefix(self.appset['localepath'])
        self.update_language(self.appset['locale_name'])

        ytdlp = self.check_youtube_dl()
        if ytdlp is False:
            return False

        # If FFmpeg binaries are missing, start the setup wizard instead
        # of the main window.
        noffmpeg = self.check_ffmpeg()
        if noffmpeg:
            self.wizard(self.iconset['vidtuber'])
            return True

        from vidtuber.vdms_main.main_frame import MainFrame
        main_frame = MainFrame()
        main_frame.Show()
        self.SetTopWindow(main_frame)
        return True
    # -------------------------------------------------------------------

    def check_youtube_dl(self):
        """
        Check for `yt_dlp` python module.
        Returns False (and shows an error dialog) when the configured
        downloader is yt_dlp but the module cannot be imported.
        """
        if self.appset['downloader'] == 'yt_dlp':
            try:
                import yt_dlp
            except ModuleNotFoundError as err:
                wx.MessageBox(f"ERROR: {err}\n\nyt-dlp is missing, "
                              f"please install it.", 'Vidtuber - ERROR',
                              wx.ICON_STOP)
                return False
        return None
    # -------------------------------------------------------------------

    def check_ffmpeg(self):
        """
        Get the FFmpeg's executables. A permission check
        is also performed on Unix/Unix-like systems.
        Returns True when an executable is missing, False on a
        permission failure, None when everything is fine.
        """
        for link in [self.appset['ffmpeg_cmd'],
                     self.appset['ffprobe_cmd'],
                     ]:
            if self.appset['ostype'] == 'Windows':  # check for exe
                # HACK use even for unix, if not permission is equal
                # to not binaries
                if not which(link, mode=os.F_OK | os.X_OK, path=None):
                    return True
            else:
                if not os.path.isfile(f"{link}"):
                    return True

        if not self.appset['ostype'] == 'Windows':
            # check for permissions when linked locally
            for link in [self.appset['ffmpeg_cmd'],
                         self.appset['ffprobe_cmd'],
                         ]:
                if which(link, mode=os.F_OK | os.X_OK, path=None):
                    permissions = True
                else:
                    wx.MessageBox(_('Permission denied: {}\n\n'
                                    'Check execution permissions.').format
                                  (link), 'Vidtuber', wx.ICON_STOP)
                    permissions = False
                    break
            return False if not permissions else None
        return None
    # -------------------------------------------------------------------

    def wizard(self, wizardicon):
        """
        Show an initial dialog to setup the application
        during the first start-up.
        """
        from vidtuber.vdms_dialogs.wizard_dlg import Wizard
        main_frame = Wizard(wizardicon)
        main_frame.Show()
        self.SetTopWindow(main_frame)
        return True
    # ------------------------------------------------------------------

    def update_language(self, lang=None):
        """
        Update the language to the requested one.

        Make *sure* any existing locale is deleted before the new
        one is created. The old C++ object needs to be deleted
        before the new one is created, and if we just assign a new
        instance to the old Python variable, the old C++ locale will
        not be destroyed soon enough, likely causing a crash.

        :param string `lang`: one of the supported language codes
        https://docs.wxpython.org/wx.Language.enumeration.html#wx-language
        """
        # if an unsupported language is requested, default to English
        selectlang = appC.supLang.get(lang, wx.LANGUAGE_ENGLISH)

        if self.locale:
            assert sys.getrefcount(self.locale) <= 2
            del self.locale

        # create a locale object for this language
        self.locale = wx.Locale(selectlang[0])
        if self.locale.IsOk():
            self.locale.AddCatalog(appC.langDomain)
            self.appset['GETLANG'] = self.locale.GetName()
        else:
            self.locale = None
            self.appset['GETLANG'] = "en_US"
    # -------------------------------------------------------------------

    def OnExit(self):
        """
        OnExit provides an interface for exiting the application.
        The ideal place to run the last few things before completely
        exiting the application, eg. delete temporary files etc.
        """
        if self.appset['clearcache']:
            tmp = os.path.join(self.appset['cachedir'], 'tmp')
            if os.path.exists(tmp):
                for cache in os.listdir(tmp):
                    fcache = os.path.join(tmp, cache)
                    if os.path.isfile(fcache):
                        os.remove(fcache)
                    # Bug fix: the original wrote `elif os.path.isdir:`,
                    # testing the function object itself (always truthy)
                    # instead of calling it, so ANY non-regular entry was
                    # passed to rmtree. Call isdir on the actual path.
                    elif os.path.isdir(fcache):
                        rmtree(fcache)

        if self.appset['clearlogfiles']:
            logdir = self.appset['logdir']
            if os.path.exists(logdir):
                flist = os.listdir(logdir)
                if flist:
                    for logname in flist:
                        logfile = os.path.join(logdir, logname)
                        try:
                            del_filecontents(logfile)
                        except Exception as err:
                            wx.MessageBox(_("Unexpected error while deleting "
                                            "file contents:\n\n"
                                            "{0}").format(err),
                                          'Vidtuber', wx.ICON_STOP)
                            return False
        return True
    # -------------------------------------------------------------------
# -------------------------------------------------------------------
def main():
    """
    Without command line arguments starts the
    wx.App mainloop with default keyword arguments.
    """
    cli_args = sys.argv[1:]
    kwargs = arguments() if cli_args else {'make_portable': None}
    app = Vidtuber(redirect=False, **kwargs)
    app.MainLoop()
| jeanslack/Vidtuber | vidtuber/gui_app.py | gui_app.py | py | 9,442 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "builtins.__dict__",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.GetTranslation",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.App",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "vidtuber.vd... |
28861454822 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from techism.models import Event, EventTag, Location, Organization, OrganizationTag, Setting, TweetedEvent
from django.contrib import admin
import reversion
class EventInline(admin.TabularInline):
    # Read-only inline listing of a location's events (newest first);
    # rows cannot be added, edited or deleted from the parent page
    # (readonly_fields == fields, can_delete=False, max_num=0).
    model = Event
    fields = ['title', 'date_time_begin', 'date_time_end', 'url', 'tags', 'published']
    readonly_fields = fields
    ordering = ['-date_time_begin']
    can_delete = False
    max_num = 0
class LocationAdmin(admin.ModelAdmin):
    # Locations are searchable by name, listed with address/coordinates,
    # and show their events inline (see EventInline).
    search_fields = ['name']
    list_display = ['name', 'street', 'city', 'latitude', 'longitude', 'historized_since']
    ordering = ['historized_since', 'name']
    inlines = [
        EventInline,
    ]
class EventAdmin(reversion.VersionAdmin):
    # VersionAdmin keeps a revision history of every event change.
    search_fields = ['title']
    list_filter = ['published']
    list_display = ['title', 'date_time_begin', 'date_time_end', 'location', 'user', 'published']
    date_hierarchy = 'date_time_begin'
    filter_horizontal = ['tags']
    # Raw-id widgets avoid rendering huge select boxes for these relations.
    raw_id_fields = ['location', 'user', 'organization']
class EventTagAdmin(admin.ModelAdmin):
    # Minimal name-only admin for event tags.
    search_fields = ['name']
    list_display = ['name']
class EventChangeLogAdmin(admin.ModelAdmin):
    # Change-log entries, filterable by change type.
    # NOTE(review): this admin class is never passed to admin.site.register()
    # below — confirm whether that omission is intentional.
    list_filter = ['change_type']
    list_display = ['event', 'event_title', 'change_type', 'date_time']
class TweetedEventAdmin(admin.ModelAdmin):
    # Shows which events have been tweeted and when.
    list_display = ['event', 'tweet', 'date_time_created']
class OrganizationAdmin(admin.ModelAdmin):
    # Organizations searchable by title, listed with their URL.
    search_fields = ['title']
    list_display = ['title', 'url']
class OrganizationTagAdmin(admin.ModelAdmin):
    # Minimal name-only admin for organization tags.
    search_fields = ['name']
    list_display = ['name']
class SettingAdmin(admin.ModelAdmin):
    # Minimal name-only admin for application settings.
    search_fields = ['name']
    list_display = ['name']
# Wire each model to its ModelAdmin on the default admin site,
# in the same order as the original one-call-per-line registrations.
for _model, _model_admin in (
    (Location, LocationAdmin),
    (Event, EventAdmin),
    (EventTag, EventTagAdmin),
    (TweetedEvent, TweetedEventAdmin),
    (Organization, OrganizationAdmin),
    (OrganizationTag, OrganizationTagAdmin),
    (Setting, SettingAdmin),
):
    admin.site.register(_model, _model_admin)
| techism/techism | techism/admin.py | admin.py | py | 2,057 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.contrib.admin.TabularInline",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "techism.models.Event",
"line_number": 9,
"usage_type": "name"
},
{
"ap... |
73137395944 | # simulate_one_group.py
# Benjamin Crestel, 2020-12-22
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from simulations import simulate_normal
def simulate_one_group(number_samples: int, mean: float = 100.0, std: float = 15.0):
    """
    Generate samples of IQ tests, plot their histogram and overlay the
    true normal density.

    :param number_samples: number of samples
    :param mean: mean of the distribution
    :param std: standard deviation of the distribution
    :return: samples and axes of the plot (histogram axis, density axis)
    """
    # Bug fix: `mean` and `std` were accepted but ignored — 100/15 were
    # hard-coded both in the simulation and in the overlaid density.
    # Defaults preserve the original behavior for existing callers.
    samples = simulate_normal(
        sample_mean=mean, sample_std=std, sample_size=number_samples, number_simulations=1
    )
    samples_mean = samples.mean()
    samples_std = samples.std()

    fig, ax = plt.subplots(1, 1)
    bounds = samples.min() - 10.0, samples.max() + 10.0
    ax.hist(samples, range=bounds)
    # Annotate near the distribution centre, scaled to the sample count.
    plt.text(mean, 0.15 * number_samples, f"mean = {samples_mean:.2f}")
    plt.text(mean, 0.1 * number_samples, f"std = {samples_std:.2f}")

    # Second y-axis for the theoretical density of the sampled distribution.
    ax2 = ax.twinx()
    xx = np.linspace(bounds[0], bounds[1], 100)
    ax2.plot(xx, norm(mean, std).pdf(xx), "--r")
    return samples, (ax, ax2)
| bcrestel/coursera_statisticalinferences | src/simulate_one_group.py | simulate_one_group.py | py | 1,114 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "simulations.simulate_normal",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name... |
41396486126 | """A module for implementing communication tasks."""
from typing import Optional, Tuple
import numpy as np
from communication_tasks import monotones
from utils.utils import matrix_is_rowstochastic, sample_random_row_stochastic_matrix
class CommunicationMatrix:
    """A class which defines all relevant features of communication tasks.

    Wraps a row-stochastic matrix; each monotone is computed on demand by
    the corresponding ``calculate_*`` method and cached on the instance.
    """

    def __init__(self, matrix: np.ndarray) -> None:
        if not matrix_is_rowstochastic(matrix):
            raise ValueError("Input matrix is not row-stochastic!")
        self.matrix = matrix
        self.nrows: int = matrix.shape[0]
        self.ncols: int = matrix.shape[1]
        # Monotones start out unknown; the calculate_* methods fill them in.
        self.rank: Optional[int] = None
        self.lambda_min: Optional[float] = None
        self.lambda_max: Optional[float] = None
        self.iota: Optional[int] = None

    def calculate_rank(self) -> int:
        """Compute, cache and return the rank of the communication matrix."""
        self.rank = monotones.rank(self.matrix)
        return self.rank

    def calculate_lambda_min(self) -> float:
        """Compute, cache and return lambda min of the communication matrix."""
        self.lambda_min = monotones.lambda_min(self.matrix)
        return self.lambda_min

    def calculate_lambda_max(self) -> float:
        """Compute, cache and return lambda max of the communication matrix."""
        self.lambda_max = monotones.lambda_max(self.matrix)
        return self.lambda_max

    def calculate_iota(self) -> int:
        """Compute, cache and return iota (max number of orthogonal rows)."""
        self.iota = monotones.iota(self.matrix)
        return self.iota
class RandomCommunicationMatrix(CommunicationMatrix):
    """A communication matrix sampled at random with the given shape."""

    def __init__(self, shape: Tuple[int, int]) -> None:
        super().__init__(sample_random_row_stochastic_matrix(shape))
| oskarikerppo/communication-tasks | communication_tasks/communication_matrices.py | communication_matrices.py | py | 2,247 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "utils.utils.matrix_is_rowstochastic",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name":... |
73302038824 |
import pymongo
import csv
from csv import writer
from pymongo import MongoClient
import pandas as pd
# Connection
try:
    # Connect to the hosted MongoDB Atlas cluster and select the "prova2"
    # database. NOTE(review): credentials are hard-coded in the URI and the
    # bare `except` below hides the real failure reason — consider moving
    # the URI to configuration and narrowing the exception.
    myclient = pymongo.MongoClient(
        "mongodb+srv://teste:teste123@cluster0.1ylmc4y.mongodb.net/test")
    mydb = myclient["prova2"]
    print("\nConectado com sucesso\n")
except:
    print("\nPor favor tente mais tarde. Banco de dados fora do ar\n")
# funções robos:
def inserirRobos():
    """Prompt for a robot's data, insert it into MongoDB and append it to robos.csv."""
    print("Inserir dados do robo.\n\n")
    robo_id = input("id: ")
    mercadoria = input("mercadoria: ")
    prateleira = input("prateleira: ")

    avulso = {"id": robo_id, "mercadoria": mercadoria, "prateleira": prateleira}
    x = mycol.insert_one(avulso)
    print(x)

    # Append the same record to the CSV mirror. The original also opened the
    # file for reading (and never closed it) with an unused DictReader; that
    # leaked handle and dead code have been removed. newline='' prevents
    # blank rows on Windows.
    with open('robos.csv', 'a', newline='') as f_object:
        writer(f_object).writerow([robo_id, mercadoria, prateleira])
def lerRobo():
    """Print every robot document from MongoDB followed by the CSV mirror."""
    for doc in mycol.find():
        print(doc)
    # Fix: use a context manager so the handle is closed (the original
    # leaked it); the unused header/DictReader dead code was dropped.
    with open("robos.csv", "r") as csvfile:
        print("\n")
        print(csvfile.read())
def updateRobo():
    """Prompt for a robot id and new data; update the record in MongoDB and robos.csv."""
    print("Modificar dados do robo.\n\n")
    robo_id = input("id: ")
    mercadoria = input("mercadoria: ")
    prateleira = input("prateleira: ")

    mycol.update_many(
        {"id": robo_id},
        {"$set": {"id": robo_id, "mercadoria": mercadoria, "prateleira": prateleira}},
    )

    # Bug fix: `df.loc[<string id>, ...]` used the raw input as a row LABEL on
    # a RangeIndex, appending a brand-new row instead of updating the matching
    # record. Select rows whose "id" column matches instead (compared as
    # strings, since pandas may parse the column as int).
    df = pd.read_csv("robos.csv")
    mask = df["id"].astype(str) == robo_id
    df.loc[mask, ["mercadoria", "prateleira"]] = [mercadoria, prateleira]
    df.to_csv("robos.csv", index=False)
    print(df)
def deletarRobo():
    """Prompt for a robot id and delete the record from MongoDB and robos.csv."""
    robo_id = input("id: ")
    mycol.delete_one({"id": robo_id})
    print(robo_id)

    # Bug fix: the original truncated the dataframe (df[:int(id)]), dropped a
    # row by positional label and never wrote the result back, leaving the CSV
    # untouched. Keep every row whose "id" does not match and persist it.
    df = pd.read_csv("robos.csv")
    df = df[df["id"].astype(str) != robo_id]
    df.to_csv("robos.csv", index=False)
    print(df)
# funções admnistração:
def inserirADM():
    """Prompt for an employee's data, insert it into MongoDB and append it to adm.csv."""
    print("Inserir dados do colaborador.\n\n")
    matricula = input("matricula: ")
    nome = input("nome: ")
    CPF = input("CPF: ")
    cargo = input("cargo: ")
    filial = input("filial: ")

    avulso = {"matricula": matricula, "nome": nome,
              "CPF": CPF, "cargo": cargo, "filial": filial}
    x = mycol.insert_one(avulso)
    print(x)

    # Append the record to the CSV mirror; the original also opened the file
    # for reading without closing it (leaked handle, unused DictReader).
    with open('adm.csv', 'a', newline='') as f_object:
        writer(f_object).writerow([matricula, nome, CPF, cargo, filial])
def lerADM():
    """Print every employee document from MongoDB followed by the CSV mirror."""
    for doc in mycol.find():
        print(doc)
    # Fix: context-managed open (the original leaked the handle); unused
    # header/DictReader dead code removed.
    with open("adm.csv", "r") as csvfile:
        print("\n")
        print(csvfile.read())
def updateADM():
    """Prompt for a matricula and new data; update the record in MongoDB and adm.csv."""
    print("Modificar dados do colaborador.\n\n")
    matricula = input("matricula: ")
    nome = input("nome: ")
    CPF = input("CPF: ")
    cargo = input("cargo: ")
    filial = input("filial: ")

    mycol.update_many(
        {"matricula": matricula},
        {"$set": {"matricula": matricula, "nome": nome,
                  "CPF": CPF, "cargo": cargo, "filial": filial}},
    )

    # Bug fixes: (1) `df.loc[<string matricula>, ...]` appended a new row
    # instead of updating the matching one; (2) to_csv() wrote the index
    # column, corrupting the file layout (robos.csv uses index=False).
    df = pd.read_csv("adm.csv")
    mask = df["matricula"].astype(str) == matricula
    df.loc[mask, ["nome", "CPF", "cargo", "filial"]] = [nome, CPF, cargo, filial]
    df.to_csv("adm.csv", index=False)
    print(df)
def deletarADM():
    """Prompt for a matricula and delete the record from MongoDB and adm.csv."""
    matricula = input("matricula: ")
    mycol.delete_one({"matricula": matricula})
    print(matricula)

    # Bug fix: the original truncated the dataframe, dropped a row by
    # positional label and never saved the result. Filter the matching row
    # out and persist the remaining rows.
    df = pd.read_csv("adm.csv")
    df = df[df["matricula"].astype(str) != matricula]
    df.to_csv("adm.csv", index=False)
    print(df)
# Menu: pick a collection, then dispatch the chosen CRUD action.
print("Digite 1 para: Robos;")
print("Digite 2 para: Administração;")
menu = input("\n Digite o banco de dados desejado >>> ")

if menu == "1":
    mycol = mydb["robos"]
    print("\nDigite 1 para: inserir;")
    print("Digite 2 para: ler;")
    print("Digite 3 para: atualizar;")
    print("Digite 4 para: deletar.\n")
    escolha = input("\n Digite a ação dentro do banco >>> ")
    # Dispatch table replaces the original if/elif chain; an unknown
    # choice is a no-op, exactly as before.
    {"1": inserirRobos, "2": lerRobo,
     "3": updateRobo, "4": deletarRobo}.get(escolha, lambda: None)()
elif menu == "2":
    mycol = mydb["adm"]
    print("\nDigite 1 para: inserir;")
    print("Digite 2 para: ler;")
    print("Digite 3 para: atualizar;")
    print("Digite 4 para: deletar.\n")
    escolha = input("\n Digite a ação dentro do banco >>> ")
    {"1": inserirADM, "2": lerADM,
     "3": updateADM, "4": deletarADM}.get(escolha, lambda: None)()
| LuWroblewski/FaculdadePython | prova2Banco/crud1.py | crud1.py | py | 5,222 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"l... |
73880579625 | #
# This file is part of Pytricia.
# Joel Sommers <jsommers@colgate.edu>
#
# Pytricia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pytricia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pytricia. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import unittest
import pytricia
import gzip
def dumppyt(t):
    """Print every key/value pair stored in a Pytricia-like mapping."""
    print("\nDumping Pytricia")
    for prefix in t.keys():
        print("\t {}: {}".format(prefix, t[prefix]))
class PyTriciaLoadTest(unittest.TestCase):
    """Load test: ingest full routeviews prefix->ASN dumps into one PyTricia trie."""

    # Gzipped routeviews dumps (IPv4 and IPv6); one "<net> <prefixlen> <asn>"
    # record per line. The files must sit next to this script.
    _files = ['routeviews-rv2-20160202-1200.pfx2as.gz', 'routeviews-rv6-20160202-1200.pfx2as.gz']

    def testLoaditUp(self):
        # Width 128 so the same trie can hold both IPv4 and IPv6 prefixes.
        pyt = pytricia.PyTricia(128)

        # load routeviews prefix -> AS mappings; all of them.
        for f in PyTriciaLoadTest._files:
            print ("loading routeviews data from {}".format(f))
            with gzip.GzipFile(f, 'r') as inf:
                for line in inf:
                    # GzipFile yields bytes; decode each field before building
                    # the "net/len" key. Multiple ASNs may map to one prefix.
                    ipnet,prefix,asn = line.split()
                    network = '{}/{}'.format(ipnet.decode(), prefix.decode())
                    if network in pyt:
                        pyt[network].append(asn.decode())
                    else:
                        pyt[network] = [asn.decode()]

        # verify that everything was stuffed into pyt correctly
        for f in PyTriciaLoadTest._files:
            print ("verifying routeviews data from {}".format(f))
            with gzip.GzipFile(f, 'r') as inf:
                for line in inf:
                    ipnet,prefix,asn = line.split()
                    asn = asn.decode()
                    network = '{}/{}'.format(ipnet.decode(), prefix.decode())
                    # Both the exact "net/len" key and the bare network
                    # address must resolve via longest-prefix match.
                    self.assertIn(network, pyt)
                    ipnet = str(ipnet.decode())
                    self.assertIn(ipnet, pyt)
                    asnlist = pyt[network]
                    self.assertIn(asn, asnlist)

        # dump everything out...
        # dumppyt(pyt)
        print("removing everything.")
        # Materialize the key list first: deleting while iterating the
        # trie's own key iterator would be unsafe.
        netlist = [ n for n in pyt.keys() ]
        for net in netlist:
            del pyt[net]
        self.assertEqual(len(pyt), 0)
if __name__ == '__main__':
    # Allow running the load test directly via the unittest CLI runner.
    unittest.main()
| jsommers/pytricia | testload.py | testload.py | py | 2,625 | python | en | code | 203 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pytricia.PyTricia",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "gzip.GzipFile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "gzip.GzipFile"... |
27649483225 | from django.shortcuts import render
from .models import ProductInBasket
from django.http import JsonResponse
from ordering.views import is_user_registered
def show_basket(request):
    """Render the basket page with the current user's items and total price."""
    session_key = request.session.session_key
    # Fix: resolve the owner once — the original called is_user_registered()
    # twice (once for the truth test, once for the value), doubling the lookup.
    user = is_user_registered(session_key)
    if user:
        # Registered users: basket rows are keyed by account.
        products_in_basket = ProductInBasket.objects.filter(user=user)
    else:
        # Anonymous users: basket rows are keyed by session.
        products_in_basket = ProductInBasket.objects.filter(session_key=session_key)

    # Re-price every line item (persisted per row) and accumulate the total.
    full_price = 0
    for product_in_basket in products_in_basket:
        product_in_basket.products_price = int(product_in_basket.product.price) * int(product_in_basket.amount)
        product_in_basket.save()
        full_price += int(product_in_basket.products_price)

    return render(request, 'basket/basket.html',
                  context={'products_in_basket': products_in_basket, "full_price": full_price})
def delete_from_basket(request):
    """Remove a single product row from the basket (AJAX endpoint)."""
    pk = request.POST.get('pk_product')
    ProductInBasket.objects.get(pk=pk).delete()
    # Empty JSON body: the client only needs the 200 status.
    return JsonResponse({})
def change_amount(request):
    """Adjust a basket row's amount by +/- delta (clamped to 1..99) and reprice it."""
    delta = int(request.POST.get('operation'))
    product = ProductInBasket.objects.get(pk=request.POST.get('product_pk'))
    # Clamp to [1, 99]; equivalent to the original if/elif chain.
    product.amount = max(1, min(99, product.amount + delta))
    product.products_price = product.amount * int(product.product.price)
    product.save()
    return JsonResponse({})
def update_basket_counter(request):
    """Return the number of products currently in the basket as JSON."""
    session_key = request.session.session_key
    # Fix: call the lookup once — the original issued the same query twice
    # (once for the check, once for the value).
    user = is_user_registered(session_key)
    if user:
        basket_rows = ProductInBasket.objects.filter(user=user)
    else:
        basket_rows = ProductInBasket.objects.filter(session_key=session_key)
    # len() (not .count()) preserves the original's query behavior.
    return JsonResponse({'count_product': len(basket_rows)})
| akarumeis/CoffeMania | basket/views.py | views.py | py | 3,932 | python | uk | code | 1 | github-code | 36 | [
{
"api_name": "ordering.views.is_user_registered",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ordering.views.is_user_registered",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.ProductInBasket.objects.filter",
"line_number": 10,
"usage_type"... |
31065339378 | ### Shuffle Cog ###
### Imports ###
# General
import json
# Library
import nextcord
from nextcord.ext import commands
from tinydb import TinyDB, Query
from utils import embeds
# Internal
from utils.content import ShuffleContent
from utils.embeds import embed_shuffle, embed_invalid_shuffle, embed_shuffle_records, embed_payment, embed_unsuccesful_pay, embed_successful_pay
from utils.shufflehelper import ShuffleHelper
from utils.user import User
# Initialize database.
# Module-level TinyDB handle for shuffle state plus a reusable Query object.
# NOTE(review): the name `Shuffle` is shadowed inside ShuffleView.enter_shuffle
# by a ShuffleHelper instance — consider renaming one of them.
shuffles_path = 'databases/shuffles.json'
db = TinyDB(shuffles_path)
Shuffle = Query()
# Views
class ShuffleView(nextcord.ui.View):
    '''Stores the components for a valid shuffle element.'''
    def __init__(self, client):
        super().__init__(timeout=None)
        self.client = client
        self.active = True

    @nextcord.ui.button(label='Enter', emoji='🎲', style=nextcord.ButtonStyle.blurple, custom_id='pv:enter_shuffle')
    async def enter_shuffle(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        '''Enters the user into the shuffle linked to the embed element.'''
        # Defer the interaction to give time to complete the transaction.
        await interaction.response.defer()

        # Initialize the shuffle helper class (local renamed from `Shuffle`,
        # which shadowed the module-level TinyDB Query of the same name).
        element_id = interaction.message.id
        shuffle = ShuffleHelper(element_id)

        # Setup the embed and view for the payment message, then wait for
        # the user to confirm or cancel payment.
        pay_embed = embed_payment(shuffle.price)
        pay_view = PaymentView(shuffle.price)
        await interaction.user.send(embed=pay_embed, view=pay_view)
        await pay_view.wait()
        success = pay_view.value

        content = ShuffleContent(shuffle.collection, shuffle.available, shuffle.price, shuffle.shuffle_type)
        if success:
            # This block of text updates the databases.
            await interaction.followup.send(content.successful_entry, ephemeral=True)
            user_id = interaction.user.id

            # Take the first 'unassigned' asset and mark it as taken.
            asset, idx = [(a, i) for i, a in enumerate(shuffle.assets) if a['assigned'] == False][0]
            asset['assigned'] = True
            shuffle.assets[idx] = asset

            # Record the new entry for this user.
            shuffle.entries.append(
                {
                    'user_id': user_id,
                    'assigned_asset': asset,
                    'sent': False,
                    'shuffle_id': interaction.message.id
                })
            remaining = shuffle.decrement_shuffle()
            # Bug fix: the original stored list.append()'s return value (None)
            # under 'entries' and — via a chained assignment — the single
            # asset dict under 'assets', corrupting both fields on every
            # entry. Persist the full, mutated lists instead.
            shuffle.db.update(
                {
                    'assets': shuffle.assets,    # full list with the asset marked assigned
                    'entries': shuffle.entries,  # full list incl. the new entry
                    'remaining': remaining       # int representing remaining assets
                }, shuffle.Shuffle.id == shuffle.id)

            # Updates user database for claim portal.
            user_help = User(user_id)
            # Bug fix: same append-returns-None issue — the original upserted
            # None under 'claimable', wiping the user's claimable list.
            user_help.claimable.append(asset)
            user_help.db.upsert({'claimable': user_help.claimable}, user_help.Query.id == user_id)

            # Shutdown shuffle if all assets are exhausted.
            if remaining == 0:
                self.active = False
                await self.shutdown(interaction.message.id)
        else:
            await interaction.followup.send(content.unsuccessful_entry, ephemeral=True)

    @nextcord.ui.button(label='Records', emoji='🎟', style=nextcord.ButtonStyle.grey, custom_id='pv:shuffle_records')
    async def user_records(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        '''Responds with data about how the user has interacted with the shuffle previously.'''
        # Obtain relevant data needed for displaying records.
        user_id = interaction.user.id
        display_name = interaction.user.display_name
        display_avatar = interaction.user.display_avatar
        element_id = interaction.message.id
        shuffle = ShuffleHelper(element_id)
        user_entries = shuffle.get_entries(user_id)
        embed = embed_shuffle_records(display_name, display_avatar, element_id, user_entries)
        await interaction.send(embed=embed, ephemeral=True)

    async def shutdown(self, message_id: int):
        '''A method for shutting down the shuffle: replaces the embed with an
        "invalid shuffle" notice and leaves only the Records button active.'''
        msg = await self.client.fetch_message(message_id)
        embed = embed_invalid_shuffle()
        await msg.edit(embed=embed, view=RecordsOnlyView())
class PaymentView(nextcord.ui.View):
    '''A view containing the components for entry payment.'''
    def __init__(self, price: int):
        super().__init__()
        # `value` records whether payment succeeded; the caller reads it
        # after this view's wait() returns.
        self.value = False
        self.price = int(price)

    @nextcord.ui.button(label='Confirm', style=nextcord.ButtonStyle.green, emoji='🟩')
    async def confirmtxn(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        '''Lets Meji know when to begin searching for the transaction.'''
        # Defer the interaction again to give Meji time to search for the txn.
        await interaction.response.defer()
        # Initialize the user helper class and check their transactions.
        user = User(interaction.user.id)
        success = await user.checktxns(1, self.price, 'Shuffle Entry', checks=1)
        if success:
            embed = embed_successful_pay(interaction.id)
            self.value = success
            # Replace the payment prompt with the result and drop the buttons.
            await interaction.message.edit(embed=embed, view=None)
            self.stop()
        else:
            self.value = success
            embed = embed_unsuccesful_pay(interaction.id)
            await interaction.message.edit(embed=embed, view=None)
            self.stop()
        # Runs after stop(): stop() only releases the view's wait(), it does
        # not interrupt this coroutine.
        await interaction.followup.send('Thanks for choosing the Mesiverse!', ephemeral=True)

    @nextcord.ui.button(label='Cancel', style=nextcord.ButtonStyle.red, emoji='🟥')
    async def canceltxn(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        '''Cancels the checking process for the shuffle entry.'''
        embed = embed_unsuccesful_pay(interaction.id)
        await interaction.edit(embed=embed, view=None)
class RecordsOnlyView(nextcord.ui.View):
    '''A view only with the records button for invalid shuffles.

    Persistent (timeout=None) so the button keeps working across restarts.
    '''
    def __init__(self):
        super().__init__(timeout=None)

    @nextcord.ui.button(label='Records', emoji='🎟', style=nextcord.ButtonStyle.grey, custom_id='pv:shuffle_only_records')
    async def user_records(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        '''Responds with data about how the user has interacted with the shuffle previously.'''
        # Obtain relevant data needed for displaying records.
        user_id = interaction.user.id
        display_name = interaction.user.display_name
        display_avatar = interaction.user.display_avatar
        # The shuffle is keyed by the id of the message this view is attached to.
        element_id = interaction.message.id
        shuffle = ShuffleHelper(element_id)
        user_entries = shuffle.get_entries(user_id)
        embed = embed_shuffle_records(display_name, display_avatar, element_id, user_entries)
        await interaction.send(embed=embed, ephemeral=True)
# Cog
class Shuffle(commands.Cog):
    """Cog that owns the shuffle lifecycle: creation, static-data import,
    invalidation, and persistent-view registration."""

    def __init__(self, client: commands.Bot):
        self.client = client
        # Tracks whether the persistent views have been registered in on_ready.
        self.pv = False

    # Generates shuffle command.
    @commands.command()
    @commands.has_role('MesiTeam')
    async def shuffle(self, ctx: commands.Context, collection: str, available: str, price: str, shuffle_type: str, url: str = None):
        '''Generates shuffle module that can be entered into.

        Arguments arrive as strings from the command parser; `available` and
        `price` are converted to ints.  Passing ``url == 'test'`` posts the
        embed without creating a database entry (testing mode).
        '''
        # Transform some parameters.
        available = int(available)
        price = int(price)
        # Testing mode enabled y/n
        if url == 'test':
            embed = embed_shuffle(collection, available, price, shuffle_type, None)
            view = ShuffleView(self.client)
            await ctx.send('@everyone', embed=embed, view=view)
            await ctx.message.delete()
        else:
            embed = embed_shuffle(collection, available, price, shuffle_type, url)
            view = ShuffleView(self.client)
            shuffle_element = await ctx.send('@everyone', embed=embed, view=view)
            # Initialize the database entry for the shuffle, keyed by the id
            # of the message the view is attached to.
            db.insert({
                'id': shuffle_element.id,
                'collection': collection,
                'available': available,
                'remaining': available,
                'price': price,
                'shuffle_type': shuffle_type,
                'assets': [],
                'entries': []
            })
            # Add assets from the collection to the shuffle element.
            shuffle = ShuffleHelper(shuffle_element.id)
            shuffle.add_assets()
            await ctx.message.delete()

    # Format static data for collections.
    @commands.command()
    @commands.has_role('MesiTeam')
    async def convert_static(self, ctx: commands.Context):
        '''Converts static json files of creator wallet assets and formats them into useable data.'''
        collection = ctx.message.content.split()[2].strip().lower()
        # Initialize (truncate) the local database file.  Context managers are
        # used so the handles are closed — the previous version leaked both
        # the truncating handle and the json.load() handle.
        static_db_path = f'databases/{collection}.json'
        with open(static_db_path, 'w+'):
            pass
        static_db = TinyDB(static_db_path)
        # Load in the static data and map important values.
        with open(f'static/{collection}.json') as source:
            assets = json.load(source)['assets']
        for asset in assets:
            asset_idx = asset['index']
            asset_name = asset['params']['name']
            asset_unit = asset['params']['unit-name']
            # The numeric suffix of the unit name identifies the asset.
            asset_code = ''.join(filter(str.isdigit, asset_unit))
            static_db.insert({
                'asset_id': asset_idx,
                'asset_name': asset_name,
                'asset_unit': asset_unit,
                'asset_code': asset_code,
                'used_in_shuffle': False,
                'owned': True
            })
        await ctx.send(f'Static data for `{collection}` formatted successfully.')
        await ctx.message.delete()

    # Command to prompt Meji to mark a shuffle as invalid.
    @commands.command()
    @commands.has_role('MesiTeam')
    async def shuffle_invalidate(self, ctx: commands.Context):
        '''Prompts Meji to change a shuffle to invalid which denies further entries to it.'''
        element_id = int(ctx.message.content.split()[2].strip().lower())
        element = await ctx.channel.fetch_message(element_id)
        # Renamed from `Shuffle`: the old local name shadowed this cog class.
        helper = ShuffleHelper(element_id)
        embed = embed_invalid_shuffle(helper.collection, helper.available, helper.price, helper.shuffle_type)
        view = RecordsOnlyView()
        await element.edit(embed=embed, view=view)
        await ctx.message.delete()

    @commands.Cog.listener()
    async def on_ready(self):
        # Register the persistent views exactly once; the original never set
        # self.pv, so every reconnect re-added the views.
        if not self.pv:
            self.client.add_view(ShuffleView(self.client))
            self.client.add_view(RecordsOnlyView())
            self.pv = True
def setup(client):
    # Entry point used by the extension loader to register this cog.
    client.add_cog(Shuffle(client))
| dbchristenson/meji | cogs/shuffle.py | shuffle.py | py | 11,298 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tinydb.TinyDB",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tinydb.Query",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nextcord.ui",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "nextcord.ui",
"line_n... |
40967086967 | from django import forms
from django.forms import extras
from .models import *
class registered_user_form(forms.ModelForm):
    """Registration form for a RegisteredUser plus account credentials.

    `username` and `password` are extra fields not on the RegisteredUser
    model; the view must handle them separately.
    """
    username = forms.CharField(max_length=30)
    password = forms.CharField(max_length=30)
    # NOTE(review): SelectDateWidget lives in django.forms.extras only in old
    # Django versions; newer releases expose it as forms.SelectDateWidget.
    date_of_birth = forms.DateField(widget=extras.SelectDateWidget(years=range(1900,2017)))

    class Meta:
        model = RegisteredUser
        fields = ['name',
                  'date_of_birth',
                  'gender',
                  'phone_number',
                  'annual_income']
'''
class user_form(forms.ModelForm):
class Meta:
model = User
fields = ['username','password']
'''
class supplier_form(forms.ModelForm):
class Meta:
model = supplier
fields = [
'supplier_id',
'company_name',
'address',
'phone_number',
'category',
'revenue']
class credit_card_form(forms.ModelForm):
class Meta:
model = credit_cards
fields = ['card_number',
'expiration_month',
'expiration_year',
'security_code']
class address_form(forms.Form):
street = forms.CharField(max_length=50)
city = forms.CharField(max_length=50)
zip_code = forms.IntegerField()
'''
class Meta:
model = addresses
fields = ['street',
'city',
'zip_code'
]
'''
class email_form(forms.ModelForm):
class Meta:
model= emails
fields = [
'address',
'domain'
]
class upload_list_item_form(forms.ModelForm):
class Meta:
model = sale_items
fields = ['item_name',
'description',
'category',
'image_url',
'url',
'place_of_origin',
'amount_in_stock',
'initial_sale_date',
'listed_price']
#remember to set valid auction field
class upload_auction_item_form(forms.ModelForm):
#image = forms.ImageField()
class Meta:
model = sale_items
fields = ['item_name',
'description',
'category',
'image_url',
'url',
'place_of_origin',
'amount_in_stock',
'initial_sale_date',
'reserve_price',
'auction_end_date',
'auction_end_time']
class sell_item_form(forms.ModelForm):
class Meta:
model = sells
fields = ['type']
class purchase_amount_form(forms.Form):
amount = forms.IntegerField()
def credit_card_choices(self, username):
    """Build Django choice pairs for every credit card stored for *username*.

    Returns a list of ``(value, label)`` tuples suitable for a ChoiceField.
    Fix: the original did ``choices += (num, num)``, which extended the list
    with two bare numbers instead of appending one pair.
    """
    credit_set = credit_cards.objects.filter(user__user__username=username)
    return [(c.card_number, c.card_number) for c in credit_set]
class orders_form(forms.Form):
    """Checkout form: the user picks one of their stored credit cards and
    shipping addresses and enters a ship date."""
    ship_date = forms.DateField(help_text='YYYY-MM-DD')

    # NOTE(review): Meta has no effect on a plain forms.Form (only ModelForm
    # reads it); kept for reference.
    class Meta:
        model = orders
        fields = ['credit_card','ship_date','ship_address']

    # overload init so we can prepopulate creditcards/address for users
    def __init__(self,*args,**kwargs):
        # `username` is a required keyword injected by the view; pop it before
        # delegating so the base Form does not see an unknown kwarg.
        username = kwargs.pop('username')
        super(orders_form,self).__init__(*args,**kwargs)
        if username is not None:
            # Populate the choice fields from this user's stored cards/addresses.
            self.fields['credit_card'] = forms.ChoiceField(label="Credit Card Numbers",
                                        choices=[(x.card_number, x.card_number) for x in
                                                 credit_cards.objects.filter(user__user__username=username)])
            self.fields['ship_address'] = forms.ChoiceField(label="Addresses",
                                        choices=[(x.street, x.street) for x in
                                                 new_addresses.objects.filter(user__user__username=username)])
class place_bid_form(forms.Form):
bid_amount = forms.DecimalField(max_digits=8,decimal_places=2)
class reviews_form(forms.Form):
stars = forms.DecimalField(max_digits=5,decimal_places=4)
description = forms.CharField(max_length=500)
class add_item_list_form(forms.Form):
class Meta:
model = user_list
fields = ['list_name']
def __init__(self,*args,**kwargs):
#_choices_list = kwargs.pop('_choices',None)
username = kwargs.pop('username')
super(add_item_list_form,self).__init__(*args,**kwargs)
if username is not None:
#self.fields['username'] = username
self.fields['list_name'] = forms.ChoiceField(label="Lists",
choices=[(x.list_name, x.list_name) for x in
user_list.objects.filter(user__user__username=username)])
class add_list_form(forms.Form):
list_name = forms.CharField(max_length=30)
class Meta:
model = user_list
fields = ['list_name']
class search_form(forms.Form):
options = categories.objects.all()
category = forms.ModelChoiceField(options, initial={'All':'All'}, label='')
search = forms.CharField(max_length=100, label='', required=False, widget=forms.TextInput(attrs={'placeholder': 'Search our inventory...'}))
class relist_list_form(forms.Form):
item_amount = forms.IntegerField()
item_price = forms.DecimalField(max_digits=8,decimal_places=2)
class relist_auction_form(forms.Form):
item_amount = forms.IntegerField()
auction_reserve = forms.DecimalField(max_digits=8,decimal_places=2)
auction_end_date = forms.DateField(help_text='YYYY-MM-DD')
auction_end_time = forms.TimeField(help_text='24:00')
| dsunchu/431W | congo/database/forms.py | forms.py | py | 5,910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.f... |
74509507304 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 10:59:09 2021
@author: gw
"""
from tkinter import *
from tkinter.tix import Tk,Control,ComboBox #升级的组合控件包
from tkinter.messagebox import showinfo,showwarning,showerror #各种消息提示框
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg #画布
from matplotlib.figure import Figure
#界面初始设置
root = Tk() #初始化Tk
root.title("神经网络_GDAL_tkinter")
root.geometry("800x600")
root.resizable(width=True,height=True)
root.tk.eval('package require Tix')
var = StringVar() #文本变量储存器
#------------------------------------------------------------------------------
import numpy as np
from osgeo import gdal
import cv2
from PIL import Image,ImageTk # 导入图像处理函数库
from scipy import io as spio
from scipy import optimize
from matplotlib import pyplot as plt
from scipy import optimize
from matplotlib.font_manager import FontProperties
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14) # 解决windows环境下画图汉字乱码问题
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import time
global input_layer_size,hidden_layer_size,out_put_layer
input_layer_size=6
hidden_layer_size=25
out_put_layer=8
#1.定义sigmoid函数和代价函数
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^-z).

    Accepts a scalar or numpy array and returns the same shape; the previous
    pre-allocation of `h` was dead code and has been removed.
    """
    return 1.0 / (1.0 + np.exp(-z))
def nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,Lambda,X,y):
    """Regularized cross-entropy cost of the two-layer network.

    :param nn_params: flattened concatenation of Theta1 and Theta2
    :param Lambda: L2 regularization strength
    :param X: (m, input_layer_size) inputs; y: one label per row (0-based)
    :return: 1-element array (ravel-ed scalar) for scipy's optimizers
    """
    length = nn_params.shape[0]
    # Unflatten the parameter vector into the two weight matrices.
    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,
                                                                input_layer_size+1)
    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,
                                                                     hidden_layer_size+1)
    m = X.shape[0]
    # One-hot encode the labels; class i corresponds to y == i (0-based).
    # NOTE(review): nnGradient must use the same 0-based convention.
    class_y = np.zeros((m,num_labels))
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1)
    # Regularization term (bias columns excluded).
    Theta1_x = Theta1[:,1:Theta1.shape[1]]
    Theta2_x = Theta2[:,1:Theta2.shape[1]]
    term = np.dot(np.transpose(np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1)))),
                  np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1))))
    # Forward propagation (bias unit prepended at each layer).
    a1 = np.hstack((np.ones((m,1)),X))
    z2 = np.dot(a1,np.transpose(Theta1))
    a2 = sigmoid(z2)
    a2 = np.hstack((np.ones((m,1)),a2))
    z3 = np.dot(a2,np.transpose(Theta2))
    h = sigmoid(z3)
    # Cross-entropy averaged over samples, PLUS the L2 penalty.  The original
    # folded the penalty inside the leading minus sign, which *subtracted*
    # the regularization; cost = CE + Lambda/(2m) * ||theta||^2, matching the
    # +Lambda*Theta/m term in nnGradient.
    J = -(np.dot(np.transpose(class_y.reshape(-1,1)),np.log(h.reshape(-1,1)))+
          np.dot(np.transpose(1-class_y.reshape(-1,1)),np.log(1-h.reshape(-1,1))))/m \
        + Lambda*term/(2*m)
    return np.ravel(J)
#2.定义梯度Sigmoid函数和梯度下降函数
def gradSigmoid(z):
    """Derivative of the sigmoid, s(z) * (1 - s(z)), element-wise.

    Computes the sigmoid once (the original evaluated it twice) and drops the
    dead pre-allocation of `g`.
    """
    s = sigmoid(z)
    return s * (1 - s)
def nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,Lambda,X,y):
    """Backpropagation gradient of nnCostFunction w.r.t. the flattened weights.

    Returns a flat array matching nn_params, for use as scipy's fprime.
    """
    length = nn_params.shape[0]
    # Unflatten (copies so the bias-zeroing below cannot alias nn_params).
    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,
                                                                input_layer_size+1).copy()
    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,
                                                                     hidden_layer_size+1).copy()
    m = X.shape[0]
    # One-hot encode the labels with the SAME 0-based convention used by
    # nnCostFunction and predict().  The original used y == (i+1), which
    # disagreed with the cost function and broke the optimization.
    class_y = np.zeros((m,num_labels))
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1)
    Theta1_x = Theta1[:,1:Theta1.shape[1]]
    Theta2_x = Theta2[:,1:Theta2.shape[1]]
    # Forward propagation (bias unit prepended at each layer).
    a1 = np.hstack((np.ones((m,1)),X))
    z2 = np.dot(a1,np.transpose(Theta1))
    a2 = sigmoid(z2)
    a2 = np.hstack((np.ones((m,1)),a2))
    z3 = np.dot(a2,np.transpose(Theta2))
    h = sigmoid(z3)
    Theta1_grad = np.zeros((Theta1.shape))
    Theta2_grad = np.zeros((Theta2.shape))
    # Backward pass: accumulate per-sample outer products.
    detal3 = np.zeros((m,num_labels))
    detal2 = np.zeros((m,hidden_layer_size))
    for i in range(m):
        detal3[i,:] = h[i,:] - class_y[i,:]   # cross-entropy output error
        Theta2_grad = Theta2_grad+np.dot(np.transpose(detal3[i,:].reshape(1,-1)),
                                         a2[i,:].reshape(1,-1))
        detal2[i,:] = np.dot(detal3[i,:].reshape(1,-1),Theta2_x)*gradSigmoid(z2[i,:])
        Theta1_grad = Theta1_grad+np.dot(np.transpose(detal2[i,:].reshape(1,-1)),
                                         a1[i,:].reshape(1,-1))
    # Bias columns are not regularized.
    Theta1[:,0]=0
    Theta2[:,0]=0
    grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*
            np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m
    return np.ravel(grad)
#3.定义debug初始化权重函数、随机初始化权重函数和验证梯度计算是否正确
def debugInitializ_Weights(fan_in, fan_out):
    """Deterministic weight matrix for gradient checking.

    Fills a (fan_out, fan_in + 1) matrix with sin(1), sin(2), ... scaled by
    0.1, so repeated runs always produce identical values.
    """
    count = fan_out * (fan_in + 1)
    values = np.sin(np.arange(1, count + 1)) / 10
    return values.reshape(fan_out, fan_in + 1)
# 随机初始化权重theta
def randInitializeWeights(L_in, L_out):
    """Random (L_out, 1 + L_in) weight matrix, uniform in [-eps, eps).

    eps = sqrt(6 / (L_in + L_out)), the usual symmetry-breaking interval.
    """
    epsilon_init = (6.0 / (L_out + L_in)) ** 0.5
    uniform = np.random.rand(L_out, 1 + L_in)
    return uniform * 2 * epsilon_init - epsilon_init
def checkGradient(Lambda=0):
    """Validate nnGradient against a finite-difference gradient on a tiny
    fixed network; returns the two gradients as columns [numerical, backprop]
    for visual comparison."""
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5
    # Deterministic weights/inputs so the check is reproducible.
    initial_theta1 = debugInitializ_Weights(input_layer_size, hidden_layer_size)
    initial_theta2 = debugInitializ_Weights(hidden_layer_size, num_labels)
    X = debugInitializ_Weights(input_layer_size-1, m)
    # Labels cycle through 0..num_labels-1.
    y = np.transpose(np.mod(np.arange(1,m+1),num_labels)).reshape(-1,1)
    nn_params = np.vstack((initial_theta1.reshape(-1,1),initial_theta2.reshape(-1,1)))
    # Backprop gradient.
    grad = nnGradient(nn_params, input_layer_size, hidden_layer_size, num_labels, Lambda, X, y)
    # Central-difference numerical gradient, one parameter at a time.
    num_grad = np.zeros((nn_params.shape[0]))
    step = np.zeros((nn_params.shape[0]))
    e=1e-4
    for i in range(nn_params.shape[0]):
        step[i] = e
        loss1 = nnCostFunction(nn_params-step.reshape(-1,1), input_layer_size, hidden_layer_size,
                               num_labels, Lambda, X, y)
        loss2 = nnCostFunction(nn_params+step.reshape(-1,1), input_layer_size, hidden_layer_size,
                               num_labels, Lambda, X, y)
        num_grad[i] = (loss2-loss1)/(2*e)
        step[i] = 0
    # Column 0: numerical; column 1: backprop.
    res = np.hstack((num_grad.reshape(-1,1),grad.reshape(-1,1)))
    return res
#5.定义预测函数
def predict(Theta1,Theta2,X):
    """Forward-propagate X through the two-layer network and return the
    predicted class index (column of the max output) for each row.

    Returns an (m, 1) integer array, the shape callers compare against y.
    Replaces the original O(m^2) np.where/vstack loop with a single argmax,
    which is also robust to ties (the loop broke when a row had two maxima).
    """
    m = X.shape[0]
    # Forward propagation with bias units prepended at each layer.
    X = np.hstack((np.ones((m,1)),X))
    h1 = sigmoid(np.dot(X,np.transpose(Theta1)))
    h1 = np.hstack((np.ones((m,1)),h1))
    h2 = sigmoid(np.dot(h1,np.transpose(Theta2)))
    # Index of the most probable class per row.
    return np.argmax(h2, axis=1).reshape(-1, 1)
#打开影像和训练数据
# Open the image and the training data.
def openData():
    """Load the GeoTIFF scene, show a down-scaled RGB preview in the GUI, and
    load the labelled training samples.

    Side effects: sets globals image_array (bands x rows x cols), X_train,
    y_train, img_png/label_Img (Tk widgets) and updates the status label.
    """
    global image_array
    # NOTE(review): input paths are hard-coded; parameterize if reused.
    image = gdal.Open(r"./data/hiwater_xiayou_2014.tif")
    nCols = image.RasterXSize
    nRows = image.RasterYSize
    image_array = image.ReadAsArray(0,0,nCols,nRows)
    # Bands 3/2/1 as R/G/B for a colour composite.
    r = image_array[3,:,:]
    g = image_array[2,:,:]
    b = image_array[1,:,:]
    image_RGB = cv2.merge([r,g,b])
    # Linear stretch to 8-bit for display.
    Imax = np.max(image_array)
    Imin = np.min(image_array)
    image_RGB = ((image_RGB - Imin) * (1/(Imax-Imin)) * 255).astype('uint8')
    # Down-scale the preview to 20% so it fits the window.
    scale_percent = 20 # percent of original size
    width = int(image_RGB.shape[1] * scale_percent / 100)
    height = int(image_RGB.shape[0] * scale_percent / 100)
    dim = (width, height)
    resized = cv2.resize(image_RGB, dim, interpolation = cv2.INTER_AREA)
    var.set('已显示')   # status: "displayed"
    global img_png,label_Img
    # Keep module-level references so Tk does not garbage-collect the photo.
    resized = Image.fromarray(resized)
    img_png = ImageTk.PhotoImage(resized)
    label_Img = Label(root, image=img_png)
    label_Img.place(x=450,y=100)
    global X_train,y_train
    # Training samples: feature columns followed by the class label.
    data = np.loadtxt("./data/hiwater_xiayou_class_practice2.txt",delimiter=",",dtype=np.float64)
    X_train=data[:,0:-1]
    y_train=data[:,-1]
def neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer,X_train,y_train):
    """Train the two-layer network with conjugate gradient and report the
    training accuracy in the GUI status label.

    Side effects: sets globals Theta1/Theta2 used later by predict_image.
    """
    X = X_train
    y = y_train
    m,n = X.shape
    Lambda = 1
    # Random symmetry-breaking initialization, then flatten for the optimizer.
    initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size);
    initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)
    initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1)))
    start = time.time()
    print(X.shape)
    # Conjugate-gradient minimization of the cost using the BP gradient.
    result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient,
                              args=(input_layer_size,hidden_layer_size,out_put_layer,Lambda,X,y),
                              maxiter=100)
    print (u'执行时间:',time.time()-start)   # elapsed training time
    print (result)
    global Theta1,Theta2
    # Unflatten the optimized parameter vector back into the weight matrices.
    length = result.shape[0]
    Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(
        hidden_layer_size,input_layer_size+1)
    Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(
        out_put_layer,hidden_layer_size+1)
    # Training-set accuracy.
    p = predict(Theta1,Theta2,X)
    pred_r = (u"预测准确度为:%f%%"%np.mean(np.float64(p == y.reshape(-1,1))*100))   # "prediction accuracy"
    print(pred_r)
    var.set(pred_r)
def predict_image(X,Theta1,Theta2):
    """Classify every pixel of the scene with the trained network and draw the
    class map on a matplotlib canvas embedded in the window.

    :param X: (bands, rows, cols) image array
    """
    var.set('已预测')   # status: "predicted"
    xShape1=X.shape[1]
    xShape2=X.shape[2]
    result_image = np.zeros((X.shape[1],X.shape[2]))
    # Flatten to (pixels, bands) so each pixel is one sample.
    X = X.reshape(X.shape[0],X.shape[1]*X.shape[2])
    X = np.transpose(X)
    p = predict(Theta1,Theta2,X)
    result_image = p.reshape(xShape1,xShape2)
    # Stretch class indices to 8-bit grey values for display.
    Imax = np.nanmax(result_image)
    Imin = np.nanmin(result_image)
    result_image = ((result_image - Imin) * (1/(Imax-Imin)) * 255).astype('uint8')
    # Embed a matplotlib figure in the Tk window.
    fig = plt.figure(figsize=(4.5,4),dpi=100)
    f_plot =fig.add_subplot(111)
    canvas_spice = FigureCanvasTkAgg(fig,root)
    canvas_spice.get_tk_widget().place(x=300,y=100)
    # Down-scale to 20% to match the preview size.
    scale_percent = 20 # percent of original size
    width = int(result_image.shape[1] * scale_percent / 100)
    height = int(result_image.shape[0] * scale_percent / 100)
    dim = (width, height)
    resized = cv2.resize(result_image, dim, interpolation = cv2.INTER_AREA)
    plt.imshow(resized)
    plt.xticks([])
    plt.yticks([])
    canvas_spice.draw()
#------------------------------------------------------------------------------
#组件
#1.标签
label = Label(root, text="实现遥感分类-BP神经网络",
bg="pink",bd=10,font=("Airal",12),width=28,height=1)
label.pack(side=TOP)
#2.按钮
button1 = Button(root, text = "QUIT", command = root.destroy,
activeforeground = "black",
activebackground = "blue", bg = "red", fg = "white")
button1.pack(side=BOTTOM)
button2 = Button(root, text="显示原始影像", command = openData,
activeforeground = "black",
activebackground = "blue", bg = "Turquoise", fg = "white")
button2.place(x=100,y=100)
button3 = Button(root, text="神经网络训练", command = lambda:neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer,X_train,y_train),
activeforeground = "black",
activebackground = "blue", bg = "Turquoise", fg = "white")
button3.place(x=100,y=150)
button4 = Button(root, text="预测并显示结果", command = lambda:predict_image(image_array,Theta1,Theta2),
activeforeground = "black",
activebackground = "blue", bg = "Turquoise", fg = "white")
button4.place(x=100,y=200)
#3.菜单
def click():
    # Placeholder menu callback; prints "clicked once" (Chinese) to stdout.
    print("点击了一次")
menubar = Menu(root)
fileMenu = Menu(menubar,tearoff = 0)
fileMenu.add_command(label="新建...",command=click)
fileMenu.add_command(label="打开...",command=click)
fileMenu.add_command(label="保存...",command=click)
fileMenu.add_command(label="退出...",command=root.destroy)
menubar.add_cascade(label="文件",menu = fileMenu)
root.config(menu=menubar)
#4.创建文本窗口,显示当前操作状态
Label_show = Label(root,
textvariable = var,
bg="blue",font=("Airal",12),width=28,height=2)
Label_show.place(x=100,y=300)
#5.ComboBox
cb = ComboBox(root,label="可选地表参数(供参考):",editable = True)
for parameter in ("NDVI","FVC","NPP","LAI"):
cb.insert(END,parameter)
cb.pack()
#------------------------------------------------------------------------------
root.mainloop() | 1135063213/Machine_Learning_GDAL_Interface | 神经网络_GDAL_tkinter.py | 神经网络_GDAL_tkinter.py | py | 13,873 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tkinter.tix.Tk",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "num... |
9738142506 | from django.urls import path
from . import views
app_name = 'todolist'

urlpatterns = [
    # Route names below are Chinese: 主页=home, 关于=about, 编辑=edit,
    # 删除=delete, 划掉=cross (strike through a finished item).
    path('home/', views.home, name="主页"),
    path('about/', views.about, name="关于"),
    path('edit/<每一件事_id>', views.edit, name="编辑"),
    path('delete/<每一件事_id>', views.delete, name="删除"),
    path('cross/<每一件事_id>', views.cross, name="划掉"),
]
| AIM-1993/To_Do_List | Django_Projects/to_do_list/todolist/urls.py | urls.py | py | 379 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
31062259395 |
from ..utils import Object
class ToggleMessageSenderIsBlocked(Object):
    """
    Changes the block state of a message sender. Currently, only users and supergroup chats can be blocked

    Attributes:
        ID (:obj:`str`): ``ToggleMessageSenderIsBlocked``

    Args:
        sender_id (:class:`telegram.api.types.MessageSender`):
            Identifier of a message sender to block/unblock
        is_blocked (:obj:`bool`):
            New value of is_blocked

    Returns:
        Ok

    Raises:
        :class:`telegram.Error`
    """
    ID = "toggleMessageSenderIsBlocked"

    def __init__(self, sender_id, is_blocked, extra=None, **kwargs):
        self.extra = extra
        # MessageSender identifying whose block state to change.
        self.sender_id = sender_id
        # Desired blocked state (bool).
        self.is_blocked = is_blocked

    @staticmethod
    def read(q: dict, *args) -> "ToggleMessageSenderIsBlocked":
        # Deserialize the nested sender object, then rebuild the request.
        return ToggleMessageSenderIsBlocked(
            Object.read(q.get('sender_id')),
            q.get('is_blocked'),
        )
| iTeam-co/pytglib | pytglib/api/functions/toggle_message_sender_is_blocked.py | toggle_message_sender_is_blocked.py | py | 1,026 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 34,
"usage_type": "name"
}
] |
23165828885 | import csv
from flask import render_template,request,redirect
from app import app
from app.forms import SubmitForm
@app.route('/')
@app.route('/index',methods=['GET','POST'])
def index():
    """Render the submission page; on POST, persist the query to data.csv.

    Fixes vs. original: the CSV file is closed via a context manager, the
    query is written as one field (writerow on a bare string emits one column
    per character), and the redirect is actually returned
    (post/redirect/get) instead of being discarded.
    """
    form = SubmitForm()
    if request.method == "POST":
        query = request.form['query']
        print(query)
        # newline='' per the csv module docs to avoid blank rows on Windows.
        with open('data.csv', 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow([query])
        return redirect('/')
    return render_template('index.html', form=form)
| jain-abhi007/Fake-news-detection | app/routes.py | routes.py | py | 478 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "app.forms.SubmitForm",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.re... |
42388719525 | from peewee import *
from datetime import datetime
from flaskblog import db, login_manager
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
@login_manager.user_loader
def load_user(user_id):
    # flask-login callback: map the session-stored id back to a User row.
    return User.get_by_id(int(user_id))
class BaseModel(Model):
    # Common base: every table gets an auto primary key and shares the one
    # application database connection.
    id = PrimaryKeyField(unique=True)

    class Meta:
        database = db
        order_by = 'id'
class User(BaseModel, UserMixin):
    """Account record; integrates with flask-login via UserMixin."""
    username = CharField(max_length=20, unique=True, null=False)
    email = CharField(max_length=20, unique=True, null=False)
    image_file = CharField(max_length=20, null=False, default='default.jpg')
    password = CharField(max_length=60, null=False)

    def get_reset_token(self, expire_sec=1800):
        """Return a signed, time-limited (default 30 min) password-reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expire_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Decode a reset token and return the matching User, or None.

        itsdangerous raises BadSignature/SignatureExpired on tampered or
        expired tokens; the original let that propagate to the caller —
        treat it as an invalid token instead.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token).get('user_id')
        except Exception:
            return None
        if user_id is None:
            return None
        return User.get_by_id(user_id)

    def __repr__(self):
        return f'User("{self.username}","{self.email}","{self.image_file}")'

    class Meta:
        db_table = 'Users'
class Post(BaseModel):
    """Blog post authored by a User."""
    title = CharField(max_length=20, unique=True, null=False)
    # Pass the callable, not its result: `datetime.utcnow()` would freeze the
    # default at import time so every post shared one timestamp.
    date_posted = DateTimeField(null=False, default=datetime.utcnow)
    content = TextField(null=False)
    user_id = ForeignKeyField(User, backref='posts', null=False)

    def __repr__(self):
        return f'Post("{self.title}","{self.date_posted}")'

    class Meta:
        db_table = 'Posts'
def create_database():
    # Create both tables inside a single connection context.
    with db:
        db.create_tables([User, Post])
| Braindead3/vladislav_blog | flaskblog/models.py | models.py | py | 1,778 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flaskblog.login_manager.user_loader",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flaskblog.login_manager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flaskblog.db",
"line_number": 18,
"usage_type": "name"
},
{
"api_n... |
73970127144 | from twilio import twiml
from twilio.rest import TwilioRestClient
f = open('.key.txt', "r")
key = f.readlines()
f.close()
MSG_HELP = "Valid commands:\n/find - find random partner\n/leave - leave current chat\n/share - share your phone number with partner\n/fun - more fun commands!"
MSG_FUN = "Fun fun! :D\n/count - shows how popular your channel is"
ACCOUNT_SID = key[0][:-1]
AUTH_TOKEN = key[1][:-1]
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
API_LOGIN_SUCCESS = "valid"
API_LOGIN_FAIL = "error"
API_CONNECT_SUCCESS = "connect"
API_QUEUE_SUCCESS = "queue"
API_NO_PENDING_MSG = "nopending"
MSG_LOGIN_SUCCESS = "Welcome to the chatroom!"
MSG_LOGIN_FAIL = "I didn't recognize that room key, was it typed correctly?"
MSG_CONNECT_SUCCESS = "You've been connected!"
MSG_QUEUE_SUCCESS = "Locating a new partner...\ntype /leave to cancel search"
api_message_queue = {}
def get_queued(number):
    """Pop and return the oldest queued API message for *number*.

    Returns the no-pending marker response when the number has no queue or
    its queue is empty.
    """
    queue = api_message_queue.get(number)
    if queue:
        return respond(None, queue.pop(0))
    return respond(None, API_NO_PENDING_MSG)
def send(user, message):
    """Deliver *message* to *user*.

    SMS users get a Twilio message immediately; API users have the message
    appended to their per-number polling queue (created on first use).
    Fix: the original's else branch dereferenced ``user.number`` even when
    *user* was None (falsy), raising AttributeError.
    """
    if user is None:
        return
    if user.sms:
        client.messages.create(
            to=user.number,
            from_="+17342742718",
            body=message
        )
        return
    # if there is a queue for this number
    if user.number in api_message_queue:
        api_message_queue[user.number].append(message)
        print("Current message queue for ", user.number, " is ", api_message_queue[user.number], len(api_message_queue[user.number]))
    else:
        api_message_queue[user.number] = [message]
        print("Creating queue for ", user.number)
# Respond a message to a user
def respond(user, msg):
    """Build a TwiML response string, attaching *msg* as the body when given.

    *user* is accepted for interface symmetry with the other responders but
    is not used here.
    """
    resp = twiml.Response()
    if msg is not None:   # identity check, not `!= None`
        resp.message(msg)
    return str(resp)
# Respond the help message to an invalid user input
def respond_help(user):
    # Static command list; same text for SMS and API clients.
    return respond(user, MSG_HELP)
# Respond the fun message to an invalid user input
def respond_fun(user):
    # Static "/fun" command list; same text for SMS and API clients.
    return respond(user, MSG_FUN)
# Responds to the user when a login was successful
def respond_roomkey_success(user):
    # SMS users get the human-readable message; API clients get the
    # machine-readable status token.
    if user.sms:
        return respond(user, MSG_LOGIN_SUCCESS)
    else:
        return respond(user, API_LOGIN_SUCCESS)
# Responds to the user when a login failed
def respond_roomkey_fail(user):
    # Unlike the success path, also tolerates user being None (login may fail
    # before a user object exists); None/API clients get the error token.
    if user and user.sms:
        return respond(user, MSG_LOGIN_FAIL)
    else:
        return respond(user, API_LOGIN_FAIL)
# Responds to the user when a connection was succesfully made
def respond_connect(user):
    if user.sms:
        return respond(user, MSG_CONNECT_SUCCESS)
    else:
        # API clients: queue the human-readable message for polling, then
        # answer this request with the machine-readable token.
        send(user, MSG_CONNECT_SUCCESS)
        return respond(user, API_CONNECT_SUCCESS)
# Responds to the user when they are placed in a queue
def respond_queue(user):
    if user.sms:
        return respond(user, MSG_QUEUE_SUCCESS)
    else:
        # API clients: queue the human-readable message for polling, then
        # answer this request with the machine-readable token.
        send(user, MSG_QUEUE_SUCCESS)
        return respond(user, API_QUEUE_SUCCESS)
| imondrag/project-kitten | server_files/twiliocomms.py | twiliocomms.py | py | 2,867 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twilio.rest.TwilioRestClient",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "twilio.twiml.Response",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "twilio.twiml",
"line_number": 56,
"usage_type": "name"
}
] |
33203889383 | import folium
from geopy.geocoders import Nominatim

# Reference data: famous film studios and the cities that host them.
film_studio_cities = ['Los Angeles', 'Rome', 'Wellington', 'Saint-Denis', 'Mumbai', 'Ouarzazate', 'Berlin']
film_studio = ['Hollywood', 'Cinecitta', 'Weta', 'La Cité du Cinéma', 'Bollywood', ' Atlas', 'Filmpark Babelsberg']

locations = list()
titles = list()

# Renamed from `map`, which shadowed the builtin.
films_map = folium.Map()
year = input('Enter the year: ')
films = folium.FeatureGroup(name='Location')
studio = folium.FeatureGroup(name='Famous studio')

# Scan the IMDb locations dump for up to 25 distinct cities of films released
# in the requested year.
with open('C:\\Users\\Home\\Downloads\\locations.list') as text:
    for line in text:
        if (year in line) and ('(' in line):
            parts = line.strip().split()
            if (len(parts) == 5) and (year in parts[1]):
                titles.append(parts[0])
                del parts[:2]
                chars = list(parts[0])
                # A trailing comma marks "<city>, <region>" entries; keep the
                # city part only.
                if chars.pop() == ',':
                    city = ''.join(chars)
                    if city not in locations:
                        locations.append(city)
                if len(locations) == 25:
                    break

geolocator = Nominatim(user_agent="Map")

# Drop a marker for each film location that geocodes successfully.
# NOTE(review): titles and locations grow at different rates, so
# popup=titles[i] may pair a city with the wrong film — consider collecting
# (title, city) pairs instead.
for i in range(len(locations)):
    location = geolocator.geocode(locations[i])
    if location is None:
        continue
    films.add_child(folium.Marker(location=[location.latitude, location.longitude],
                                  popup=titles[i],
                                  icon=folium.Icon()))

# Circle markers for the famous studios (the original's
# `location = location = ...` double assignment removed).
for j in range(len(film_studio)):
    location = geolocator.geocode(film_studio_cities[j])
    if location is None:
        continue
    studio.add_child(folium.CircleMarker(location=[location.latitude, location.longitude],
                                         radius=10,
                                         popup=film_studio[j],
                                         fill_color='green',
                                         color='red',
                                         fill_opacity=0.5))

films_map.add_child(films)
films_map.add_child(studio)
films_map.save('films.html')
| lurak/web-map | map.py | map.py | py | 2,246 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "folium.Map",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "folium.FeatureGroup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "folium.FeatureGroup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "geopy.geocoders.No... |
73973980585 | import numpy as np
import transforms3d as t3d
from scipy.linalg import expm, sinm, cosm
class SE3(object):
    """
    3d rigid transform, stored as a rotation matrix R and translation t.
    """
    def __init__(self, R, t):
        self.R = R
        self.t = t

    def matrix(self):
        """Return the 4x4 homogeneous transform matrix."""
        hom = np.eye(4)
        hom[:3, :3] = self.R
        hom[:3, 3] = self.t
        return hom

    def __mul__(self, T):
        """Compose transforms: (self * T) applies T first, then self."""
        return SE3(self.R @ T.R, self.R @ T.t + self.t)

    def inverse(self):
        """Return the inverse transform (R^T, -R^T t)."""
        return SE3(self.R.T, -self.R.T @ self.t)
def skew(a):
"""
converts vector to skew symmetric matrix in batch
:param a: size n x 3, input vector
:return: S: size n x 3 x 3, skew symmetric matrix
"""
S = np.empty(a.shape[:-1] + (3, 3))
S[..., 0, 0].fill(0)
S[..., 0, 1] = -a[..., 2]
S[..., 0, 2] = a[..., 1]
S[..., 1, 0] = a[..., 2]
S[..., 1, 1].fill(0)
S[..., 1, 2] = -a[..., 0]
S[..., 2, 0] = -a[..., 1]
S[..., 2, 1] = a[..., 0]
S[..., 2, 2].fill(0)
return S
def Hl_operator(omega):
"""
implements Hl operator in eq 20
"""
omega_norm = np.linalg.norm(omega)
term1 = (1/2)*np.eye(3)
term2 = np.nan_to_num((omega_norm - np.sin(omega_norm)) / (omega_norm**3)) * skew(omega)
term3 = np.nan_to_num((2*(np.cos(omega_norm) - 1) + omega_norm**2) / (2*(omega_norm**4))) * (skew(omega) @ skew(omega))
Hl = term1 + term2 + term3
return Hl
def Jl_operator(omega):
"""
implements Jl operator in eq 20
"""
omega_norm = np.linalg.norm(omega)
term1 = np.eye(3)
term2 = np.nan_to_num((1 - np.cos(omega_norm)) / (omega_norm**2)) * skew(omega)
term3 = np.nan_to_num((omega_norm - np.sin(omega_norm)) / (omega_norm**3)) * (skew(omega) @ skew(omega))
Jl = term1 + term2 + term3
return Jl
def imu_kinametics_local(p, R, v, omega, acc, dt):
"""
update SE3 pose based on imu measurements based on closed form integration
:param p: size 1x3, position before update
:param R: size 3x3, rotation before update
:param v: size 1x3, velocity before update
:param omega: size 1x3, unbiased angular velocity
:param acc: size 1x3, unbiased linear acceleration
:param dt: scalar of time difference
:return: p_new: size 1x3, updated position
R_new: size 3x3, update rotation
"""
# update rotation
delta_R = axangle2rot(dt*omega)
R_new = R @ delta_R
# Gravity vector in the world frame
g = np.array([0., 0., -9.81])
# update position
Hl = Hl_operator(dt*omega)
p_new = p + dt*v + g*((dt**2)/2) + R @ Hl @ acc * (dt**2)
# update velocity
Jl = Jl_operator(dt*omega)
v_new = v + g*dt + R @ Jl @ acc * dt
return R_new, p_new, v_new
def imu_kinametics(p, R, v, omega, acc, dt):
"""
function to call
closed form propagation using right perturbation
original msckf propagation
"""
# closed form propagation using local frame kinematics
R_new, p_new, v_new = imu_kinametics_local(p, R, v, omega, acc, dt)
return R_new, p_new, v_new
def pose_kinametics(T, x):
"""
update SE3 pose based on se3 element x
:param T: size nx4x4, SE3 pose
:param x: size nx1x6, se3 element
:return: size nx4x4, update pose
"""
return T @ axangle2pose(x)
def axangle2pose(x):
"""
converts se3 element to SE3 in batch
:param x: size n x 6, n se3 elements
:return: size n x 4 x 4, n elements of SE(3)
"""
return twist2pose(axangle2twist(x))
def twist2pose(T):
'''
converts an n x 4 x 4 twist (se3) matrix to an n x 4 x 4 pose (SE3) matrix
'''
rotang = np.sqrt(np.sum(T[...,[2,0,1],[1,2,0]]**2,axis=-1)[...,None,None]) # n x 1
Tn = np.nan_to_num(T / rotang)
Tn2 = Tn@Tn
Tn3 = Tn@Tn2
eye = np.zeros_like(T)
eye[...,[0,1,2,3],[0,1,2,3]] = 1.0
return eye + T + (1.0 - np.cos(rotang))*Tn2 + (rotang - np.sin(rotang))*Tn3
def axangle2twist(x):
"""
converts 6-vector to 4x4 hat form in se(3) in batch
:param x: size n x 6, n se3 elements
:return: size n x 4 x 4, n elements of se(3)
"""
T = np.zeros(x.shape[:-1] + (4, 4))
T[..., 0, 1] = -x[..., 5]
T[..., 0, 2] = x[..., 4]
T[..., 0, 3] = x[..., 0]
T[..., 1, 0] = x[..., 5]
T[..., 1, 2] = -x[..., 3]
T[..., 1, 3] = x[..., 1]
T[..., 2, 0] = -x[..., 4]
T[..., 2, 1] = x[..., 3]
T[..., 2, 3] = x[..., 2]
return T
def inversePose(T):
"""
performs batch inverse of transform matrix
:param T: size n x 4 x 4, n elements of SE(3)
:return: size n x 4 x 4, inverse of T
"""
iT = np.empty_like(T)
iT[..., 0, 0], iT[..., 0, 1], iT[..., 0, 2] = T[..., 0, 0], T[..., 1, 0], T[..., 2, 0]
iT[..., 1, 0], iT[..., 1, 1], iT[..., 1, 2] = T[..., 0, 1], T[..., 1, 1], T[..., 2, 1]
iT[..., 2, 0], iT[..., 2, 1], iT[..., 2, 2] = T[..., 0, 2], T[..., 1, 2], T[..., 2, 2]
iT[..., :3, 3] = -np.squeeze(iT[..., :3, :3] @ T[..., :3, 3, None])
iT[..., 3, :] = T[..., 3, :]
return iT
def axangle2rot(a):
'''
converts axis angle to SO3 in batch
@Input:
a = n x 3 = n axis-angle elements
@Output:
R = n x 3 x 3 = n elements of SO(3)
'''
na = np.linalg.norm(a, axis=-1) # n x 1
# cannot add epsilon to denominator
ana = np.nan_to_num(a / na[..., None]) # n x 3
ca, sa = np.cos(na), np.sin(na) # n x 1
mc_ana = ana * (1 - ca[..., None]) # n x 3
sa_ana = ana * sa[..., None] # n x 3
R = np.empty(a.shape + (3,))
R[..., 0, 0] = mc_ana[..., 0] * ana[..., 0] + ca
R[..., 0, 1] = mc_ana[..., 0] * ana[..., 1] - sa_ana[..., 2]
R[..., 0, 2] = mc_ana[..., 0] * ana[..., 2] + sa_ana[..., 1]
R[..., 1, 0] = mc_ana[..., 0] * ana[..., 1] + sa_ana[..., 2]
R[..., 1, 1] = mc_ana[..., 1] * ana[..., 1] + ca
R[..., 1, 2] = mc_ana[..., 2] * ana[..., 1] - sa_ana[..., 0]
R[..., 2, 0] = mc_ana[..., 0] * ana[..., 2] - sa_ana[..., 1]
R[..., 2, 1] = mc_ana[..., 1] * ana[..., 2] + sa_ana[..., 0]
R[..., 2, 2] = mc_ana[..., 2] * ana[..., 2] + ca
return R
def odotOperator(ph):
'''
@Input:
ph = n x 4 = points in homogeneous coordinates
@Output:
odot(ph) = n x 4 x 6
'''
zz = np.zeros(ph.shape + (6,))
zz[...,:3,3:6] = -skew(ph[...,:3])
zz[...,0,0],zz[...,1,1],zz[...,2,2] = ph[...,3],ph[...,3],ph[...,3]
return zz
def circledCirc(ph):
'''
@Input:
ph = n x 4 = points in homogeneous coordinates
@Output:
circledCirc(ph) = n x 6 x 4
'''
zz = np.zeros(ph.shape[:-1] + (6,4))
zz[...,3:,:3] = -skew(ph[...,:3])
zz[...,:3,3] = ph[...,:3]
return zz
def poseSE32SE2(T, force_z_to_zero_flag = False):
yaw = t3d.euler.mat2euler(T[:3, :3], axes='rzyx')[0]
if not force_z_to_zero_flag:
# note that we keep T[2, 3] instead of force z = 0
T = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, T[0, 3]],
[np.sin(yaw), np.cos(yaw), 0.0, T[1, 3]],
[0.0, 0.0, 1.0, T[2, 3]],
[0.0, 0.0, 0.0, 1.0]])
else:
T = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, T[0, 3]],
[np.sin(yaw), np.cos(yaw), 0.0, T[1, 3]],
[0.0, 0.0, 1.0, 0],
[0.0, 0.0, 0.0, 1.0]])
return T
def to_quaternion(R):
"""
Convert a rotation matrix to a quaternion.
Pay attention to the convention used. The function follows the
conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
A Tutorial for Quaternion Algebra", Equation (78).
The input quaternion should be in the form [q1, q2, q3, q4(scalar)]
"""
if R[2, 2] < 0:
if R[0, 0] > R[1, 1]:
t = 1 + R[0,0] - R[1,1] - R[2,2]
q = [t, R[0, 1]+R[1, 0], R[2, 0]+R[0, 2], R[1, 2]-R[2, 1]]
else:
t = 1 - R[0,0] + R[1,1] - R[2,2]
q = [R[0, 1]+R[1, 0], t, R[2, 1]+R[1, 2], R[2, 0]-R[0, 2]]
else:
if R[0, 0] < -R[1, 1]:
t = 1 - R[0,0] - R[1,1] + R[2,2]
q = [R[0, 2]+R[2, 0], R[2, 1]+R[1, 2], t, R[0, 1]-R[1, 0]]
else:
t = 1 + R[0,0] + R[1,1] + R[2,2]
q = [R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0], t]
q = np.array(q) # * 0.5 / np.sqrt(t)
return q / np.linalg.norm(q)
def to_rotation(q):
"""
Convert a quaternion to the corresponding rotation matrix.
Pay attention to the convention used. The function follows the
conversion in "Indirect Kalman Filter for 3D Attitude Estimation:
A Tutorial for Quaternion Algebra", Equation (78).
The input quaternion should be in the form [q1, q2, q3, q4(scalar)]
"""
q = np.squeeze(q)
q = q / np.linalg.norm(q)
vec = q[:3]
w = q[3]
R = (2*w*w-1)*np.identity(3) - 2*w*skew(vec) + 2*vec[:, None]*vec
return R
def perturb_T(T):
"""
this function perturbs T if it's too close to the origin
T size is 4 x 4
"""
if np.allclose(T[:3, 3], 0):
warnings.warn("Numerical derivatives around origin are bad, because"
+ " the distance function is non-differentiable. Choosing a random"
+ " position. ")
T[:3, 3] = np.random.rand(3) * 2 - 1
return T | shanmo/OrcVIO | python_scripts/object_map_eval/se3.py | se3.py | py | 9,254 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "numpy.eye",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_numb... |
41245528707 | import numpy as np
import torch.nn
from transformers import LukeTokenizer, LukeForEntityClassification, LukeConfig, TrainingArguments, Trainer, AutoTokenizer
from transformers import DataCollatorForTokenClassification, DataCollatorWithPadding
from tqdm import trange
from datasets import ClassLabel, load_dataset
import json
from datasets import Dataset, load_dataset
import logging
from tqdm import trange
from pynvml import *
logging.basicConfig(level=logging.INFO)
torch.cuda.empty_cache()
def load_examples(dataset_file):
with open(dataset_file, "r") as f:
data = json.load(f)
examples = []
for item in data:
examples.append(dict(
text=item["sent"],
entity_spans=[(item["start"], item["end"])],
label=item["labels"]
))
return examples
test_examples = load_examples("/mnt/shared/home/jose.luis.malaquias.ext/new_convertLUKE/OpenEntity/test.json")
logging.info("Data Memory before Loading Models")
#print_gpu_utilization()
logging.info("############### LOAD MODEL ###################")
#my_config = LukeConfig.from_json_file("./ET_for_FIGER_model_v2/config.json")
#print(my_config)
# model LongLuke NoGlobal - OpenEntity
#model = LukeForEntityClassification.from_pretrained("/mnt/shared/home/jose.luis.malaquias.ext/new_convertLUKE/LongLukeOpenEntity/Vanilla_long_3Agosto")
model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
#model.config = my_config
#model.luke.config = my_config
model.eval()
# Load the tokenizer
#tokenizer = LukeTokenizer.from_pretrained("/mnt/shared/home/jose.luis.malaquias.ext/new_convertLUKE/LongLukeOpenEntity/Vanilla_long_3Agosto")
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
logging.info("Data Memory after Loading Models")
#print_gpu_utilization()
logging.info("CHOOSE GPU")
########################## Choose GPU ########################
# set the GPU device to use
cuda_device= 0 # mudar para 0 para dar o cuda
if cuda_device < 0:
device = torch.device("cpu")
else:
device = torch.device(f"cuda:{cuda_device}")
model = model.to(device)
batch_size = 128
num_predicted = 0
num_gold = 0
num_correct = 0
all_predictions = []
all_labels = []
for batch_start_idx in trange(0, len(test_examples), batch_size):
batch_examples = test_examples[batch_start_idx:batch_start_idx + batch_size]
texts = [example["text"] for example in batch_examples]
entity_spans = [example["entity_spans"] for example in batch_examples]
gold_labels = [example["label"] for example in batch_examples]
inputs = tokenizer(texts, entity_spans=entity_spans, return_tensors="pt", padding=True)
inputs = inputs.to("cuda")
with torch.no_grad():
outputs = model(**inputs)
num_gold += sum(len(l) for l in gold_labels)
for logits, labels in zip(outputs.logits, gold_labels):
for index, logit in enumerate(logits):
if logit > 0:
num_predicted += 1
predicted_label = model.config.id2label[index]
if predicted_label in labels:
num_correct += 1
precision = num_correct / num_predicted
recall = num_correct / num_gold
f1 = 2 * precision * recall / (precision + recall)
print(f"\n\nprecision: {precision} recall: {recall} f1: {f1}")
with open("results_OpenEntity_eval_LongLuke_5Aug_simple.txt", "w") as text_file:
text_file.write(f"RESULTS \n precision: {precision} \n Recall: {recall} \n F1: {f1}")
| joseMalaquias/tese | Agosto/OpenEntity/evaluate_simple.py | evaluate_simple.py | py | 3,520 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.cuda.empty_cache",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch... |
29442744987 | from brownie import network, accounts, config, MockV3Aggregator
from web3 import Web3
DECIMALS = 8
STARTING_PRICE = 200000000000
# you can see all env of brownie by running "brownie networks list" in terminal
LOCAL_BLOCKCHAIN_ENVIRONMENT = ["development", "ganache-local"]
FORKED_LOCAL_ENVIRONMENT = ["mainnet-fork"]
# If blockchain env is local or forked then get the address from loal
# basically the 0th index address
# else take the address from the brownie-config.yaml file
def get_account():
if (
network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENT
or network.show_active() in FORKED_LOCAL_ENVIRONMENT
):
return accounts[0]
else:
return accounts.add(config["wallets"]["from_key"])
# we need to deploy mocks for getting local price_feed
def deploy_mocks():
print(f"The active network is {network.show_active()}")
print("Deploying Mocks...")
# check if MockV3Aggregator is deploying for the first time.
if len(MockV3Aggregator) <= 0:
MockV3Aggregator.deploy(
DECIMALS, Web3.toWei(STARTING_PRICE, "ether"), {"from": get_account()}
)
print("Mocks Deployed!")
| PremMehta01/web3_brownie_fundMe | scripts/helpful_scripts.py | helpful_scripts.py | py | 1,163 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "brownie.network.show_active",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "brownie.network",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "brownie.network.show_active",
"line_number": 18,
"usage_type": "call"
},
{
"api_name"... |
439277880 | """Used to call an external NLP API for a given text input """
# Library imports
import cProfile
import pstats
import logging
import os
import PyPDF2 as pypdf
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
import src.Internal_API.db_connector as db_connector
# Setup logging for text analysis module
logger = logging.getLogger('text_analysis_logger')
logger.setLevel(logging.DEBUG)
# Logging handler and setting logging level to DEBUG
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# format for log entries
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# adding format to handler
ch.setFormatter(formatter)
# adding handling to logger
logger.addHandler(ch)
# API_error value for testing only, will be removed when NLP API is implemented
def text_analyze(input):
"""Handles text analysis from a given text and uses external API, returns raw data from NLP analysis"""
# Error Checks:
if input is None or input.strip() == '':# Checks if input is empty
logging.error("ERROR 101: Input not found")
return 101
if not isinstance(input, str): # Checks if input text is a string
logging.error("ERROR 102: Input is not a string")
return 102
if len(input) < 3:# Checks if length of input is within bounds, is lower for testing
logging.error("ERROR 103: Input is too short")
return 103
if len(input) > 15: # Checks if length of input is within bounds, is higher for testing
logging.error("ERROR 104: Input is too long")
return 104
sia = SentimentIntensityAnalyzer()
scores = sia.polarity_scores(input)
sentiment_score = scores["compound"]
db_connector.query_db("INSERT INTO Paragraphs (paragraphID,documentID,sentimentNum) VALUES (1, 1234, 0.7)")
print("Text Analyzed")
return sentiment_score
# extracted_text and pdf_page are for testing only, will be removed when pypdf is implemented
def pdf_to_text(file_name, extracted_text, pdf_page):
"""Handles the conversion from PDF to a text string, returns string of text or list of strings"""
if not os.path.isfile(file_name):
logger.error("ERROR 201: File not found")
return 201
if not file_name.endswith('.pdf'):
logger.error("ERROR 202: Invalid file format")
return 202
try:
pdf_object = open(file_name, 'rb')
pdf_reader = pypdf.PdfReader(pdf_object)
except pypdf.utils.PdfReadError:
logger.error("ERROR 203: Error reading PDF file")
return 203
if len(pdf_reader.pages) == 0:
logger.error("ERROR 204: Selected PDF has no pages")
return 204
db_connector.query_db(f"INSERT INTO Paragraphs (paratext) VALUES ('{extracted_text}') WHERE paragraphID = 1")
print("Text Extracted!")
return 0
def text_analysis_profiler():
"""Runs a profile on text analysis Module and prints results to the console"""
profiler = cProfile.Profile()
profiler.enable()
text_analyze("Hello World!", False)
print('Text Analysis Successful')
profiler.disable()
stats = pstats.Stats(profiler).sort_stats('ncalls')
stats.print_stats()
return 0
| CMander02/SmartNewsAnalyzer | src/ExternalAPI/text_analysis.py | text_analysis.py | py | 3,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.DE... |
11686474471 |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from torch.utils.data import *
from skimage import io, transform
import scipy.ndimage as sci
plt.ion()
from common_functions import *
#image processing
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = transform.resize(image, (new_h, new_w))
return {'image': img, 'label': label}
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
return {'image': image, 'label': label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, label = sample['image'], sample['label']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return (torch.from_numpy(image),
torch.from_numpy(label))
data_transforms = {
'train': transforms.Compose([
Rescale(256),
RandomCrop(224),
ToTensor()
]),
'test': transforms.Compose([
Rescale(256),
RandomCrop(224),
ToTensor()
])
}
def get_indices(root_dir, datafolder):
sizes = []
path = os.path.join(root_dir, datafolder)
for batch in sorted(os.listdir(path)):
path2 = os.path.join(path, batch)
if(os.path.isdir(path2)):
x = subprocess.check_output(['ls','-l', '{}'.format(path2)])
x = len(x.splitlines()) - 1
sizes.append(x)
cum_sizes = [0] * len(sizes)
for i in range(len(sizes)):
for j in range(i+1):
cum_sizes[i] += sizes[j]
indices = [0]*len(sizes)
for i in range(len(indices)):
if(i - 1 < 0):
indices[i] = list(range(cum_sizes[i]))
else:
indices[i] = list(range(cum_sizes[i-1],cum_sizes[i]))
return indices
get_indices("./", "train")
r = list(range(33, 123)) #keyboard values of ascii table
blacklist = [92,94,95,35,36,37,38, 39]
r = [chr(x) for x in r if x not in blacklist] #remove special characters and escape characters
class_names = r + supported_characters + [' ', "#", "$", "&"]
#print(class_names)
class BatchSampler(torch.utils.data.sampler.BatchSampler):
def __init__(self, folder, batch_size=0, drop_last=False):
'''if not isinstance(sampler, torch.utils.data.sampler.SequentialSampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.SequentialSampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
'''
self.batch_size = batch_size
self.drop_last = drop_last
self.currentbatch = 0
self.batches = get_indices("./", folder)
def __iter__(self):
#if(self.currentbatch < len(self.batches)):
# yield self.batches[self.currentbatch]
#self.currentbatch += 1
return iter(self.batches)
def __len__(self):
return len(self.batches)
class SymbDataset(Dataset):
"""Dataset Class For CNN"""
def __init__(self, root_dir, classnames=None, transform=None):
"""
Args:
root_dir (string): Directory containing all of the images and tex files.
classnames (list): List of all of the possible classes
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.len = None #calculate length only once
self.classnames = classnames
self.docs = []
for file in os.listdir(root_dir):
#print(file)
if file.endswith(".tex"):
path = os.path.join(root_dir, file)
with open(path, 'r') as f:
self.docs.append( ( file , simplify(f.read(), 0) ) ) #tup containing file, expected result values pairs
self.root_dir = root_dir
self.transform = transform
#print(self.docs)
def __len__(self): #returns number of images
path = self.root_dir
tot = get_indices("./", path)[-1][-1]
self.len = tot
return tot
def len2(self): #returns number of batches
return len(self.docs)
def get_idx(self, idx):
#finds the batch number given an index of all the images
batch = 0
cum = 0
l=0
while(idx > 0):
path = os.path.join(self.root_dir, str(batch))
l = len(os.listdir(path))
if(idx >= l):
batch += 1
idx -= l
cum +=l
else: break
self.idx1 = batch
self.idx2 = idx
def __getitem__(self, idx):
self.get_idx(idx)
idx1 = self.idx1
idx2 = self.idx2
imglabel = self.docs[idx1][1] #label with file contents
#print(imglabel)
imglabel = np.array([self.classnames.index(classname) for classname in imglabel]) #array with the indices for each class in classnames
#print(imglabel)
imgdir = os.path.join(self.root_dir, self.docs[idx1][0].strip(".tex"))
img = None
l = idx2
for file in sorted(os.listdir(imgdir)):
file = os.path.join(imgdir, file)
print(file)
if(l == 0):
img = sci.imread(file, mode="RGB")
if(img is None):
return __getitem__(idx+1)
l -= 1
#sample = np.array((img , imglabel))
#print(img.shape, imglabel.shape)
sample = {'image': img, 'label': imglabel}
if self.transform:
sample = self.transform(sample)
return sample
data_dir = "./"
image_datasets = {x: SymbDataset(os.path.join(data_dir, x), classnames = class_names ,
transform = data_transforms[x])
for x in ['train', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_sampler = BatchSampler("./", x),
num_workers=0)
for x in ['train', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'test']}
use_gpu = torch.cuda.is_available()
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
#print(repr(inputs), repr(classes))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)
| Tea-Script/HandwritingRecognition | datasets.py | datasets.py | py | 9,114 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "skimage.transform.resize",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "skim... |
35225497072 | import logging
import pathlib
import sys
from helloworld.errors import OutputFileExists
_logger = logging.getLogger(__name__)
def write_message(message, output_file, clobber=False):
if output_file != '-':
# only overwrite output_file if --clobber specified.
if pathlib.Path(output_file).exists() and not clobber:
raise OutputFileExists(output_file)
_logger.debug("Writing message to %s",
output_file)
with open(output_file, "w") as _outfile:
_outfile.write(message)
else:
_logger.debug("Writing message to stdout")
sys.stdout.write(message + "\n")
def create_logger(level='INFO', logfile=None):
logger = logging.getLogger()
logger.setLevel(getattr(logging, level, None))
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s'
' - %(message)s')
if logfile:
lh = logging.FileHandler(logfile, mode='w')
else:
lh = logging.StreamHandler()
lh.setLevel(getattr(logging, level, None))
lh.setFormatter(formatter)
logger.addHandler(lh)
return logger
| KeithMnemonic/python-helloworld | helloworld/scripts/cli_utils.py | cli_utils.py | py | 1,155 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "helloworld.errors.OutputFileExists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.... |
24288468105 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 17:56:41 2021
@author: s2073467
"""
import os
import scipy.io
import matplotlib.pyplot as plt
import scipy.signal
import voltron_ROI as ROI
import pandas as pd
import numpy as np
from airPLS import airPLS
#%% Load triggered mode data
'''triggered: lowpass cutoff:2000Hz, fOR 4KHznoise
rolling window 250, bigger rolling window, more false positive, smaller window will loss spikes
thre: 3std'''
dpath= "G:/SPAD/SPADData"
mat_1 = scipy.io.loadmat(os.path.join(dpath, "trace_ref.mat"))
trace_raw = mat_1['trace_ref'][:,0]
#%% Defined functions
def get_smoothed_trace (trace_raw, lowpass_cutoff=2000,bin_window=5):
'''Basic filter and smooth'''
trace_raw=trace_raw.astype(np.float64)
plot_trace(trace_raw, name='raw_trace')
'''reverse the trace (voltron is reversed)'''
trace_reverse=np.negative(trace_raw)
plot_trace(trace_reverse, name='raw_trace_reverse')
'''2000Hz low pass filter'''
trace_filtered=ROI.butter_filter(trace_reverse,'low',cutoff=lowpass_cutoff)
plot_trace(trace_filtered, name='trace_2kHz filtered')
'''5 frames as a rolling window to bin the data'''
trace_binned = pd.Series(trace_filtered).rolling(window=bin_window,min_periods=bin_window,center=True).mean()
trace_binned.fillna(method="bfill",inplace=True)
trace_binned.fillna(method="ffill",inplace=True)
trace_smooth = np.array(trace_binned)
plot_trace(trace_smooth, name='trace_smooth')
return trace_smooth
def plot_trace(trace, name='signal',ax=None,color='r',zorder=1,linewidth=1):
if ax is None:
fig = plt.figure(figsize=[16,8])
ax = fig.add_subplot(111)
ax.plot(trace,color,linewidth=linewidth,zorder=zorder)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.title(name, fontsize=20)
return ax
def plotSpikeOnTrace(trace, spiketimes,name='signal'):
fig = plt.figure(figsize=[20,8])
plt.plot(trace,'c-',linewidth=1,zorder=1)
plt.scatter(spiketimes,trace[spiketimes],
s=30,c='k',marker='o',zorder=2)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.title(name, fontsize=20)
return fig
def get_pdTrace(trace_smooth,high_freq,spiketrain):
data=np.array([trace_smooth,high_freq,spiketrain]).T
traceSpike_pd = pd.DataFrame(data,columns=['trace', 'high_freq', 'spiketrain'])
return traceSpike_pd
def get_subset(traceSpike_pd,low_idx,high_idx):
traceSpike_sub=traceSpike_pd[low_idx:high_idx]
spiketime_sub=traceSpike_sub.index[traceSpike_sub['spiketrain']!=0]
return traceSpike_sub,spiketime_sub
def plot_spike_trace(pdTrace,spiketimes,ax,window_frame=600):
for i in range(len(spiketimes)):
trace_sub,_=get_subset(pdTrace,int(spiketimes[i]-window_frame/2),
int(spiketimes[i]+window_frame/2))
ax=plot_trace(np.array(trace_sub.high_freq),name='spikes',ax=ax,color='c')
return ax
def get_spike_mean(pdTrace,spiketimes,ax,window_frame=600):
spike_mean=np.zeros(window_frame)
spike_sum=np.zeros(window_frame)
for i in range(len(spiketimes)):
trace_sub,_=get_subset(pdTrace,int(spiketimes[i]-window_frame/2),
int(spiketimes[i]+window_frame/2))
spike_i=np.array(trace_sub.high_freq)
spike_sum=spike_i+spike_sum
spike_mean=spike_sum/len(spiketimes)
return spike_mean
#%%
'''Use original function with spike shape template
unpack the result'''
trace_smooth=get_smoothed_trace (trace_raw)
spike_data=ROI.get_spikes(trace_smooth, superfactor=10, threshs=(.4, .6, .75))
sub_thresh2=spike_data[0]
high_freq=spike_data[1]
spiketimes=spike_data[2]
spiketrain=spike_data[3]
spikesizes=spike_data[4]
super_times, super_sizes, super_times2, super_sizes2 =spike_data[5:9]
kernel, upsampled_kernel, super_kernel,best_tlimit,best_thre=spike_data[9:14]
#%%
plotSpikeOnTrace(trace_smooth,spiketimes)
plotSpikeOnTrace(high_freq,spiketimes)
plot_trace(kernel, name='kernel')
#plot_trace(spikesizes, name='spikesizes')
plot_trace(spiketrain, name='spiketrain')
#%% plot traces on kernel
ax=plot_trace(kernel, name='kernel',color='k',zorder=2,linewidth=3)
pdTrace=get_pdTrace(trace_smooth,high_freq,spiketrain)
ax=plot_spike_trace(pdTrace,spiketimes,ax,window_frame=len(kernel)-1)
#%%
spike_mean=get_spike_mean(pdTrace,spiketimes,ax,window_frame=600)
ax1=plot_trace(spike_mean, name='spike_mean',color='k',zorder=2,linewidth=3)
pdTrace=get_pdTrace(trace_smooth,high_freq,spiketrain)
ax1=plot_spike_trace(pdTrace,spiketimes,ax1,window_frame=len(kernel)-1)
#%% exponential fit
from pylab import *
from math import log
import expfit
tList = arange(0.0,0.03,0.0001) # pick 10 ms after the spike
'''use Kernel to fit, can also use spike_mean data'''
#yList = kernel[301:]
yList = spike_mean[300:]
#yList=ROI.butter_filter(yTrace,'low',cutoff=500) #cutoff=308 works for fitExponent
#trace_binned = pd.Series(trace_filtered).rolling(window=bin_window,min_periods=bin_window,center=True).mean()
'''linear fit'''
#(amplitudeEst,tauEst) = expfit.fitExponent(tList,yList,ySS=0)
#amplitudeEst,tauEst= expfit.fit_exp_linear(tList,yList,0)
'''nonlinear fit'''
amplitudeEst,K,yBaseline= expfit.fit_exp_nonlinear(tList,yList)
tauEst=-1/K
print ('Amplitude estimate = %f, tau estimate = %f'
% (amplitudeEst,tauEst))
yEst = amplitudeEst*(exp(-tList/tauEst))+yBaseline
figure(1)
plot(tList,yList,'b')
#plot(tSamples,yMeasured,'+r',markersize=12,markeredgewidth=2)
plot(tList,yEst,'--g')
xlabel('seconds')
legend(['True value','Estimated value'])
grid(True)
show()
#%%
#%%
'''Df/F, need a for loop'''
F_base=np.mean(trace_smooth[100:1100])
dfonF_array=np.zeros(len(spiketimes))
for i in range(len(spiketimes)):
spike_i=trace_smooth[spiketimes[i]]
df_i=spike_i-F_base
dfonF_i=-(df_i)/F_base
dfonF_array[i]=dfonF_i
plot_trace(dfonF_array, name='df/f')
average_dff=np.mean(dfonF_array)
'''
df/f values in percentage
6.95896
6.76306
7.6022
8.47525
7.71551
7.36897
7.37012
7.51645
7.84111
8.51643
'''
#%%
'''SNR'''
F_base_std=np.std(trace_smooth[100:1100])
SNR_array=np.zeros(len(spiketimes))
for i in range(len(spiketimes)):
spike_i=trace_smooth[spiketimes[i]]
df_i=spike_i-F_base
snr_i=df_i/F_base_std
SNR_array[i]=snr_i
plot_trace(SNR_array, name='SNR')
average_snr=np.mean(SNR_array)
''' SNR values
7.13128
6.93052
7.79044
8.68511
7.90656
7.55144
7.55262
7.70257
8.03527
8.72732
'''
#%%
'''spike time compare with ephys
SPAD frequency 9938.4'''
| MattNolanLab/SPAD_in_vivo | SPAD_ex_vivo_analysis/Spike_trigger.py | Spike_trigger.py | py | 6,758 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.io.io.loadmat",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"lin... |
42232863780 | # -*- coding:utf-8 -*-
import mysql.connector
from mysql.connector import Error
from ConfigParser import ConfigParser
from Errors import Errors
from datetime import datetime
class DbConnector():
def __init__(self):
pass
def getConnection(self):
conn=None
dbconfig=self._getDBConfig(filename='config.ini', section='mysql')
try:
conn=mysql.connector.connect(**dbconfig)
except Error as e:
self._showError(u'Ошибка', u'Ошибка подключения к базе данных')
print (e)
return conn
def _getDBConfig(self, **param):
filename=param['filename']
section=param['section']
parser=ConfigParser()
parser.read(filename)
config={}
if parser.has_section(section):
items=parser.items(section)
for item in items:
config[item[0]]=item[1]
else:
self._showError(u'Ошибка', u'Ошибка файла конфигурации БД. Отсутствует секция.')
return config
def getDataFromDb(self, query, type='all'):
conn = cur = None
try:
conn=self.getConnection()
cur=conn.cursor()
cur.execute(query)
if type=='all':
result = cur.fetchall()
if type=='one':
result=cur.fetchone()
return result
except:
self._showError(u'Ошибка', u'Ошибка подключения к базе данных')
finally:
if cur is not None: cur.close()
if conn is not None: conn.close()
def insertDataToDB(self, query):
conn = cur = None
try:
conn=self.getConnection()
cur=conn.cursor()
cur.execute(query)
conn.commit()
return True
except:
self._showError(u'Ошибка', u'Ошибка подключения к базе данных')
return False
finally:
if cur is not None: cur.close()
if conn is not None: conn.close()
def deleteDataFromTable(self, query):
conn = cur = None
try:
conn=self.getConnection()
cur=conn.cursor()
cur.execute(query)
conn.commit()
except:
self._showError(u'Ошибка', u'Ошибка подключения к базе данных')
return False
finally:
if cur is not None: cur.close()
if conn is not None: conn.close()
return True
#+++++ Items +++++
def getItems(self, hidden):
query='SELECT idItem, itemName, ItemPrice from Items where hidden=%d' %(hidden)
result = self.getDataFromDb(query)
return result
def addItem(self, itemName, itemPrice, ItemIcon, hidden):
query='''Insert into vending.Items (ItemName, ItemPrice, ItemIcon, hidden) values ('%s', %d, '%s', %d)''' %\
(itemName, itemPrice, ItemIcon, hidden)
result=self.insertDataToDB(query)
return result
def editItem(self, itemName, itemPrice, ItemIcon, hidden, itemId):
query='''Update vending.Items SET ItemName='%s', ItemPrice=%d, ItemIcon='%s', hidden=%d WHERE idItem=%d''' %\
(itemName, itemPrice, ItemIcon, hidden, itemId)
result=self.insertDataToDB(query)
return result
def deleteItem(self, idItem):
query='Delete from vending.Items WHERE idItem=%d' %(idItem)
result=self.insertDataToDB(query)
return result
def sellsOfItem(self, idItem):
query='Select idSales, saleDate, saledItemId, price from Sales where saledItemId=%d' %(idItem)
result=self.getDataFromDb(query, 'all')
return result
def movementsOfItem(self, idItem):
query='Select IdMovement, idItem, OperationDate, OperationType, qty from ItemsMovements where idItem=%d' %(idItem)
result=self.getDataFromDb(query, 'all')
return result
def setItemHide(self, idItem, isHidden):
query='Update vending.Items SET hidden=%d WHERE idItem=%d' %(isHidden, idItem)
result=self.insertDataToDB(query)
return result
def getIdItemByName(self, itemName, hidden):
query='Select idItem from Items where itemName like \'%s\' and hidden=%d' %(itemName, hidden)
result=self.getDataFromDb(query, 'all')
return result
def getQtyOfItemsByType(self):
query='Select Magazins.ItemId, sum(Magazins.ItemQTY), Items.ItemName from Magazins, Items ' +\
'where Magazins.ItemId=Items.idItem ' +\
'group by ItemId '+\
'order by Items.itemName'
result=self.getDataFromDb(query, 'all')
return result
#+++++ Magazins +++++
def addMagazin(self, idMagazins, ItemId, ItemQTY):
query='Insert into Magazins (idMagazins, ItemId, ItemQTY) values (%d, %d, %d)' %\
(idMagazins, ItemId, ItemQTY)
result=self.insertDataToDB(query)
return result
def dropMagazinesTable(self):
query='Delete from Magazins'
result= self.deleteDataFromTable(query)
return result
def getIconById(self, idItem):
query='SELECT ItemIcon from Items where idItem={}'.format(idItem)
result=self.getDataFromDb(query, 'one')
return result
def getMagazinsItemsMap(self):
query= ('select idMagazins, itemName, ItemQty, Items.idItem from Magazins,'+
' Items where Magazins.ItemId=Items.idItem')
result=self.getDataFromDb(query)
return result
def getMagazinesContainItem(self, idItem):
magList=''
query= ('select idMagazins, ItemQty, itemId from Magazins'+
' where Magazins.ItemId=%d') %(idItem)
result=self.getDataFromDb(query)
if len(result)!=0:
for magazine in result:
magList+=str(magazine[0])+', '
magList=magList[:-2]
return magList
def getMagazinLoadTable(self):
query='select M.idMagazins, I.ItemName, M.ItemQTY from Magazins as M, Items as I '+\
'where M.ItemId=I.idItem'
result=self.getDataFromDb(query)
return result
#+++++ Movements +++++
def getMaxMovementId(self):
query='Select max(IdMovement) from ItemsMovements'
result=self.getDataFromDb(query, 'one')
return result
def addMovement(self, idRecharge, idItem, date, OperationType, itemQty):
query='Insert into ItemsMovements (IdMovement, idItem, OperationDate, OperationType, qty) '+\
'VALUES (%d, %d, \'%s\', \'%s\', %d)' %(idRecharge, idItem, date, OperationType, itemQty)
result=self.insertDataToDB(query)
return result
def getMovements(self):
query='select IM.IdMovement, Items.itemName, IM.OperationDate, IM.OperationType, '+\
'IM.qty from ItemsMovements as IM, Items '+\
'where IM.IdMovement=(select max(IdMovement) from ItemsMovements) '+\
'and IM.idItem=Items.idItem '+\
'order by Items.itemName'
result=self.getDataFromDb(query, 'all')
return result
def getInfoForInkass(self):
query='Select max(IncasDate), max(idIncas) from Incas'
result=self.getDataFromDb(query, 'one')
incasDate=result[0]
incasId=result[1]
query='Select sum(Sales.payment) from Sales where Sales.saleDate> \'%s\'' %(incasDate)
result=self.getDataFromDb(query, 'one')
accountedCash=result[0]
if accountedCash is None:
accountedCash=0
result=(accountedCash, incasId, incasDate)
#query='Select sum(d.payment) as cash, max(p.incasid) as lastIncasId, max(p.incasdate) as LastIncasDate from '+\
# '(Select max(idIncas) as incasid, max(IncasDate) as incasdate from Incas) as p, (Select Sales.payment, '+\
# 'Sales.saleDate from Sales'+\
# ' where Sales.saleDate> (Select max(IncasDate) from Incas)) as d'
#result=self.getDataFromDb(query, 'one')
return result
def getCashInNoteReseiver(self, lastIncasDate):
query='Select sum(NoteValue) from ReceivedNotes where DateReceiving>\'%s\'' %(lastIncasDate)
result=self.getDataFromDb(query, 'one')
return result
def writeInkass(self, inkassPayment, inkassator=''):
query='INSERT INTO `vending`.`Incas` (`IncasDate`, `IncasSum`, `Incasator`) '+\
'VALUES (\'%s\', %d, \'%s\')' %(datetime.now(), inkassPayment, inkassator)
result=self.insertDataToDB(query)
return result
#+++++ Log +++++
def writeLog(self, logMessages):
for logMessage in logMessages:
priority=logMessage.priority
source=logMessage.sourse
event=logMessage.message
query='Insert into Log (EventType, Source, EventDate, Event)'+\
' values (\'%s\', \'%s\', \'%s\', \'%s\')' \
%(priority, source, str(datetime.now()), event)
self.insertDataToDB(query)
def getLog(self):
query='Select EventType, Source, EventDate, Event from Log'
result=self.getDataFromDb(query, 'all')
return result
def clearLog(self, source):
query='Delete from Log where Source like \'%s\'' %(source)
self.insertDataToDB(query)
def _showError(self, header, message):
self.message=Errors(message)
self.message.window.setWindowTitle(header)
self.message.window.show()
| FelixMailwriter/VendService | DAL/DBConnector.py | DBConnector.py | py | 9,921 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 19,
"usage_type": "name"
},
{
"... |
37319866519 | import pickle
import json
import numpy as np
import pygame
import cv2
import copy
from overcooked_ai_py.env import OverCookedEnv
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedState
from overcooked_ai_py.visualization.state_visualizer import StateVisualizer
from overcooked_ai_py.mdp.overcooked_mdp import Recipe
from tqdm import tqdm
def _convert_action_to_int(joint_action) -> list:
action_set = []
for _action in joint_action:
if _action == (-1, 0): #
action_set.append(3)
elif _action == (0, 1): # down
action_set.append(1)
elif _action == (1, 0): # right
action_set.append(2)
elif _action == (0, -1): # Up
action_set.append(0)
elif _action == 'interact':
action_set.append(5)
else:
action_set.append(4)
return action_set
from replay_buffer import ReplayBuffer
# Load data
with open('../overcooked_ai_py/data/human_data/clean_train_trials.pickle', 'rb') as f:
dfA = pickle.load(f)
with open('../overcooked_ai_py/data/human_data/clean_test_trials.pickle', 'rb') as f:
dfB = pickle.load(f)
df = dfA.append(dfB, ignore_index=True)
df = df[df['layout_name'] == 'asymmetric_advantages']
print(df)
env = OverCookedEnv(scenario="asymmetric_advantages", episode_length=500)
replay_buffer = ReplayBuffer(
obs_shape=(2, 10056),
action_shape=(2, 1),
reward_shape=(2, 1),
dones_shape=(2, 1),
device='cpu',
capacity=20385
)
visualizer = StateVisualizer()
for i, data in df.iterrows():
state = data['state'].replace('\'', '\"').replace("False", 'false').replace("True", 'true')
state_dict = json.loads(state)
from pprint import pprint
state = OvercookedState.from_dict(state_dict)
state._all_orders = [Recipe(('onion', 'onion', 'onion'))] # { "ingredients" : ["onion", "onion", "onion"]}]
#
# state.all_orders = [('onion', 'onion', 'onion')]
action = data['joint_action']
action = json.loads(action.replace('\'', '\"').lower())
print(state)
image = visualizer.render_state(state=state, grid=env.overcooked.mdp.terrain_mtx,
hud_data=StateVisualizer.default_hud_data(state))
# image = visualizer.render_state(state=state, grid=env.overcooked.mdp.terrain_mtx,
# hud_data=None)
buffer = pygame.surfarray.array3d(image)
image = copy.deepcopy(buffer)
image = np.flip(np.rot90(image, 3), 1)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = cv2.resize(image, (528, 464))
cv2.imshow('Display', image)
print(i, action)
cv2.waitKey(1)
| bic4907/Overcooked-AI | test/visualize_dataframe.py | visualize_dataframe.py | py | 2,684 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "overcooked_ai_py.env.OverCookedEnv",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "replay_buf... |
27022438847 | """JSON loaders."""
from flask import request
from .errors import MarshmallowErrors
def marshmallow_loader(schema_class):
"""Marshmallow loader for JSON requests."""
def json_loader():
request_json = request.get_json()
context = {}
pid_data = request.view_args.get('pid_value')
if pid_data:
pid, _ = pid_data.data
context['pid'] = pid
result = schema_class(context=context).load(request_json)
if result.errors:
raise MarshmallowErrors(result.errors)
return result.data
return json_loader
def json_patch_loader():
"""Dummy loader for json-patch requests."""
return request.get_json(force=True)
| slint/cookiecutter-invenio-datamodel | {{cookiecutter.project_shortname}}/{{cookiecutter.package_name}}/loaders/json.py | json.py | py | 714 | python | en | code | null | github-code | 36 | [
{
"api_name": "flask.request.get_json",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.request.view_args.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flas... |
22602819959 | import numpy as np
import matplotlib.pyplot as plt
import os
from glob import glob
import json
def plot_item(key='train_loss'):
sub_dirs = glob("./logs/*/", recursive=False)
plt.figure(figsize=(6, 4))
plt.xticks(range(21))
for sub_dir in sub_dirs:
_ = sub_dir[sub_dir.index('_')+1:]
network_name = _[:_.index('_')]
with open(sub_dir+'training_log.json', 'r') as f:
training_log = json.load(f)
plt.plot(training_log[key], label=network_name)
plt.grid(linestyle='--')
plt.title(key.replace('acc', 'accuracy').replace('_', ' '))
plt.legend()
plt.savefig('./logs/'+key+'.jpg', bbox_inches='tight')
plt.show()
plt.close()
print(f'{key} plotted...')
if __name__ == '__main__':
plot_item('train_loss')
plot_item('val_loss')
plot_item('test_loss')
plot_item('train_acc')
plot_item('val_acc')
plot_item('test_acc')
| Gariscat/HouseX | plot.py | plot.py | py | 935 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
8361752991 |
import os
import requests
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# variables loaded in from .env file
COC_TOKEN = os.environ.get("COC_TOKEN")
headers = {
'Accept': 'application/json',
'authorization': 'Bearer ' + COC_TOKEN
}
class Player(object):
"""
docstring
"""
def __init__(self, user_id):
response = requests.get(f'https://api.clashofclans.com/v1/players/%{user_id}', headers=headers)
player_dict = response.json()
for key in player_dict:
setattr(self, key, player_dict[key])
self.max_heroes_for_each_th = {
13: [75, 75, 50, 20],
12: [65, 65, 40],
11: [50, 50, 20],
10: [40, 40],
9: [30, 30],
8: [10],
7: [5],
6: [],
5: [],
4: [],
3: [],
2: [],
1: []
}
self.troops_lookup = {}
for troop in self.troops:
self.troops_lookup[troop['name']] = []
self.troops_lookup[troop['name']].append(troop['level'])
self.troops_lookup[troop['name']].append(troop['maxLevel'])
self.troops_lookup[troop['name']].append(troop['village'])
self.spells_lookup = {}
for spell in self.spells:
self.spells_lookup[spell['name']] = []
self.spells_lookup[spell['name']].append(spell['level'])
self.spells_lookup[spell['name']].append(spell['maxLevel'])
self.heroes_lookup = {}
for hero in self.heroes:
self.heroes_lookup[hero['name']] = []
self.heroes_lookup[hero['name']].append(hero['level'])
self.heroes_lookup[hero['name']].append(hero['maxLevel'])
self.heroes_lookup[hero['name']].append(hero['village'])
# ideas
# 1. list achievements
# 2. "your labels"
def get_labels(self):
return [entry['name'] for entry in self.labels]
# 3. troop, spell, and hero levels
def get_troop_info(self, troop):
base = self.troops_lookup[troop][2]
if base == 'home':
return f"Your {troop} is a home base troop and is level {self.troops_lookup[troop][0]} out of {self.troops_lookup[troop][1]}"
else:
return f"Your {troop} is a builder base troop and is level {self.troops_lookup[troop][0]} out of {self.troops_lookup[troop][1]}"
def get_spell_info(self, spell):
spell = spell + ' Spell'
return f"Your {spell} is level {self.spells_lookup[spell][0]} out of {self.spells_lookup[spell][1]}"
def get_hero_info(self, hero):
base = self.heroes_lookup[hero][2]
if base == 'home':
return f"Your {hero} is from home base and is level {self.heroes_lookup[hero][0]} out of {self.heroes_lookup[hero][1]}"
else:
return f"Your {hero} is from builder base and is level {self.heroes_lookup[hero][0]} out of {self.heroes_lookup[hero][1]}"
# show them how long until heroes are maxed for their TH??
def until_max_heroes(self):
cur_th_level = self.townHallLevel
max_heroes_for_th = self.max_heroes_for_each_th[cur_th_level]
hero_levels = []
for hero in self.heroes:
if hero['name'] != 'Battle Machine':
hero_levels.append(hero['level'])
if len(hero_levels) == 1:
til_max_king = max_heroes_for_th[0] - hero_levels[0]
return [til_max_king]
if len(hero_levels) == 2:
til_max_king = max_heroes_for_th[0] - hero_levels[0]
til_max_queen = max_heroes_for_th[1] - hero_levels[1]
return [til_max_king, til_max_queen]
if len(hero_levels) == 3:
til_max_king = max_heroes_for_th[0] - hero_levels[0]
til_max_queen = max_heroes_for_th[1] - hero_levels[1]
til_max_gw = max_heroes_for_th[2] - hero_levels[2]
return [til_max_king, til_max_queen, til_max_gw]
if len(hero_levels) == 4:
til_max_king = max_heroes_for_th[0] - hero_levels[0]
til_max_queen = max_heroes_for_th[1] - hero_levels[1]
til_max_gw = max_heroes_for_th[2] - hero_levels[2]
til_max_rc = max_heroes_for_th[3] - hero_levels[3]
return [til_max_king, til_max_queen, til_max_gw, til_max_rc]
return "No heroes at this time"
class Clan(object):
"""
docstring
"""
def __init__(self, clantag):
response = requests.get(f'https://api.clashofclans.com/v1/clans/%{clantag}', headers=headers)
clan_dict = response.json()
for key in clan_dict:
setattr(self, key, clan_dict[key])
# def search_clan(name):
# # submit a clan search
# response = requests.get(f'https://api.clashofclans.com/v1/clans?name={name}', headers=headers)
# clan_json = response.json()
# for clan in clan_json['items']:
# print(clan['name'] + ' is level ' + str(clan['clanLevel']))
player_ids = ['9L9GLQLJ', '9VCYV8G9', 'PLGQLPGRJ', 'L9GGJOJYP']
for player in player_ids:
test = Player(player)
print(test.until_max_heroes())
# player = Player('9VCYV8G9')
# print(player.heroes)
# search_clan('Raz3 Predators')
# Viz ideas
# 1. bar graph of members based on TH level
# down the road ideas with ML
# 1. recommend clans to join (clans that have people like you)
# 2. recommend who in your clan to friendly challenge
# 3. predict how long until you max your TH?
# 4. predict which clan will win the war | dougscohen/ClashStash | clash.py | clash.py | py | 5,842 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"li... |
24398908749 | from flask import Flask, jsonify
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from sqlalchemy import or_ , cast
from sqlalchemy.dialects.postgresql import TEXT
url = open("databaseuri.txt", "r").read().rstrip()
app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = url
db = SQLAlchemy(app)
migrate = Migrate(app,db)
from model import *
# Return achievement based on input id
@app.route('/api/v1/achievement/id/<id>', methods = ['GET'])
def get_achievement(id):
x = AchievementModel.query.filter_by(id = id).all()
return jsonify([s.serialized() for s in x])
# Return all achievements
@app.route('/api/v1/achievement/all')
def get_all_achievements():
x = AchievementModel.query.all()
return jsonify([s.serialized() for s in x])
# Return all achievements based on input dlc
@app.route('/api/v1/achievement/dlc/<dlc>')
def get_achievement_dlc(dlc):
search_str = f'%{dlc.lower()}%'
x = AchievementModel.query.filter(cast(AchievementModel.dlc, TEXT).ilike(search_str)).all()
return jsonify([s.serialized() for s in x])
#Get achievements that dont require dlc
@app.route('/api/v1/achievement/dlc/None')
def get_no_dlc():
x = AchievementModel.query.filter(AchievementModel.dlc.is_(None)).all()
return jsonify([s.serialized() for s in x])
#Get achievements that require dlc
@app.route('/api/v1/achievement/dlc/Required')
def get_achievements_any_dlc():
x = AchievementModel.query.filter(AchievementModel.dlc.isnot(None)).all()
return jsonify([s.serialized() for s in x])
#TODO : Include versions before version specified
@app.route('/api/v1/achievement/version/<version>')
def get_version_achievements(version):
x=AchievementModel.query.filter(version=version).all()
return jsonify([s.serialized() for s in x])
#General search endpoint
@app.route('/api/v1/achievement/search/<search>')
def get_search(search):
search_str = f'%{search.lower()}%'
try:
version = float(search)
x = AchievementModel.query.filter(or_(
cast(AchievementModel.id, db.String()).ilike(search_str),
AchievementModel.name.ilike(search_str),
AchievementModel.description.ilike(search_str),
AchievementModel.difficulty.ilike(search_str),
cast(AchievementModel.dlc, TEXT).ilike(search_str),
AchievementModel.version == version
)).all()
except ValueError:
x = AchievementModel.query.filter(or_(
AchievementModel.name.ilike(search_str),
AchievementModel.requirements.ilike(search_str),
AchievementModel.starting_condition.ilike(search_str),
cast(AchievementModel.id, db.String()).ilike(search_str),
AchievementModel.description.ilike(search_str),
AchievementModel.difficulty.ilike(search_str),
cast(AchievementModel.dlc, TEXT).ilike(search_str)
)).all()
return jsonify([s.serialized() for s in x])
# Search by difficulty
@app.route('/api/v1/achievement/difficulty/<difficulty>')
def get_difficulty_achievements(difficulty):
x=AchievementModel.query.filter(difficulty = difficulty).all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/VE')
def get_difficulty_VE():
x=AchievementModel.query.filter(AchievementModel.difficulty == 'VE').all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/E')
def get_difficulty_E():
x=AchievementModel.query.filter(AchievementModel.difficulty =='E').all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/M')
def get_difficulty_M():
x=AchievementModel.query.filter(AchievementModel.difficulty == 'M').all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/H')
def get_difficulty_H():
x=AchievementModel.query.filter(AchievementModel.difficulty == 'H').all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/VH')
def get_difficulty_VH():
x=AchievementModel.query.filter(AchievementModel.difficulty=='VH').all()
return jsonify([s.serialized() for s in x])
@app.route('/api/v1/achievement/difficulty/I')
def get_difficulty_I():
x=AchievementModel.query.filter(AchievementModel.difficulty=='I').all()
return jsonify([s.serialized() for s in x])
if __name__=='__main__':
app.run(debug=True)
| CarsenKennedy/EU4-flask-api | app.py | app.py | py | 4,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlc... |
27574117500 | import libpysal as lps
import geopandas as gpd
import csv
state = "ok"
file = ("./ok_boundary.json")
shp = gpd.read_file(file)
rW = lps.weights.Rook.from_dataframe(shp, idVariable="GEOID")
outputName = state + "_neighbors.csv"
header = ['id','NEIGHBORS']
with open(outputName, 'w', newline='') as csv_out:
writeCSV = csv.writer(csv_out, delimiter=',')
writeCSV.writerow(header)
for row in rW:
id = row[0]
neighbors = row[1]
neighborIDs = list(neighbors.keys())
n = []
for i in neighborIDs:
n.append(i)
neighborString = ",".join(n)
writeCSV.writerow([row[0], neighborString])
csv_out.close()
| kenchin3/CSE416_Warriors | client/preprocessing/neighbors.py | neighbors.py | py | 682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "geopandas.read_file",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "libpysal.weights.Rook.from_dataframe",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "libpysal.weights",
"line_number": 10,
"usage_type": "attribute"
},
{
"api... |
32144801473 | from typing import List, Any
from sqlalchemy.orm import Session
from api import schemas, models, crud
from api.votes import schemas as votes_schemas
def get_votes_from_game_id(db: Session, game_id: int) -> List[Any]:
return db.query(models.Vote).filter(models.Vote.game_id == game_id).all()
def add_vote(db: Session, game_id: int, vote: votes_schemas.VoteCreate) -> models.Vote:
db_game = crud.games.get_game_by_id(db, game_id)
db_vote = models.Vote(
vote_from = vote.vote_from,
vote_to = vote.vote_to,
game_id = game_id,
day = db_game.current_day,
vote_type = vote.vote_type.value
)
db.add(db_vote)
db.commit()
db.refresh(db_vote)
return db_vote | cyborggeneraal/weerwolven | api/votes/crud.py | crud.py | py | 722 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "api.models.Vote",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "api.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
... |
37087106292 | from torch.utils import data
from get_data import get_data
import torch
BATCH_SIZE = 4
[items, attributes, df] = get_data()
NUM_ITEMS = len(items)
NUM_ATTRIBUTES = len(attributes)
features = torch.tensor(to_categorical(range(NUM_ITEMS)), dtype=torch.float32)
targets = torch.tensor(df.values, dtype=torch.float32)
class Dataset(data.Dataset):
"Construct a PyTorch Dataset & Dataloader to automate batching & shuffling"
def __init__(self, features, targets):
self.features = features
self.targets = targets
if len(targets) != len(features):
raise ValueError("Length of features and targets vectors are different")
self.n_samples = len(targets)
def __len__(self):
return self.n_samples
def __getitem__(self, index):
return (self.features[index], self.targets[index])
dataset = Dataset(features, targets)
dataloader = data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
| Pocket-titan/rogers_mcclelland_pytorch | dataset.py | dataset.py | py | 966 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "get_data.get_data",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"... |
1990226900 | from collections import deque, OrderedDict
import numpy as np
import dm_env
from dm_control.mujoco.engine import Camera
def xyz2pixels(xyz, cam_mat):
""" Project 3D locations to pixel locations using the camera matrix """
xyzs = np.ones((xyz.shape[0], xyz.shape[1]+1))
xyzs[:, :xyz.shape[1]] = xyz
xs, ys, s = cam_mat.dot(xyzs.T)
x, y = xs/s, ys/s
return x, y
def compute_pixel_offsets(cm_from, cm_to):
""" Compute 2D pixel offsets caused by 2D camera movement """
xyz0 = np.zeros((1, 3))
x_from, y_from = xyz2pixels(xyz0, cm_from)
x_to, y_to = xyz2pixels(xyz0, cm_to)
return (x_to - x_from)[0], (y_to - y_from)[0]
class FlattenStateWrapper(dm_env.Environment):
""" Flatten values in observations """
def __init__(self, env):
self._env = env
self._observation_spec = OrderedDict()
state_size = sum([np.int(np.prod(v.shape)) for v in env.observation_spec().values()])
state_dtype = env.observation_spec
self._observation_spec['state'] = dm_env.specs.Array(shape=(state_size,), dtype=np.float32, name='state')
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._env.action_spec()
def _flatten_state(self, time_step):
obs = [
np.array([v]) if np.isscalar(v) else v.ravel()
for v in time_step.observation.values()
]
obs = np.concatenate(obs).astype(np.float32)
return time_step._replace(observation=obs)
def reset(self):
time_step = self._env.reset()
return self._flatten_state(time_step)
def step(self, action):
time_step = self._env.step(action)
return self._flatten_state(time_step)
def __getattr__(self, name):
return getattr(self._env, name)
class KeypointsStateWrapper(dm_env.Environment):
""" Represent state using keypoints in 2D cartesian coordinates """
def __init__(self, env, relative_xy=True):
self._env = env
self.relative_xy = relative_xy
xpos = env.physics.named.data.geom_xpos #[1:]
state_size = xpos.shape[0] * 4
self._observation_spec = OrderedDict()
self._observation_spec['state'] = dm_env.specs.Array(shape=(state_size,), dtype=np.float32, name='state')
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._env.action_spec()
def _get_state(self, time_step):
self.camera_matrix = Camera(self.physics, height=84, width=84, camera_id=0).matrix
def xyz2pixels_norm(xyz, cam_mat):
x, y = xyz2pixels(xyz, cam_mat)
x, y = x/42-1, y/42-1
x[0] = 0
return x, y
xyz = self.physics.named.data.geom_xpos.copy()
cam_mat = self.camera_matrix.copy()
if len(self._xyzs) == 0:
self._xyzs.extend([xyz, xyz, xyz])
self._cam_mats.extend([cam_mat, cam_mat, cam_mat])
else:
self._xyzs.append(xyz)
self._cam_mats.append(cam_mat)
xyz_past = self._xyzs[-2]
cam_mat_past = self._cam_mats[-2]
x, y = xyz2pixels_norm(xyz, self.camera_matrix)
x_past, y_past = xyz2pixels_norm(xyz_past, self.camera_matrix)
x_vel = x - x_past
y_vel = y - y_past
if self.relative_xy:
x = x - x.mean()
y = y - y.mean()
obs = np.concatenate([x, x_vel, y, y_vel])
return time_step._replace(observation=obs.astype(np.float32))
def reset(self):
self._xyzs = []
self._cam_mats = []
time_step = self._env.reset()
return self._get_state(time_step)
def step(self, action):
time_step = self._env.step(action)
return self._get_state(time_step)
def __getattr__(self, name):
return getattr(self._env, name)
class ActionRepeatWrapper(dm_env.Environment):
""" Repeat same action for k steps """
def __init__(self, env, action_repeat=2):
self._env = env
self.action_repeat = action_repeat
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
def reset(self):
return self._env.reset()
def step(self, action):
reward = 0
for _ in range(self.action_repeat):
time_step = self._env.step(action)
reward += time_step.reward
if time_step.last():
break
return time_step._replace(reward=reward)
def __getattr__(self, name):
return getattr(self._env, name)
class FrameStackWrapper(dm_env.Environment):
"""
Stack pixel observations from past k steps.
Returns channel first.
"""
def __init__(self, env, k=3):
self._env = env
self.k = k
self._frames = deque([], maxlen=k)
pixels = self._env.observation_spec()['pixels']
obs_shape = (pixels.shape[2]*k, pixels.shape[0], pixels.shape[1])
self._observation_spec = OrderedDict()
self._observation_spec['pixels'] = dm_env.specs.Array(
shape=obs_shape,
dtype=pixels.dtype,
name=pixels.name
)
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._env.action_spec()
def reset(self):
time_step = self._env.reset()
for _ in range(self.k):
self._frames.append(time_step.observation['pixels'])
return self._get_obs(time_step)
def step(self, action):
time_step = self._env.step(action)
self._frames.append(time_step.observation['pixels'])
return self._get_obs(time_step)
def _get_obs(self, time_step):
obs = np.concatenate(self._frames, 2).transpose(2, 0, 1)
return time_step._replace(observation=obs)
def __getattr__(self, name):
return getattr(self._env, name)
class CameraOffsetFrameStackWrapper(dm_env.Environment):
    """
    Stack pixel observations from past k steps.
    Also, encode camera offset information in channel 0.
    Returns channel first.
    """
    def __init__(self, env, k=3):
        self._env = env
        self.k = k
        # Ring buffers of the k most recent frames and their camera matrices.
        self._frames = deque([], maxlen=k)
        self._camera_matrices = deque([], maxlen=k)
        pixels = self._env.observation_spec()['pixels']
        # Channels-first shape: k stacked frames plus one camera-offset plane.
        obs_shape = (pixels.shape[2]*k+1, pixels.shape[0], pixels.shape[1])
        self._observation_spec = OrderedDict()
        self._observation_spec['pixels'] = dm_env.specs.Array(
            shape=obs_shape,
            dtype=pixels.dtype,
            name=pixels.name
        )
    def observation_spec(self):
        # Spec of the stacked observation, not the raw environment's.
        return self._observation_spec
    def action_spec(self):
        return self._env.action_spec()
    def reset(self):
        """Reset; seed both buffers with k copies of the first frame/camera matrix."""
        time_step = self._env.reset()
        camera_matrix = Camera(self.physics, height=84, width=84, camera_id=0).matrix.copy()
        for _ in range(self.k):
            self._frames.append(time_step.observation['pixels'])
            self._camera_matrices.append(camera_matrix)
        return self._get_obs(time_step)
    def step(self, action):
        """Step; push the new frame and the current camera matrix."""
        time_step = self._env.step(action)
        self._frames.append(time_step.observation['pixels'])
        camera_matrix = Camera(self.physics, height=84, width=84, camera_id=0).matrix.copy()
        self._camera_matrices.append(camera_matrix)
        return self._get_obs(time_step)
    def _get_obs(self, time_step):
        # Map a pixel offset in roughly [-scale, scale] to an int in [-255, 255].
        def scale_diff(diff, scale):
            diff = diff / scale # to -1..1
            diff = int(np.floor(diff * 255)) # to -255..255
            return diff
        # Inverse of scale_diff; currently unused (kept for decoding offsets).
        def unscale_diff(diff, scale):
            diff = diff / 255
            diff = diff * scale
            return diff
        # Offset plane (channel 0): for buffered frame i, store the magnitude of
        # its camera's (x, y) pixel offset relative to the newest camera.
        # Positive / negative components go in separate columns (0/1 for x,
        # 2/3 for y) because the plane is uint8 and cannot hold negatives.
        camera_diffs = np.zeros((1, 84, 84), dtype=np.uint8)
        for i, cam_mat in enumerate(self._camera_matrices):
            x_diff, y_diff = compute_pixel_offsets(cam_mat, self._camera_matrices[-1])
            # NOTE(review): only the upper bound is asserted; offsets <= -84
            # would pass these checks — confirm offsets are bounded both ways.
            assert x_diff < 84
            assert y_diff < 84
            x_diff = scale_diff(x_diff, 84)
            y_diff = scale_diff(y_diff, 84)
            if x_diff > 0:
                camera_diffs[:, i, 0] = x_diff
            else:
                camera_diffs[:, i, 1] = -x_diff
            if y_diff > 0:
                camera_diffs[:, i, 2] = y_diff
            else:
                camera_diffs[:, i, 3] = -y_diff
        # Stack frames channels-first and prepend the offset plane as channel 0.
        obs = np.concatenate(self._frames, 2).transpose(2, 0, 1)
        obs = np.concatenate([camera_diffs, obs], 0)
        return time_step._replace(observation=obs)
    def __getattr__(self, name):
        # Forward any attribute not defined here to the wrapped environment.
        return getattr(self._env, name)
| rinuboney/FPAC | dm_wrappers.py | dm_wrappers.py | py | 8,725 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dm_env.Environment",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "collections.OrderedDic... |
74962694182 | import datetime
import qrcode
from django.core.mail import send_mail
from django.utils.crypto import get_random_string
from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from UserSavedData.models import UserStoryArchived, SavedPost
from UserSavedData.serializer import UserSavedStorySerializer, UserSavedPostSerializer
from config import settings
from follow.models import Follow
from user.models import User, VarificationCode, UserStory
from user.serializer import UserSerilizer, UserUpdateView, UserRegisterSerializer, \
Follows, EmailVarificationCode, SendEmailVarification, UserStorySerializer
from post.models import Post, Comment, Like
from post.serializer import PostSerializer, CommentSerializer, LikeSerializer
from Massages import (subject, messages)
"""
user site api view
#########################################################################################
"""
class UserAPIView(APIView):
    """Return a user's profile data together with all of their posts."""

    def get(self, request, *args, **kwargs):
        username = kwargs.get('username')
        users = User.objects.filter(username=username)
        posts = Post.objects.filter(user__username=username)
        payload = {
            "user_data": UserSerilizer(users, many=True).data,
            "post_data": PostSerializer(posts, many=True).data,
        }
        return Response(payload)
class UserFollowedAPIView(APIView):
    """List the follow relations where the given user is the one being followed."""

    def get(self, request, *args, **kwargs):
        username = kwargs.get('username')
        # Fix: check the user exists *before* querying and serializing the
        # relations — the original always did the serialization work, then
        # threw it away for unknown users. Responses are unchanged.
        if not User.objects.filter(username=username).exists():
            return Response(data={"error": f"{username} not found"})
        queryset = Follow.objects.filter(followed_user__username=username)
        return Response(Follows(queryset, many=True).data)
class UserFollowersAPIView(APIView):
    """List the follow relations where the given user is the follower."""

    def get(self, request, *args, **kwargs):
        username = kwargs.get('username')
        # Fix: check existence first so no serialization work is wasted on
        # unknown users (mirrors UserFollowedAPIView). Responses are unchanged.
        if not User.objects.filter(username=username).exists():
            return Response(data={"error": f"{username} not found"})
        queryset = Follow.objects.filter(follow__username=username)
        return Response(Follows(queryset, many=True).data)
class UserPostAPIView(APIView):
    # Only authenticated clients may list a user's posts.
    permission_classes = [IsAuthenticated]
    def get(self, request, *args, **kwargs):
        """Return the given user's non-archived posts."""
        queryset = Post.objects.filter(user__username=kwargs.get('username'), is_archived=False)
        serializer = PostSerializer(queryset, many=True)
        is_user = User.objects.filter(username=kwargs.get('username'))
        if is_user:
            if queryset:
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response(data={"detail": f"{kwargs.get('username')} haven't post"})
        return Response(data={"error": f"{kwargs.get('username')} not found"})
class UserPostDetailAPIView(APIView):
    def get(self, request, *args, **kwargs):
        """Return the post with the given pk, provided the named user exists."""
        # NOTE(review): the post is looked up by pk only, so it is returned even
        # if it belongs to a different user than `username` — confirm intended.
        queryset = Post.objects.filter(id=kwargs.get('pk'))
        serializer = PostSerializer(queryset, many=True)
        is_user = User.objects.filter(
            username=kwargs.get('username')
        )
        if is_user:
            if queryset:
                return Response(serializer.data)
            return Response(data={"error": f"{kwargs.get('username')} haven't {kwargs.get('pk')} th post"})
        return Response(data={"error": f"{kwargs.get('username')} can't find"})
class UserPostCommentAPIView(APIView):
    def get(self, request, *args, **kwargs):
        """Return all comments on the post with the given pk."""
        queryset = Comment.objects.filter(post_id=kwargs.get('pk'))
        serializer = CommentSerializer(queryset, many=True)
        is_user = User.objects.filter(
            username=kwargs.get('username')
        )
        if is_user:
            if queryset:
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response(data={"detail": "haven't comment yet "})
        return Response(data={"error": f"{kwargs.get('username')} can't find"})
class UserPostLikeAPIView(APIView):
    def get(self, request, *args, **kwargs):
        """Return all likes on the post with the given pk."""
        queryset = Like.objects.filter(post_id=kwargs.get('pk'))
        serializer = LikeSerializer(queryset, many=True)
        is_user = User.objects.filter(
            username=kwargs.get('username')
        )
        if is_user:
            if queryset:
                return Response(serializer.data, status=status.HTTP_200_OK)
            return Response(data={"detail": "cannot find"})
        return Response(data={"error": f"{kwargs.get('username')} can't find"})
class UsersLastMovementAPI(APIView):
    def get(self, request, *args, **kwargs):
        """Aggregate recent activity: follows of the named user, the requester's
        posts, and likes related to the requester."""
        queryset = Follow.objects.filter(follow__username=kwargs.get('username'))
        queryset2 = Post.objects.filter(user_id=request.user.id)
        # NOTE(review): serializing Follow rows with UserSerilizer, and the
        # `post__user__user` lookup below, both look suspicious — confirm the
        # intended relations before relying on this endpoint.
        queryset3 = Like.objects.filter(post__user__user=request.user)
        serializer = UserSerilizer(queryset, many=True)
        serializer2 = PostSerializer(queryset2, many=True)
        serilzer3 = LikeSerializer(queryset3, many=True)
        return Response({"user": serializer.data, "post": serializer2.data, "like": serilzer3.data})
class UserQrCOde(APIView):
    # Generates a QR code encoding the requesting user's username and photo path.
    permission_classes = [IsAuthenticated]
    def get(self, request, *args, **kwargs):
        """Render a QR-code PNG for the current user and return its file name.

        NOTE(review): the PNG is written to the process working directory and
        only its bare file name is returned (not a URL) — confirm this is the
        intended contract.
        """
        data_to_scand = f"User : {request.user.username} , User photo {request.user.profile_photo}"
        qr_code_file = f"{request.user.username}_qr_code.png"
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_L,
            box_size=10,
            border=4,
        )
        qr.add_data(data_to_scand)
        qr.make(fit=True)
        img = qr.make_image(fill_color="black", back_color="white")
        img.save(qr_code_file)
        return Response({"Detail": f"Qr code here {qr_code_file}"})
class UserStoryAPIview(APIView):
    """Return all stories posted by the given user."""

    def get(self, request, *args, **kwargs):
        stories = UserStory.objects.filter(user__username=kwargs.get('username'))
        data = UserStorySerializer(stories, many=True).data
        return Response(data)
class UserStoryCreatedAPI(generics.CreateAPIView):
    queryset = UserStory.objects.all()
    serializer_class = UserStorySerializer
    def create(self, request, *args, **kwargs):
        """Validate story data and persist it with the current timestamp.

        NOTE(review): despite the UserStory queryset above, the record is
        written to UserStoryArchived only — confirm whether a UserStory row
        should also be created here.
        """
        serilzier = self.get_serializer(data=request.data)
        serilzier.is_valid(raise_exception=True)
        user = serilzier.validated_data.get('user')
        image = serilzier.validated_data.get('image')
        video = serilzier.validated_data.get('video')
        is_user = User.objects.filter(id=request.user.id)
        if is_user:
            UserStoryArchived.objects.create(
                user=user,
                image=image,
                video=video,
                create_data=datetime.datetime.now()
            )
            return Response(data={"detail": "Successfuly created! "})
        return Response(data={"error": 'User not found '})
class UserStorySavedAPIview(APIView):
    def get(self, request, *args, **kwargs):
        """Return the requesting user's archived stories; other users are rejected."""
        if kwargs.get('username') != request.user.username:
            return Response(data={"error": "User erorr !"})
        queryset = UserStoryArchived.objects.filter(user__username=kwargs.get('username'))
        serilizer = UserSavedStorySerializer(queryset, many=True)
        return Response(serilizer.data, status=status.HTTP_200_OK)
class UserStorySavedDetailAPIview(APIView):
    def get_object(self, pk):
        # 404 if the archived story does not exist.
        return get_object_or_404(UserStoryArchived, pk=pk)
    def get(self, request, *args, **kwargs):
        """Return a single archived story by pk."""
        serilizer = UserSavedStorySerializer(self.get_object(kwargs.get('pk')))
        if serilizer.data:
            return Response(serilizer.data)
        return Response(data={"error": status.HTTP_204_NO_CONTENT})
    def delete(self, request, *args, **kwargs):
        """Delete an archived story, then redirect to the archive listing.

        NOTE(review): the redirect target is a hard-coded localhost URL whose
        path embeds a fixed username ('shagi') — confirm before deploying.
        """
        archived_post = self.get_object(kwargs.get('pk'))
        if archived_post:
            archived_post.delete()
            return Response(status=status.HTTP_302_FOUND,
                            headers={"Location": "http://127.0.0.1:8000/user/shagi/archived/story"})
        return Response(data={"error": status.HTTP_204_NO_CONTENT})
class SavedPostAPIView(APIView):
    def get(self, request, *args, **kwargs):
        """Return the saved posts of the named user (404 if the user is unknown)."""
        username = kwargs.get('username')
        user = User.objects.filter(username=username).first()
        if user:
            queryset = SavedPost.objects.filter(user=user)
            if queryset.exists():
                serializer = UserSavedPostSerializer(queryset, many=True)
                return Response(serializer.data)
            else:
                return Response({"message": "No saved posts for this user."}, status=200)
        else:
            return Response({"error": "User not found"}, status=404)
class SavedPostDetail(APIView):
    def get_object(self, pk):
        # Passing the pk-filtered queryset: .get() with no args succeeds only
        # when exactly one row matches, otherwise a 404 is raised.
        return get_object_or_404(SavedPost.objects.filter(pk=pk))
    def get(self, request, *args, **kwargs):
        """Return one saved-post record by pk."""
        serializer = UserSavedPostSerializer(self.get_object(kwargs.get('pk')))
        return Response(serializer.data, status=status.HTTP_200_OK)
    def delete(self, request, *args, **kwargs):
        """Remove a saved-post record by pk."""
        queryset = self.get_object(kwargs.get('pk'))
        queryset.delete()
        return Response(status=status.HTTP_202_ACCEPTED)
"""
user requirement
#########################################################################################
"""
class UserRegisterApi(generics.CreateAPIView):
    """POST-only endpoint that registers a new user via UserRegisterSerializer."""
    queryset = User.objects.all()
    serializer_class = UserRegisterSerializer
class EmailVarification(APIView):
    """Store a 6-digit verification code and e-mail it to the new address.

    POST body is validated by SendEmailVarification (must contain `email`).
    On success redirects (302) to the code-check endpoint.
    """
    permission_classes = [IsAuthenticated]
    def post(self, request, *args, **kwargs):
        serializer = SendEmailVarification(data=request.data)
        serializer.is_valid(raise_exception=True)
        email = serializer.validated_data.get('email')
        code = get_random_string(allowed_chars='1234567890', length=6)
        is_user = User.objects.filter(username=request.user.username).first()
        if is_user:
            # NOTE(review): the code row stores the user's *old* address while
            # CheckEmailVarificationCode looks the row up by the address the
            # client submits — confirm which address should be stored here.
            VarificationCode.objects.create(email=is_user.email, is_varification=False, code=code,
                                            date=datetime.datetime.now())
            # Bug fix: the original called User.objects.update(email=email),
            # which overwrote the e-mail of EVERY user in the database. Scope
            # the update to the requesting user only.
            User.objects.filter(pk=is_user.pk).update(email=email)
            send_mail(
                subject=subject.EMAIL_LOGIN_SUBJECT, message=messages.email_varification(is_user.username, code),
                from_email=settings.EMAIL_HOST_USER, recipient_list=[email]
            )
        return Response(status=status.HTTP_302_FOUND,
                        headers={"Location": "http://127.0.0.1:8000/user/email/varification/check/"})
class CheckEmailVarificationCode(generics.CreateAPIView):
    """Validate a previously e-mailed code and mark the new e-mail as verified."""
    queryset = VarificationCode.objects.all()
    serializer_class = EmailVarificationCode
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        email = serializer.validated_data.get('email')
        code = serializer.validated_data.get('code')
        # Most recent unused code for this address.
        varification = self.get_queryset().filter(email=email, code=code, is_varification=False).order_by(
            '-date').first()
        user = User.objects.filter(username=request.user.username).first()
        if varification and varification.code == code:
            # Bug fix: the original assigned `user.email` on an unevaluated
            # queryset and never called save() on either object, so nothing
            # was ever persisted. Fetch a real instance and save both changes.
            if user:
                user.email = email
                user.save(update_fields=['email'])
            varification.is_varification = True
            varification.save(update_fields=['is_varification'])
            return Response(data={"detail": "Succesfully Varification ! "})
        else:
            return Response(data={"error": "Can not update ! "})
class UserUpdateAPIView(APIView):
    def get(self, request, *args, **kwargs):
        """Return the requesting user's own profile; other usernames are rejected."""
        queryset = User.objects.filter(username=kwargs.get('username'))
        if kwargs.get('username') == request.user.username:
            serializer = UserSerilizer(queryset, many=True)
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(data={'error': 'Object not found'})
    def put(self, request, *args, **kwargs):
        # NOTE(review): the serializer is built without an instance, so save()
        # will create a new row rather than update the current user — confirm.
        # Also: no ownership check here, unlike get/delete.
        serializer = UserUpdateView(data=request.data)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
    def delete(self, request, *args, **kwargs):
        """Delete the requesting user's own account."""
        if kwargs.get('username') == request.user.username:
            user = request.user
            user.delete()
            return Response(f'{user} deleted')
        return Response(data={'error': 'Object not found'})
class UserUpdateAPI(APIView):
    def get_object(self, pk):
        # 404 if no user with this pk exists.
        return get_object_or_404(User, pk=pk)
    def get(self, request, *args, **kwargs):
        """Return update-form data; only for the requesting user's own username."""
        if kwargs.get('username') == request.user.username:
            pk = kwargs.get('pk')
            serializer = UserUpdateView(self.get_object(pk))
            return Response(serializer.data)
        return Response(data={"error": "You can't update another users"})
    def put(self, request, *args, **kwargs):
        """Update the user identified by pk; only allowed on one's own username."""
        if kwargs.get('username') == request.user.username:
            serializer = UserUpdateView(instance=self.get_object(pk=self.kwargs.get('pk')), data=request.data)
            if serializer.is_valid(raise_exception=True):
                serializer.save()
                return Response(serializer.data)
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(data={"error": f"You cannot update user : {kwargs.get('username')}"})
    def delete(self, request, *args, **kwargs):
        # NOTE(review): unlike get/put, delete performs no ownership check —
        # any authenticated caller can delete any pk. Confirm this is intended.
        user = self.get_object(self.kwargs.get('pk'))
        user.delete()
        return Response(data={'detail': f'{user} deleted'})
class UserUpdateDestoryAPI(generics.RetrieveUpdateAPIView):
    # NOTE(review): the name says "Destory" (sic) but the base class only
    # supports retrieve/update, not delete.
    queryset = User.objects.all()
    serializer_class = UserUpdateView
| theshag1/Instagram | user/views.py | views.py | py | 14,038 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "user.models.User.objects.filter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "user.models.User.objects",
"line_number": 34,
"usage_type": "attribute"
},... |
6432501039 | import json,time
from collections import Iterable
from django.utils.timezone import datetime
from django.conf import settings
from django.core.paginator import Paginator
from django.http import JsonResponse
from django.views.generic import View
from Hellocial_0_1.settings import logger
from apps.products.models import Program
from apps.users.models import UserFornumRelation
from utils.mixin import LoginRequireView, OperatorAuthorityView
from utils.response_code import RET
from apps.scripts.models import JavaScriptCode, Fornum
from utils.util_func import get_json_data, filter_null
from utils.captcha_recognition import DoubanCaptchaRecognition,RuoKuaiCaptcha,CaptchaRecognition
# Create your views here.
# /script/fornum
class ForumView(OperatorAuthorityView):
    """
    GET: fetch forum data — a single forum by id, everything via ?all, or a
    paginated list.
    POST: update an existing forum's title / description / app type.
    (Docstring translated from the original Chinese.)
    """
    def get(self, request):
        page = request.GET.get('page')
        f_id = request.GET.get('id')
        is_all = request.GET.get('all')
        try:
            page = int(page)
        except TypeError:
            # Missing page parameter -> default to the first page.
            logger.debug('页码错误')
            page = 1
        if f_id is None:
            fornums = Fornum.objects.all().order_by('-update_time')
        else:
            fornums = Fornum.objects.filter(id=f_id)
        pages = 1
        data_length = 0 if is_all is None and f_id is None else 1
        if len(fornums) > 1:
            # More than one forum: paginate with the project-wide page size.
            paginator = Paginator(fornums, settings.PER_PAGE_COUNTS)
            pages = paginator.num_pages
            if page > pages:
                page = 1
            data_length = paginator.count
            fornums_page = paginator.page(page)
        else:
            try:
                fornums_page = [fornums.first()]
            except Exception:
                logger.info('没有该论坛信息')
                return JsonResponse({'code': RET.PARAMERR, 'message': '没有该论坛信息'})
        data = []
        if len(fornums) == 0:
            fornums_page = []
        if is_all is not None and f_id is None:
            # ?all set: return every forum, ignoring pagination.
            fornums_page = fornums
        for fornum in fornums_page:
            d = {
                'id': fornum.id,
                'title': fornum.title,
                'fornum_name': fornum.fornum_name,
                'description': fornum.description,
                'app_type':fornum.app_type
            }
            data.append(d)
        return JsonResponse({'code': RET.OK, 'message': '成功', 'data': data, 'pages': pages,
                             'data_length': data_length})
    def post(self, request):
        f_id = request.GET.get('id')
        json_dict = get_json_data(request)
        if json_dict is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '请使用json数据格式'})
        if f_id is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        description = json_dict.get('description')
        title = json_dict.get('title')
        app_type = json_dict.get('app_type')
        try:
            fornum = Fornum.objects.get(id=f_id)
            # Only overwrite the fields that were actually supplied.
            if title:
                fornum.title = title
            if description:
                fornum.description = description
            if app_type:
                if int(app_type) not in Fornum.APP_TYPE.keys():
                    return JsonResponse({'code':RET.PARAMERR,'message':'应用类型错误'})
                fornum.app_type = app_type
            fornum.update_person = request.user.id
            fornum.update_time = datetime.now()
            fornum.save()
        except Exception as e:
            logger.warn(e)
            return JsonResponse({'code': RET.PARAMERR, 'message': '该论坛不存在'})
        return JsonResponse({'code': RET.OK, 'message': '修改成功'})
# /script/forum/change
class ForumChangeView(OperatorAuthorityView):
    """
    GET: soft-delete one or more forums (ids passed as JSON in ?id).
    POST: create a new forum.
    (Docstring translated from the original Chinese.)
    """
    def get(self, request):
        del_ids = request.GET.get('id')
        if del_ids is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        try:
            del_ids = json.loads(del_ids)
        except Exception as e:
            logger.debug(e)
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        # Wrap non-iterables (and bare strings) into a single-element list.
        if (not isinstance(del_ids, Iterable)) or isinstance(del_ids, str):
            del_ids = [del_ids]
        # Drop entries that cannot be interpreted as integers.
        del_ids = list(filter(filter_null, del_ids))
        if len(del_ids) == 0:
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        fornums = Fornum.objects.filter(id__in=del_ids)
        if len(fornums) == 0:
            return JsonResponse({'code': RET.PARAMERR, 'message': '该id不存在'})
        if len(fornums) != len(del_ids):
            logger.debug('id存在错误')
            return JsonResponse({'code': RET.PARAMERR, 'message': 'id存在错误'})
        # Soft-delete each forum plus its programs, user links and JS scripts.
        for fornum in fornums:
            for p in Program.objects.filter(fornum=fornum):
                p.is_delete = True
                p.save()
            UserFornumRelation.objects.filter(fornum=fornum).delete()
            for j in JavaScriptCode.objects.filter(fornum=fornum):
                j.is_delete = True
                j.save()
            fornum.is_delete = True
            fornum.save()
        return JsonResponse({'code': RET.OK, 'message': '删除成功'})
    def post(self, request):
        json_dict = get_json_data(request)
        if json_dict is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '请使用json数据格式'})
        description = json_dict.get('description')
        fornum_name = json_dict.get('fornum_name')
        title = json_dict.get('title')
        app_type = json_dict.get('app_type')
        if description is None: description = ''
        if title is None : title = ''
        if app_type is None: app_type = 0
        if fornum_name is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '缺少必要参数'})
        try:
            if int(app_type) not in Fornum.APP_TYPE.keys():
                return JsonResponse({'code': RET.PARAMERR, 'message': '应用类型错误'})
        except Exception:
            return JsonResponse({'code': RET.PARAMERR, 'message': '应用类型错误'})
        try:
            fornum = Fornum.objects.create(
                title = title,
                fornum_name=fornum_name,
                description=description,
                update_person=request.user.id,
                founder=request.user.id,
                app_type=app_type
            )
            fornum.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RET.DBERR, 'message': '数据库错误,创建失败'})
        return JsonResponse({'code': RET.OK, 'message': '成功'})
# /script/js
class JavaScriptCodeView(OperatorAuthorityView):
    """
    GET: fetch JS script records.
    POST: modify a JS script record.
    (Docstring translated from the original Chinese.)
    """
    def get(self, request):
        """
        With neither js_id nor fornum_id, every JS record is returned without
        pagination. With js_id, only that record is returned. With fornum_id
        (and no js_id) the forum's records are returned, paginated; `page`
        selects the page and defaults to the first one.
        (Translated from the original Chinese.)
        """
        page = request.GET.get('page')
        f_id = request.GET.get('fornum_id')
        js_id = request.GET.get('js_id')
        try:
            page = int(page)
        except TypeError:
            logger.debug('page error')
            page = 1
        try:
            js_id = int(js_id)
        except Exception:
            js_id = None
        try:
            f_id = int(f_id)
        except Exception:
            f_id = None
        if js_id is None and f_id is not None:
            try:
                fornum = Fornum.objects.get(id=f_id)
            except Exception:
                logger.debug('id 为{}的论坛不存在'.format(f_id))
                return JsonResponse({'code': RET.PARAMERR, 'message': '论坛不存在'})
            js_codes = JavaScriptCode.objects.filter(fornum=fornum).order_by('-update_time')
        elif not all([js_id,f_id]):
            js_codes = JavaScriptCode.objects.all()
        else:
            js_codes = JavaScriptCode.objects.filter(id=js_id)
        pages = 1
        data_length = 0 if js_id is None and f_id is None else 1
        if len(js_codes) > 1:
            # Paginate with the project-wide page size.
            paginator = Paginator(js_codes, settings.PER_PAGE_COUNTS)
            pages = paginator.num_pages
            if page > pages:
                page = 1
            data_length = paginator.count
            js_code_page = paginator.page(page)
        else:
            try:
                js_code_page = [js_codes.first()]
            except Exception as e:
                logger.debug(e)
                return JsonResponse({'code': RET.PARAMERR, 'message': '没有该论坛信息'})
        data = []
        if len(js_codes) == 0:
            js_code_page = []
        for js in js_code_page:
            d = {
                'id': js.id,
                'title': js.title,
                'description': js.description,
                'fornum': js.fornum.fornum_name,
                'js_code': js.js_code,
                'fornum_id':js.fornum.id
            }
            data.append(d)
        return JsonResponse({'code': RET.OK, 'message': '成功', 'data': data, 'pages': pages,
                             'data_length': data_length})
    def post(self, request):
        json_dict = get_json_data(request)
        j_id = request.GET.get('id')
        # NOTE(review): unlike the other handlers, json_dict is not checked for
        # None here; a non-JSON body would raise AttributeError — confirm.
        js_code = json_dict.get('js_code')
        description = json_dict.get('description')
        fornum_id = json_dict.get('fornum_id')
        description = '' if description is None else description
        if not all([j_id, js_code, fornum_id]):
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        try:
            js = JavaScriptCode.objects.get(id=int(j_id))
        except Exception as e:
            logger.debug(e)
            return JsonResponse({'code': RET.PARAMERR, 'message': 'ID存在错误'})
        try:
            fornum = Fornum.objects.get(id=int(fornum_id))
        except Exception as e:
            logger.debug(e)
            return JsonResponse({'code': RET.DBERR, 'message': '指定修改的论坛错误'})
        try:
            js.description = description
            js.fornum = fornum
            js.js_code = js_code
            js.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RET.DBERR, 'message': '数据库错误'})
        return JsonResponse({'code': RET.OK, 'message': '成功'})
# /script/js/change
class JavaScriptCodeChangeView(OperatorAuthorityView):
    """
    GET: soft-delete JS script records (ids passed as JSON in ?id).
    POST: add a JS script record.
    (Docstring translated from the original Chinese.)
    """
    def get(self, request):
        d_id = request.GET.get('id')
        if d_id is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '缺少必要参数'})
        try:
            d_id = json.loads(d_id)
        except Exception:
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        # Wrap non-iterables (and bare strings) into a single-element list.
        if not isinstance(d_id, Iterable) or isinstance(d_id, str):
            d_id = [d_id]
        # Drop entries that cannot be converted to integers.
        d_id = list(filter(filter_null, d_id))
        if len(d_id) == 0:
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        js = JavaScriptCode.objects.filter(id__in=d_id).all()
        if len(js) != len(d_id):
            logger.debug('js脚本{}存在错误'.format(d_id))
            return JsonResponse({'code': RET.PARAMERR, 'message': '传入的参数有误'})
        try:
            # Soft delete: flag rather than remove rows.
            for j in js:
                j.is_delete = True
                j.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RET.DBERR, 'message': '数据库错误,删除失败'})
        return JsonResponse({'code': RET.OK, 'message': '删除成功'})
    def post(self, request):
        json_dict = get_json_data(request)
        if json_dict is None:
            return JsonResponse({'code': RET.PARAMERR, 'message': '请使用json数据格式'})
        js_code = json_dict.get('js_code')
        fornum_id = json_dict.get('fornum_id')
        title = json_dict.get('title')
        description = json_dict.get('description')
        if not all([js_code, fornum_id, title]):
            return JsonResponse({'code': RET.PARAMERR, 'message': '参数错误'})
        # Default the optional description to an empty string.
        description = '' if description is None else description
        try:
            fornum = Fornum.objects.get(id=fornum_id)
        except Exception as e:
            logger.debug(e)
            return JsonResponse({'code': RET.DBERR, 'message': '论坛不存在'})
        try:
            js = JavaScriptCode.objects.create(
                fornum=fornum,
                js_code=js_code,
                title=title,
                description=description,
                update_person=request.user.id,
                founder=request.user.id,
            )
            js.save()
        except Exception as e:
            logger.error(e)
            return JsonResponse({'code': RET.DBERR, 'message': '添加js代码失败'})
        return JsonResponse({'code': RET.OK, 'message': '添加成功'})
# /script/captcha
class CaptchaView(View):
    def post(self,request):
        """
        JSON POST body (translated from the original Chinese):
        1. rk_username — Ruokuai platform account
        2. rk_password — Ruokuai platform password (32-char lowercase MD5)
        3. fornum_code — code of the forum the captcha belongs to
        4. captcha_type — {0: unknown, 1: Chinese, 2: English, 3: digits, 4: mixed}
        5. img_url — full URL of the captcha image
        With an unknown type the server cannot preprocess the image and the
        solving platform's accuracy will be very low.
        """
        json_dict = get_json_data(request)
        if not json_dict:
            return JsonResponse({'code':RET.PARAMERR,'message':'请使用json格式数据'})
        image_url = json_dict.get('img_url')
        rk_username = json_dict.get('rk_username')
        rk_password = json_dict.get('rk_password')
        fornum_code = json_dict.get('fornum_code')
        captcha_type = json_dict.get('captcha_type')
        if not all([image_url,rk_username,rk_password,fornum_code]):
            return JsonResponse({'code':RET.PARAMERR,'message':'参数错误'})
        captcha_type = captcha_type if captcha_type else 0
        try:
            captcha_type = int(captcha_type)
            if captcha_type > 4:
                raise Exception('非法验证码类型参数')
        except Exception:
            logger.debug('Params Error')
            return JsonResponse({'code':RET.PARAMERR,'message':'参数错误'})
        if len(rk_password) != 32:
            return JsonResponse({'code':RET.PARAMERR,'message':'密码需要32位小写md5加密传输'})
        try:
            fornum = Fornum.objects.get(title=fornum_code)
        except Fornum.DoesNotExist:
            logger.debug('论坛编号错误')
            return JsonResponse({'code':RET.PARAMERR,'message':'论坛编号错误'})
        if fornum.id == 4:
            # Forum id 4 is Douban; only Douban captchas are supported so far.
            image_engine = DoubanCaptchaRecognition()
        else:
            return JsonResponse({'code':RET.SERVERERR,'message':'暂不支持豆瓣以外论坛的验证码识别'})
        im = image_engine.get_image_obj(image_url)
        if captcha_type == 1:
            # Preprocess as a Chinese captcha.
            im = image_engine.process_chinese(im)
        elif captcha_type == 2:
            # Preprocess as an English captcha.
            im = image_engine.process_english(im)
        sender = RuoKuaiCaptcha(username=rk_username,password=rk_password)
        captcha_type = settings.RUOKUAI_CAPTCHA_TYPE[captcha_type]
        return JsonResponse(sender.get_string(im,im_type=captcha_type))
| eashme/Django-backend | Hello_Server/apps/scripts/views.py | views.py | py | 16,481 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "utils.mixin.OperatorAuthorityView",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "Hellocial_0_1.settings.logger.debug",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Hellocial_0_1.settings.logger",
"line_number": 35,
"usage_type": "n... |
28555405451 | import time
import sys
import math
from dronekit import connect, VehicleMode, LocationGlobalRelative, Command
from vehicle_additional import get_bearing, get_distance_metres, get_location_metres
from pymavlink import mavutil
from pid import PID
from wingman import Wingman
import argparse
# Command-line arguments: -v1/-v2 carry optional SITL defaults via `const`;
# -vu1/-vu2 are explicit connection strings that take precedence below.
parser = argparse.ArgumentParser(description='双机编队')
parser.add_argument('-v1',
                    help="Vehicle1 connection target string.", nargs='?', const="127.0.0.1:12341", type=str)
parser.add_argument('-v2',
                    help="Vehicle2 connection target string.", nargs='?', const="127.0.0.1:12342", type=str)
parser.add_argument('-vu1',
                    help="Vehicle1 connection target string.")
parser.add_argument('-vu2',
                    help="Vehicle2 connection target string.")
args = parser.parse_args()
# v1 is the leader; v2 (the wingman) is connected with the Wingman vehicle
# class so it exposes the speed-control attribute used below.
if args.vu1 and args.vu2:
    v1 = connect(args.vu1)
    v2 = connect(args.vu2, vehicle_class=Wingman)
else:
    v1 = connect(args.v1)
    v2 = connect(args.v2, vehicle_class=Wingman)
C = 100  # target separation used in the distance error, in metres
drift = 0  # extra trim added when projecting the reference point
pid = PID(0.2, 0.01, 0.1)  # PID over the (negated) distance error
pid.target = 0  # drive the error to zero
while True:
    # Reference point C + drift metres along the leader's current heading.
    pos = get_location_metres(
        v1.location.global_relative_frame, v1.heading, C + drift, is_bearing=True)
    dt_speed = pid(-1 * (get_distance_metres(
        pos, v2.location.global_relative_frame) - C))
    if dt_speed < 0:
        # Brake harder than we accelerate.
        dt_speed *= 1.5
    # NOTE(review): assumes Wingman defines `set_thr_aspd` as a property
    # setter that commands target airspeed — confirm in wingman.py.
    v2.set_thr_aspd = v1.airspeed+dt_speed
    print("速度差距", v2.airspeed-v1.airspeed, dt_speed)
    time.sleep(0.2)
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dronekit.connect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dronekit.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wingman.Win... |
35836541266 | # @Date : 22:38 05/01/2020
# @Author : ClassicalPi
# @FileName: Data_Analysis.py
# @Software: PyCharm
import numpy as np
from pyecharts.charts import Map,Geo
from pyecharts import options
import pandas as pd
import nltk
import re
import os
import matplotlib.pyplot as plt
import string
import json
import openpyxl
from nltk.stem.snowball import SnowballStemmer
def loadReview(city: str, res: "str | None", All: bool = False) -> str:
    """Concatenate review texts (column 5) from a city's workbook.

    Args:
        city: city folder name under the project's Data directory.
        res: workbook file name to read when All is False.
        All: when True, read the aggregated "All_<city>.xlsx" workbook instead.

    Returns:
        Newline-joined review texts whose first character is ASCII 'A'..'z'.

    Side effect: chdir()s into the city's data directory.
    """
    ans = ""
    os.chdir("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Data/{}".format(city))
    if All:
        wb = openpyxl.load_workbook("All_{}.xlsx".format(city))
    else:
        wb = openpyxl.load_workbook("{}".format(res))
    ws = wb.active
    # Bug fix: range(1, ws.max_row) skipped the last row — openpyxl rows are
    # 1-indexed, so iterate through max_row inclusive (as getinfo() does).
    for row in range(1, ws.max_row + 1):
        temp = str(ws.cell(row=row, column=5).value)
        # Keep rows starting with an ASCII char in 'A'..'z' (this range also
        # admits [\]^_` — preserved from the original English-review filter).
        if 65 <= ord(temp[0]) <= 122:
            ans += temp
            ans += '\n'
    return ans
def load_excel_url(url: str):
    """Concatenate review texts (column 6) from the workbook at *url*.

    Returns newline-joined texts whose first character is ASCII 'A'..'z'.
    """
    wb = openpyxl.load_workbook("{}".format(url))
    ws = wb.active
    ans = ""
    # Bug fix: range(2, ws.max_row) dropped the last data row; rows are
    # 1-indexed, with row 1 presumably the header, so data runs 2..max_row
    # inclusive (matches getinfo()).
    for row in range(2, ws.max_row + 1):
        temp = str(ws.cell(row=row, column=6).value)
        if 65 <= ord(temp[0]) <= 122:
            ans += temp
            ans += '\n'
    return ans
def tokenize_and_stem(text):
    """Tokenize *text* into words, drop tokens with no letters, and stem them."""
    stemmer = SnowballStemmer("english")
    # Sentence-split first so punctuation is tokenized as its own token.
    tokens = [
        word
        for sent in nltk.sent_tokenize(text)
        for word in nltk.word_tokenize(sent)
    ]
    lettered = [tok for tok in tokens if re.search('[a-zA-Z]', tok)]
    return [stemmer.stem(tok) for tok in lettered]
def tokenize_only(text):
    """Tokenize *text* into lower-cased word tokens that contain a letter."""
    # Sentence-split first so punctuation is tokenized as its own token.
    tokens = [
        word.lower()
        for sent in nltk.sent_tokenize(text)
        for word in nltk.word_tokenize(sent)
    ]
    return [tok for tok in tokens if re.search('[a-zA-Z]', tok)]
def drawBarh(dic: dict, num: int):
    """Draw a horizontal bar chart of the top *num* entries of *dic*.

    Args (translated from the original Chinese docstring):
        dic: word -> count mapping.
        num: how many of the most frequent entries to plot.
    """
    listkey = []
    listval = []
    # Sort by (count, word) descending and keep the top num pairs.
    for key, val in sorted(dic.items(), key=lambda x: (x[1], x[0]), reverse=True)[:num]:
        listkey.append(key)
        listval.append(val)
    # Reverse so the highest count ends up at the top of the barh chart.
    df = pd.DataFrame(listval[::-1], columns=[u'Times'])
    df.index = listkey[::-1]
    df.plot(kind='barh', color="lightblue")
    plt.title(u'Top {} Most Common Words in Reviews of Hongkong,GuangZhou and Macau'.format(num))
    plt.show()
def tf_idf(city: str):
    """Fit TF-IDF over every review workbook for *city*, then print the scores
    of GuangZhou's combined review text under that model."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    path = '../Data/{}/'.format(city)
    token_dict = {}

    def tokenize(text):
        # Plain NLTK word tokenization; despite the name `stems`, no stemming
        # is applied.
        tokens = nltk.word_tokenize(text)
        stems = []
        for item in tokens:
            stems.append(item)
        return stems

    for dirpath, dirs, files in os.walk(path):
        for f in files:
            fname = os.path.join(dirpath, f)
            print("fname=", fname)
            text = loadReview(city, All=False, res=f)
            # NOTE(review): str.translate(string.punctuation) does not strip
            # punctuation in Python 3 (it expects a translation table); it is
            # effectively a no-op for printable text. Left unchanged because
            # fixing it would alter the TF-IDF output — confirm intent.
            token_dict[f] = text.lower().translate(string.punctuation)
    tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
    tfs = tfidf.fit_transform(token_dict.values())
    # Bug fix: the original bound this text to the name `str`, shadowing the
    # builtin for the rest of the scope.
    review_text = loadReview("GuangZhou", All=True, res=None)
    response = tfidf.transform([review_text])
    print(response)
    feature_names = tfidf.get_feature_names()
    for col in response.nonzero()[1]:
        print(feature_names[col], ' - ', response[0, col])
def getFrequency(text: str, topN: int):
    """Plot the *topN* most frequent non-stopword tokens of *text* via drawBarh."""
    stopwords = nltk.corpus.stopwords.words('english')
    # Domain-specific noise words on top of NLTK's stopword list
    # (idiom fix: one extend instead of nine append calls).
    stopwords.extend([
        '\'s', 'n\'t', 'us', "restaurant", "one", "ordered", "try", "sum", "kong",
    ])
    # Cleanup: the original also ran tokenize_and_stem() and collected the
    # stems into a list that was never used — that pass is dropped.
    totalvocab_tokenized = tokenize_only(text)
    # Expand key fragments back into their two-word phrases.
    transfer = {
        "dim": "dim sum",
        "hong": "hong kong"
    }
    words = [
        transfer.get(token, token)
        for token in totalvocab_tokenized
        if token not in stopwords
    ]
    fre = nltk.FreqDist(words)
    drawBarh(fre, topN)
def getinfo(word: str) -> int:
    """Count how many review rows (column 6) contain *word*.

    The row text is lower-cased before matching; *word* is used as given.
    """
    wb = openpyxl.load_workbook("{}".format("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Code/All.xlsx"))
    ws = wb.active
    ans = 0
    # Row 1 is the header; data rows run through max_row inclusive.
    for row in range(2, ws.max_row + 1):
        comment = str(ws.cell(row=row, column=6).value).lower()
        # Cleanup: merged the lower() call and removed the redundant continue.
        if word in comment:
            ans += 1
    return ans
def category():
    """Aggregate AFINN sentiment scores per category.
    Reads AFINN.csv (word;score rows, header skipped) and category.csv
    (decor;category;num rows, header skipped), builds per-category score
    histograms plus positive/negative counts and score sums, dumps the raw
    dict to category2.json and writes a percentage summary to category.xlsx.
    """
    # Lexicon: word -> integer score; first occurrence of a duplicate wins.
    # NOTE(review): `base` is never closed.
    base=open("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Code/AFINN.csv",'r')
    base.readline()
    dic={}
    for eachline in base.readlines():
        word,num=eachline.split(";")
        num=int(num.strip(' '))
        if dic.__contains__(word):
            continue
        else:
            dic.setdefault(word,num)
    # ans[category] maps each score value to its count, plus the running
    # "Sum" / "Positive|Negative Count|Score" aggregates.
    file=open("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Code/category.csv",'r')
    ans={}
    file.readline()
    for eachline in file.readlines():
        decor,cate,num=eachline.split(';')
        num=num.strip(' ')
        if ans.__contains__(cate):
            if dic.__contains__(decor):
                if ans[cate].__contains__(dic[decor]):
                    ans[cate][dic[decor]]+=1
                    ans[cate]["Sum"]+=int(dic[decor])
                    if int(dic[decor])>=0:
                        ans[cate]["Positive Count"]+=1
                        ans[cate]["Positive Score"] += int(dic[decor])
                    else:
                        ans[cate]["Negative Count"]+=1
                        ans[cate]["Negative Score"] += int(dic[decor])
                else:
                    ans[cate].setdefault(dic[decor],1)
                    ans[cate]["Sum"] += int(dic[decor])
                    if int(dic[decor])>=0:
                        ans[cate]["Positive Count"]+=1
                        ans[cate]["Positive Score"] += int(dic[decor])
                    else:
                        ans[cate]["Negative Count"]+=1
                        ans[cate]["Negative Score"] += int(dic[decor])
            else:
                # Word missing from the lexicon: skip this row.
                continue
        else:
            if dic.__contains__(decor):
                ans.setdefault(cate,{dic[decor]:1})
                ans[cate].setdefault("Sum",int(dic[decor]))
                # NOTE(review): this first-seen branch classifies score 0 as
                # negative (`>0`) while the update branch above uses `>=0`;
                # the two disagree for zero-scored words -- confirm which is
                # intended before trusting the positive/negative splits.
                if int(dic[decor])>0:
                    ans[cate].setdefault("Positive Count",1)
                    ans[cate].setdefault("Positive Score",int(dic[decor]))
                    ans[cate].setdefault("Negative Count",0)
                    ans[cate].setdefault("Negative Score",0)
                else:
                    ans[cate].setdefault("Negative Count",1)
                    ans[cate].setdefault("Negative Score",int(dic[decor]))
                    ans[cate].setdefault("Positive Count",0)
                    ans[cate].setdefault("Positive Score", 0)
            else:
                continue
    print(ans)
    file.close()
    with open('category2.json', 'w') as fp:
        json.dump(ans,fp)
    # Spreadsheet summary: `trans` maps each score / aggregate key to its
    # 1-based column index in the output row (columns 1..15 after the word).
    wb1 = openpyxl.Workbook()
    ws1 = wb1.active
    ws1.title = "ALL"
    trans={
        -5:"1",
        -4:"2",
        -3:"3",
        -2:"4",
        -1:"5",
        5: "10",
        4: "9",
        3: "8",
        2: "7",
        1: "6",
        "Negative Count":"11",
        "Negative Score": "12",
        "Positive Count": "13",
        "Positive Score": "14",
        "Sum":15
    }
    ws1.append(["Word", "痛恨:-5", "非常讨厌:-4", "很讨厌:-3", "比较讨厌:-2",
                "不适:-1","有好感:1","比较喜欢:2","喜欢:3","很喜欢:4","非常喜欢:5",
                "负面情感数量占比","负面情感分数占比","正面情感数量占比","正面情感分数分数","情感分数总计"])
    for word,w_v in ans.items():
        temp = [0 for a in range(0,16)]
        temp[0]=word
        for num,count in w_v.items():
            index=int(trans[num])
            temp[index]=count
        # Turn count columns 11/13 into shares of all counted words, and
        # score columns 12/14 into shares of the absolute score mass.
        total_amount=temp[11]+temp[13]
        temp[11]=temp[11]/total_amount
        temp[13] = temp[13] / total_amount
        temp[15]=abs(temp[12])+temp[14]
        if temp[15]!=0:
            abs_score=abs(temp[12])+temp[14]
            temp[12]=abs(temp[12])/abs_score
            temp[14] = temp[14]/abs_score
        ws1.append(temp)
    wb1.save("category.xlsx")
def draw_heatmap():
    """Render a world heat map (map4.html) of visitor counts per origin
    country, read from column 4 of All.xlsx.  US state names are folded
    into "United States" before counting.
    """
    wb = openpyxl.load_workbook("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Code/All.xlsx")
    ws = wb.active
    # US state names used to normalise origins to "United States".
    # BUG FIX: the original literal fused "Montana"/"Nebraska" and
    # "Pennsylvania"/"Rhode Island" into single strings (missing commas),
    # so visitors from those four states were never recognised.
    states = {
        "Alabama", "Alaska", "Arizona", "Arkansas", "California",
        "Colorado", "Connecticut", "Delaware", "Florida", "Georgia",
        "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa",
        "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
        "Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri",
        "Montana", "Nebraska", "Nevada", "New Hampshire", "New Jersey",
        "New Mexico", "New York", "North Carolina", "North Dakota", "Ohio",
        "Oklahoma", "Oregon", "Pennsylvania", "Rhode Island", "South Carolina",
        "South Dakota", "Tennessee", "Texas", "Utah", "Vermont",
        "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming",
    }
    # Renamed from `dict`, which shadowed the builtin.
    counts = {}
    for row in range(2, ws.max_row + 1):
        country = str(ws.cell(row=row, column=4).value)
        if country != "None":
            try:
                if country in states:
                    country = "United States"
                counts[country] = counts.get(country, 0) + 1
            except Exception:
                # Keep the original best-effort behaviour: report and skip
                # any row that cannot be counted.
                print(country)
    data_pair = [[name, int(total)] for name, total in counts.items()]
    map_1 = Map().add(
        series_name="游客数量",  # series name: "number of visitors"
        data_pair=data_pair,
        is_map_symbol_show=False,
        maptype='world',
    )
    map_1.set_series_opts(label_opts=options.LabelOpts(is_show=False))
    map_1.set_global_opts(
        title_opts=options.TitleOpts(title="粤港澳境外游客 客源地热力图"),
        visualmap_opts=options.VisualMapOpts(
            max_=1500,
            is_piecewise=True,
            # Grey-scale buckets, darkest for the biggest visitor sources.
            pieces=[
                {"min": 1000, "color": '#000000'},
                {"min": 600, "max": 1000, "color": '#4f4f4f'},
                {"min": 300, "max": 600, "color": '#757575'},
                {"min": 100, "max": 300, "color": '#919191'},
                {"min": 10, "max": 100, "color": '#adadad'},
                {"max": 10, "color": '#d1d1d1'}, ]
        )
    )
    map_1.render('map4.html')
if __name__ == '__main__':
#synopses = [load_excel_url("/Users/lucas/Projects/Pycharm/Sentiment_Analysis/Code/All.xlsx")]
#getFrequency(synopses[0], 20)
# tf_idf("HongKong")
# draw_heatmap()
#for w in ['view','views','atmosphere','decor','decoration','toilet','environment']:
# print("{}:{}".format(w,getinfo(w)))
category() | QiaoLin-MA/Sentiment_Analysis | Sentiment_Analysis/Code/Data_Analysis.py | Data_Analysis.py | py | 12,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_... |
7194793169 | from collections import defaultdict
from typing import List, TypeVar, Generic, Dict, Optional
from mapfmclient import Problem
from src.data.agent import Agent
from src.data.vertex import Vertex
A = TypeVar("A", bound=Agent)
class Grid(Generic[A]):
    """2-D map built from a mapfmclient Problem.

    Cells whose grid value is 0 become Vertex objects; every other lookup
    yields None.  Subclasses must provide create_agents().
    """

    def __init__(self, problem: Problem):
        self.width: int = problem.width
        self.height: int = problem.height
        # y -> x -> Vertex; the nested defaultdicts make any missing
        # coordinate resolve to None instead of raising.
        self._vertices: Dict[int, Dict[int, Optional[Vertex]]] = defaultdict(
            lambda: defaultdict(lambda: None))
        for col in range(self.width):
            for row in range(self.height):
                if problem.grid[row][col] == 0:
                    self._vertices[row][col] = Vertex(col, row)
        self.agents: List[A] = self.create_agents(problem)

    def get_vertex(self, x: int, y: int) -> Optional[Vertex]:
        """Vertex at (x, y), or None for a wall / out-of-range cell."""
        return self._vertices[y][x]

    def create_agents(self, problem: Problem) -> List[A]:
        """Build this grid's agent list; concrete subclasses must override."""
        raise NotImplementedError("Override this!")
| RobbinBaauw/CBMxSOC | src/grid.py | grid.py | py | 950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.data.agent.Agent",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Generic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "mapfmclient.Problem"... |
12149713993 | from asyncio import current_task
from typing import AsyncGenerator, Any
from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker, async_scoped_session, AsyncSession
from core.config import settings
class DataBaseHelper:
    """Owns the async SQLAlchemy engine and session factories for the app."""

    def __init__(self, url: str, echo: bool) -> None:
        self.engine = create_async_engine(url=url, echo=echo)
        self.session_factory = async_sessionmaker(
            bind=self.engine,
            autoflush=False,
            expire_on_commit=False,
        )

    def get_scoped_session(self):
        """Session scoped to the current asyncio task."""
        return async_scoped_session(
            session_factory=self.session_factory,
            scopefunc=current_task,
        )

    async def session_dependency(self) -> AsyncGenerator[async_scoped_session[AsyncSession], Any]:
        """Dependency generator: yield a scoped session, always close it."""
        scoped = self.get_scoped_session()
        try:
            yield scoped
        finally:
            await scoped.close()
db_helper = DataBaseHelper(
url=settings.db.url,
echo=settings.db.echo
)
| Norgius/microshop | core/models/db_helper.py | db_helper.py | py | 1,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.ext.asyncio.create_async_engine",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.asyncio.async_sessionmaker",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.asyncio.async_scoped_session",
"line_numb... |
14352296820 | import unittest
import mock
import networkx
from kiva.testing import KivaTestAssistant
from graphcanvas.graph_container import GraphContainer
from graphcanvas.graph_node_component import GraphNodeComponent
from graphcanvas.graph_view import graph_from_dict
class TestGraphContainer(KivaTestAssistant, unittest.TestCase):
    """Exercises GraphContainer layout styles, drawing of directed /
    undirected / weighted graphs, and the fallback paths taken when
    pygraphviz is unavailable (tree and circular layouts).
    """
    def create_graph_container(self):
        """ Utility method to generate a GraphContainer with a simple graph for
        re-use in several tests herein.
        """
        d = {'a': ['b'], 'b': ['c', 'd'], 'c': [], 'd': []}
        g = graph_from_dict(d)
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        return container
    def assert_in_bounds(self, container):
        """ Utility method for asserting that all components are contained
        within the bounds of the container.
        """
        upper_x, upper_y = container.bounds
        lower_x, lower_y = 0, 0
        for component in container.components:
            self.assertGreaterEqual(upper_x, component.x)
            self.assertGreaterEqual(component.x, lower_x)
            self.assertGreaterEqual(upper_y, component.y)
            self.assertGreaterEqual(component.y, lower_y)
    def test_no_layout_needed(self):
        # do_layout is a no-op (returns None) when the flag is cleared.
        container = self.create_graph_container()
        container._graph_layout_needed = False
        result = container.do_layout()
        self.assertIsNone(result)
    def test_no_nodes(self):
        container = GraphContainer(graph=graph_from_dict({}))
        self.assertTrue(container.components == [])
        result = container.do_layout()
        self.assertIsNone(result)
    def test_do_layout(self):
        # Each supported layout style must place all nodes inside the
        # container bounds and clear the layout-needed flag.
        container = self.create_graph_container()
        # test spring layout
        container.style = 'spring'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
        # test tree layout
        container = self.create_graph_container()
        container.style = 'tree'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
        # test shell layout
        container = self.create_graph_container()
        container.style = 'shell'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
        # test spectral layout
        container = self.create_graph_container()
        container.style = 'spectral'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
        # test circular layout
        g = networkx.balanced_tree(3, 5)
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        container.style = 'circular'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
    def test_draw(self):
        container = self.create_graph_container()
        self.assertPathsAreCreated(container)
    def test_draw_directed_arrow_direction(self):
        # Two-node digraph positioned manually in each of the four relative
        # orientations; drawing must succeed in every case.
        d = {'a': ['b'], 'b': []}
        g = graph_from_dict(d)
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        # Node a is to the left of node b
        container._layout_needed = False
        container.components[0].x = 0.0
        container.components[1].x = 100.0
        container.components[0].y = 0.0
        container.components[1].y = 0.0
        self.assertPathsAreCreated(container)
        # Node a is to the right of node b
        container._layout_needed = False
        container.components[0].x = 100.0
        container.components[1].x = 0.0
        container.components[0].y = 0.0
        container.components[1].y = 0.0
        self.assertPathsAreCreated(container)
        # Node a is above of node b
        container._layout_needed = False
        container.components[0].x = 0.0
        container.components[1].x = 0.0
        container.components[0].y = 0.0
        container.components[1].y = 100.0
        self.assertPathsAreCreated(container)
        # Node a is below of node b
        container._layout_needed = False
        container.components[0].x = 0.0
        container.components[1].x = 0.0
        container.components[0].y = 100.0
        container.components[1].y = 0.0
        self.assertPathsAreCreated(container)
    def test_draw_no_layout(self):
        container = self.create_graph_container()
        container._layout_needed = False
        self.assertPathsAreCreated(container)
    def test_draw_not_directed(self):
        d = {'a': ['b'], 'b': ['c', 'd'], 'c': [], 'd': []}
        g = graph_from_dict(d)
        g = g.to_undirected()
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        self.assertPathsAreCreated(container)
    def test_draw_single_node(self):
        g = networkx.DiGraph()
        g.add_node('a')
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        self.assertPathsAreCreated(container)
    def test_weighted(self):
        g = networkx.Graph()
        g.add_edge('a', 'b', weight=0.6)
        g.add_edge('a', 'c', weight=0.2)
        g.add_edge('c', 'd', weight=0.1)
        g.add_edge('c', 'e', weight=0.7)
        g.add_edge('c', 'f', weight=0.9)
        g.add_edge('a', 'd', weight=0.3)
        container = GraphContainer(graph=g)
        for node in g.nodes():
            GraphNodeComponent(container=container, value=node)
        self.assertPathsAreCreated(container)
    def test_spring_layout_with_non_zero_initial_positions(self):
        container = self.create_graph_container()
        for component in container.components:
            component.position = [1.0, 2.0]
        container.style = 'spring'
        self.assertTrue(container._graph_layout_needed)
        container.do_layout()
        self.assert_in_bounds(container)
        self.assertFalse(container._graph_layout_needed)
    # The two tests below simulate a missing pygraphviz install by making
    # pygraphviz_layout raise ImportError, then verify the fallback layout
    # function is used instead.
    @mock.patch('graphcanvas.graph_container.tree_layout')
    @mock.patch('networkx.drawing.nx_agraph.pygraphviz_layout')
    def test_no_pygraphviz_tree(self,
                                mock_pygraphviz_layout,
                                mock_tree_layout):
        mock_pygraphviz_layout.side_effect = ImportError()
        container = self.create_graph_container()
        mock_tree_layout.return_value = {
            node: (0, 0) for node in container.graph.nodes()
        }
        container.style = 'tree'
        container.do_layout()
        self.assertFalse(container._graph_layout_needed)
        mock_pygraphviz_layout.assert_called_once_with(
            container.graph, prog='dot'
        )
        mock_tree_layout.assert_called_once_with(container.graph)
    @mock.patch('networkx.circular_layout')
    @mock.patch('networkx.drawing.nx_agraph.pygraphviz_layout')
    def test_no_pygraphviz_circular(self,
                                    mock_pygraphviz_layout,
                                    mock_circular_layout):
        mock_pygraphviz_layout.side_effect = ImportError()
        container = self.create_graph_container()
        container.style = 'circular'
        container.do_layout()
        self.assertFalse(container._graph_layout_needed)
        mock_pygraphviz_layout.assert_called_once_with(
            container.graph, prog='twopi'
        )
        mock_circular_layout.assert_called_once_with(
            container.graph,
            center=[bound // 2 for bound in container.bounds],
            scale=min(container.bounds) // 2,
        )
if __name__ == '__main__':
unittest.main()
| enthought/graphcanvas | graphcanvas/tests/test_graph_container.py | test_graph_container.py | py | 8,263 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "kiva.testing.KivaTestAssistant",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "graphcanvas.graph_view.graph_from_dict",
"line_number": 19,
"usage_type": "call"
... |
9221143434 | import requests, os
from flask import Flask, jsonify, render_template, request
app = Flask(__name__)
apikey = os.getenv("API_KEY")
@app.route("/")
def index():
return render_template("index.html")
@app.route("/convert", methods=["POST"])
def convert():
# Query for currency exchange rate
symbol = request.form.get("currency")
res = requests.get("http://data.fixer.io/api/latest", params={
"access_key": apikey, "base": "EUR", "symbols": symbol})
# Make sure request succeeded
if res.status_code != 200:
return jsonify({"success": False})
# Make sure currency is in response
data = res.json()
if data['success'] == False:
return jsonify({"success": False})
if symbol not in data['rates']:
return jsonify({"success": False})
return jsonify({"success": True, "rate": data["rates"][symbol]})
| Yoimer/cs50-lecture-folders | lecture5/convert/application.py | application.py | py | 876 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
... |
32160839259 | """
Multiple WAD Injector
Checks for a settings file and:
 - Clones the OOTR repo if not already present
 - Then prints a link to it if the gz GUI is not already present
 - Then requests a settings string if not present
 - Then requests the number of seeds to generate, if not present
Then loops and generates the ROMs, followed by injecting them into WAD donors,
to produce the allotted number of rando seeds.
NOTE: for some reason no cosmetic or sound changes can be made with this. The settings
string doesn't change when you update the cosmetics, and the system doesn't appear to
allow cosmetics/sounds to be updated via the settings string.
import os
import random
import subprocess
import json
import sys
import glob
import shutil
import time
from venv import create
def main():
    """Generate OOTR seed ROMs according to ./settings.json.

    Reads settings.json (paths, seed count, extra randomizer settings),
    verifies the OoT-Randomizer checkout and a gz release are present,
    writes a timestamped settings file under ./output, runs the randomizer
    once per seed, and copies the resulting ROMs to out_dir.
    """
    # make sure we have all the files we need
    try:
        settings = json.loads(open("./settings.json").read())
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        print("You might have accidentally deleted the default settings.json file, please re-clone!")
        sys.exit()
    if not os.path.exists("./output"):
        os.makedirs("./output")
    # Resolve all configured paths up front.  gz_path / wad_path are only
    # used by the commented-out WAD-injection step below.
    ootr_path = os.path.abspath(settings["ootr_path"])
    gz_path = os.path.abspath(settings["gz_path"])
    num_seeds = int(settings["num_seeds"])
    ootr_settings_path = os.path.abspath("./ootr_settings.json")
    ootr_settings = json.loads(open(ootr_settings_path, 'r').read())
    rom_path = os.path.abspath(settings["rom_path"])
    wad_path = os.path.abspath(settings["wad_path"])
    ootr_log = os.path.abspath("./log.log")
    out_dir = os.path.abspath(settings["out_dir"])
    # donor rom, clean oot v1.0
    dummy_path = os.path.abspath(".\OoT-Randomizer\ZOOTDEC.z64")
    if not os.path.exists(dummy_path):
        shutil.copy(rom_path, dummy_path)
    print(f"Generating {num_seeds} seeds with settings from '{ootr_settings_path}'")
    if not os.path.exists(ootr_path):
        print("WARNING: ootr not cloned, run the following to clone it: ")
        print("    git clone https://github.com/TestRunnerSRL/OoT-Randomizer.git")
        print("then run me again :)")
        sys.exit()
    if len(glob.glob("./gz*")) == 0:
        # Fixed the garbled "you dont not have gz" message.
        print("WARNING: you do not have gz (for wad injection)")
        print("download it from here: https://github.com/glankk/gz/releases")
        print("Extract it in this repo's root directory, should be called something like 'gz-0.3.6-windows-i686'")
        print("Then run me again :)")
        sys.exit()
    # handle settings
    new_settings = ootr_settings
    if settings["random_starting_item"]:
        items = json.loads(open("./starting_items.json").read())
        # category is either equipment (swords etc) or gear (bow etc)
        starting_category = list(items.keys())[random.randint(0, 1)]
        new_settings[starting_category] = [items[starting_category][random.randint(0, len(items[starting_category]) - 1)]]
    if settings["extra_settings"]:
        print(f"adding extra settings: \n{settings['extra_settings']}")
        for key in settings["extra_settings"]:
            new_settings[key] = settings["extra_settings"][key]
    # make a new settings file and use the path to that instead
    new_settings_path = os.path.abspath(f"./output/OOTR_SETTINGS_{str(time.time()).split('.')[0]}.json")
    with open(new_settings_path, 'w') as f:
        f.write(json.dumps(new_settings, indent=4))
    ootr_settings_path = new_settings_path
    # BUG FIX: list.extend(str) appends every CHARACTER of the string as a
    # separate sys.path entry; append() adds the directory once.
    sys.path.append(".\\OoT-Randomizer")
    # Generate the rando seed roms
    created_roms = []
    for i in range(num_seeds):
        print(f"Creating seed number {str(i + 1)}")
        subprocess.check_output(["python", ".\\OoT-Randomizer\\OoTRandomizer.py", "--output_settings",
                                 "--settings", ootr_settings_path, "1>", ootr_log, "2>&1"],
                                shell=True, encoding="utf8")
        # The randomizer prints the output rom path into the log; harvest it.
        for line in open(ootr_log, 'r').readlines():
            if "Created compressed rom at" in line:
                filename = line.split("at:")[1].strip()
                created_roms.append(filename)
        # NOTE(review): this f-string has no placeholder; presumably the
        # generated rom filename was meant to be interpolated here.
        print(f"Finished '(unknown)'")
    print("Copying result to outdir")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for rom in created_roms:
        shutil.copy(rom, out_dir)
    # Inject them into wad to make wii vc wads of the rando seeds
    # home = os.getcwd()
    # os.chdir(gz_path)
    # cur = 1
    # patcher = os.path.abspath("./patch-wad.bat")
    # for rom in created_roms:
    #     new_path = os.path.abspath(f"./output/OOTR{str(cur)}.wad")
    #     command = [patcher, '-m', rom, '-o', new_path, wad_path]
    #     print(f"running: '{' '.join(command)}'")
    #     subprocess.check_output(command)
    #     cur += 1
    #     print(f"created '{new_path}'")
if __name__ == "__main__":
main() | castlez/MultiWadOOTR | main.py | main.py | py | 4,857 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
... |
40364787807 | import os
import tempfile
import requests
from testbot.configs import worker_config, server_url
from testbot.util import md5sum
class APIError(Exception):
    """Raised by the worker API helpers when a downloaded file fails its
    server-provided MD5 integrity check.
    """
    pass
def get_auth_param():
    """(name, password) pair used as HTTP basic-auth on every worker call."""
    config = worker_config
    return config['name'], config['password']
def report_started(submission_id: int, work_id: str, hostname: str, pid: int):
    """Tell the server this worker (hostname/pid) has started the job."""
    endpoint = '%sapi/submissions/%d/worker-started/%s' % (server_url, submission_id, work_id)
    payload = {'hostname': hostname, 'pid': pid}
    response = requests.put(endpoint, json=payload, auth=get_auth_param())
    response.raise_for_status()
def report_result(submission_id: int, work_id: str, data: dict):
    """Post the final result payload for a submission back to the server."""
    endpoint = '%sapi/submissions/%d/worker-result/%s' % (server_url, submission_id, work_id)
    response = requests.put(endpoint, json=data, auth=get_auth_param())
    response.raise_for_status()
def get_submission_and_config(submission_id: int, work_id: str):
    """Fetch the submission together with its task config, as parsed JSON."""
    endpoint = '%sapi/submissions/%d/worker-get-submission-and-config/%s' % (
        server_url, submission_id, work_id)
    response = requests.get(endpoint, auth=get_auth_param())
    response.raise_for_status()
    return response.json()
def download_material(material: dict, folder: str, chunk_size: int = 65536) -> str:
    """Stream *material* into a new temp file under *folder* and verify it.

    The temp file keeps the material's (possibly compound, e.g. ".tar.gz")
    extension so downstream tooling can recognise the format.

    Returns the downloaded file's path relative to *folder*.
    Raises APIError when the MD5 check fails, and requests.HTTPError on a
    bad HTTP status.
    """
    resp = requests.get('%sapi/materials/%d/worker-download' % (server_url, material['id']),
                        auth=get_auth_param(), stream=True)
    resp.raise_for_status()
    # Preserve up to two trailing extensions of the original name.
    name = material['name']
    name_parts = name.rsplit('.', 2)
    if len(name_parts) > 1:
        name_parts[0] = ''
        suffix = '.'.join(name_parts)
    else:
        suffix = None
    fd, path = tempfile.mkstemp(suffix=suffix, dir=folder)
    # os.fdopen takes ownership of fd; the with-block guarantees the file
    # is closed even if a chunk write fails (the original tracked this by
    # hand with a nullable handle and try/finally).
    with os.fdopen(fd, 'wb') as out:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            if chunk:
                out.write(chunk)
    if material['md5'] != md5sum(path):
        raise APIError('MD5 check of material "%s" failed' % material['name'])
    return os.path.relpath(path, folder)
def download_submission_file(submission_id: int, work_id: str, file: dict, local_save_path: str,
                             chunk_size: int = 65536):
    """Stream one submission file to *local_save_path* and verify its MD5."""
    endpoint = '%sapi/submissions/%d/worker-submission-files/%s/%d' % (
        server_url, submission_id, work_id, file['id'])
    response = requests.get(endpoint, auth=get_auth_param(), stream=True)
    response.raise_for_status()
    with open(local_save_path, 'wb') as out:
        for piece in response.iter_content(chunk_size=chunk_size):
            if piece:
                out.write(piece)
    if file['md5'] != md5sum(local_save_path):
        raise APIError('MD5 check of submission file "%s" failed' % file['requirement']['name'])
def upload_output_files(submission_id: int, work_id: str, files: dict):
    """Upload the worker's output files (multipart form) for a submission."""
    endpoint = '%sapi/submissions/%d/worker-output-files/%s' % (
        server_url, submission_id, work_id)
    response = requests.post(endpoint, files=files, auth=get_auth_param())
    response.raise_for_status()
| tjumyk/submit-testbot | testbot/api.py | api.py | py | 3,017 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "testbot.configs.worker_config",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.put",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "testbot.configs.server_url",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": ... |
5096598714 | from FastExpression import *
from backtesting import Backtest, Strategy
from backtesting.lib import crossover
from backtesting.test import SMA
import math
import pandas as pd
class SmaCross(Strategy):
    """Dual simple-moving-average crossover (fast window n1, slow n2)."""
    n1 = 10
    n2 = 20

    def init(self):
        price = self.data.Close
        self.sma1 = self.I(SMA, price, self.n1)
        self.sma2 = self.I(SMA, price, self.n2)

    def next(self):
        # Fast SMA crossing above the slow one -> go long; the opposite
        # crossing -> go short.  Flatten any open position first.
        if crossover(self.sma1, self.sma2):
            self.position.close()
            self.buy()
        elif crossover(self.sma2, self.sma1):
            self.position.close()
            self.sell()
class RSI(Strategy):
    """Mean-reversion strategy on the Relative Strength Index: buy below
    30 (oversold), sell above 70 (overbought).
    """

    def init(self):
        # rsi() comes from FastExpression; the original also bound an
        # unused `close = self.data.Close` local, dropped here.
        self.rsi = rsi(self.data)

    def next(self):
        if self.rsi < 30:
            # Oversold: close any open position and go long.
            self.position.close()
            self.buy()
        if self.rsi > 70:
            # BUG FIX: the original called self.buy() here too, making this
            # branch identical to the oversold one; exiting/shorting on an
            # overbought RSI is the intended behaviour.
            self.position.close()
            self.sell()
# def marketTrend(self):
# for level in range(1,4):
# if adx(self.df) < 25*level:
# return level
# #20-10-2022
# def algorithm1(self):
# close_mean20 = ts_mean(self.close, 20)
# return round(100* -(self.close[-1] - close_mean20)/close_mean20,5)
#
# def algorithm2(self):
# return round(100* -ts_max_diff(self.close,5)/ts_max(self.close,5),5)
#
# def algorithm3(self):
# # volume_mean360 = ts_mean(self.volume,360)
# # close_mean20 = ts_mean(self.close,20)
# # print(100*(self.volume[-1] - volume_mean360) / volume_mean360)
# # print(self.close[-1]-close_mean20)
# # return round(100* -signed_power(self.close[-1]-close_mean20, (self.volume[-1]-volume_mean360)/volume_mean360)/close_mean20,2)
# return 0
#
# def algorithm4(self):
# vwap20 = vwap(self.df,20)
# return round(math.log(vwap20[-1]/self.close[-1]),5)
#
# def algorithm5(self):
# return round(100*-(self.close[-1] - (ts_max(self.close, 20) + ts_min(self.close, 20))/2) / self.low[-1],5)
| quangtiennnn/cryptoprediction | QuantAnalysis.py | QuantAnalysis.py | py | 2,022 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "backtesting.Strategy",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "backtesting.test.SMA",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "backtesting.test.SMA",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": ... |
36579110550 | #!/usr/bin/env python3
# coding: utf-8
import re
from PIL import Image, ImageDraw
import yaml, sys, subprocess
def draw_arc(draw: ImageDraw.ImageDraw, target, offset=(0, 0), scale=1.0):
    """Draw one arc described by *target* (center/radius/range dicts),
    scaled then offset into image coordinates.
    """
    cx = target['center']['x']
    cy = target['center']['y']
    radius = target['radius']
    # Bounding box of the circle the arc lies on, as integer pixels.
    bbox = [
        (int((cx - radius) * scale + offset[0]),
         int((cy - radius) * scale + offset[1])),
        (int((cx + radius) * scale + offset[0]),
         int((cy + radius) * scale + offset[1])),
    ]
    draw.arc(bbox, target['range']['start'], target['range']['end'])
def draw_line(draw: ImageDraw.ImageDraw, target, offset=(0, 0), scale=1.0):
    """Draw the polyline whose vertexes *target* lists, scaled then offset."""
    points = [
        (int(v['x'] * scale + offset[0]), int(v['y'] * scale + offset[1]))
        for v in target['vertexes']
    ]
    draw.line(points)
if __name__ == '__main__':
    # Canvas settings: 72x72 fully-transparent RGBA image.
    width = 72
    height = 72
    background = (0, 0, 0, 0)
    mode = 'RGBA'
    # Renamed from `input` (shadowed the builtin) and actually used below:
    # the original repeated the literal path inside open() instead.
    input_path = 'alpaca.yaml'
    output = 'hoge.png'
    with open(input_path) as f:
        yml = yaml.safe_load(f)
    img = Image.new(mode, (width, height), background)
    draw = ImageDraw.Draw(img)
    # Dispatch each draw entry on its 'type' field.
    for draw_target in yml['draws']:
        if draw_target['type'] == 'ARC':
            draw_arc(draw, draw_target, (36, 30), 2.5)
        elif draw_target['type'] == 'LINE':
            draw_line(draw, draw_target, (36, 30), 2.5)
    # Rotate 180 degrees before saving.
    img = img.transpose(Image.ROTATE_180)
    img.save(output)
| takumi4424/takumi4424 | generate_icon.py | generate_icon.py | py | 1,441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.ImageDraw.ImageDraw",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.ImageDraw",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": ... |
29266081838 | import cv2
from model import FacialExpressionModel
import numpy as np
import threading, time
import queue
import logging
import sys
from memory_profiler import profile
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
BUFFER_SIZE = 700
qbuffer = queue.Queue(BUFFER_SIZE)
class ProducerThread(threading.Thread):
    """Reads frames from the global `rgb` capture into `qbuffer` until stopped."""
    def __init__(self, name):
        super(ProducerThread, self).__init__()
        self.name = name
        # BUG FIX: stored as _stop_event, not _stop -- threading.Thread
        # defines an internal _stop() method in Python 3, and shadowing it
        # with an Event breaks thread-shutdown bookkeeping ("'Event' object
        # is not callable").
        self._stop_event = threading.Event()
    def stop(self):
        """Ask the thread to exit at its next loop iteration."""
        self._stop_event.set()
    def stopped(self):
        # is_set() is the non-deprecated spelling of isSet().
        return self._stop_event.is_set()
    @profile
    def run(self):
        while True:
            if self.stopped():
                return
            ret, fr = rgb.read()
            if not qbuffer.full():
                # Only enqueue successfully captured frames.
                if ret:
                    qbuffer.put(fr)
                    logging.debug('ProducerThread ' + str(qbuffer.qsize()) + ' items in queue')
class ConsumerThread(threading.Thread):
    """Consumes frames from `qbuffer`, runs emotion detection on every 5th
    frame, and displays the annotated frame until stopped.
    """
    def __init__(self, name):
        super(ConsumerThread, self).__init__()
        self.name = name
        # BUG FIX: stored as _stop_event, not _stop, to avoid shadowing
        # threading.Thread's internal _stop() method in Python 3.
        self._stop_event = threading.Event()
    def stop(self):
        """Ask the thread to exit at its next loop iteration."""
        self._stop_event.set()
    def stopped(self):
        return self._stop_event.is_set()
    @profile
    def run(self):
        f = 0
        while True:
            if self.stopped():
                return
            if not qbuffer.empty():
                f += 1
                fr = qbuffer.get()
                # Run detection (and imshow) only on every 5th dequeued
                # frame to keep up with the producer.
                if f % 5 == 1:
                    gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
                    faces = facec.detectMultiScale(gray_fr, 1.3, 5)
                    for (x, y, w, h) in faces:
                        fc = gray_fr[y:y+h, x:x+w]
                        roi = cv2.resize(fc, (48, 48))
                        norm = np.zeros((48,48))
                        roi = cv2.normalize(roi, norm, 0, 255, cv2.NORM_MINMAX)
                        pred = cnn.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
                        cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
                        cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)
                    cv2.imshow("emotion_win",fr)
                    cv2.waitKey(1)
                logging.debug('ConsumerThread ' + str(qbuffer.qsize()) + ' items in queue')
                #out.write(fr)
if __name__ == '__main__':
    # Webcam capture shared with ProducerThread via the module global `rgb`.
    rgb = cv2.VideoCapture(0)
    facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    font = cv2.FONT_HERSHEY_SIMPLEX
    cnn = FacialExpressionModel("face_model.json", "face_model.h5")
    #fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #out = cv2.VideoWriter('output.avi',fourcc,float(5), (640,480))
    t1 = ProducerThread(name='producer')
    t2 = ConsumerThread(name='consumer')
    t1.start()
    t2.start()
    # Let the capture/detect pipeline run for 30 seconds, then signal both
    # threads to exit.  NOTE(review): the capture is never released and the
    # threads are not join()ed before the process ends.
    time.sleep( 30 )
    t1.stop()
    t2.stop()
| nuralabuga/facial-expression-recognition--with--multithreads | camera_multithread_ram.py | camera_multithread_ram.py | py | 3,226 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "queue.Queue",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "threading.Thread",... |
15973885303 | #!/usr/bin/python3
#endereço do compilador para usar o executável
#import dos módulos:
import tkinter as tk
from tkinter import ttk
import serial
import numpy as np
Polynomial = np.polynomial.Polynomial
import math
import matplotlib.pyplot as plt
#---------------------
#Variáveis globais:
# Shared GUI / serial-acquisition state.
# NOTE: `global` statements at module level are no-ops in Python, so the
# originals were dropped; plain assignments create the module globals that
# the callbacks below re-declare with `global` before writing to them.
portaUSB = ""      # serial.Serial handle once connected ("" before that)
azul = False       # True while the blue LED is on
verde = False      # True while the green LED is on
vermelho = False   # True while the red LED is on
l = 0              # set to 450/550/700 depending on the active LED colour (see comando)
cor = ""           # name of the active LED colour ('azul'/'verde'/'vermelho')
Analito = " "      # current analyte label -- TODO confirm intended default
dados_c = []       # collected data points (presumably concentrations; not set in this chunk)
# Readings P0..P8 -- their exact meaning is not visible in this chunk.
# Chained assignment of the immutable int 0 is equivalent to nine
# separate `Px = 0` statements.
P0 = P1 = P2 = P3 = P4 = P5 = P6 = P7 = P8 = 0
dados_P = []       # collected P readings
dados_A = []       # collected A readings
A0 = A1 = A2 = A3 = A4 = A5 = A6 = A7 = A8 = 0
#--------------------
#Programação dos botões:
def create_porta():
    """Open the serial port named in the `temp` entry field at 9600 baud.

    On success the connect button becomes a disconnect button and the
    status label shows connected; on failure the status label reports a
    nonexistent port.
    """
    global portaUSB
    port_name = temp.get()
    try:
        portaUSB = serial.Serial(port_name, 9600)
        botao_Port.config(text = "Desconectar",command = close_porta)
        status_Conexao.config(text='==== CONECTADO ====')
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit are not
        # swallowed; any serial failure still lands here.
        botao_Port.config(text = "Conectar")
        status_Conexao.config(text='==== Porta Inexistente ====')
def close_porta():
    """Turn the LEDs off, close the serial port and reset the GUI state."""
    global portaUSB
    sen_command('d')
    portaUSB.close()
    botao_Port.config(text = "Conectar",command = create_porta)
    status_Conexao.config(text='==== DESCONECTADO ====')
    # Reset every LED button/label pair to the "off" appearance.
    led_widgets = (
        (botaoLEDAzul, labelLEDAzul),
        (botaoLEDVerde, labelLEDVerde),
        (botaoLEDVermelho, labelLEDVermelho),
    )
    for button, label in led_widgets:
        button.config(fg = "black", bg = "white", highlightthickness = 0)
        label.config(text = "Desligado")
def sen_command(cod):
    """Send one command code to the device over the open serial port."""
    portaUSB.write(str(cod).encode())
def comando(op):
    """Toggle the LED selected by *op* (1 = blue, 2 = green, 3 = red).

    Switching an LED on sends its serial command, highlights its button,
    marks the other two LEDs as off, and records the chosen colour in
    ``cor`` plus its approximate wavelength (nm) in ``l``.  Pressing the
    button of the LED that is already on switches it off ('d').
    """
    global azul
    global verde
    global vermelho
    global l
    global cor
    # Blue LED requested while off -> switch it on.
    if (op == 1 and azul == False):
        #print("Led Azul Ligado")
        sen_command('b')
        botaoLEDAzul.config(fg = "white", bg = "blue", highlightthickness = 0)
        labelLEDAzul.config(text = "Ligado")
        botaoLEDVerde.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVerde.config(text = "Desligado")
        botaoLEDVermelho.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVermelho.config(text = "Desligado")
        vermelho = False
        verde = False
        azul = True
        cor = 'azul'
        l = 450
    # Blue LED requested while already on -> switch it off.
    elif (op == 1 and azul == True):
        #print('Led Azul Desligado')
        sen_command('d')
        botaoLEDAzul.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDAzul.config(text = "Desligado")
        azul = False
    # Green LED requested while off.
    elif (op == 2 and verde == False):
        #print("Led Vede Ligado")
        sen_command('g')
        botaoLEDVerde.config(fg = "white", bg = "green", highlightthickness = 0)
        labelLEDVerde.config(text = "Ligado")
        botaoLEDAzul.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDAzul.config(text = "Desligado")
        botaoLEDVermelho.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVermelho.config(text = "Desligado")
        vermelho = False
        azul = False
        verde = True
        cor = 'verde'
        l = 550
    # Green LED requested while already on.
    elif (op == 2 and verde == True):
        #print('Led Verde Desligado')
        sen_command('d')
        botaoLEDVerde.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVerde.config(text = "Desligado")
        verde = False
    # Red LED requested while off.
    elif (op == 3 and vermelho == False):
        #print('Led Vermelho Ligado')
        sen_command('r')
        botaoLEDVermelho.config(fg = "white", bg = "red", highlightthickness = 0)
        labelLEDVermelho.config(text = "Ligado")
        botaoLEDAzul.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDAzul.config(text = "Desligado")
        botaoLEDVerde.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVerde.config(text = "Desligado")
        verde = False
        azul = False
        vermelho = True
        cor = 'vermelho'
        l = 700
    # Red LED requested while already on.
    elif (op == 3 and vermelho == True):
        #print('Led Vermelho Desligado')
        sen_command('d')
        botaoLEDVermelho.config(fg = "black", bg = "white", highlightthickness = 0)
        labelLEDVermelho.config(text = "Desligado")
        vermelho = False
def medir(n):
    """Measure sample *n* (0 = blank) and update the matching GUI row.

    Reads the concentration from the corresponding entry widget
    (0.0 for the blank), requests a light-intensity reading over the
    serial port, computes the absorbance against the blank and shows
    both values in row *n* of the results table.

    The original implementation repeated the same ~15 lines once for
    each of the nine samples; this version looks the per-sample widgets
    and globals up by name instead.
    """
    global dados_c, dados_P, dados_A
    if n == 0:
        conc = 0.0  # the blank has zero concentration by definition
    else:
        conc = float(globals()["entryC{}".format(n)].get())
    dados_c.insert(n, conc)
    sen_command('l')  # ask the device for a light-intensity reading
    leitura = float(portaUSB.readline())
    dados_P.insert(n, leitura)
    absorbancia = calc_A(0, n)
    dados_A.insert(n, absorbancia)
    # Keep the historical P<n>/A<n> module globals in sync with the lists.
    globals()["P{}".format(n)] = leitura
    globals()["A{}".format(n)] = absorbancia
    # Show the values in the n-th pair of result labels.
    var_lumin = tk.StringVar()
    var_absor = tk.StringVar()
    var_lumin.set("%.1f" % dados_P[n])
    var_absor.set("%.3f" % dados_A[n])
    globals()["labelLuminC{}".format(n)].config(textvariable = var_lumin)
    globals()["labelAbsorC{}".format(n)].config(textvariable = var_absor)
def limpar():
    """Clear every input field, drop the stored concentrations and reset
    the result labels back to their placeholders."""
    global dados_c
    dados_c = []
    campos = (varAnalito, entryC1, entryC2, entryC3, entryC4,
              entryC5, entryC6, entryC7, entryC8)
    for campo in campos:
        campo.delete(0, 'end')
    resetar()
def resetar():
    """Reset every intensity/absorbance label back to "aguardando".

    Bug fix: the original bound all eighteen labels to the C0 pair of
    StringVars (a copy-paste error — the C1..C8 variables it created
    were never used).  Each label now gets its own fresh variable.
    """
    for i in range(9):
        var_lumin = tk.StringVar()
        var_lumin.set("aguardando")
        var_absor = tk.StringVar()
        var_absor.set("aguardando")
        globals()["labelLuminC{}".format(i)].config(textvariable = var_lumin)
        globals()["labelAbsorC{}".format(i)].config(textvariable = var_absor)
def calc_A(P0, Pi):
    """Absorbance of sample *Pi* relative to reference *P0*.

    Implements A = log10(P_ref / P_sample) over the ``dados_P`` list.
    """
    razao = dados_P[P0] / dados_P[Pi]
    return math.log10(razao)
def plotar():
    """Fit a straight line to absorbance vs. concentration and plot it.

    Uses the module-level ``dados_c``/``dados_A`` lists filled in by
    ``medir``; the y-axis label uses the wavelength ``l`` and colour
    ``cor`` of the currently selected LED.
    """
    subs = varAnalito.get()
    # Local working copies of the measured data:
    conc = []
    Absor = []
    for valorA in dados_A:
        Absor.append(valorA)
    for valorC in dados_c:
        conc.append(valorC)
    ## ----------------------------
    # Degree-1 least-squares fit over the measured concentration range.
    cmin, cmax = min(conc), max(conc)
    pfit, stats = Polynomial.fit(conc, Absor, 1,
                                 full=True,
                                 window=(cmin, cmax),
                                 domain=(cmin, cmax))
    plt.plot(conc, Absor, 'ko')
    plt.plot(conc, pfit(conc), 'r')
    # Plot/axis configuration:
    plt.title("{}".format(subs))
    plt.grid(True)
    #ymax = float(Absor[-1]) + 0.005
    #ymin = float(Absor[0]) - 0.001
    #xmax = float(conc[-1]) + 0.5
    #xmin = float(conc[0]) - 0.1
    #plt.ylim((ymin, ymax))
    #plt.xlim((xmin, xmax))
    plt.xlabel("Concentração molar (" r'$ \times 10^{-5}$' "mol/L)")
    plt.ylabel("Absorbância para " r'$ \lambda \approx $' "{}nm ({})".format(l, cor))
    #plt.ylabel("Absorbância ")
    plt.show()
#---------------------------------------------------
#Analito = " "
# Initialise the main window (title, fixed size, white background):
janelaPrincipal = tk.Tk()
janelaPrincipal.title("Lei de Lambert")
janelaPrincipal.configure(bg='white')
janelaPrincipal.geometry('470x450')
janelaPrincipal.resizable(width=False, height=False)
#---------------------------------------------------
#Layout
# --- Connection row: port entry, connect button, status label ---
text_Port = tk.Label(janelaPrincipal,font=("Times New Roman", 12, "bold"), text='Informe a Porta: ',fg = "black", bg = "white")
text_Port.grid(column=0,row=0)
temp = tk.StringVar()
porta = tk.Entry(janelaPrincipal, text = "aaaa",bd=2, width = 24,fg = "black", bg = "white",textvariable = temp)
porta.focus_set()
porta.grid(column=1,columnspan=3,row=0)
botao_Port = tk.Button(text='Conectar', width = 10,command = create_porta,fg = "black", bg = "white")
botao_Port.grid(column=4,row=0)
btnLimpar = tk.Button(text='Limpar', width = 10,command = limpar,fg = "black", bg = "white")
btnLimpar.grid(column=4, row=1)
status_Port = tk.Label(janelaPrincipal, text='Status da conexão: ',fg = "black", bg = "white")
status_Port.grid(column=0,row=1)
status_Conexao = tk.Label(janelaPrincipal, text='==== DESCONECTADO ====',fg = "black", bg = "white")
status_Conexao.grid(column=1,columnspan=3,row=1)
# --- Analyte name entry ---
labelAnalito = tk.Label(janelaPrincipal, text = "Digite o nome da analito",fg = "black", bg = "white")
labelAnalito.grid(column=1,columnspan=3, row=2)
varAnalito = tk.Entry(janelaPrincipal, bd=2, width = 18,fg = "black", bg = "white")
varAnalito.focus_set()
varAnalito.grid(column=1,columnspan=3,row=3)
# --- LED selection buttons and their state labels ---
labelLED = tk.Label(janelaPrincipal, text = "Escolha a cor do LED",fg = "black", bg = "white")
labelLED.grid(column=1,columnspan=3,row=4)
botaoLEDAzul = tk.Button(janelaPrincipal,fg = "black", bg = "white",text = "Azul", width = 5, command = lambda: comando(1))
botaoLEDAzul.grid(column=1,row=5)
labelLEDAzul = tk.Label(janelaPrincipal, text = "Desligado",fg = "black", bg = "white")
labelLEDAzul.grid(column=1,row=6)
botaoLEDVerde = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Verde", width = 5,command = lambda: comando(2))
botaoLEDVerde.grid(column=2,row=5)
labelLEDVerde = tk.Label(janelaPrincipal, text = "Desligado",fg = "black", bg = "white")
labelLEDVerde.grid(column=2,row=6)
botaoLEDVermelho = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Vermelho", width = 5,command = lambda: comando(3))
botaoLEDVermelho.grid(column=3,row=5)
labelLEDVermelho = tk.Label(janelaPrincipal, text = "Desligado",fg = "black", bg = "white")
labelLEDVermelho.grid(column=3,row=6)
status_LED = tk.Label(janelaPrincipal, text ="Status do LED: ", fg = "black", bg = "White")
status_LED.grid(column = 0, row = 6)
# --- Separator plus plot / quit buttons ---
ttk.Separator(janelaPrincipal, orient = 'horizontal').grid(column=0, columnspan = 5, row = 7, sticky = 'ew')
botaoPlotar = tk.Button(janelaPrincipal, text = "Plotar",fg = "black", bg = "white", height = 5,width = 10, command = lambda: plotar())
botaoPlotar.grid(column=4,row=2,rowspan=4)
botaoSair = tk.Button(janelaPrincipal, text = "Sair",fg = "black", bg = "white", height = 5, width = 10,command = janelaPrincipal.quit)
botaoSair.grid(column=0,row=2,rowspan=4)
# --- Measurement table: headers and concentration entries ---
labelLeituras= tk.Label(janelaPrincipal, font=("Times New Roman", 12, "bold"), text ="== MEDIDAS == ", fg = "black", bg = "White")
labelLeituras.grid(column = 0, row = 8)
# NOTE(review): .grid() returns None, so labelC0 is bound to None here;
# harmless because labelC0 is never used afterwards.
labelC0 = tk.Label(janelaPrincipal, font=("Times New Roman", 12, "bold"), text="(mol/L)", fg = "black", bg = "White").grid(column = 1, row= 8)
labelLeituraBranco = tk.Label(janelaPrincipal, text="Branco", fg = "black", bg = "White")
labelLeituraBranco.grid(column = 0, row = 9)
labelC0 = tk.Label(janelaPrincipal, text="0.00000", fg = "black", bg = "White").grid(column = 1, row= 9)
labelLeituraC1 = tk.Label(janelaPrincipal, text="Concentração 1", fg = "black", bg = "White")
labelLeituraC1.grid(column = 0, row = 10)
entryC1 = tk.Entry(janelaPrincipal, width = 6)
entryC1.grid(column = 1, row= 10)
labelLeituraC2 = tk.Label(janelaPrincipal, text="Concentração 2", fg = "black", bg = "White")
labelLeituraC2.grid(column = 0, row = 11)
entryC2 = tk.Entry(janelaPrincipal, width = 6)
entryC2.grid(column = 1, row= 11)
labelLeituraC3 = tk.Label(janelaPrincipal, text="Concentração 3", fg = "black", bg = "White")
labelLeituraC3.grid(column = 0, row = 12)
entryC3 = tk.Entry(janelaPrincipal, width = 6)
entryC3.grid(column = 1, row= 12)
labelLeituraC4 = tk.Label(janelaPrincipal, text="Concentração 4", fg = "black", bg = "White")
labelLeituraC4.grid(column = 0, row = 13)
entryC4 = tk.Entry(janelaPrincipal, width = 6)
entryC4.grid(column = 1, row= 13)
labelLeituraC5 = tk.Label(janelaPrincipal, text="Concentração 5", fg = "black", bg = "White")
labelLeituraC5.grid(column = 0, row = 14)
entryC5 = tk.Entry(janelaPrincipal, width = 6)
entryC5.grid(column = 1, row= 14)
labelLeituraC6 = tk.Label(janelaPrincipal, text="Concentração 6", fg = "black", bg = "White")
labelLeituraC6.grid(column = 0, row = 15)
entryC6 = tk.Entry(janelaPrincipal, width = 6)
entryC6.grid(column = 1, row= 15)
labelLeituraC7 = tk.Label(janelaPrincipal, text="Concentração 7", fg = "black", bg = "White")
labelLeituraC7.grid(column = 0, row = 16)
entryC7 = tk.Entry(janelaPrincipal, width = 6)
entryC7.grid(column = 1, row= 16)
labelLeituraC8 = tk.Label(janelaPrincipal, text="Concentração 8", fg = "black", bg = "White")
labelLeituraC8.grid(column = 0, row = 17)
entryC8 = tk.Entry(janelaPrincipal, width = 6)
entryC8.grid(column = 1, row= 17)
# --- Calibrate / reset / per-sample measure buttons ---
btnCalibrar = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Calibrar", width = 3,command = lambda: medir(0))
btnCalibrar.grid(column = 2, row = 9)
btnResetar =tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Resetar", width = 3,command = lambda: resetar())
btnResetar.grid(column = 2, row = 8)
btnMedirC1 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(1))
btnMedirC1.grid(column = 2, row = 10)
btnMedirC2 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(2))
btnMedirC2.grid(column = 2, row = 11)
btnMedirC3 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(3))
btnMedirC3.grid(column = 2, row = 12)
btnMedirC4 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(4))
btnMedirC4.grid(column = 2, row = 13)
btnMedirC5 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(5))
btnMedirC5.grid(column = 2, row = 14)
btnMedirC6 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(6))
btnMedirC6.grid(column = 2, row = 15)
btnMedirC7 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(7))
btnMedirC7.grid(column = 2, row = 16)
btnMedirC8 = tk.Button(janelaPrincipal, fg = "black", bg = "white", text = "Medir", width = 3,command = lambda: medir(8))
btnMedirC8.grid(column = 2, row = 17)
# --- Intensity / absorbance result labels (filled by medir) ---
labelLumin= tk.Label(janelaPrincipal, font=("Times New Roman", 12, "bold"),text ="Intensidades", fg = "black", bg = "White")
labelLumin.grid(column = 3, row = 8)
labelAbsor= tk.Label(janelaPrincipal, font=("Times New Roman", 12, "bold"), text ="Absorbâncias", fg = "black", bg = "White")
labelAbsor.grid(column = 4, row = 8)
labelLuminC0 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC0.grid(column = 3, row= 9)
labelAbsorC0 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC0.grid(column = 4, row= 9)
labelLuminC1 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC1.grid(column = 3, row= 10)
labelAbsorC1 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC1.grid(column = 4, row= 10)
labelLuminC2 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC2.grid(column = 3, row= 11)
labelAbsorC2 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC2.grid(column = 4, row= 11)
labelLuminC3 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC3.grid(column = 3, row= 12)
labelAbsorC3 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC3.grid(column = 4, row= 12)
labelLuminC4 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC4.grid(column = 3, row= 13)
labelAbsorC4 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC4.grid(column = 4, row= 13)
labelLuminC5 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC5.grid(column = 3, row= 14)
labelAbsorC5 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC5.grid(column = 4, row= 14)
labelLuminC6 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC6.grid(column = 3, row= 15)
labelAbsorC6 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC6.grid(column = 4, row= 15)
labelLuminC7 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC7.grid(column = 3, row= 16)
labelAbsorC7 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC7.grid(column = 4, row= 16)
labelLuminC8 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelLuminC8.grid(column = 3, row= 17)
labelAbsorC8 = tk.Label(janelaPrincipal, text="aguardando", fg = "black", bg = "White")
labelAbsorC8.grid(column = 4, row= 17)
#---------------------------------------------------
# Start the Tk event loop:
janelaPrincipal.mainloop()
#---------------------------------------------------
| abbarreto/Beer-Lambert | GuiLambertv1.py | GuiLambertv1.py | py | 23,636 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.polynomial",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "serial.Serial",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "tkinter.StringV... |
74352223463 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 18 13:40:36 2022
maybe needed:
%load_ext autoreload
%autoreload 2
@author: fabian_balzer
"""
# %%
import argparse
import input_scripts.availability_plots as av
import input_scripts.filter_coverage_plot as fc
import input_scripts.separation_plots as sep
import output_scripts.specz_photz_plots as s_p
import output_scripts.template_analysis_plots as ta
import util.my_tools as mt
def read_args():
    """Parse the command-line arguments for the analysis script.

    Returns the ``argparse.Namespace``.  Beware: the input/output stem
    values are currently overridden with hard-coded strings (see the
    HACK note below), so the corresponding CLI options have no effect.
    """
    parser = argparse.ArgumentParser(
        description="Analyse a variety of files related to the LePhare photo-z process.")
    stem_defaults = {"input": "baseline_input", "separation": "baseline_input",
                     "Filter": "baseline_filters", "output": "new_test", "template": "baseline_templates"}
    for argtype in ["input", "separation", "Filter", "output", "template"]:
        # Introduce a boolean call:
        short = f"-{argtype[0]}"  # i, s, F, o and t can be specified now.
        long = f"--produce_{argtype}_plots"
        helpstring = f"Specify wether to save the {argtype} plots"
        # NOTE(review): action="store_true" combined with default=True means
        # these flags are always True and can never be switched off;
        # presumably default=False was intended — confirm before changing.
        parser.add_argument(short, long, action="store_true",
                            help=helpstring, default=True)
        # Allow to specify the stem names:
        argname = f"--{argtype}_stem"
        default = stem_defaults[argtype]
        helpstring = f"Specify the stem name of the {argtype} data."
        parser.add_argument(argname, help=helpstring, default=default)
    parser.add_argument("--context",
                        help="The context for the LePhare run.", type=int, default=-1)
    parser.add_argument("-S", "--Stats", action="store_true",
                        help="Provide statistics on each of the files to the command line.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Increase output verbosity.")
    args, _ = parser.parse_known_args()
    mt.LOGGER.info("Starting the analysis script for the following subtypes:")
    # HACK: hard-coded stem overrides (debug leftover) silently discard
    # whatever the user passed on the command line.  The original also
    # repeated the output_stem assignment after the loop; the duplicate
    # has been removed as it had no effect.
    args.output_stem = "with_dr10"
    args.input_stem = "with_dr10"
    for argtype in [argtype.split('_')[1] for argtype, val in vars(args).items() if isinstance(val, bool) and val]:
        stemval = vars(args)[f'{argtype}_stem']
        mt.LOGGER.info(f"{argtype}_stem: {stemval}")
    return args
# Script body: parse the arguments once, then run each plotting section.
# NOTE: the produce_* flags default to True (see read_args), so every
# section below currently runs on each invocation.
args = read_args()
mt.LOGGER.info(args.context)  # TODO
# print(args.context)
# %%
# input dataframes:
if args.produce_input_plots:
    input_df = mt.read_plike_and_ext(prefix=f"matches/{args.input_stem}_",
                                     suffix="_processed_table.fits")
    input_df = mt.add_mag_columns(input_df)
    av.plot_r_band_magnitude(input_df, args.input_stem)
    av.plot_input_distribution(input_df, args.input_stem, args.context)
    av.plot_band_number_distribution(input_df, args.input_stem, args.context)
# %% Separation plots:
if args.produce_separation_plots:
    # Re-reads the input table so this section is independent of the one above.
    input_df = mt.read_plike_and_ext(prefix=f"matches/{args.input_stem}_",
                                     suffix="_processed_table.fits")
    input_df = mt.add_mag_columns(input_df)
    sep.plot_all_separations(input_df, args.separation_stem, args.context)
# %% Filter analysis:
if args.produce_Filter_plots:
    filter_df = fc.read_filter_info_file(args.Filter_stem)
    fc.produce_filter_plot(filter_df, args.Filter_stem)
    info_df = fc.read_filter_overview_file(args.Filter_stem)
    fc.save_filter_info(info_df, args.Filter_stem)
# %% Output dataframe and analysis:
if args.produce_output_plots:
    output_df = mt.read_plike_and_ext(
        prefix=f"lephare_output/{args.output_stem}_", suffix=".fits")
    output_df = mt.add_filter_columns(output_df)
    # Keep only rows with a valid (positive) i-band magnitude.
    output_df = output_df[output_df["mag_i-ls10"] > 0]
    for ttype in ["pointlike", "extended"]:
        # ta.plot_problematic_templates(output_df, ttype)
        s_p.plot_photoz_vs_specz(output_df, ttype, args.output_stem)
# %%
if args.produce_template_plots:
    output_df = mt.read_plike_and_ext(
        prefix=f"lephare_output/{args.output_stem}_", suffix=".fits")
    output_df = mt.add_filter_columns(output_df)
    for ttype in ["pointlike", "extended"]:
        # ta.plot_problematic_templates(output_df, ttype, args.template_stem)
        template_df = mt.read_template_library(
            f"{ttype}_mag_lib.dat")  # TODO: {args.template_stem}_
        ta.plot_multiple_against_redshift(
            output_df, template_df, ttype, args.template_stem, bands=("i-hsc", "i-kids"))
| Fabian-Balzer/sel-4hi-q | old/master_plots.py | master_plots.py | py | 4,507 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "util.my_tools.LOGGER.info",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "util.my_tools.LOGGER",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_n... |
74024990185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import NoReturn
import streamlit as st
def page_title_area(title: str) -> None:
    """Render the page title row followed by a divider.

    Bug fix: the return annotation was ``NoReturn``, which in typing
    means "this function never returns normally" (it always raises or
    loops forever); a function that simply returns nothing must be
    annotated ``-> None``.

    :param title: title text shown in the first column
    :return: None
    """
    # Four equal columns: the title lives in the first one, the other
    # three act as right-hand spacing (col4 is currently unused).
    col1, _, _, col4 = st.columns(4)
    title_container = st.container()
    with title_container:
        with col1:
            st.title(title)
    st.divider()
| zhaoqianjie/imageAI-streamlit | streamlit_gallery/views/object_detection/header.py | header.py | py | 400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.columns",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.container",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.divid... |
21544899518 | import pytest
from autots.models import NeuralCDE
from autots.tests import helpers
from autots.utils import make_time_series_problem
def setup_ncde_problem(static_dim=None, use_initial=True):
    """Build a small NCDE model together with a matching toy dataset.

    Returns a ``(model, data, labels)`` triple for a simple problem with
    four input channels and one output class, optionally including
    static features of dimension ``static_dim``.
    """
    n_channels = 4
    n_classes = 1
    data, labels = make_time_series_problem(
        n_channels=n_channels, n_classes=n_classes, static_dim=static_dim
    )
    model = NeuralCDE(
        n_channels,
        15,  # hidden state dimension
        n_classes,
        static_dim=static_dim,
        interpolation="linear",
        use_initial=use_initial,
    )
    return model, data, labels
@pytest.mark.parametrize(
    "static_dim, use_initial",
    [(None, True), (None, False), (5, True), (5, False)],
)
def test_ncde_static_initial(static_dim, use_initial):
    """Smoke-test NeuralCDE with/without static features and initial values.

    Trains for a single epoch and only checks the reported accuracy is a
    valid proportion, i.e. the model runs end to end for every
    static_dim/use_initial combination.
    """
    # Test the model runs and gets a normal accuracy
    model, data, labels = setup_ncde_problem(
        static_dim=static_dim, use_initial=use_initial
    )
    _, acc = helpers.training_loop(model, data, labels, n_epochs=1)
    assert 0 <= acc <= 1
| jambo6/autots | autots/tests/models/test_ncde.py | test_ncde.py | py | 1,063 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "autots.utils.make_time_series_problem",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "autots.models.NeuralCDE",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "autots.tests.helpers.training_loop",
"line_number": 39,
"usage_type": "call... |
34088049341 | import scrapy
class QuotesSpider(scrapy.Spider):
    """Crawl quotes.toscrape.com and yield one item per quote.

    Bug fixes relative to the original:
      * ``author`` had a stray trailing comma, which wrapped the value
        in a 1-tuple instead of yielding the plain string;
      * ``tags`` used ``.get()`` and therefore returned only the first
        tag — ``.getall()`` returns the full list of tags.
    Debug ``print`` calls were removed.
    """

    name = "quotes"

    def start_requests(self):
        # Start from the first two listing pages; the pagination link in
        # parse() takes over from there (duplicate requests are dropped
        # by scrapy's default dupefilter).
        urls = [
            'https://quotes.toscrape.com/page/1/',
            'https://quotes.toscrape.com/page/2/'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        blocks = response.xpath("//div[contains(@class,'quote')]")
        for block in blocks:
            text = block.xpath(".//span[@class='text']/text()").get().strip()
            author = block.xpath(".//small[@class='author']/text()").get()
            tags = block.xpath(".//div[@class='tags']/a/text()").getall()
            yield {
                'Text': text,
                'Author': author,
                'Tags': tags
            }
        # Follow the "Next" pagination link, if present.
        next_page = response.xpath("//a[text()='Next ']/@href").get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
| nikku179201/Scrapy_Project | MyScrapyProject/spiders/test.py | test.py | py | 1,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 37,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.