seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16472178151 | import argparse
import hashlib
import logging
import time
import spacy
import config
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
class MEDLINESpacySents:
def __init__(self, medline_abstracts, output_fname):
self.medline_abstracts = medline_abstracts
self.output_fname = output_fname
self.sent_tok = spacy.load("en_core_sci_lg")
logger.info("Using spacy.")
def extract_sentences(self):
n, d = 0, 0
logger.info("Extracting sentences from `{}` ...".format(self.medline_abstracts))
hash_set = set()
with open(self.medline_abstracts, encoding="utf-8", errors="ignore") as rf, open(self.output_fname, "w") as wf:
for idx, abstract in enumerate(rf):
d += 1
abstract = abstract.strip()
if not abstract:
continue
# Strip starting b' or b" and ending ' or "
if (abstract[:2] == "b'" and abstract[-1] == "'") or (abstract[:2] == 'b"' and abstract[-1] == '"'):
abstract = abstract[2:-1]
for sent in self.sent_tok(abstract).sents:
sent = sent.text
shash = hashlib.sha256(sent.encode("utf-8")).hexdigest()
if shash not in hash_set:
hash_set.add(shash)
wf.write(sent + "\n")
if __name__ == "__main__":
infile = config.medline_file
outfile = config.medline_spacy_sents
print("Infile {}, Outfile {}".format(infile, outfile))
ms = MEDLINESpacySents(infile, outfile)
t = time.time()
ms.extract_sentences()
t = (time.time() - t) // 60
logger.info("Took {} mins!".format(t))
| IBM/aihn-ucsd | amil/preprocess/_2_spacy_sents.py | _2_spacy_sents.py | py | 1,808 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "spacy.load",
... |
74494760743 | # -*- coding: utf-8 -*-
from selenium import webdriver
from lxml import etree
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from urllib.parse import quote
driver = webdriver.Chrome()
def get_list_page(url):
driver.get(url)
while True:
html = driver.page_source
html = etree.HTML(html)
job_list = html.xpath('//div[@class="job-list"]/ul/li')
for job in job_list:
detail_url = job.xpath('.//h3/a/@href')[0]
detail_url = 'https://www.zhipin.com' + detail_url
get_detail(detail_url)
next_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'a.next')))
if not next_btn:
break
next_btn.click()
def get_detail(detail_url):
driver.execute_script('window.open("{}")'.format(detail_url))
driver.switch_to.window(driver.window_handles[1])
detail_html = driver.page_source
html = etree.HTML(detail_html)
job_name = html.xpath('//div[@class="info-primary"]/div[@class="name"]/h1/text()')[0]
job_salary = html.xpath('//div[@class="info-primary"]/div[@class="name"]/span/text()')[0].strip()
job_location = html.xpath('//div[@class="info-primary"]/p//text()')[0]
work_experience = html.xpath('//div[@class="info-primary"]/p//text()')[1]
education_background = html.xpath('//div[@class="info-primary"]/p//text()')[2]
job_detail = ''.join(html.xpath('//div[@class="detail-content"]/div[1]//text()')).strip()
job_company = html.xpath('//div[@class="company-info"]/a/@title')[0].strip()
# print(work_experience,education_background)
# print(job_company)
data = {
'job_name': job_name,
'job_salary': job_salary,
'job_location': job_location,
'work_experience': work_experience,
'education_background': education_background,
'job_detail': job_detail,
'job_company': job_company
}
print(data)
driver.close()
driver.switch_to.window(driver.window_handles[0])
def run(url):
get_list_page(url)
if __name__ == '__main__':
kw = 'python'
url = 'https://www.zhipin.com/job_detail/?query={}&city=100010000&industry=&position='.format(quote(kw))
run(url)
| xieys/webSpider | bossSpider/crawler.py | crawler.py | py | 2,342 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lxml.etree... |
11043698970 | import pytest
from RiceClassifier.config.configuration import (
ConfigurationManager, YAMLConfigReader, FilesystemDirectoryCreator
)
from RiceClassifier.components.prepare_base_model import (
BaseModelLoader, BaseModelUpdater, FullModelPreparer
)
from RiceClassifier.logger import logger
from RiceClassifier.pipeline.stage_02_prepare_base_model import PrepareBaseModelTrainingPipeline
class TestPrepareBaseModelTrainingPipeline:
@pytest.fixture(autouse=True)
def setup(self):
self.mock_config_manager = ConfigurationManager(
config_reader=YAMLConfigReader(),
dir_creator=FilesystemDirectoryCreator()
)
def test_main(self, mocker):
# Mock necessary classes and methods
mocker.patch.object(BaseModelLoader, "load_base_model")
mocker.patch.object(BaseModelLoader, "save_base_model")
mocker.patch.object(FullModelPreparer, "prepare_full_model")
mocker.patch.object(BaseModelUpdater, "save_updated_base_model")
# Create an instance of the pipeline
pipeline = PrepareBaseModelTrainingPipeline()
# Call the main method
pipeline.main()
# Assert that the necessary methods were called
BaseModelLoader.load_base_model.assert_called_once()
BaseModelLoader.save_base_model.assert_called_once()
FullModelPreparer.prepare_full_model.assert_called_once()
BaseModelUpdater.save_updated_base_model.assert_called_once()
def test_main_exception(self, mocker):
# Mock an exception to be raised
mocker.patch.object(BaseModelLoader, "load_base_model", side_effect=Exception("Mocked exception"))
# Patch logger.exception method
mock_logger_exception = mocker.patch.object(logger, "exception")
# Create an instance of the pipeline
pipeline = PrepareBaseModelTrainingPipeline()
# Call the main method and check if the exception is raised
try:
pipeline.main()
except Exception as e:
# Log the exception manually
logger.exception(str(e))
# Assert that logger.exception was called with the raised exception
mock_logger_exception.assert_called_once_with("Mocked exception")
if __name__ == '__main__':
pytest.main()
| nasserml/End-To-End_Rice-Classification-Project | tests/test_RiceClassifier/test_pipeline/test_stage_02_prepare_base_model.py | test_stage_02_prepare_base_model.py | py | 2,303 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "RiceClassifier.config.configuration.ConfigurationManager",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "RiceClassifier.config.configuration.YAMLConfigReader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "RiceClassifier.config.configuration... |
74198666023 | """Example: Message instance conversion."""
from __future__ import annotations
import importlib
from typing import TYPE_CHECKING
import numpy
if TYPE_CHECKING:
from typing import Any
NATIVE_CLASSES: dict[str, Any] = {}
def to_native(msg: Any) -> Any: # noqa: ANN401
"""Convert rosbags message to native message.
Args:
msg: Rosbags message.
Returns:
Native message.
"""
msgtype: str = msg.__msgtype__
if msgtype not in NATIVE_CLASSES:
pkg, name = msgtype.rsplit('/', 1)
NATIVE_CLASSES[msgtype] = getattr(importlib.import_module(pkg.replace('/', '.')), name)
fields = {}
for name, field in msg.__dataclass_fields__.items():
if 'ClassVar' in field.type:
continue
value = getattr(msg, name)
if '__msg__' in field.type:
value = to_native(value)
elif isinstance(value, numpy.ndarray):
value = value.tolist()
fields[name] = value
return NATIVE_CLASSES[msgtype](**fields)
if __name__ == '__main__':
from rosbags.typesys.types import (
builtin_interfaces__msg__Time,
sensor_msgs__msg__Image,
std_msgs__msg__Header,
)
image = sensor_msgs__msg__Image(
std_msgs__msg__Header(
builtin_interfaces__msg__Time(42, 666),
'/frame',
),
4,
4,
'rgb8',
False,
4 * 3,
numpy.zeros(4 * 4 * 3, dtype=numpy.uint8),
)
native_image = to_native(image)
# native_image can now be passed to the ROS stack
| cmrobotics/rosbags | docs/examples/use_with_native.py | use_with_native.py | py | 1,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
... |
72167501223 | #!/usr/bin/env python3
import sys
import subprocess
from pathlib import Path
from cv19 import CV19ROOT
def run_pylint():
"""
Run the Pylint test on the module and some other files in the repository.
Automatically ran on every pull request via GitHub actions.
"""
# Messages/warnings/errors to enable and disable.
messages_enable = ['all']
messages_disable = ['R',
'line-too-long',
'missing-module-docstring',
'invalid-name',
'attribute-defined-outside-init',
'access-member-before-definition',
'fixme']
# List of files or directories to run the linter on.
# Currently assumes that the working directory is where to get the files.
file_list = ['cv19']
file_list += [str(f) for f in Path(CV19ROOT).glob('test/**/*.py')]
print("Running on:")
for f in file_list:
print(f"\t{f}")
print("")
# List of class names for which member attributes should not be checked (from pylint).
ignored_classes = ['InteractionSites', 'Person', 'Policy',
'Population', 'Simulation']
# Overall command to run.
cmd_list = ["pylint",
"--jobs=1",
"--score=n",
"--output-format=colorized",
f"--enable={','.join(messages_enable)}",
f"--disable={','.join(messages_disable)}",
f"--ignored-classes={','.join(ignored_classes)}"]
# Unnamed arguments (the files to process).
cmd_list += file_list
# Run the pylint command.
# Return non-zero exit code upon failure.
try:
subprocess.run(cmd_list, check=True, text=True)
except subprocess.CalledProcessError as e:
print(f"\npylint returned with non-zero exit code: {e.returncode}.")
return e.returncode
return 0
if __name__ == "__main__":
sys.exit(run_pylint())
| Queens-Physics/quaboom | test/linters/pylint.py | pylint.py | py | 1,985 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv19.CV19ROOT",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "subprocess.run",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProces... |
70268090023 | import cv2
# import opencv
from PIL import Image,ImageFilter
from PIL import ImageEnhance
import matplotlib.image as mp
import os, sys
import http.client
import json
import ssl
import urllib.parse
from os.path import expanduser
#获取图片清晰度
def getImageVar(imgPath):
image = cv2.imread(imgPath);
img2gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imageVar = cv2.Laplacian(img2gray, cv2.CV_64F).var()
return imageVar
#修改图片亮度和对比度
#修改的值待定
def changePhoto(path,file):
im = Image.open(path+file)
# # 亮度
im_2 = ImageEnhance.Brightness(im).enhance(0.9)
# # im_2.show()
# # 提高对比度
# im_3 = ImageEnhance.Contrast(im_2).enhance(1.0)
# im_3.show()
#提高锐度
enh_sha = ImageEnhance.Sharpness(im_2).enhance(2.0)
enh_sha.show()
#文件保存
# im_3.save('img2/'+file)
enh_sha.save('img2/'+file)
def main():
# 打开文件
# 文件夹
path = "img/"
dirs = os.listdir(path)
i = int(1)
# 输出所有文件和文件夹
for file in dirs:
cl = getImageVar(path + file)
print(cl)
# 判断清晰度是否达到要求,对达不到要求的图片进行增加亮度和对比度的操作
# 清晰度判断值待定
if cl <= 100:
changePhoto(path, file)
# print(getImageVar('img2/'+file))
def result():
ssl._create_default_https_context = ssl._create_unverified_context
# urllib打开http链接会验证SSL证书,全局取消证书验证防止异常
subscription_key = '144ce86219b740938b003a1f3d36a26a' # Face API的key
uri_base = 'https://aimovie.cognitiveservices.azure.cn/' # Face API的end point
global parsed
headers = {
'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
}) # 返回的内容,FaceID,年龄,性别,头型,微笑,面部毛发,眼镜,情绪,头发,化妆,遮挡,配饰,模糊,曝光,干扰
img = open(expanduser('D:/1_study/2.0work/faceUP/img2/16.jpg'), "rb")
# img = open(expanduser('D:/1_study/2.0work/faceUP/img/image.png'), "rb")
try:
conn = http.client.HTTPSConnection('api.cognitive.azure.cn')
conn.request("POST", "/face/v1.0/detect?%s" % params, img, headers)
response = conn.getresponse()
data = response.read()
parsed = json.loads(data) # 将字符串转化为字典
print("Response:")
print(json.dumps(parsed, sort_keys=True, indent=2))
word=json.dumps(parsed, sort_keys=True, indent=2)
print("len:",len(word))
with open("result/test16锐度2.0Bri0.9.txt", "w") as f:
f.write(json.dumps(parsed, sort_keys=True, indent=2))
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
if __name__ == '__main__':
path='img/'
file='16.jpg'
changePhoto(path,file)
result()
| YukiXueyan/faceUP | testPhoto.py | testPhoto.py | py | 3,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.Laplacian",
"... |
37900995282 | # -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy
from biquge.items import BiqugeIndexItem, BiqugeDetailsItem
class BookspiderSpider(scrapy.Spider):
name = 'bookspider'
allowed_domains = ['biquge.com.cn']
start_urls = [
'http://www.biquge.com.cn/xuanhuan/',
'http://www.biquge.com.cn/xiuzhen/',
'http://www.biquge.com.cn/dushi/',
'http://www.biquge.com.cn/lishi/',
'http://www.biquge.com.cn/wangyou/',
'http://www.biquge.com.cn/kehuan/',
'http://www.biquge.com.cn/yanqing/',
'http://www.biquge.com.cn/qita/',
'http://www.biquge.com.cn/quanben/'
]
def parse_book_details(self, response):
'''爬取图书每一页信息'''
item = BiqugeDetailsItem()
book_id = response.meta["book_id"]
sort_id = response.meta["sort_id"]
item['book_id'] = int(book_id)
item['sort_id'] = int(sort_id)
detail_title = response.xpath("//div[@class='bookname']/h1/text()").extract_first()
# print("detail_title = ", detail_title)
item['detail_title'] = detail_title
detail_content = response.xpath("//div[@id='content']/text()").extract()
detail = ''
for i in detail_content:
detail = detail + i
item["detail_content"] = detail
# print("detail = ", detail)
yield item
def parse_book_info(self, response):
'''爬取图书基本信息'''
# # print("开始爬取图书首页的信息")
# # print(response.body.decode())
item = BiqugeIndexItem()
book_id = response.meta["book_id"]
item["book_id"] = int(book_id)
book_cate = response.meta["book_cate"]
item["book_cate"] = book_cate
book_infos = response.xpath("//div[@id='maininfo']")
# book_id = book_infos.xpaht("./")
book_name = book_infos.xpath("./div[@id='info']/h1/text()").extract_first()
item["book_name"] = book_name
url = response.xpath("//div[@id='fmimg']/img/@src").extract_first()
item["image_urls"] = url
book_author = book_infos.xpath("./div[@id='info']/p[1]/text()").extract_first()
print("清洗之前的book_author = ", book_author)
book_author = re.findall('作 者:(.*?)$', book_author)[0]
print("清洗之后的book_author = ", book_author)
# print(book_author)
item["book_author"] = book_author
book_status = book_infos.xpath("./div[@id='info']/p[2]/text()").extract_first()
print("清洗之前的book_status = ", book_status)
book_status = re.findall('状 态:(.*?),', book_status)[0]
print("清洗之后的book_status = ", book_status)
item["book_status"] = book_status
book_last_update_time = book_infos.xpath("./div[@id='info']/p[3]/text()").extract_first()
print("清洗之前的book_last_update_time = ", book_last_update_time)
book_last_update_time = re.findall("最后更新:(.*?)$", book_last_update_time)[0]
print("清洗之后的book_last_update_time = ", book_last_update_time)
item["book_last_update_time"] = book_last_update_time
book_newest_name = book_infos.xpath("./div[@id='info']/p[4]/a/text()").extract_first()
# print(book_newest_name)
item["book_newest_name"] = book_newest_name
book_newest_url = book_infos.xpath("./div[@id='info']/p[4]/a/@href").extract_first()
# /book/39837/1035785.html
book_newest_url = re.findall("^/book/.*/(.*?).html", book_newest_url)[0]
# print(book_newest_url)
item["book_newest_url"] = int(book_newest_url)
book_desc = book_infos.xpath("./div[@id='intro']/text()").extract_first()
# print(book_desc)
item["book_desc"] = book_desc
book_detail_urls = response.xpath("//div[@class='box_con']/div/dl/dd/a")
# print(book_detail_urls)
yield item
# # print("我在详情页中了。。。")
for dd in book_detail_urls:
# # print("我在详情页的for循环中了。。。")
detail_url = 'http://www.biquge.com.cn' + dd.xpath("./@href").extract_first()
sort_id = re.findall(r"(\d+)\.html", dd.xpath("./@href").extract_first())[0]
# print(sort_id)
# print(detail_url)
# todo 在这个地方判断一下: 如果数据库中存在这章内容,则跳过
# todo: 如果数据库中不存在这章内容,则继续
yield scrapy.Request(
detail_url,
callback = self.parse_book_details,
meta={"book_id": deepcopy(book_id), "sort_id": deepcopy(sort_id)}
)
# 入口函数
def parse(self, response):
'''从起始页面爬取需要读取的图书信息'''
if response.url == 'https://www.biquge.com.cn/quanben/':
print("开始处理全本")
li_list = response.xpath("//div[@id='main']/div[@class='novelslist2']/ul/li/span[@class='s2']/a")
book_cate = re.findall("biquge.com.cn/(.*?)/", response.url)[0]
for li in li_list:
book_index_url = 'http://www.biquge.com.cn' + li.xpath("./@href").extract_first()
book_id = re.findall(r"\d+\.?\d*", li.xpath("./@href").extract_first())[0]
# print(book_id)
# print(book_index_url)
yield scrapy.Request(
book_index_url,
callback = self.parse_book_info,
meta={"book_id": deepcopy(book_id), "book_cate": book_cate}
)
else:
li_list1 = response.xpath("//div[@class='l']/ul/li/span[@class='s2']/a")
book_cate = re.findall("biquge.com.cn/(.*?)/", response.url)[0]
for li in li_list1:
# # print(li.xpath("./@href").extract_first())
book_index_url = 'http://www.biquge.com.cn' + li.xpath("./@href").extract_first()
book_id = re.findall(r"\d+\.?\d*", li.xpath("./@href").extract_first())[0]
# print(book_id)
# print(book_index_url)
yield scrapy.Request(
book_index_url,
callback = self.parse_book_info,
meta={"book_id": deepcopy(book_id), "book_cate": book_cate}
)
li_list2 = response.xpath("//div[@class='r']/ul/li/span[@class='s2']/a")
# # print("li_list2 = ", li_list2)
for li in li_list2:
# # print(li.xpath("./@href").extract_first())
book_index_url = 'http://www.biquge.com.cn' + li.xpath("./@href").extract_first()
book_id = re.findall(r"\d+\.?\d*", li.xpath("./@href").extract_first())[0]
# print(book_id)
# print(book_index_url)
yield scrapy.Request(
book_index_url,
callback=self.parse_book_info,
meta={"book_id": deepcopy(book_id), "book_cate": book_cate}
)
def is_book_exist(self):
pass
| silenterofsea/silenter_read_story | 9999_some_tiny_program/biquge/biquge/spiders/bookspider.py | bookspider.py | py | 7,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "biquge.items.BiqugeDetailsItem",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "biquge.items.BiqugeIndexItem",
"line_number": 44,
"usage_type": "call"
},
{
"api... |
15953309425 | import os
from typing import Dict
from aiohttp_retry import Any
from flask import Flask, request
from langchain import PromptTemplate
from twilio.twiml.messaging_response import MessagingResponse
import json
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryBufferMemory
from supabase import create_client, Client
from twilio_utils import validate_twilio_request
application = Flask(__name__)
url: str = os.environ.get("SUPABASE_URL")
key: str = os.environ.get("SUPABASE_KEY")
supabase: Client = create_client(url, key)
@application.route('/')
def index():
return json.dumps({"health": "ok"})
@application.route('/twilio', methods=["POST"])
@validate_twilio_request
def twilio_callback():
try:
sender = request.values.get('From', None)
reciever = request.values.get('To', None)
incoming_msg = request.values.get('Body', None)
user_res = supabase.table("users").select("*").eq("phone_number", sender).limit(1).execute().data
if len(user_res) == 0:
print("user")
return "user not found", 200
sender_name = user_res[0]["name"]
sender_user_id = user_res[0]["id"]
group_member_res = supabase.table("group_members").select("*").eq("twilio_phone_number", reciever).eq("user_id", sender_user_id).execute().data
if len(group_member_res) == 0:
return "group member not found", 200
group_id = group_member_res[0]["group_id"]
group_res = supabase.table("groups").select("*").eq("id", group_id).execute().data
if len(group_res) == 0:
return "group not found", 200
group = group_res[0]
llm_message = get_llm_message(group, incoming_msg, sender_name)
resp = MessagingResponse()
res = f"{llm_message}"
resp.message(res)
return str(resp)
except Exception as e:
print(e)
return "error", 500
llm = OpenAI()
ai_prompt = PromptTemplate(input_variables=[], template="AI Motvator: ")
human_prompt = PromptTemplate(input_variables=["name", "msg"], template="{name}: {msg}")
def get_llm_message(group: Dict[str, Any], incoming_msg: str, sender_name: str):
if group["memory"] == None:
initial_prompt = (
group["base_prompt"]
+ "\n"
+ group["initial_context"]
+ human_prompt.format(name=sender_name, msg=incoming_msg)
+ ai_prompt.format()
)
return llm(initial_prompt).strip()
if __name__ == "__main__":
application.debug = True
application.run() | joseph-mcallister/motivate | flask-app/application.py | application.py | py | 2,603 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line... |
38904370760 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn import metrics
import math
from helpers import one_hot_embedding, get_device
from MutualInformation import MutualInformation
def relu_evidence(y):
return F.relu(y)
def calc_ece_softmax(softmax, label, bins=5, sample_wise=False):
bin_boundaries = torch.linspace(0, 1, bins + 1)
bin_lowers = bin_boundaries[:-1]
bin_uppers = bin_boundaries[1:]
softmax = torch.tensor(softmax)
labels = torch.tensor(label)
softmax_max, predictions = torch.max(softmax, 1)
correctness = predictions.eq(labels)
# ece = torch.zeros(1)
batch_size = softmax.shape[0]
ece = torch.zeros(batch_size)
for i in range(batch_size):
for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
in_bin = softmax_max[i].gt(bin_lower.item()) * softmax_max[i].le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0.0:
accuracy_in_bin = correctness[i][in_bin].float().mean()
avg_confidence_in_bin = softmax_max[i][in_bin].mean()
# print(accuracy_in_bin, avg_confidence_in_bin)
ece[i] += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
if sample_wise:
return ece
ece = ece.mean().item()
# print(ece)
log_ece = -math.log(ece+1e-9)
# print(log_ece)
return ece
def calc_ece_evidence_u(softmax, u, label, bins=15, sample_wise=False):
bin_boundaries = torch.linspace(0, 1, bins + 1)
bin_lowers = bin_boundaries[:-1]
bin_uppers = bin_boundaries[1:]
softmax = torch.tensor(softmax)
u = torch.tensor(u)
labels = torch.tensor(label)
softmax_max, predictions = torch.max(softmax, 1)
# print(predictions.shape, labels.shape, softmax_max.shape)
correctness = predictions.eq(labels)
# correctness = correctness.unsqueeze(1)
ece = torch.zeros(1)
for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
in_bin = softmax_max.gt(bin_lower.item()) * softmax_max.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0.0:
accuracy_in_bin = correctness[in_bin].float().mean()
avg_confidence_in_bin = softmax_max[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece.item()
class DiceMetric(nn.Module):
def __init__(self, smooth=1.0, activation='sigmoid'):
super(DiceMetric, self).__init__()
self.smooth = smooth
self.activation = activation
self.device = get_device()
def dice_coef(self, softmax_pred, gt):
""" computational formula
"""
# softmax_pred = torch.nn.functional.softmax(pred, dim=1)
seg_pred = torch.argmax(softmax_pred, dim=1)
all_dice = 0
gt = gt.squeeze(dim=1)
batch_size = gt.shape[0]
num_class = softmax_pred.shape[1]
dice_ones = torch.ones(batch_size).to(self.device)
for i in range(num_class):
each_pred = torch.zeros_like(seg_pred)
each_pred[seg_pred==i] = 1
each_gt = torch.zeros_like(gt)
each_gt[gt==i] = 1
intersection = torch.sum((each_pred * each_gt).view(batch_size, -1), dim=1)
# mask = each_gt.view(batch_size,-1).sum(1) > 0
# mask = mask.to(torch.int32)
union = each_pred.view(batch_size,-1).sum(1) + each_gt.view(batch_size,-1).sum(1)
mask = union > 0
mask = mask.to(torch.int32)
dice = (2. * intersection + 1e-5)/ (union + 1e-5)
dice = mask * dice + (1-mask) * dice_ones
all_dice += torch.mean(dice)
return all_dice * 1.0 / num_class
def forward(self, pred, gt):
sigmoid_pred = F.softmax(pred,dim=1)
batch_size = gt.shape[0]
num_class = sigmoid_pred.shape[1]
# conver label to one-hot
bg = torch.zeros_like(gt)
bg[gt==0] = 1
label1 = torch.zeros_like(gt)
label1[gt==1] = 1
label2 = torch.zeros_like(gt)
label2[gt == 2] = 1
label = torch.cat([bg, label1, label2], dim=1)
loss = 0
smooth = 1e-5
for i in range(num_class):
intersect = torch.sum(sigmoid_pred[:, i, ...] * label[:, i, ...])
z_sum = torch.sum(sigmoid_pred[:, i, ...] )
y_sum = torch.sum(label[:, i, ...] )
loss += (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - loss * 1.0 / num_class
return loss
def calc_mi(outputs, labels, sample_wise=False):
device = get_device()
# print(device)
_, preds = torch.max(outputs, 1)
match = torch.eq(preds, labels)
match = match.unsqueeze(1)
evidence = relu_evidence(outputs)
alpha = evidence + 1
expected_prob = torch.nn.functional.normalize(alpha, p=1, dim=1)
uncertainty, _ = torch.max(expected_prob, dim=1, keepdim=True)
MI = MutualInformation(num_bins=256, sigma=0.4, normalize=True, device=device)
score = ( MI(match, uncertainty) + MI(uncertainty, match) ) / 2.
if sample_wise:
return score
score = score.mean().item()
return score
| Tom-Liii/PostNet-ESD | metrics.py | metrics.py | py | 5,593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.functional.relu",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.ten... |
7557787218 | import sys
import io
import os
import copy
def Run(input, output):
readline = io.BytesIO(
os.read(
input,
os.fstat(input).st_size
)
).readline
N = int(readline())
grass = [None for _ in range(N)]
for i in range(N):
grass[i] = [int(i) for i in readline().split()]
prefix_100 = get_prefix(grass, N, 100)
prefix_101 = get_prefix(grass, N, 101)
rect_100 = get_n_rectangles(prefix_100, N)
rect_101 = get_n_rectangles(prefix_101, N)
output.write("{}\n".format(rect_100 - rect_101))
def get_prefix(matrix, N, threshold):
prefix = copy.deepcopy(matrix)
for i in range(N):
if prefix[i][-1] >= threshold:
prefix[i][-1] = 1
else:
prefix[i][-1] = 0
for j in range(N - 2, -1, -1):
if prefix[i][j] >= threshold:
prefix[i][j] = prefix[i][j + 1] + 1
else:
prefix[i][j] = 0
return prefix
def get_n_rectangles(prefix, N):
total = 0
for i in range(N):
for j in range(N):
min = N
for k in range(i, N):
if prefix[k][j] < min:
min = prefix[k][j]
total += min
return total
Run(0, sys.stdout) | chenant2017/USACO | Silver/2021 Feb/green.py | green.py | py | 1,137 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "io.BytesIO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.read",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.fstat",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 26,
... |
39124528339 | import datetime
class Comment:
def __init__(self,game,userName,description):
self.userName = userName
self.game = game
self.date = str(datetime.datetime.now())
self.description = description
def dump(self):
return {
'name': self.userName,
'game': self.game,
'date': self.date,
'description': self.description
} | eduardomep/SpartanStore-Server | Comment.py | Comment.py | py | 416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
}
] |
1922570776 | from problem_000 import *
from prime import next_prime
class Problem_007(Problem):
def __init__(self):
self.problem_nr = 7
self.input_format = (InputType.NUMBER_INT, 1, 1000000)
self.default_input = 10001
self.description_str ='''By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the ''' + dye_input_var("10 001") + "st prime number?"
def calculate(self, N):
p = 2
nr = 1
while nr < N:
p = next_prime(p)
nr += 1
self.last_result = p
register_problem(Problem_007())
| Kwasniok/ProjectEuler-Solver | src/problem_007.py | problem_007.py | py | 628 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "prime.next_prime",
"line_number": 19,
"usage_type": "call"
}
] |
37350115657 | import numpy as np
import pytest
from ase.build import bulk
from gpaw import GPAW, PW, Mixer
from gpaw.mpi import world
@pytest.mark.stress
def test_pw_si_stress(in_tmp_dir):
xc = 'PBE'
si = bulk('Si')
si.calc = GPAW(mode=PW(200),
mixer=Mixer(0.7, 5, 50.0),
xc=xc,
kpts=(1, 1, 2), # Run (1, 1, 2) to avoid gamma pt code
convergence={'energy': 1e-8},
parallel={'domain': min(2, world.size)},
txt='si_stress.txt')
si.set_cell(np.dot(si.cell,
[[1.02, 0, 0.03],
[0, 0.99, -0.02],
[0.2, -0.01, 1.03]]),
scale_atoms=True)
si.get_potential_energy()
# Trigger nasty bug (fixed in !486):
si.calc.wfs.pt.blocksize = si.calc.wfs.pd.maxmyng - 1
# Compute error in stress as numerical - analytical
s_analytical = si.get_stress()
s_numerical = si.calc.calculate_numerical_stress(si, 1e-5)
s_err = s_numerical - s_analytical
assert np.all(abs(s_err) < 1e-4)
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/test/pw/test_si_stress.py | test_si_stress.py | py | 1,105 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ase.build.bulk",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "gpaw.GPAW",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gpaw.PW",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gpaw.Mixer",
"line_number": 14,
... |
7024088458 | import customtkinter
class infoPage:
    """Info/about page showing supplier credits, the app version and an exit button.

    NOTE(review): ``secondColor`` and ``fourthColor`` are accepted but never
    used below — kept for signature compatibility with the other pages.
    """

    def __init__(self, frame, mainColor, secondColor, thirdColor, fourthColor, textColor, openConvertPage):
        # Callback that navigates back to the convert page (wired to the exit button).
        self.openConvertPage = openConvertPage
        self.create_page(frame, mainColor, secondColor, thirdColor, fourthColor, textColor)

    def create_page(self, frame, mainColor, secondColor, thirdColor, fourthColor, textColor):
        # 3-column x 5-row grid; rows 0-3 hold the credits box, row 4 the footer.
        self.infoFrame = customtkinter.CTkFrame(frame, fg_color=mainColor)
        self.infoFrame.columnconfigure((0, 1, 2), weight=1)
        self.infoFrame.rowconfigure((0, 1, 2, 3), weight=2)
        self.infoFrame.rowconfigure(4, weight=1)

        # Supplier credits text (user-facing, kept verbatim).
        companies = "PONZIO POLSKA Sp. z o.o\nNip: 7741008197\n\nWinkhaus Polska Beteiligungs Sp. z o.o sp.k.\nNip: 6970011183\n\nAliplast"

        companiesFrame = customtkinter.CTkFrame(self.infoFrame, fg_color=thirdColor)
        companiesFrame.grid(row = 0, column = 0, rowspan = 4, columnspan = 3, sticky = "NSWE", padx = 50, pady = 50)
        companiesFrame.rowconfigure(0, weight=1)
        companiesFrame.columnconfigure(0, weight=1)

        companiesLabel = customtkinter.CTkLabel(companiesFrame, text = companies, font=("Arial", 25), text_color=textColor)
        companiesLabel.grid(row = 0, column = 0, sticky = "NSWE")

        versionLabel = customtkinter.CTkLabel(self.infoFrame, text = "Wersja 1.4", font=("Arial", 12), text_color=textColor)
        versionLabel.grid(row = 4, column = 2, sticky = "NSWE")

        exitButton = customtkinter.CTkButton(self.infoFrame, text = "Wyjdź", command=self.openConvertPage)
        exitButton.grid(row = 4, column = 1, sticky = "NSWE", padx = 50, pady = 50)
| LukaszButurla/xml-compiler-tkinter | ui/infoPage.py | infoPage.py | py | 1,717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "customtkinter.CTkFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkLabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
43043002916 | import cv2
import numpy as np
import face_recognition
# --- Reference capture: grab one webcam frame and encode the user's face ---
cap = cv2.VideoCapture(0)
success, imgUser = cap.read()
imgUser = cv2.cvtColor(imgUser, cv2.COLOR_BGR2RGB)
# NOTE(review): face_encodings returns an empty list when no face is visible
# in the reference frame; compare_faces/face_distance would then yield empty
# results below — consider retrying the capture until a face is found.
encodeUser = face_recognition.face_encodings(imgUser)

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    # Down-scale by 4x for faster detection; coordinates are scaled back later.
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        maches = face_recognition.compare_faces(encodeUser, encodeFace)
        faceDist = face_recognition.face_distance(encodeUser, encodeFace)
        print(faceDist)
        matchIndex = np.argmin(faceDist)

        # The original if/else branches drew identical rectangles and text;
        # only the label differed, so pick the label once and draw once.
        name = "User" if maches[matchIndex] else "Not User"
        print(name)

        # Scale detection box back up to full-resolution coordinates.
        y1, x2, y2, x1 = faceLoc
        y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

    cv2.imshow('Webcam', img)
cv2.waitKey(1) | SRA-V/Exam-Cheater | Cheater.py | Cheater.py | py | 1,623 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "face_recognition.fa... |
495566217 | import os
import time
import pytest
from dagster.core.engine.child_process_executor import (
ChildProcessCommand,
ChildProcessCrashException,
ChildProcessDoneEvent,
ChildProcessEvent,
ChildProcessStartEvent,
ChildProcessSystemErrorEvent,
execute_child_process_command,
)
class DoubleAStringChildProcessCommand(ChildProcessCommand):
    """Test command that yields its input string concatenated with itself."""

    def __init__(self, a_str):
        self.a_str = a_str

    def execute(self):
        # For strings, `s * 2` is equivalent to `s + s`.
        yield self.a_str * 2
class AnError(Exception):
    """Marker exception raised inside the child process by ThrowAnErrorCommand."""
class ThrowAnErrorCommand(ChildProcessCommand):  # pylint: disable=no-init
    # Raises from inside the child process to exercise system-error reporting.
    def execute(self):
        raise AnError('Oh noes!')
class CrashyCommand(ChildProcessCommand):  # pylint: disable=no-init
    # Simulates a hard crash that bypasses Python cleanup entirely.
    def execute(self):
        # access inner API to simulate hard crash
        os._exit(1)  # pylint: disable=protected-access
class LongRunningCommand(ChildProcessCommand):  # pylint: disable=no-init
    # Sleeps long enough to exercise long-poll behavior (see skipped test below).
    def execute(self):
        time.sleep(1.0)
        yield 1
def test_basic_child_process_command():
    # Drop bookkeeping ChildProcessEvent instances; keep only yielded values.
    events = list(
        filter(
            lambda x: x and not isinstance(x, ChildProcessEvent),
            execute_child_process_command(DoubleAStringChildProcessCommand('aa')),
        )
    )
    assert events == ['aaaa']
def test_basic_child_process_command_with_process_events():
    # Expected sequence: start event, the yielded value, done event.
    events = list(
        filter(lambda x: x, execute_child_process_command(DoubleAStringChildProcessCommand('aa')))
    )

    assert len(events) == 3
    assert isinstance(events[0], ChildProcessStartEvent)
    child_pid = events[0].pid
    # The command must run in a separate process, not in this one.
    assert child_pid != os.getpid()
    assert events[1] == 'aaaa'
    assert isinstance(events[2], ChildProcessDoneEvent)
    assert events[2].pid == child_pid
def test_child_process_uncaught_exception():
    # An uncaught exception in the child surfaces as a system-error event
    # carrying the original error info, rather than propagating directly.
    results = list(
        filter(
            lambda x: x and isinstance(x, ChildProcessSystemErrorEvent),
            execute_child_process_command(ThrowAnErrorCommand()),
        )
    )
    assert len(results) == 1
    assert 'AnError' in str(results[0].error_info.message)
def test_child_process_crashy_process():
    # A hard os._exit crash raises ChildProcessCrashException in the parent.
    with pytest.raises(ChildProcessCrashException):
        list(execute_child_process_command(CrashyCommand()))
@pytest.mark.skip('too long')
def test_long_running_command():
    # Smoke test only; skipped by default because of the 1 s sleep in the command.
    list(execute_child_process_command(LongRunningCommand()))
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/core_tests/engine_tests/test_child_process_executor.py | test_child_process_executor.py | py | 2,339 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.core.engine.child_process_executor.ChildProcessCommand",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dagster.core.engine.child_process_executor.ChildProcessCommand",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "dagster.core.engine... |
11944953948 | # -*- coding: utf-8 -*-
"""
Created on Mon May 09 08:58:28 2016
@author: Jonatan
"""
from __future__ import division
from spider_plot_2 import *
def p2h_spider():
    """Build a spider (radar) chart comparing three power-to-heat scenarios.

    Returns
    -------
    Drawing
        A 400x400 reportlab Drawing containing the configured SpiderChart.
    """
    d = Drawing(400, 400)
    sp = SpiderChart()
    # Position the 300x300 chart with a 50 pt margin inside the drawing.
    sp.x = 50
    sp.y = 50
    sp.width = 300
    sp.height = 300
    sp.data = [[1, 2, 3], [8, 3, 5], [5, 9, 1]]
    sp.labels = ['Base case', 'Hydro min = 0', 'With accumulation']
    # BUG FIX: the original contained the bare statement
    # `sp.strands.strokeWidth` — an attribute read with no assignment, i.e. a
    # no-op left over from an unfinished styling tweak; removed.
    d.add(sp)
    return d
if __name__ == '__main__':
    # Render the demo chart to PDF when this module is run as a script.
    d = p2h_spider()
    from reportlab.graphics.renderPDF import drawToFile
    drawToFile(d, 'p2h_spider.pdf')
{
"api_name": "reportlab.graphics.renderPDF.drawToFile",
"line_number": 31,
"usage_type": "call"
}
] |
16032798991 | from modeling.resnet import resnet50
import torch.nn as nn
import torch.nn.functional as F
import torch
class fpn_module(nn.Module):
    """Feature Pyramid Network head over ResNet stages c2..c5.

    Builds top-down pathway P5..P2, smooths each level, upsamples all levels
    to P2's spatial size, concatenates them and classifies per pixel.

    NOTE(review): F.upsample is deprecated in modern PyTorch in favor of
    F.interpolate — kept as-is here to preserve behavior on the pinned version.
    """

    def __init__(self, numClass):
        super(fpn_module, self).__init__()
        # Top layer
        self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)  # Reduce channels
        # Smooth layers
        # First 3x3 keeps 256 channels, second 3x3 reduces each level to 128.
        self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        # Lateral layers
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        # Classify layers
        self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)

    def _concatenate(self, p5, p4, p3, p2):
        # Upsample p5..p3 to p2's (H, W) and stack all four along channels.
        _, _, H, W = p2.size()
        p5 = F.upsample(p5, size=(H, W), mode='bilinear')
        p4 = F.upsample(p4, size=(H, W), mode='bilinear')
        p3 = F.upsample(p3, size=(H, W), mode='bilinear')
        return torch.cat([p5, p4, p3, p2], dim=1)

    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.

        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.

        Returns:
          (Variable) added feature map.

        Note in PyTorch, when input size is odd, the upsampled feature map
        with `F.upsample(..., scale_factor=2, mode='nearest')`
        maybe not equal to the lateral feature map size.

        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]

        So we choose bilinear upsample which supports arbitrary output sizes.
        '''
        _, _, H, W = y.size()
        return F.upsample(x, size=(H, W), mode='bilinear') + y

    def forward(self, c2, c3, c4, c5):
        # Top-down
        p5 = self.toplayer(c5)
        p4 = self._upsample_add(p5, self.latlayer1(c4))
        p3 = self._upsample_add(p4, self.latlayer2(c3))
        p2 = self._upsample_add(p3, self.latlayer3(c2))
        # Smooth
        p5 = self.smooth1_2(self.smooth1_1(p5))
        p4 = self.smooth2_2(self.smooth2_1(p4))
        p3 = self.smooth3_2(self.smooth3_1(p3))
        p2 = self.smooth4_2(self.smooth4_1(p2))
        # Classify
        output = self.classify(self._concatenate(p5, p4, p3, p2))
        return output
class fpn(nn.Module):
    """ResNet-50 backbone followed by the FPN head defined above."""

    def __init__(self, numClass):
        super(fpn, self).__init__()
        # Res net
        self.resnet = resnet50(True)
        # fpn module
        self.fpn = fpn_module(numClass)
        # init fpn
        # All direct children of fpn_module are Conv2d layers, so each has
        # .weight and .bias to initialize.
        for m in self.fpn.children():
            nn.init.normal_(m.weight, mean=0, std=0.01)
            nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Top-down
        c2, c3, c4, c5 = self.resnet.forward(x)
        return self.fpn.forward(c2, c3, c4, c5)
class FocalLoss(nn.Module):
    """Multi-class focal loss: -(1 - p_t)^gamma * log(p_t).

    With gamma=0 this reduces to standard cross-entropy on softmax
    probabilities. `reduce`/`size_average` select none/mean/sum reduction.
    """

    # def __init__(self, device, gamma=0, eps=1e-7, size_average=True):
    def __init__(self, gamma=0, eps=1e-7, size_average=True, reduce=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.eps = eps          # clamp bound keeping log() finite
        self.size_average = size_average
        self.reduce = reduce
        # self.device = device

    def forward(self, input, target):
        # One-hot encode targets along the class dimension (dim 1),
        # then pick out each sample's probability of its true class.
        # y = one_hot(target, input.size(1), self.device)
        y = one_hot(target, input.size(1))
        probs = F.softmax(input, dim=1)
        probs = (probs * y).sum(1)  # dimension ???
        probs = probs.clamp(self.eps, 1. - self.eps)

        log_p = probs.log()
        # print('probs size= {}'.format(probs.size()))
        # print(probs)

        # Focal modulation: down-weight well-classified examples.
        batch_loss = -(torch.pow((1 - probs), self.gamma)) * log_p
        # print('-----bacth_loss------')
        # print(batch_loss)

        if self.reduce:
            if self.size_average:
                loss = batch_loss.mean()
            else:
                loss = batch_loss.sum()
        else:
            loss = batch_loss
        return loss
def one_hot(index, classes):
    """Scatter an integer label tensor into a one-hot encoding along dim 1.

    Given `index` of shape (N, *dims), returns a float tensor of shape
    (N, classes, *dims) with a 1 at each label position and 0 elsewhere.
    """
    batch = index.size()[:1]
    trailing = index.size()[1:]
    # Zero-filled destination tensor, moved to GPU when one is available.
    mask = torch.Tensor(batch + (classes,) + trailing).fill_(0)
    if torch.cuda.is_available():
        mask = mask.cuda()
    # Insert a singleton class dimension so scatter_ can index along dim 1.
    return mask.scatter_(1, index.view(batch + (1,) + trailing), 1.)
def get_NoGT_target(inputs):
    """Derive pseudo-labels as the per-position argmax over class logits (dim 1)."""
    # log_softmax is monotonic, so argmax over it equals argmax over raw logits;
    # kept to mirror the original computation exactly.
    log_probs = F.log_softmax(inputs, dim=1)
    return torch.argmax(log_probs, dim=1)
def resize_target(target, size):
    """Resize a batch of label maps to (size, size) via cubic interpolation.

    Parameters
    ----------
    target : torch.Tensor, shape (batch, H, W)
        Integer label maps; must live on CPU since `.numpy()` is called.
    size : int
        Output side length.

    Returns
    -------
    numpy.ndarray of shape (batch, size, size), dtype int32.
    """
    # BUG FIX: `np` and `cv2` were used here but never imported anywhere in
    # this module, so calling the function raised NameError. Import them
    # locally to keep the function self-contained.
    import cv2
    import numpy as np

    new_target = np.zeros((target.shape[0], size, size), np.int32)
    for i, t in enumerate(target.numpy()):
        # NOTE(review): INTER_CUBIC can produce label values absent from the
        # input; INTER_NEAREST is the usual choice for label maps. Kept as-is
        # to preserve existing behavior — confirm intent with the authors.
        new_target[i, ...] = cv2.resize(t, (size,) * 2, interpolation=cv2.INTER_CUBIC)
    return new_target
| ShenZheng2000/Semantic-Guided-Low-Light-Image-Enhancement | modeling/fpn.py | fpn.py | py | 5,489 | python | en | code | 76 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
72664562664 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# loading image
#img0 = cv2.imread('SanFrancisco.jpg',)
img0 = cv2.imread('segmented_img.jpg')

# converting to gray scale
img = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)

# remove noise
#img = cv2.GaussianBlur(gray,(3,3),0)

# convolute with proper kernels
laplacian = cv2.Laplacian(img, cv2.CV_64F)
cv2.imwrite("laplacian_img.jpg", laplacian)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
cv2.imwrite("sobelx_img.jpg", sobelx)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)  # y
# BUG FIX: the original wrote `sobelx` into sobely_img.jpg, so the saved
# "Sobel Y" image was actually the X-gradient.
cv2.imwrite("sobely_img.jpg", sobely)

# 2x2 panel: original, Laplacian, Sobel X, Sobel Y.
plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

plt.show()
| kateriska/IBP | sobelExercise.py | sobelExercise.py | py | 1,025 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.Laplacian",
"l... |
8248770144 | """Provides basic utilities to check status of Discord bot
ping() pings the bot and checks Discord latency, message latency and database latency
source() gets information on where to find source files and feedback server
clearMessages() deletes messages from the last 14 days from a channel
setAutoNicknames() true or false to set function to automatically change users nicnknames on joining
configurations() displays servers' configuration such as autoNickname
"""
__author__ = "Joel Adams"
__maintainer__ = "Joel Adams"
__email__ = "joa38@aber.ac.uk"
__version__ = "2.0"
__status__ = "Production"
__system__ = "Discord bot"
__deprecated__ = False
import discord
from discord import Embed
from discord.ext import commands
from discord.ext.commands import Context
#from discord_slash import cog_ext, SlashContext
from AberLink import logger as logging
from cogs import admin_roles, emojis, shelve_file, guild_ids
from .db import PostgreSQL
from time import time
import asyncio
import shelve
def setup(bot):
    # discord.py extension entry point: register the Utilities cog on load.
    bot.add_cog(Utilities(bot))
class Utilities(commands.Cog):
    """
    Bot utilities
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['p'])
    #@cog_ext.cog_subcommand(base="utilities", name="ping", guild_ids=guild_ids)
    async def ping(self, ctx: Context):
        """
        Returns latency and response time of Discord and the database
        """
        # Time the initial send to measure bot->Discord response time, then
        # edit the same message in place with the full latency report.
        start_time = time()
        message = await ctx.send(f'🏓 pong `DWSP latency: {str(round(ctx.bot.latency * 1000))}ms`')
        end_time = time()
        db_latency = PostgreSQL.get_connection_latency()
        db_poll = PostgreSQL.get_polling_status()
        await message.edit(content=f'🏓 pong \n{emojis["discord"]} `DWSP latency: {str(round(ctx.bot.latency * 1000))}ms` ' +
                           f'`Response time: {str(int((end_time - start_time) * 1000))}ms` \n' +
                           f'{emojis["aberlink_database"]} `Database Polling status: {db_poll}` `Database latency: {db_latency}ms`')

    @commands.command(aliases=['s'])
    #@cog_ext.cog_subcommand(base="utilities", name="source", guild_ids=guild_ids)
    async def source(self, ctx: Context):
        """
        Returns a link to the source code
        """
        embed = Embed(description='Created and maintained by `Joel Adams` for his major project', colour=discord.Colour.green())
        embed.add_field(name=f'{emojis["aberlink"]} Repository (closed source):',
                        value='https://github.com/JoelLucaAdams/aberlink', inline=False)
        embed.add_field(name=f'{emojis["discord"]} Discord server (suggestions or feedback):',
                        value='https://discord.gg/XKtfya9NHF', inline=False)
        await ctx.send(embed=embed)
        #await ctx.send(content='Created and maintained by `Joel Adams` for a major project\n'
        #f'{emojis["aberlink"]} Repository (closed source): <https://github.com/JoelLucaAdams/aberlink>\n'
        #f'{emojis["discord"]} Discord server (suggestions or feedback): https://discord.gg/b3EdxVK')

    @commands.command()
    #@cog_ext.cog_subcommand(base="utilities", name="bots", guild_ids=guild_ids)
    async def bots(self, ctx: Context):
        '''
        Displays a list of useful bots to add to your server
        '''
        embed = Embed(title='Additional discord bots', description='Below is a list of discord bots that you should consider adding to your server', colour=discord.Color.blue())
        embed.set_thumbnail(url='https://discord.com/assets/2c21aeda16de354ba5334551a883b481.png')
        embed.add_field(name=f'{emojis["demohelper"]} DemoHelper',
                        value='Discord invite: https://bit.ly/2Qj1A3W\n'
                        'Github link: https://github.com/AberDiscordBotsTeam/demoHelperBot', inline=False)
        embed.add_field(name=f'{emojis["muddy_points"]} Muddy Points',
                        value='Discord invite: https://bit.ly/3tCUNk1\n'
                        'Github link: https://github.com/NealSnooke/Muddy-Points---Discord-Bot', inline=False)
        embed.add_field(name=f'{emojis["simple_poll"]} Simple Poll',
                        value='Discord invite: https://bit.ly/3eTkA3o\n'
                        'Github link: N/A', inline=False)
        await ctx.send(embed=embed)

    @commands.command(aliases=['cm'])
    @commands.has_any_role(*admin_roles)
    @commands.bot_has_permissions(manage_messages=True)
    async def clearMessages(self, ctx: Context):
        """
        *Warning* Clears all messages in a channel
        that are less than 14 days old
        """
        # Confirmation flow: react 👍 to purge, 👎 to cancel.
        msg = await ctx.send('Are you sure you want to clear messages?')
        await msg.add_reaction('👍')
        await msg.add_reaction('👎')

        def check(_, user):
            # Only accept reactions from the command author.
            return user == ctx.message.author

        reaction, _ = await ctx.bot.wait_for('reaction_add', check=check)
        if str(reaction.emoji) == '👍':
            logging.info('{0}: #{1} messages cleared by {2}'.format(ctx.guild, ctx.channel.name, ctx.message.author))
            counter = await ctx.channel.purge()
            msg = await ctx.channel.send(f'Success! Messages deleted: `{len(counter)}`, this message will delete in 5 seconds')
            await asyncio.sleep(5)
            await msg.delete()
        elif str(reaction.emoji) == '👎':
            await msg.delete()
            await ctx.send('Messages have not been cleared')

    @commands.command(aliases=['san'])
    @commands.has_any_role(*admin_roles)
    async def setAutoNicknames(self, ctx: Context, state: bool):
        """
        Change whether nicknames are automatically set
        :param state: bool
        """
        #logging.info('{0}: #{1} setAddMessage to "{2}" by {3}'.format(ctx.guild, ctx.channel.name, message, ctx.message.author))
        # Persist the per-guild flag keyed by guild id in the shelve store.
        with shelve.open(shelve_file) as db:
            db[str(ctx.guild.id)] = state
        # NOTE(review): 'nickanmes' is a typo in this user-facing string;
        # left unchanged here since this edit touches documentation only.
        embed = Embed(description=f'Auto set user nickanmes has been set to `{state}`')
        if state:
            embed.colour = discord.Colour.green()
        else:
            embed.colour = discord.Colour.red()
        await ctx.send(embed=embed)

    @commands.command(aliases=['c'])
    @commands.has_any_role(*admin_roles)
    async def configurations(self, ctx: Context):
        """
        Displays the bot's configuration in this server
        """
        serverID = str(ctx.guild.id)
        with shelve.open(shelve_file) as db:
            if serverID in db:
                data = db[serverID]
            else:
                # First lookup for this guild: default auto-nicknames to True.
                db[serverID] = True
                data = db[serverID]
        embed = Embed(description='Below is a list of configurations available in the bot', colour=discord.Colour.orange())
        embed.add_field(name='Set Auto Nicknames:', value=f'`{data}`')
        await ctx.send(embed=embed)
| JoelLucaAdams/aberlink | src/AberLinkDiscord/cogs/utilities.py | utilities.py | py | 6,872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Context",
"line_number": 45,
"usage_type": "name"
},
{
"a... |
18248471298 | """
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manually inspected with EKG.plotpeaks method and
false detections manually removed with rm_peak method. After rpeak examination,
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from pandas.plotting import register_matplotlib_converters
from scipy.signal import welch
from scipy.stats.distributions import chi2
class EKG:
"""
Run EKG analyses including cleaning and visualizing data.
Attributes
----------
metadata : nested dict
File information and analysis information.
Format {str:{str:val}} with val being str, bool, float, int or pd.Timestamp.
data : pd.DataFrame
Raw data of the EKG signal (mV) and the threshold line (mV) at each sampled time point.
rpeak_artifacts : pd.Series
False R peak detections that have been removed.
rpeaks_added : pd.Series
R peak detections that have been added.
ibi_artifacts : pd.Series
Interbeat interval data that has been removed.
rpeaks : pd.Series
Cleaned R peaks data without removed peaks and with added peaks.
rr : np.ndarray
Time between R peaks (ms).
nn : np.ndarray
Cleaned time between R peaks (ms) without removed interbeat interval data.
rpeaks_df : pd.DataFrame
Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sampled point.
"""
    def __init__(self, fname, fpath, polarity='positive', min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=90, upshift=3.5,
        rms_align='right', detect_peaks=True, pan_tompkins=True):
        """
        Initialize raw EKG object.

        Parameters
        ----------
        fname : str
            Filename.
        fpath : str
            Path to file.
        polarity: str, default 'positive'
            polarity of the R-peak deflection. Options: 'positive', 'negative'
        min_dur : bool, default True
            Only load files that are >= 5 minutes long.
        epoched : bool, default True
            Whether file was epoched using ioeeg.
        smooth : bool, default False
            Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
            preventing accurate peak detection.
        sm_wn : float, default 30
            Size of moving window for rms smoothing preprocessing (milliseconds).
        mw_size : float, default 90
            Moving window size for R peak detection (milliseconds).
        upshift : float, default 3.5
            Detection threshold upshift for R peak detection (% of signal).
        rms_align: str, default 'right'
            whether to align the mean to the right or left side of the moving window [options: 'right', 'left']
        detect_peaks : bool, default True
            Option to detect R peaks and calculate interbeat intervals.
        pan_tompkins : bool, default True
            Option to detect R peaks using automatic pan tompkins detection method

        Returns
        -------
        EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
        """
        # set metadata
        # Filename encodes ID, date, sleep stage, cycle (and epoch when epoched).
        filepath = os.path.join(fpath, fname)
        if epoched == False:
            in_num, start_date, slpstage, cycle = fname.split('_')[:4]
        elif epoched == True:
            in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
        self.metadata = {'file_info':{'in_num': in_num,
                                      'fname': fname,
                                      'path': filepath,
                                      'rpeak_polarity': polarity,
                                      'start_date': start_date,
                                      'sleep_stage': slpstage,
                                      'cycle': cycle
                                      }
                         }
        if epoched == True:
            self.metadata['file_info']['epoch'] = epoch

        # load the ekg
        self.load_ekg(min_dur)

        # flip the polarity if R peaks deflections are negative
        if polarity == 'negative':
            self.data = self.data*-1

        if smooth == True:
            self.rms_smooth(sm_wn)
        else:
            self.metadata['analysis_info']['smooth'] = False
            self.metadata['analysis_info']['rms_smooth_wn'] = 'N/A'

        # create empty series for false detections removed and missed peaks added
        self.rpeak_artifacts = pd.Series()
        self.rpeaks_added = pd.Series()
        self.ibi_artifacts = pd.Series()

        # detect R peaks
        if detect_peaks == True:
            if pan_tompkins == True:
                self.metadata['analysis_info']['mw_size'] = 'N/A'
                self.metadata['analysis_info']['upshift'] = 'N/A'
                self.metadata['analysis_info']['rms_align'] = 'N/A'
                # NOTE(review): presumably pan_tompkins_detector sets self.rr
                # (needed for `self.nn = self.rr` below) — confirm in its body.
                self.pan_tompkins_detector()
            # detect R peaks & calculate inter-beat intevals
            else:
                self.calc_RR(smooth, mw_size, upshift, rms_align)
                self.metadata['analysis_info']['pan_tompkins'] = False

        # initialize the nn object
        self.nn = self.rr

        register_matplotlib_converters()
def load_ekg(self, min_dur):
"""
Load EKG data from csv file and extract metadata including sampling frequency, cycle length, start time and NaN data.
Parameters
----------
min_dur : bool, default True
If set to True, will not load files shorter than the minimum duration length of 5 minutes.
"""
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)['EKG']
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
if cycle_len_secs < 60*5-1:
if min_dur == True:
print('Data is shorter than minimum duration. Cycle will not be loaded.')
print('--> To load data, set min_dur to False')
return
else:
print('* WARNING: Data is shorter than 5 minutes.')
self.data = data
else:
self.data = data
diff = data.index.to_series().diff()[1:2]
s_freq = 1000000/diff[0].microseconds
nans = len(data) - data['Raw'].count()
# Set metadata
self.metadata['file_info']['start_time'] = data.index[0]
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs,
'NaNs(samples)': nans, 'NaNs(secs)': nans/s_freq}
print('EKG successfully imported.')
def rms_smooth(self, sm_wn):
"""
Smooth raw data with root mean square (RMS) moving window.
Reduce noise leading to false R peak detections.
Parameters
----------
sm_wn : float, default 30
Size of moving window for RMS smoothing preprocessing (ms).
"""
self.metadata['analysis_info']['smooth'] = True
self.metadata['analysis_info']['rms_smooth_wn'] = sm_wn
mw = int((sm_wn/1000)*self.metadata['analysis_info']['s_freq'])
self.data['raw_smooth'] = self.data.Raw.rolling(mw, center=True).mean()
    def set_Rthres(self, smooth, mw_size, upshift, rms_align):
        """
        Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.

        Parameters
        ----------
        smooth : bool, default False
            If set to True, raw EKG data will be smoothed using RMS smoothing window.
        mw_size : float, default 100
            Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
        upshift : float, default 3.5
            Percentage of EKG signal that the moving average will be shifted up by to set the R peak detection threshold.
        rms_align: str, default 'right'
            whether to align the mean to the right or left side of the moving window [options: 'right', 'left']

        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window.
        """
        print('Calculating moving average with {} ms window and a {}% upshift...'.format(mw_size, upshift))

        # convert moving window to sample & calc moving average over window
        mw = int((mw_size/1000)*self.metadata['analysis_info']['s_freq'])
        #if smooth is true have the moving average calculated based off of smoothed data
        if smooth == False:
            mavg = self.data.Raw.rolling(mw).mean()
            ekg_avg = np.mean(self.data['Raw'])
        elif smooth == True:
            mavg = self.data.raw_smooth.rolling(mw).mean()
            ekg_avg = np.mean(self.data['raw_smooth'])

        if rms_align == 'left':
            # get the number of NaNs and shift the average left by that amount
            mavg = mavg.shift(-mavg.isna().sum())

        # replace edge nans with overall average
        mavg = mavg.fillna(ekg_avg)

        # set detection threshold as +upshift% of moving average
        # NOTE(review): for negative mavg values, adding np.abs(mavg*upshift_perc)
        # shifts the threshold up toward zero rather than scaling it — confirm
        # this is the intended behavior for negative-baseline signals.
        upshift_perc = upshift/100
        det_thres = mavg + np.abs(mavg*upshift_perc)
        # insert threshold column at consistent position in df to ensure same color for plotting regardless of smoothing
        self.data.insert(1, 'EKG_thres', det_thres) # can remove this for speed, just keep as series

        #set metadata
        self.metadata['analysis_info']['mw_size'] = mw_size
        self.metadata['analysis_info']['upshift'] = upshift
        self.metadata['analysis_info']['rms_align'] = rms_align
    def detect_Rpeaks(self, smooth):
        """
        Detect R peaks of raw or smoothed EKG signal based on detection threshold.

        For each contiguous region where the signal exceeds the threshold
        (a region of interest, ROI), the sample with the maximum value is
        recorded as one R peak.

        Parameters
        ----------
        smooth : bool, default False
            If set to True, raw EKG data is smoothed using a RMS smoothing window.

        See Also
        --------
        EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window
        EKG.set_Rthres : Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
        """
        print('Detecting R peaks...')

        #Use the raw data or smoothed data depending on bool smooth
        if smooth == False:
            raw = pd.Series(self.data['Raw'])
        elif smooth == True:
            raw = pd.Series(self.data['raw_smooth'])

        thres = pd.Series(self.data['EKG_thres'])
        #create empty peaks list
        peaks = []
        x = 0
        #Within the length of the data if the value of raw data (could be smoothed raw data) is less than ekg threshold keep counting forwards
        while x < len(raw):
            if raw[x] > thres[x]:
                roi_start = x
                # count forwards to find down-crossing
                for h in range(x, len(raw), 1):
                    # if value drops below threshold, end ROI
                    if raw[h] < thres[h]:
                        roi_end = h
                        break
                    # else if data ends before dropping below threshold, leave ROI open
                    # & advance h pointer to end loop
                    elif (raw[h] >= thres[h]) and (h == len(raw)-1):
                        roi_end = None
                        h += 1
                        break

                # if ROI is closed, get maximum between roi_start and roi_end
                # NOTE(review): `if roi_end` is falsy when roi_end == 0; that
                # can only occur when x == 0 and raw[0] is simultaneously above
                # and below the threshold, so it is unreachable in practice,
                # but `roi_end is not None` would be the safer test.
                if roi_end:
                    peak = raw[x:h].idxmax()
                    peaks.append(peak)
                # advance the pointer
                x = h
            else:
                x += 1

        self.rpeaks = raw[peaks]
        print('R peak detection complete')

        # get time between peaks and convert to mseconds
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')

        # create rpeaks dataframe and add ibi columm
        rpeaks_df = pd.DataFrame(self.rpeaks)
        # First peak has no preceding interval, so its IBI is NaN.
        ibi = np.insert(self.rr, 0, np.NaN)
        rpeaks_df['ibi_ms'] = ibi
        self.rpeaks_df = rpeaks_df

        print('R-R intervals calculated')
    def rm_peak(self, time):
        """
        Examine a second of interest and manually remove artifact R peaks.

        Interactive: lists the detected peaks within the given second, then
        prompts on stdin for the ids of peaks to remove.

        Parameters
        ----------
        time: str {'hh:mm:ss'}
            Time in the format specified dictating the second containing the peak of interest.

        Modifies
        -------
        self.rpeaks : Peaks that have been removed are removed from attribute.
        self.rpeaks_df : Peaks that have been removed are removed from attribute.
        self.rpeak_artifacts : Removed peaks added to attribute.
        """
        # print all rpeaks in the second of interest
        peak_idxlist = {}
        peak_num = 1
        h, m, s = time.split(':')
        print('id', '\t', 'time', '\t\t\t\t', 'ibi_ms')
        for i, x in enumerate(self.rpeaks_df.index):
            if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
                peak_idxlist[peak_num] = x
                print(peak_num, '\t', x, '\t', self.rpeaks_df['ibi_ms'].loc[x])
                peak_num += 1

        # specify the peak to remove
        rm_peak = input('Rpeaks to remove [list ids or None]: ')
        print('\n')
        if rm_peak == 'None':
            print('No peaks removed.')
            return
        else:
            rm_peaks = rm_peak.split(',')
            rm_peaks = [int(x) for x in rm_peaks]
            for p in rm_peaks:
                peak_to_rm = pd.Series(self.rpeaks[peak_idxlist[p]])
                peak_to_rm.index = [peak_idxlist[p]]

                # add peak to rpeak_artifacts list
                # NOTE(review): pd.Series.append was removed in pandas 2.0;
                # this requires pd.concat if the pandas pin is ever raised.
                self.rpeak_artifacts = self.rpeak_artifacts.append(peak_to_rm)
                self.rpeak_artifacts.sort_index(inplace=True)

                # remove peak from rpeaks list & rpeaks dataframe
                self.rpeaks.drop(peak_idxlist[p], inplace=True)
                self.rpeaks_df.drop(peak_idxlist[p], inplace=True)
                print('R peak at ', peak_to_rm.index[0], ' successfully removed.')

            # recalculate ibi values
            self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
            ibi = np.insert(self.rr, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('ibi values recalculated.')

        # refresh nn values
        self.nn = self.rr
def undo_rm_peak(self, time):
    """
    Manually add back incorrectly removed peaks from EKG.rm_peak method.

    Prints (with numeric IDs) the previously removed peaks within the given
    second and prompts the user via input() for the IDs to restore.

    Parameters
    ----------
    time : str {'hh:mm:ss'}
        Second of incorrectly removed R peak.

    Notes
    -----
    This is strictly an "undo" method. It is NOT equivalent to add_peaks().

    Modifies
    -------
    self.rpeaks : Incorrectly removed R peaks added back.
    self.rpeaks_df : Incorrectly removed R peaks added back.
    self.rr : IBI values recalculated to reflect change in R peaks.
    self.nn : IBI values recalculated to reflect change in R peaks.
    self.rpeak_artifacts : Incorrectly removed R peaks removed from attribute.

    See Also
    --------
    EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
    EKG.add_peak : Examine a second of interest and manually add missed R peaks.
    EKG.undo_add_peak : Manually remove incorrectly added peaks from EKG.add_peak method.
    """
    # nothing to restore if no peaks were ever removed
    if len(self.rpeak_artifacts) == 0:
        print('No rpeaks have been removed.')
        return
    # print all removed rpeaks in the second of interest
    peak_idxlist = {}
    peak_num = 1
    h, m, s = time.split(':')
    print('id', '\t', 'time')
    for i, x in enumerate(self.rpeak_artifacts.index):
        if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
            peak_idxlist[peak_num] = x
            print(peak_num, '\t', x)
            peak_num += 1
    # specify the peak to add back
    add_peak = input('Removed Rpeaks to add back [list ids or None]: ')
    print('\n')
    if add_peak == 'None':
        print('No peaks added.')
        return
    else:
        add_peaks = add_peak.split(',')
        add_peaks = [int(x) for x in add_peaks]
        for p in add_peaks:
            peak_to_add = pd.Series(self.rpeak_artifacts[peak_idxlist[p]])
            peak_to_add.index = [peak_idxlist[p]]
            # remove peak from rpeak_artifacts list
            self.rpeak_artifacts.drop(labels=peak_to_add.index, inplace=True)
            # add peak back to rpeaks list
            self.rpeaks = self.rpeaks.append(peak_to_add)
            self.rpeaks.sort_index(inplace=True)
            # add peak back to rpeaks_df; ibi placeholder is NaN until recalculated below
            self.rpeaks_df.loc[peak_to_add.index[0]] = [peak_to_add[0], np.NaN]
            self.rpeaks_df.sort_index(inplace=True)
            print('Rpeak at ', peak_to_add.index[0], ' successfully replaced.')
        # recalculate ibi values (ms between successive peaks)
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        ibi = np.insert(self.rr, 0, np.NaN)
        self.rpeaks_df['ibi_ms'] = ibi
        print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr
def add_peak(self, time):
    """
    Examine a second of interest and manually add missed R peaks.

    Prompts the user via input() for a millisecond range inside the given
    second, then inserts a new R peak at the maximum signal value within
    that window and recalculates the interbeat intervals.

    Parameters
    ----------
    time : str {'hh:mm:ss'}
        Second within which peak is to be added.

    Modifies
    -------
    self.rpeaks : Added peaks added to attribute.
    self.rpeaks_df : Added peaks added to attribute.
    self.rr : IBI values recalculated to reflect changed R peaks.
    self.nn : IBI values recalculated to reflect changed R peaks.
    self.rpeaks_added : Added peaks stored (used by undo_add_peak).

    See Also
    --------
    EKG.undo_add_peak : Manually remove incorrectly added R peaks from EKG.add_peak method.
    EKG.rm_peak : Examine a second of interest and manually remove artifact R peak.
    EKG.undo_rm_peak : Manually add back incorrectly removed R peaks from EKG.rm_peak method.
    """
    # specify time range of missed peak
    h, m, s = time.split(':')
    us_rng = input('Millisecond range of missed peak [min:max]: ').split(':')
    # add zeros bc datetime microsecond precision goes to 6 figures
    us_min, us_max = us_rng[0] + '000', us_rng[1] + '000'
    # set region of interest for new peak
    ## can modify this to include smoothing if needed
    roi = []
    for x in self.data.index:
        if x.hour == int(h) and x.minute == int(m) and x.second == int(s) and x.microsecond >= int(us_min) and x.microsecond <= int(us_max):
            roi.append(x)
    # define new rpeak as the maximum value in the region of interest,
    # reading from the raw or smoothed channel depending on analysis settings
    if self.metadata['analysis_info']['smooth'] == False:
        peak_idx = self.data.loc[roi]['Raw'].idxmax()
        peak_val = self.data['Raw'].loc[peak_idx]
        new_peak = pd.Series(peak_val, [peak_idx])
    if self.metadata['analysis_info']['smooth'] == True:
        peak_idx = self.data.loc[roi]['raw_smooth'].idxmax()
        peak_val = self.data['raw_smooth'].loc[peak_idx]
        new_peak = pd.Series(peak_val, [peak_idx])
    # add peak to rpeaks list
    self.rpeaks = self.rpeaks.append(new_peak)
    self.rpeaks.sort_index(inplace=True)
    # add peak to rpeaks_df; ibi placeholder is NaN until recalculated below
    self.rpeaks_df.loc[peak_idx] = [peak_val, np.NaN]
    self.rpeaks_df.sort_index(inplace=True)
    # add peak to rpeaks_added list so it can be undone later
    self.rpeaks_added = self.rpeaks_added.append(new_peak)
    self.rpeaks_added.sort_index(inplace=True)
    print('New peak added.')
    # recalculate ibi values (ms between successive peaks)
    self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
    ibi = np.insert(self.rr, 0, np.NaN)
    self.rpeaks_df['ibi_ms'] = ibi
    print('ibi values recalculated.')
    # refresh nn values
    self.nn = self.rr
def undo_add_peak(self, time):
    """
    Manually remove incorrectly added peaks from EKG.add_peak method.

    Prints (with numeric IDs) the previously added peaks within the given
    second and prompts the user via input() for the IDs to remove.

    Parameters
    ----------
    time : str {'hh:mm:ss'}
        Second of incorrectly removed R peak.

    Modifies
    -------
    self.rpeaks : Incorrectly added R peaks removed.
    self.rpeaks_df : Incorrectly added R peaks removed.
    self.rr : IBI values recalculated to reflect change in R peaks.
    self.nn : IBI values recalculated to reflect change in R peaks.
    self.rpeaks_added : Incorrectly added R peaks removed from attribute.

    Notes
    -----
    This is strictly an "undo" method. It is NOT equivalent to EKG.rm_peak.

    See Also
    --------
    EKG.add_peak : Examine a second of interest and manually add missed R peaks.
    EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
    EKG.undo_rm_peak : Manually add back incorrectly removed peaks from EKG.rm_peak method.
    """
    # nothing to undo if no peaks were ever added
    if len(self.rpeaks_added) == 0:
        print('No rpeaks have been added.')
        return
    # print all added rpeaks in the second of interest
    peak_idxlist = {}
    peak_num = 1
    h, m, s = time.split(':')
    print('id', '\t', 'time')
    for i, x in enumerate(self.rpeaks_added.index):
        if x.hour == int(h) and x.minute == int(m) and x.second == int(s):
            peak_idxlist[peak_num] = x
            print(peak_num, '\t', x)
            peak_num += 1
    # specify the peak to remove
    rm_peak = input('Added Rpeaks to remove [list ids or None]: ')
    print('\n')
    if rm_peak == 'None':
        print('No peaks removed.')
        return
    else:
        rm_peaks = rm_peak.split(',')
        rm_peaks = [int(x) for x in rm_peaks]
        for p in rm_peaks:
            peak_to_rm = pd.Series(self.rpeaks_added[peak_idxlist[p]])
            peak_to_rm.index = [peak_idxlist[p]]
            # remove peak from rpeaks_added list
            self.rpeaks_added.drop(labels=peak_to_rm.index, inplace=True)
            # remove peak from rpeaks list & rpeaks dataframe
            self.rpeaks.drop(peak_idxlist[p], inplace=True)
            self.rpeaks_df.drop(peak_idxlist[p], inplace=True)
            # FIX: print the timestamp (index[0]) rather than the whole Index
            # object, matching the message format of rm_peak/undo_rm_peak
            print('R peak at ', peak_to_rm.index[0], ' successfully removed.')
        # recalculate ibi values (ms between successive peaks)
        self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
        ibi = np.insert(self.rr, 0, np.NaN)
        self.rpeaks_df['ibi_ms'] = ibi
        print('ibi values recalculated.')
        # refresh nn values
        self.nn = self.rr
def rm_ibi(self, thres = 3000):
    """
    Manually remove IBI's that can't be manually added with EKG.add_peak() method.

    IBIs to be removed could correspond to missing data (due to cleaning) or missed beats.
    Over-threshold IBIs can be removed automatically; the user is then prompted
    via input() for any additional IBIs to remove by ID. Removed IBIs are
    replaced with NaN in self.nn and archived in self.ibi_artifacts.

    Parameters
    ----------
    thres: int, default 3000
        Threshold time for automatic IBI removal (ms).

    Notes
    -----
    This step must be completed LAST, after removing any false peaks and adding any missed peaks.

    See Also
    --------
    EKG.add_peak : Manually add missed R peaks.
    """
    # check for extra-long IBIs & option to auto-remove
    if any(self.rpeaks_df['ibi_ms'] > thres):
        print(f'IBIs greater than {thres} milliseconds detected')
        rm = input('Automatically remove? [y/n]: ')
        if rm.casefold() == 'y':
            # get indices of ibis greater than threshold
            rm_idx = [i for i, x in enumerate(self.nn) if x > thres]
            # replace ibis w/ NaN
            self.nn[rm_idx] = np.NaN
            # FIX: removed a stray second argument to str.format that was
            # silently ignored ('{} IBIs removed.'.format(len(rm_idx), thres))
            print('{} IBIs removed.'.format(len(rm_idx)))
            # add ibi to ibi_artifacts list
            df_idx = [x+1 for x in rm_idx] # shift indices by 1 to correspond with df indices
            ibis_rmvd = pd.Series(self.rpeaks_df['ibi_ms'].iloc[df_idx])
            self.ibi_artifacts = self.ibi_artifacts.append(ibis_rmvd)
            self.ibi_artifacts.sort_index(inplace=True)
            print('ibi_artifacts series updated.')
            # update rpeaks_df
            ibi = np.insert(self.nn, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('R peaks dataframe updated.\n')
    else:
        print(f'All ibis are less than {thres} milliseconds.')
    # option to specify which IBIs to remove
    rm = input('Manually remove IBIs? [y/n]: ')
    if rm.casefold() == 'n':
        print('Done.')
        return
    elif rm.casefold() == 'y':
        # print IBI list w/ IDs
        print('Printing IBI list...\n')
        print('ID', '\t', 'ibi end time', '\t', 'ibi_ms')
        for i, x in enumerate(self.rpeaks_df.index[1:]):
            print(i, '\t',str(x)[11:-3], '\t', self.rpeaks_df['ibi_ms'].loc[x])
        rm_ids = input('IDs to remove [list or None]: ')
        if rm_ids.casefold() == 'none':
            print('No ibis removed.')
            return
        else:
            # replace IBIs in nn array
            rm_ids = [int(x) for x in rm_ids.split(',')]
            self.nn[rm_ids] = np.NaN
            print('{} IBIs removed.'.format(len(rm_ids)))
            # add ibi to ibi_artifacts list
            df_idx = [x+1 for x in rm_ids] # shift indices by 1 to correspond with df indices
            ibis_rmvd = pd.Series(self.rpeaks_df['ibi_ms'].iloc[df_idx])
            self.ibi_artifacts = self.ibi_artifacts.append(ibis_rmvd)
            self.ibi_artifacts.sort_index(inplace=True)
            print('ibi_artifacts series updated.')
            # update self.rpeaks_df
            ibi = np.insert(self.nn, 0, np.NaN)
            self.rpeaks_df['ibi_ms'] = ibi
            print('R peaks dataframe updated.\nDone.')
def calc_RR(self, smooth, mw_size, upshift, rms_align):
    """
    Run the full moving-average R peak pipeline: threshold, detect, tachogram.

    Parameters
    ----------
    smooth : bool, default True
        If set to True, raw EKG data will be smoothed using RMS smoothing window.
    mw_size : float, default 100
        Time over which the moving average of the EKG signal will be taken to
        calculate the R peak detection threshold (ms).
    upshift : float, default 3.5
        Percentage of EKG signal that the moving average will be shifted up by
        to set the R peak detection threshold.
    rms_align: str, default 'right'
        Whether to align the mean to the right or left side of the moving
        window [options: 'right', 'left']

    See Also
    --------
    EKG.set_Rthres : Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
    EKG.detect_Rpeaks : Detect R peaks of raw or smoothed EKG signal based on detection threshold.
    EKG.pan_tompkins_detector : Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.
    """
    # first derive the detection threshold from the chosen parameters,
    # then run peak detection (which also builds the RR tachogram)
    self.set_Rthres(smooth, mw_size, upshift, rms_align)
    self.detect_Rpeaks(smooth)
def pan_tompkins_detector(self):
    """
    Use the Pan Tompkins algorithm to detect R peaks and calculate R-R intervals.

    Jiapu Pan and Willis J. Tompkins.
    A Real-Time QRS Detection Algorithm.
    In: IEEE Transactions on Biomedical Engineering
    BME-32.3 (1985), pp. 230-236.

    Modifies
    --------
    self.rpeaks : pd.Series of detected R peak values indexed by timestamp.
    self.rr : np.ndarray of interbeat intervals (ms).
    self.rpeaks_df : pd.DataFrame of peaks with an 'ibi_ms' column.
    self.metadata : 'pan_tompkins' flag set to True.

    See Also
    ----------
    EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
    """
    self.metadata['analysis_info']['pan_tompkins'] = True
    # interpolate data because it has NaNs, which the ECG band-pass filter step can't handle
    data = self.data.interpolate()
    # makes our data a list because that is the format that bsnb wants it in
    signal = pd.Series.tolist(data['Raw'])
    # get sample rate
    # must be an int
    sr = int(self.metadata['analysis_info']['s_freq'])
    filtered_signal = bsnb.detect._ecg_band_pass_filter(signal, sr) #Step 1 of Pan-Tompkins Algorithm - ECG Filtering (Bandpass between 5 and 15 Hz)
    # Step 2: differentiate, then square to emphasize QRS slope
    differentiated_signal = diff(filtered_signal)
    squared_signal = differentiated_signal * differentiated_signal
    # Step 3: moving-window integration over an 80 ms window
    nbr_sampls_int_wind = int(0.080 * sr)
    # Initialisation of the variable that will contain the integrated signal samples
    integrated_signal = zeros_like(squared_signal)
    cumulative_sum = squared_signal.cumsum()
    # windowed mean via cumulative-sum differences; the first window is a
    # growing average over however many samples are available
    integrated_signal[nbr_sampls_int_wind:] = (cumulative_sum[nbr_sampls_int_wind:] - cumulative_sum[:-nbr_sampls_int_wind]) / nbr_sampls_int_wind
    integrated_signal[:nbr_sampls_int_wind] = cumulative_sum[:nbr_sampls_int_wind] / arange(1, nbr_sampls_int_wind + 1)
    # R peak detection algorithm (bsnb private helpers; interfaces assumed
    # stable for this biosignalsnotebooks version -- TODO confirm on upgrade)
    rr_buffer, signal_peak_1, noise_peak_1, threshold = bsnb.detect._buffer_ini(integrated_signal, sr)
    probable_peaks, possible_peaks= bsnb.detect._detects_peaks(integrated_signal, sr)
    # Identification of definitive R peaks
    definitive_peaks = bsnb.detect._checkup(probable_peaks, integrated_signal, sr, rr_buffer, signal_peak_1, noise_peak_1, threshold)
    # Conversion to integer type.
    definitive_peaks = array(list(map(int, definitive_peaks)))
    # Correcting step
    # Due to the multiple pre-processing stages there is a small lag in the determined peak positions, which needs to be corrected !
    definitive_peaks_rephase = np.array(definitive_peaks) - 30 * (sr / 1000)
    definitive_peaks_rephase = list(map(int, definitive_peaks_rephase))
    # make peaks list: map corrected sample positions back to timestamps/values
    index = data.index[definitive_peaks_rephase]
    values = np.array(signal)[definitive_peaks_rephase]
    self.rpeaks = pd.Series(values, index = index)
    print('R peak detection complete')
    # get time between peaks and convert to mseconds
    self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
    # create rpeaks dataframe and add ibi columm
    rpeaks_df = pd.DataFrame(self.rpeaks)
    ibi = np.insert(self.rr, 0, np.NaN)
    rpeaks_df['ibi_ms'] = ibi
    self.rpeaks_df = rpeaks_df
    print('R-R intervals calculated')
def export_RR(self, savedir):
    """
    Export R peaks and RR interval data to .txt files.

    Includes list of R peaks artifacts, R peaks added, R peaks detected,
    IBI artifacts, RR intervals and NN intervals. Prompts the user via
    input() when the save directory is missing or cleaning was skipped.

    Parameters
    ----------
    savedir : str
        Path to directory where .txt files will be saved. If None, the
        current working directory is offered interactively.

    See Also
    --------
    EKG.calc_RR : Set R peak detection threshold, detect R peaks and calculate R-R intervals.
    EKG.rm_ibi : Manually remove IBI's that can't be manually added with EKG.add_peak() method.
    EKG.add_peak : Manually add missed R peak.
    EKG.rm_peak : Examine a second of interest and manually remove artifact R peaks.
    """
    # set save directory
    if savedir is None:
        savedir = os.getcwd()
        chngdir = input('Files will be saved to ' + savedir + '. Change save directory? [Y/N] ')
        if chngdir == 'Y':
            savedir = input('New save directory: ')
            if not os.path.exists(savedir):
                createdir = input(savedir + ' does not exist. Create directory? [Y/N] ')
                if createdir == 'Y':
                    os.makedirs(savedir)
                else:
                    savedir = input('Try again. Save directory: ')
                    if not os.path.exists(savedir):
                        print(savedir + ' does not exist. Aborting. ')
                        return
    elif not os.path.exists(savedir):
        print(savedir + ' does not exist. Creating directory...')
        os.makedirs(savedir)
    else:
        print('Files will be saved to ' + savedir)
    # save rpeak_artifacts list (only exists if cleaning was performed)
    try:
        self.rpeak_artifacts
    except AttributeError:
        cont = input('EKG object has no artifacts attribute. Continue save without cleaning? [y/n]: ')
        if cont == 'y':
            pass
        elif cont == 'n':
            print('Save aborted.')
            return
    else:
        savearts = self.metadata['file_info']['fname'].split('.')[0] + '_rpeak_artifacts.txt'
        art_file = os.path.join(savedir, savearts)
        self.rpeak_artifacts.to_csv(art_file, header=False)
        print('R peak artifacts exported.')
    # save rpeaks_added list
    savename = self.metadata['file_info']['fname'].split('.')[0] + '_rpeaks_added.txt'
    savefile = os.path.join(savedir, savename)
    self.rpeaks_added.to_csv(savefile, header=False)
    print('R peak additions exported.')
    # save R peak detections
    savepeaks = self.metadata['file_info']['fname'].split('.')[0] + '_rpeaks.txt'
    peaks_file = os.path.join(savedir, savepeaks)
    self.rpeaks.to_csv(peaks_file, header=False)
    print('R peaks exported.')
    # save ibi_artifact list
    savename = self.metadata['file_info']['fname'].split('.')[0] + '_ibi_artifacts.txt'
    savefile = os.path.join(savedir, savename)
    self.ibi_artifacts.to_csv(savefile, header=False)
    print('IBI artifacts exported.')
    # save RR intervals
    if self.metadata['analysis_info']['pan_tompkins'] == False:
        rr_header = 'R peak detection mw_size = {} & upshift = {}'.format(self.metadata['analysis_info']['mw_size'], self.metadata['analysis_info']['upshift'])
    else:
        rr_header = 'R peak detection using the Pan Tompkins algorithm'
    saverr = self.metadata['file_info']['fname'].split('.')[0] + '_rr.txt'
    rr_file = os.path.join(savedir, saverr)
    np.savetxt(rr_file, self.rr, header=rr_header, fmt='%.0f', delimiter='\n')
    print('rr intervals exported.')
    # save NN intervals, if exists
    try:
        self.nn
    except AttributeError:
        print('EKG object has no nn attribute. Only exporting r peaks and rr intervals.')
        pass
    else:
        # set # of artifacts removed for header
        # BUG FIX: the original referenced len(self.rpeak_artifacts) and
        # len(self.ibi_artifacts) unconditionally when formatting the header,
        # which raised AttributeError on the "continue without cleaning" path.
        # Compute each length defensively instead.
        try:
            rpeak_art_len = len(self.rpeak_artifacts)
        except AttributeError:
            rpeak_art_len = 0
        try:
            ibi_art_len = len(self.ibi_artifacts)
        except AttributeError:
            ibi_art_len = 0
        arts_len = rpeak_art_len + ibi_art_len
        if self.metadata['analysis_info']['pan_tompkins'] == False:
            nn_header = 'R peak detection mw_size = {} & upshift = {}.\nTotal artifacts removed = {} ( {} false peaks + {} false ibis).'.format(self.metadata['analysis_info']['mw_size'], self.metadata['analysis_info']['upshift'], arts_len, rpeak_art_len, ibi_art_len)
        else:
            nn_header = 'R peak detection using the Pan Tompkins algorithm.\nTotal artifacts removed = {} ( {} false peaks + {} false ibis).'.format(arts_len, rpeak_art_len, ibi_art_len)
        savenn = self.metadata['file_info']['fname'].split('.')[0] + '_nn.txt'
        nn_file = os.path.join(savedir, savenn)
        np.savetxt(nn_file, self.nn, header=nn_header, fmt='%.0f', delimiter='\n')
        print('nn intervals exported.')
    print('Done.')
def calc_tstats(self, itype):
"""
Calculate commonly used time domain HRV statistics.
Time domain HRV statistics include mean, min and max HR (bpm), mean interbeat interval length, SDNN, RMSSD, pNN20 and pNN50.
SDNN is the standard deviation of normal to normal IBI. RMSSD is the root mean squared standard deviation of normal interbeat interval length.
pNN20 and pNN50 are the percentage of normal interbeat intervals that exceed 20ms and 50ms respectively.
Min and max HR is determined over 5 RR intervals.
Parameters
----------
itype : str {'rr, 'nn'}
Interval type.'rr' is uncleaned data. 'nn' is normal intervals (cleaned).
See Also
--------
EKG.hrv_stats : Calculate all HRV statistics on IBI object.
EKG.calc_fstats : Calculate frequency domain statistics.
EKG.calc_psd_welch : Calculate welch power spectrum.
EKG.calc_psd_mt : Calculate multitaper power spectrum.
EKG.calc_fbands : Calculate different frequency band measures.
"""
print('Calculating time domain statistics...')
if itype == 'rr':
ii = self.rr
ii_diff = np.diff(self.rr)
ii_diffsq = ii_diff**2
self.rr_diff = ii_diff
self.rr_diffsq = ii_diffsq
elif itype == 'nn':
# remove np.NaNs for calculations
ii = self.nn[~np.isnan(self.nn)]
ii_diff = np.diff(ii)
ii_diffsq = ii_diff**2
self.nn_diff = ii_diff
self.nn_diffsq = ii_diffsq
# heartrate in bpm
hr_avg = 60/np.mean(ii)*1000
rollmean_ii = pd.Series(ii).rolling(5).mean()
mx_ii, mn_ii = np.nanmax(rollmean_ii), np.nanmin(rollmean_ii)
hr_max = 60/mn_ii*1000
hr_min = 60/mx_ii*1000
# inter-beat interval & SD (ms)
ibi = np.mean(ii)
sdnn = np.std(ii, ddof=1)
# SD & RMS of differences between successive II intervals (ms)
sdsd = np.std(ii_diff)
rmssd = np.sqrt(np.mean(ii_diffsq))
# pNN20 & pNN50
pxx20 = sum(np.abs(ii_diff) >= 20.0)/(len(ii_diff)-1) *100
pxx50 = sum(np.abs(ii_diff) >= 50.0)/(len(ii_diff)-1) *100
self.time_stats = {'linear':{'HR_avg': hr_avg, 'HR_max': hr_max, 'HR_min': hr_min, 'IBI_mean': ibi,
'SDNN': sdnn, 'RMSSD': rmssd, 'pXX20': pxx20, 'pXX50': pxx50},
}
print('Time domain stats stored in obj.time_stats\n')
def interpolate_IBI(self, itype):
"""
Resample tachogram to original sampling frequency and interpolate for power spectral estimation.
This is done since RRs are not evenly placed.
Parameters
----------
itype : str {'rr', 'nn'}
Interval type.'rr' is uncleaned data. 'nn' is normal intervals (cleaned).
Note
----
Adapted from pyHRV
See Also
--------
EKG.calc_psd_welch : Calculate welch power spectrum.
EKG.calc_psd_mt : Calculate multitaper power spectrum.
"""
# specify data
if itype == 'rr':
ii = self.rr
elif itype == 'nn':
# remove np.NaNs for calculations
ii = self.nn[~np.isnan(self.nn)]
# interpolate
fs = self.metadata['analysis_info']['s_freq']
t = np.cumsum(ii)
t -= t[0]
f_interp = sp.interpolate.interp1d(t, ii, 'cubic')
t_interp = np.arange(t[0], t[-1], 1000./fs)
self.ii_interp = f_interp(t_interp)
self.metadata['analysis_info']['s_freq_interp'] = self.metadata['analysis_info']['s_freq']
def data_pre_processing(self, fs):
    """
    Pre-process the NN series for spectral analysis: linearly interpolate
    onto a regular time grid and zero center.

    Parameters
    ----------
    fs : int
        Desired sampling frequency of the time series in Hz.

    Returns
    -------
    NN_intervals_interpolated : np.ndarray
        Interpolated and zero-centered NN time series, shape (K, 1).
    K : int
        Length of the interpolated series.
    """
    # drop invalidated (NaN) intervals before interpolating
    valid_nn = self.nn[~np.isnan(self.nn)]
    # beat times in seconds: cumulative sum of the intervals (ms -> s)
    beat_times = np.cumsum(valid_nn) / 1000
    step = 1 / fs
    # observation duration in whole seconds
    duration = np.floor(beat_times.max(0))
    # regularly spaced time axis starting at 1 s
    axis = np.arange(1.0, duration, step)
    # linearly interpolated NN time series on the regular axis
    resampled = np.interp(axis, beat_times, valid_nn)
    count = resampled.shape[0]
    # zero center prior to multi-tapering
    resampled = resampled - np.mean(resampled)
    resampled = resampled.reshape((count, 1))
    return resampled, count
def denoised_mt_spectral_estimation(self, NN_intervals_interpolated, N, NW, no_of_tapers, K, fs):
    """
    Perform expectation maximization to estimate the denoised Eigen
    coefficients and denoised multitaper spectral estimates.

    Parameters
    ----------
    NN_intervals_interpolated : np.ndarray
        The interpolated inter heart beat interval time series, shape (K, 1).
    N : int
        Total number of frequency bins.
    NW : int
        Time half bandwidth of multitapering.
    no_of_tapers : int
        Number of tapers considered for multitapering.
    K : int
        Observation duration in samples.
    fs : int
        Desired sampling frequency of the time series in Hz.

    Returns
    -------
    denoised_MT_est : np.ndarray
        Denoised multitaper estimate of the Power Spectral Density.
    denoised_w_est_tapers : np.ndarray
        The real and imaginary components of the denoised Eigen-coefficients
        for each taper, shape (2N-1, no_of_tapers).

    Modifies
    --------
    self.psd_mt_denoised : Dict created containing power spectral density at respective frequencies.
        'freqs' : np.ndarray
        'pwr' : np.ndarray. Power spectral density in (V^2/Hz). 10log10 to convert to dB.

    See Also
    --------
    EKG.data_pre_processing : Linearly interpolate the NN time series and zero center.
    EKG.direct_MT_Spectral_Estimation : Produce the classical multitaper estimate of the Power Spectral Density.
    EKG.plot_estimates : Plot the final PSD estimates with the confidence levels.
    EKG.generate_PS : Generate power spectrum with desired confidence levels.
    """
    # Initializing the parameters
    iter_EM = 50 # Maximum EM iterations
    Q_initial_factor = 10**(5) # Initialization of Q (state noise covariance scale)
    sigma_observation = 1*10**(4) # Initialization of observation noise variance
    Sequences = sp.signal.windows.dpss(K, NW, Kmax=no_of_tapers) # Generate the data tapers used for multitapering
    self.metadata['analysis_info']['psd_method'] = 'denoised multitaper'
    # constructing the inverse FFT matrix (A): maps the 2N-1 real/imaginary
    # frequency-domain coefficients back to the K time-domain samples
    A=np.zeros((K,2*N))
    for k in range(0,K):
        for n in range(0,N):
            A[k,2*n] = np.cos((k+1)*np.pi*(n)/N)
            A[k,2*n+1] = -np.sin((k+1)*np.pi*(n)/N)
    A= A/N
    # the DC bin has no imaginary part, so its sine column is dropped
    A = np.delete(A, 1, 1)
    denoised_MT_est_tapers = np.zeros((N, no_of_tapers)) # Stores the denoised Eigen spectra for each taper
    denoised_w_est_tapers = np.zeros(((2*N - 1),no_of_tapers)) # Stores the denoised Eigen coefficients for each taper
    # Derive the denoised Eigen coefficients for each taper using Expectation Maximization
    for taper in range(0,no_of_tapers):
        print("Estimating denoised Eigen-coefficients for taper", taper+1)
        Q = Q_initial_factor*np.eye((2*N - 1)) # Initialize the Q matrix
        taper_sequence = Sequences[taper,:]
        taper_sequence = taper_sequence.reshape((K,1))
        tapered_NN_intervals = taper_sequence*NN_intervals_interpolated # Obtain the tapered time series
        tapered_NN_intervals = tapered_NN_intervals.reshape((K,1))
        w_est = np.zeros((2*N-1, 1))
        P_est = np.zeros((2*N-1, 2*N-1))
        # small diagonal term to keep the matrix inversions numerically stable
        regularizer = (10**(-10))*np.eye((2*N-1))
        # Expectation Maximization
        for r in range(0,iter_EM):
            # Expectation step (E - step)
            w_est = np.linalg.inv(regularizer+A.T@A + np.linalg.inv(regularizer+Q)*sigma_observation)@(A.T@tapered_NN_intervals) # Update the expected value of the denoised Eigen coefficients
            P_est = ((np.linalg.inv(regularizer+A.T@A/(regularizer[0,0]+sigma_observation) + np.linalg.inv(regularizer+Q)))) # Update the covariance of the denoised Eigen coefficients
            # Maximization (M - step)
            Q = np.diag(np.diag(P_est + w_est@w_est.T)) # Update the Q matrix
            sigma_observation = (tapered_NN_intervals.T@tapered_NN_intervals - 2*tapered_NN_intervals.T@A@w_est + np.trace((A.T)@(A)@(P_est + w_est@w_est.T)))/K # Update the observation noise variance
            # print(sigma_observation)
        # Store estimated denoised Eigen coefficients for all tapers
        denoised_w_est_tapers[:,taper] = w_est[:,0]
        # Derive and store the denoised Eigen spectra: power per bin is the sum
        # of squared real and imaginary coefficient components
        final_eigen_spectra = w_est@w_est.T
        denoised_MT_est_tapers[0,taper] = final_eigen_spectra[0,0]
        for n in range(1,N):
            denoised_MT_est_tapers[n,taper] = final_eigen_spectra[2*n-1,2*n-1] + final_eigen_spectra[2*n,2*n]
    # The final multi-taper estimates are the average spectral estimates across all tapers
    denoised_MT_est = np.squeeze(np.mean(np.absolute(denoised_MT_est_tapers), axis=1, keepdims = True))
    freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
    self.psd_mt_denoised = {'freqs': freq_vector, 'pwr': denoised_MT_est}
    return denoised_MT_est, denoised_w_est_tapers
def direct_MT_Spectral_Estimation(self, NN_intervals_interpolated, N, NW, no_of_tapers, fs):
"""
Produce the classical multitaper estimate of the Power Spectral Density.
Parameters
----------
NN_intervals_interpolated : np.ndarray
The interpolated inter heart beat interval time series
N : int
Total number of frequency bins
NW : int
Time half bandwidth of multitapering
no_of_tapers : int
Number of tapers considered for Multitapering
fs : int
Desired sampling frequency of the time series in Hz
Returns
-------
direct_MT_est : np.ndarray
The classical multitaper estimate of the Power Spectral Density
direct_w_est_tapers : np.ndarray
The real and imaginary components of the Eigen-coefficients for each taper.
Modifies
--------
self.psd_mt : Dict created containing power spectral density at respective frequencies.
'freqs' : np.ndarray
'pwr' : np.ndarray. Power spectral density in (V^2/Hz). 10log10 to convert to dB.
See Also
--------
EKG.data_pre_processing : Load data to the workspace and perform pre-processing: Linearly interpolate the NN time series and zero center.
EKG.denoised_MT_Spectral_Estimation : Peform expectation maximization to estimate the denoised Eigen coefficients and denoised Multitaper spectral estimates.
EKG.plot_estimates : Plot the final PSD estimates with the confidence levels
EKG.generate_PS : Generate power spectrum with desired confidence levels.
EKG.Confidence_Intervals_Bootstrapping : Perform bootstrapping to derive confidence bounds.
EKG.Confidence_Intervals_Chi_squared : Derive confidence bounds based on the Chi-squared approximation.
"""
# Initializing variables
K = NN_intervals_interpolated.shape[0] # Observation duration in samples
Fourier_est = np.zeros((2*N, no_of_tapers),dtype=complex) # Eigen Spectra
direct_MT_est_tapers = np.zeros((2*N,no_of_tapers)) # Classical Multi-taper spectral estimate
direct_w_est_tapers = np.zeros((2*N-1,no_of_tapers)) # The real and imaginery components of the Eigen-coefficients for each taper
dpss_seq = sp.signal.windows.dpss(K, NW, Kmax=no_of_tapers) # Generate the data tapers used for multitapering
self.metadata['analysis_info']['psd_method'] = 'direct multitaper'
# Computing the Eigen-coefficients and the Eigenspectra for each taper
for taper in range(0,no_of_tapers):
temp = NN_intervals_interpolated.T*dpss_seq[taper,:]
Fourier_est[:, taper] = ((np.fft.fft(temp, n = 2*N)).T).reshape((2*N,)) # Eigen-coefficients of the tapered process
direct_w_est_tapers[0,taper] = np.absolute(Fourier_est[0,taper]) # dc component
direct_w_est_tapers[1:2*N-1:2,taper] = np.real(Fourier_est[1:N,taper]) # real components
direct_w_est_tapers[2:2*N-1:2,taper] = np.imag(Fourier_est[1:N,taper]) # imaginary components
direct_MT_est_tapers[:,taper] = (np.absolute(Fourier_est[:, taper]))**2 # Eigenspectral estimates
# The final multi-taper estimates are the average spectral estimates across all tapers
direct_MT_est = np.mean(direct_MT_est_tapers[0:N,:], axis=1, keepdims = True)
freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
self.psd_mt_direct = {'freqs': freq_vector, 'pwr': direct_MT_est}
return direct_MT_est, direct_w_est_tapers
def confidence_intervals_bootstrapping(self, MT_est, w_est_tapers, CI, bootstrapping_repeats, fs, K, N):
    """
    Perform bootstrapping to derive confidence bounds for the multitaper PSD.

    Kim et al., 2018
    A Multitaper Frequency-Domain Bootstrap Method
    In: IEEE Signal Processing Letters
    SPL-25.12 (2018), pp. 1805-1809.

    Parameters
    ----------
    MT_est : np.ndarray
        Multitaper estimate of the Power Spectral Density (length N).
    w_est_tapers : np.ndarray
        Real and imaginary components of the Eigen coefficients of each
        taper, shape (2N-1, no_of_tapers). Not modified.
    CI : float
        Desired confidence level (e.g. 0.95).
    bootstrapping_repeats : int
        Number of bootstrap repeats.
    fs : int
        Sampling frequency.
    K : int
        Observation duration in samples.
    N : int
        Unused on entry; overwritten by MT_est.shape[0] (kept for interface
        compatibility).

    Returns
    -------
    Lower_confidence_PSD : np.ndarray
        The lower confidence bound of the multitaper spectral estimates, shape (N,).
    Upper_confidence_PSD : np.ndarray
        The upper confidence bound of the multitaper spectral estimates, shape (N,).

    Notes
    -----
    BUG FIX: the original aliased ``w_est_tapers`` (``scaled_w_hat =
    w_est_tapers``) and then scaled it in place, destructively modifying the
    caller's array. The scaling now operates on a copy.

    Uses np.random for resampling; results are not deterministic unless the
    global NumPy RNG is seeded by the caller.

    See Also
    --------
    EKG.denoised_mt_spectral_estimation : EM-denoised multitaper spectral estimates.
    EKG.direct_MT_Spectral_Estimation : Produce the classical multitaper estimate of the Power Spectral Density.
    EKG.confidence_intervals_chi_squared : Derive confidence bounds based on the Chi-squared approximation.
    """
    self.metadata['analysis_info']['confidence_intervals'] = 'bootstrapping'
    N = MT_est.shape[0]                      # Number of frequency bins
    scaling_fac = (1/fs)*(K / N)             # Scaling factor of the final estimates
    no_of_tapers = w_est_tapers.shape[1]     # Number of tapers used for multitapering
    denoised_MT_est_bootstrap = np.zeros((N, bootstrapping_repeats))  # Bootstrapped MT estimates
    # Step 2 (Kim et al., 2018): Scale the Eigen coefficients by the power of
    # the MT estimates. Work on a copy so the caller's array is not mutated.
    scaled_w_hat = np.array(w_est_tapers, copy=True)
    scaled_w_hat[0,:] = scaled_w_hat[0,:]/np.sqrt(MT_est[0]/2)
    for n in range(0, N-1):
        S_fac = np.sqrt(MT_est[n+1]/2)
        scaled_w_hat[2*n+1,:] = scaled_w_hat[2*n+1,:] / S_fac
        scaled_w_hat[2*n+2,:] = scaled_w_hat[2*n+2,:] / S_fac
    # Step 3 (Kim et al., 2018): Standardize each taper's concatenated
    # Eigencoefficients to have zero mean and unit variance
    for taper in range(0, no_of_tapers):
        temp = scaled_w_hat[:,taper]
        centered = temp - np.mean(temp)
        scaled_w_hat[:,taper] = centered / np.sqrt(np.mean(centered**2))
    # Perform the bootstrapping
    for i in range(0, bootstrapping_repeats):
        denoised_MT_est_bootstrap_taper = np.zeros((N, no_of_tapers))  # Bootstrapped MT estimate per taper
        scaled_w_hat_bootstrap = np.zeros((2*N-1, no_of_tapers))       # Bootstrapped Eigen coefficients
        for n in range(0, scaled_w_hat.shape[0]):
            temp = scaled_w_hat[n,:]
            bootstrap_order = np.random.randint(no_of_tapers, size=no_of_tapers) # Step 4: resampling with replacement
            scaled_w_hat_bootstrap[n,:] = temp[bootstrap_order]
            # Step 5 (Kim et al., 2018): Re-scale the bootstrapped Eigen coefficients
            if n == 0:
                scaled_w_hat_bootstrap[n,:] = scaled_w_hat_bootstrap[n,:]*np.sqrt(MT_est[0]/2)
            else:
                scaled_w_hat_bootstrap[n,:] = scaled_w_hat_bootstrap[n,:]*np.sqrt(MT_est[(np.ceil(n/2)).astype(int)]/2)
        # Step 6 (Kim et al., 2018): Derive the bootstrapped Eigen spectra for each taper
        for taper in range(0, no_of_tapers):
            temp = scaled_w_hat_bootstrap[:,taper]
            temp = temp.reshape((temp.shape[0],1))
            final_eigen_spectra_bootstrap = temp@temp.T
            denoised_MT_est_bootstrap_taper[0,taper] = final_eigen_spectra_bootstrap[0,0]
            for n in range(0, N-1):
                denoised_MT_est_bootstrap_taper[n+1,taper] = final_eigen_spectra_bootstrap[2*n+1,2*n+1]+final_eigen_spectra_bootstrap[2*n+2,2*n+2]
        # Derive the bootstrapped multitaper spectral estimate (mean across tapers)
        temp = scaling_fac*np.mean(np.absolute(denoised_MT_est_bootstrap_taper), axis=1, keepdims=True)
        denoised_MT_est_bootstrap[:,i] = temp.reshape((temp.shape[0],))
    # Specify the lower and upper percentiles based on the desired confidence level
    lower_percentile = (np.floor(((1-CI)/2)*bootstrapping_repeats)).astype(int)
    upper_percentile = (np.ceil(((1+CI)/2)*bootstrapping_repeats)).astype(int)
    Upper_confidence_PSD = np.zeros((N,1))
    Lower_confidence_PSD = np.zeros((N,1))
    # Read the confidence bounds off the sorted bootstrap distribution per bin
    for n in range(0, N):
        temp = np.sort(denoised_MT_est_bootstrap[n,:])
        Lower_confidence_PSD[n] = temp[lower_percentile]
        Upper_confidence_PSD[n] = temp[upper_percentile]
    return Lower_confidence_PSD.reshape((N,)), Upper_confidence_PSD.reshape((N,))
def Confidence_intervals_chi_squared(self, MT_est, CI, no_of_tapers, N):
    """
    Derive confidence bounds for a multitaper PSD via the chi-squared approximation.

    Percival & Walden, 1993, "Spectral Analysis for Physical Applications",
    pp. 255 & 343: each multitaper estimate is approximately chi-squared
    distributed with 2 * (number of tapers) degrees of freedom.

    Parameters
    ----------
    MT_est : np.ndarray
        Multitaper estimate of the Power Spectral Density.
    CI : float
        Desired confidence level (e.g. 0.95).
    no_of_tapers : int
        Number of tapers used for multitapering.
    N : int
        Number of frequency bins; output arrays are reshaped to (N,).

    Returns
    -------
    Lower_confidence_PSD : np.ndarray
        Lower confidence bound of the multitaper spectral estimates.
    Upper_confidence_PSD : np.ndarray
        Upper confidence bound of the multitaper spectral estimates.

    See Also
    --------
    EKG.confidence_intervals_bootstrapping : Bootstrap-based confidence bounds.
    EKG.plot_estimates : Plot PSD estimates with confidence levels.
    """
    self.metadata['analysis_info']['confidence_intervals'] = 'chi_sq'
    # DoF of the chi-squared distribution is twice the number of tapers
    degrees_of_freedom = 2 * no_of_tapers
    # Scale |PSD| by DoF over the chi-squared percentile at each tail
    lower_confidence_psd = (degrees_of_freedom / chi2.ppf((1 + CI) / 2, df=degrees_of_freedom)) * abs(MT_est)
    upper_confidence_psd = (degrees_of_freedom / chi2.ppf((1 - CI) / 2, df=degrees_of_freedom)) * abs(MT_est)
    return lower_confidence_psd.reshape((N,)), upper_confidence_psd.reshape((N,))
def plot_estimates(self, MT_PSD_est, Lower_confidence_PSD, Upper_confidence_PSD, fs):
    """
    Create a figure of the final PSD estimate with its confidence band.

    Parameters
    ----------
    MT_PSD_est : np.ndarray
        Multitaper estimate of the Power Spectral Density.
    Lower_confidence_PSD : np.array
        Lower confidence bound of the multitaper spectral estimates.
    Upper_confidence_PSD : np.array
        Upper confidence bound of the multitaper spectral estimates.
    fs : int
        Sampling frequency.

    Returns
    -------
    fig : figure
        Plot of the final PSD estimates with confidence levels.
    """
    n_bins = MT_PSD_est.shape[0]
    freqs = np.arange(0.0, 0.5 * fs, 0.5 * fs / n_bins)
    y_top = 20 * 10 ** 4
    fig, ax = plt.subplots(figsize=(15, 3))
    # Dashed vertical markers at the upper edges of the VLF/LF/HF bands
    for band_edge in (0.04, 0.15, 0.40):
        marker = np.max(freqs[freqs <= band_edge])
        ax.plot([marker, marker], [0, y_top], 'b--')
    # PSD curve with shaded confidence region
    ax.plot(freqs, MT_PSD_est, color="black")
    ax.fill_between(freqs, Lower_confidence_PSD, Upper_confidence_PSD, color='k', alpha=.4)
    plt.xlabel("frequency ($Hz$)")
    plt.ylabel("Power ($ms^2/Hz$)")
    ax.set_xlim(0, 0.4)
    plt.subplots_adjust(bottom=0.15)
    return fig
def calc_psd_welch(self, itype, window):
    """
    Calculate the Welch power spectrum of the interpolated II series.

    Parameters
    ----------
    itype : str {'rr', 'nn'}
        Interval type used for the record-duration check.
        'rr' is uncleaned data. 'nn' is normal intervals (cleaned).
    window : str
        Windowing function (scipy.signal.welch options); wrapper default 'hamming'.

    See Also
    --------
    EKG.calc_psd_mt : Calculate multitaper power spectrum.
    """
    self.metadata['analysis_info']['psd_method'] = 'welch'
    self.metadata['analysis_info']['psd_window'] = window
    # Pick the interval series used for the duration check below
    if itype == 'rr':
        intervals = self.rr
    elif itype == 'nn':
        intervals = self.nn[~np.isnan(self.nn)]
    # nfft: next power of two above len(data), floored at 256 (MATLAB guideline)
    nfft = max(256, 2 ** (int(np.log2(len(self.ii_interp))) + 1))
    # nperseg depends on whether the series spans five minutes (300000 ms)
    nperseg = nfft if max(np.cumsum(intervals)) < 300000 else 300
    # overlap is left at the scipy default (50%)
    f, Pxx = welch(self.ii_interp, fs=4, window=window, scaling='density',
                   nfft=nfft, nperseg=nperseg)
    self.psd_welch = {'freqs': f, 'pwr': Pxx, 'nfft': nfft, 'nperseg': nperseg}
def calc_fbands(self, method):
    """
    Calculate frequency band measures from a previously computed power spectrum.

    Parameters
    ----------
    method : str {'welch', 'mt_direct', 'mt_denoised'} or None
        Which stored spectrum to analyze. If None, the user is prompted.

    Raises
    ------
    ValueError
        If `method` is not one of the recognized options (previously this
        fell through and crashed later with a NameError on `psd`).

    Notes
    -----
    Modified from pyHRV. Normalized units are normalized to total lf + hf
    power, according to Heathers et al. (2014).
    """
    if method is None:
        method = input('Please enter PSD method (options: "welch", "mt_direct", "mt_denoised"): ')
    # Proper elif chain; unknown methods now fail fast with a clear error
    if method == 'welch':
        psd = self.psd_welch
    elif method == 'mt_direct':
        psd = self.psd_mt_direct
    elif method == 'mt_denoised':
        psd = self.psd_mt_denoised
    else:
        raise ValueError('Unrecognized PSD method: {}'.format(method))
    # Frequency band definitions (Hz); ulf is not resolvable on short records
    ulf = None
    vlf = (0.000, 0.04)
    lf = (0.04, 0.15)
    hf = (0.15, 0.4)
    freq_bands = dict(zip(('ulf', 'vlf', 'lf', 'hf'), (ulf, vlf, lf, hf)))
    # Indices and power values of each band within the calculated spectrum
    fband_vals = {}
    for key, band in freq_bands.items():
        fband_vals[key] = {}
        if band is None:
            fband_vals[key]['idx'] = None
            fband_vals[key]['pwr'] = None
        else:
            # lower limit not inclusive
            idx = np.where((band[0] < psd['freqs']) & (psd['freqs'] <= band[1]))[0]
            fband_vals[key]['idx'] = idx
            fband_vals[key]['pwr'] = psd['pwr'][idx]
    self.psd_fband_vals = fband_vals
    # Total power over the defined bands (skip bands with no data instead of
    # relying on np.sum(None) returning None, which is numpy-version fragile)
    total_pwr = sum(np.sum(v['pwr']) for v in fband_vals.values() if v['pwr'] is not None)
    freq_stats = {'totals': {'total_pwr': total_pwr}}
    # Per-band statistics
    for key, band in freq_bands.items():
        freq_stats[key] = {'freq_range': str(band)}
        if band is None:
            freq_stats[key]['pwr_ms2'] = None
            freq_stats[key]['pwr_peak'] = None
            freq_stats[key]['pwr_log'] = None
            freq_stats[key]['pwr_%'] = None
            freq_stats[key]['pwr_nu'] = None
        else:
            band_pwr = fband_vals[key]['pwr']
            freq_stats[key]['pwr_ms2'] = np.sum(band_pwr)
            # np.argmax picks the first maximum, matching the original np.where lookup
            peak_idx = np.argmax(band_pwr)
            freq_stats[key]['pwr_peak'] = psd['freqs'][fband_vals[key]['idx'][peak_idx]]
            freq_stats[key]['pwr_log'] = np.log(freq_stats[key]['pwr_ms2'])
            freq_stats[key]['pwr_%'] = freq_stats[key]['pwr_ms2'] / freq_stats['totals']['total_pwr'] * 100
    # Normalized units for lf & hf, relative to lf + hf power (Heathers et al., 2014)
    lf_hf_total = freq_stats['lf']['pwr_ms2'] + freq_stats['hf']['pwr_ms2']
    for key in ('lf', 'hf'):
        freq_stats[key]['pwr_nu'] = freq_stats[key]['pwr_ms2'] / lf_hf_total * 100
    # lf/hf ratio
    freq_stats['totals']['lf/hf'] = freq_stats['lf']['pwr_ms2'] / freq_stats['hf']['pwr_ms2']
    self.freq_stats = freq_stats
def calc_fstats(self, itype, method, bandwidth, window):
    """
    Run the full frequency-domain HRV pipeline: interpolate the tachogram,
    compute the power spectrum with the requested method, then derive the
    band measures.

    Parameters
    ----------
    itype : str {'rr', 'nn'}
        Interval type. 'rr' is uncleaned data, 'nn' is normal (cleaned) intervals.
    method : str {'mt', 'welch'}
        Method used to compute the power spectrum ('mt' is multitaper).
    bandwidth : float
        Bandwidth for multitaper power spectral estimation.
    window : str
        Window for the welch FFT.

    See Also
    --------
    EKG.calc_tstats : Calculate commonly used time domain HRV statistics.
    EKG.hrv_stats : Calculate both time and frequency domain HRV statistics.
    """
    print('Interpolating and resampling tachogram...')
    self.interpolate_IBI(itype)
    print('Calculating power spectrum...')
    # Dispatch table instead of an if/elif chain; unknown methods are a no-op,
    # matching the original behavior
    spectrum_dispatch = {
        'mt': lambda: self.calc_psd_mt(bandwidth),
        'welch': lambda: self.calc_psd_welch(itype, window),
    }
    action = spectrum_dispatch.get(method)
    if action is not None:
        action()
    print('Calculating frequency domain measures...')
    self.calc_fbands(method)
    print('Frequency measures stored in obj.freq_stats\n')
def hrv_stats(self, itype='nn', nn_file=None, method='mt_denoised', bandwidth=0.01, window='hamming'):
    """
    Calculate both time and frequency domain HRV statistics on the IBI object.

    Parameters
    ----------
    itype : str {'nn', 'rr'}
        Interbeat interval type. 'rr' is uncleaned data, 'nn' is cleaned intervals.
    nn_file : str, optional
        Path to a csv of previously exported cleaned nn values.
    method : str, {'mt_denoised', 'mt_direct', 'welch'}
        Power-spectrum method ('mt' is multitaper).
    bandwidth : float, default 0.01
        Bandwidth for the frequency-domain statistics.
    window : str, default 'hamming'
        Window type for welch power spectral analysis (scipy.signal options).
    """
    self.metadata['analysis_info']['itype'] = itype
    if itype == 'nn' and nn_file is not None:
        # Recover cleaning metadata from the exported file's two header lines
        with open(nn_file, 'r') as f:
            header_1 = f.readline().split(' ')
            header_2 = f.readline().split(' ')
        self.metadata['analysis_info']['mw_size'] = float(header_1[6])
        self.metadata['analysis_info']['upshift'] = float(header_1[10].split('.\n')[0])
        self.metadata['analysis_info']['artifacts_rmvd'] = int(header_2[5])
        # Load the previously cleaned nn intervals
        self.nn = np.loadtxt(nn_file)
    else:
        summary = (str(len(self.rpeak_artifacts)) + ' false peaks (removed); '
                   + str(len(self.rpeaks_added)) + ' missed peaks (added); '
                   + str(len(self.ibi_artifacts)) + ' ibis removed (from NaN data)')
        self.metadata['analysis_info']['artifacts_rmvd'] = summary
    # Fall back to raw rr intervals when no nn series exists yet
    if not hasattr(self, 'nn'):
        self.nn = self.rr
    self.calc_tstats(itype)
    self.calc_fstats(itype, method, bandwidth, window)
    print('Done.')
def to_spreadsheet(self, spreadsheet, savedir):
    """
    Append calculations as a row in a master spreadsheet.

    Bulk arrays/frames ('data', 'rpeaks', 'rr', ..., 'psd_fband_vals') are
    excluded; everything else is flattened from its three-level dict structure
    into one MultiIndex-columned row.

    Parameters
    ----------
    spreadsheet : str
        Name of output file.
    savedir : str
        Path to directory where the spreadsheet will be saved.

    Notes
    -----
    Creates a new spreadsheet (with header) if the output file does not exist.
    FIX: the `line_terminator=` kwarg was removed in pandas 2.0; the file is
    now opened with newline='' and the default terminator is used instead.
    """
    # 'data' and 'rpeaks' predate the split into two classes and shouldn't
    # exist on an IBI object, but are kept in the exclusion list for safety.
    arrays = ['data', 'rpeaks', 'rr', 'rr_diff', 'rr_diffsq', 'rpeak_artifacts', 'rpeaks_added', 'ibi_artifacts',
              'rpeaks_df', 'nn', 'nn_diff', 'nn_diffsq', 'rr_arts', 'ii_interp', 'psd_mt_direct', 'psd_mt_denoised', 'psd_welch', 'psd_fband_vals']
    data = {k: v for k, v in vars(self).items() if k not in arrays}
    # Flatten the three-level nested dicts into (level1, level2, level3) tuples
    reform = {(level1_key, level2_key, level3_key): values
              for level1_key, level2_dict in data.items()
              for level2_key, level3_dict in level2_dict.items()
              for level3_key, values in level3_dict.items()}
    df = pd.DataFrame(reform, index=[0])
    df.set_index([('metadata', 'file_info', 'in_num'), ('metadata', 'file_info', 'start_time')], inplace=True)
    savename = os.path.join(savedir, spreadsheet)
    # Write the header only when the file is being created
    header_needed = not os.path.exists(savename)
    with open(savename, 'a', newline='') as f:
        df.to_csv(f, header=header_needed)
    if header_needed:
        print('{} does not exist. Data saved to new spreadsheet'.format(spreadsheet))
    else:
        print('Data added to {}'.format(spreadsheet))
def to_report(self, savedir=None, fmt='txt'):
"""
Export HRV statistics as a csv report.
Parameters
----------
savedir : str, optional
Path to directory in which to save report.
fmt: str, {'txt', 'json'}
Output format.
See Also
--------
EKG.hrv_stats : Calculate both time and frequency domain HRV statistics on IBI object.
EKG.calc_fstats : Calculate commonly used frequency domain HRV statistics.
EKG.calc_tstats : Calculate commonly used time domain HRV statistics.
EKG.denoised_mt_spectral_estimation : Peform expectation maximization to estimate the denoised Eigen coefficients and denoised Multitaper spectral estimates.
EKG.direct_mt_spectral_estimation : Produce the classical multitaper estimate of the Power Spectral Density.
"""
# set save directory
# NOTE: this branch is interactive — input() prompts confirm/create the
# directory, with one retry before aborting.
if savedir is None:
savedir = os.getcwd()
chngdir = input('Files will be saved to ' + savedir + '. Change save directory? [Y/N] ')
if chngdir == 'Y':
savedir = input('New save directory: ')
if not os.path.exists(savedir):
createdir = input(savedir + ' does not exist. Create directory? [Y/N] ')
if createdir == 'Y':
os.makedirs(savedir)
else:
savedir = input('Try again. Save directory: ')
if not os.path.exists(savedir):
print(savedir + ' does not exist. Aborting. ')
return
elif not os.path.exists(savedir):
print(savedir + ' does not exist. Creating directory...')
os.makedirs(savedir)
else:
print('Files will be saved to ' + savedir)
# export everything that isn't a dataframe, series, or array
arrays = ['data', 'rpeaks', 'rr', 'rr_diff', 'rr_diffsq', 'rpeak_artifacts', 'rpeaks_added', 'ibi_artifacts', 'rpeaks_df', 'nn', 'nn_diff', 'nn_diffsq', 'rr_arts', 'ii_interp', 'psd_mt_direct', 'psd_mt_denoised', 'psd_fband_vals']
data = {k:v for k,v in vars(self).items() if k not in arrays}
# set savename info
# Base the output name on the first 5 (or 6 when an 'epoch' is present)
# underscore-separated tokens of the source filename.
if 'epoch' in self.metadata['file_info'].keys():
saveinfo = ('_'.join((self.metadata['file_info']['fname'].split('_')[:6]))).split('.')[0]
else:
saveinfo = ('_'.join((self.metadata['file_info']['fname'].split('_')[:5]))).split('.')[0]
# save calculations
# txt format: a hand-rolled indented key/value dump up to three dict levels;
# json format: one json.dump of the same data.
if fmt == 'txt':
savename = saveinfo + '_HRVstats.txt'
file = os.path.join(savedir, savename)
with open(file, 'w') as f:
for k, v in data.items():
if type(v) is not dict:
line = k+' '+str(v) + '\n'
f.write(line)
elif type(v) is dict:
line = k + '\n'
f.write(line)
for kx, vx in v.items():
if type(vx) is not dict:
line = '\t'+ kx + ' ' + str(vx) + '\n'
f.write(line)
else:
line = '\t' + kx + '\n'
f.write(line)
for kxx, vxx in vx.items():
line = '\t\t' + kxx + ' ' + str(vxx) + '\n'
f.write(line)
elif fmt == 'json':
savename = saveinfo + '_HRVstats_json.txt'
file = os.path.join(savedir, savename)
with open(file, 'w') as f:
json.dump(data, f, indent=4)
# save power spectra for later plotting
# Each spectrum attribute is exported only if it was calculated
# (try/except AttributeError with an else clause).
try:
self.psd_mt_denoised
except AttributeError:
pass
else:
savepsd = saveinfo + '_psd_mt_denoised.txt'
psdfile = os.path.join(savedir, savepsd)
psd_mt_df = pd.DataFrame(self.psd_mt_denoised)
psd_mt_df.to_csv(psdfile, index=False)
try:
self.psd_mt_direct
except AttributeError:
pass
else:
savepsd = saveinfo + '_psd_mt_direct.txt'
psdfile = os.path.join(savedir, savepsd)
psd_mt_df = pd.DataFrame(self.psd_mt_direct)
psd_mt_df.to_csv(psdfile, index=False)
try:
self.psd_welch
except AttributeError:
pass
else:
savepsd = saveinfo + '_psd_welch.txt'
psdfile = os.path.join(savedir, savepsd)
psd_mt_df = pd.DataFrame(self.psd_welch)
psd_mt_df.to_csv(psdfile, index=False)
# plotting methods
def plotpeaks(self, rpeaks=True, ibi=True, thres = True):
"""
Plot EKG class instance.
Visualization of raw EKG data, smoothed EKG data, R peaks, IBI length and EKG threshold detection line.
Parameters
----------
rpeaks : bool, default True
Shows r peaks on plot if set to True.
ibi : bool, default True
Displays plot with IBI time leading up to each r peak if set to True
thres : bool, default True
Shows threshold line if set to True.
"""
# set number of panels
# Two panels (ekg + ibi) when ibi is requested, otherwise just the ekg panel.
# The data selected per panel depends on thres and on whether smoothing was run.
if ibi == True:
plots = ['ekg', 'ibi']
if thres == True:
data = [self.data, self.rpeaks_df['ibi_ms']]
if thres == False:
if self.metadata['analysis_info']['smooth'] == False:
data = [self.data['Raw'], self.rpeaks_df['ibi_ms']]
if self.metadata['analysis_info']['smooth'] == True:
data = [self.data[['Raw', 'raw_smooth']], self.rpeaks_df['ibi_ms']]
else:
plots = ['ekg']
if thres == True:
data = [self.data]
if thres == False:
if self.metadata['analysis_info']['smooth'] == False:
data = [self.data['Raw']]
if self.metadata['analysis_info']['smooth'] == True:
data = [self.data[['Raw', 'raw_smooth']]]
fig, axs = plt.subplots(len(plots), 1, sharex=True, figsize = [9.5, 6])
# Multi-panel case: plt.subplots returns an array of axes to zip over;
# single-panel case (below) returns one axes object.
if len(plots) > 1:
for dat, ax, plot in zip(data, axs, plots):
if plot == 'ekg' and rpeaks == True:
ax.plot(dat, zorder = 1)
ax.scatter(self.rpeaks.index, self.rpeaks.values, color='red', zorder = 2)
ax.set_ylabel('EKG (mV)')
# Legend entries depend on which series were plotted above
if self.metadata['analysis_info']['pan_tompkins'] == True:
ax.legend(('raw data', 'rpeak'), fontsize = 'small')
else:
if thres == True:
if self.metadata['analysis_info']['smooth'] == True:
ax.legend(('raw data', 'threshold line', 'smoothed data', 'rpeak'), fontsize = 'small')
else:
ax.legend(('raw data', 'threshold line', 'rpeak'), fontsize = 'small')
else:
if self.metadata['analysis_info']['smooth'] == True:
ax.legend(('raw data', 'smoothed data', 'rpeak'), fontsize = 'small')
else:
ax.legend(('raw data', 'rpeak'), fontsize = 'small')
elif plot == 'ibi':
ax.plot(dat, color='grey', marker='.', markersize=8, markerfacecolor=(0, 0, 0, 0.8), markeredgecolor='None')
ax.set_ylabel('Inter-beat interval (ms)')
ax.set_xlabel('Time')
ax.margins(x=0)
# show microseconds for mouse-over
ax.format_xdata = lambda d: mdates.num2date(d).strftime('%H:%M:%S.%f')[:-3]
else:
for dat, plot in zip(data, plots):
if plot == 'ekg' and rpeaks == True:
axs.plot(dat, zorder = 1)
axs.scatter(self.rpeaks.index, self.rpeaks.values, color='red', zorder = 2)
axs.set_ylabel('EKG (mV)')
axs.set_xlabel('Time')
axs.margins(x=0)
# show microseconds for mouse-over
axs.format_xdata = lambda d: mdates.num2date(d).strftime('%H:%M:%S.%f')[:-3]
def generate_welch(self, method='welch', dB=False, bands=True, save=True, savedir=None):
    """
    Plot the welch power spectrum and optionally save it.

    Parameters
    ----------
    method : str, {'welch', 'mt'}
        Method by which the power spectrum was calculated ('mt' is multitaper).
    dB : bool, default False
        If True, power spectral density is shown in decibels instead of s^2/Hz.
    bands : bool, default True
        If True, the spectrum is shaded by frequency band.
    save : bool, default True
        If True, the figure is saved as well as plotted.
    savedir : str, optional
        Path to directory where the figure is to be saved.

    See Also
    --------
    EKG.calc_psd_welch : Calculate welch power spectrum.
    """
    # set title
    title = (self.metadata['file_info']['in_num'] + ' ' + self.metadata['file_info']['start_date']
             + '\n' + self.metadata['file_info']['sleep_stage'] + ' ' + self.metadata['file_info']['cycle'])
    # BUGFIX: this previously read `n.metadata[...]` — `n` is undefined here,
    # so the bare except silently swallowed a NameError and the epoch was
    # never appended to the title. Use self and an explicit membership test.
    if 'epoch' in self.metadata['file_info']:
        title = title + ' ' + self.metadata['file_info']['epoch']
    # set data to plot
    psd = self.psd_welch
    # transform units
    if dB:
        pwr = 10 * np.log10(psd['pwr'])
        ylabel = 'Power spectral density (dB)'
    else:
        pwr = psd['pwr'] / 1e6  # convert to seconds
        ylabel = 'Power spectral density (s^2/Hz)'
    fig, ax = plt.subplots()
    if not bands:
        # plot just the spectrum
        ax.plot(psd['freqs'], pwr)
    else:
        # plot the spectrum colored by frequency band
        ax.plot(psd['freqs'], pwr, color='black', zorder=10)
        colors = [None, 'yellow', 'darkorange', 'tomato']
        zdict = {0: 0.6, 1: 0.6, 2: 0.4, 3: 0.6}
        for (zord, alpha), (key, value), color in zip(zdict.items(), self.psd_fband_vals.items(), colors):
            if value['idx'] is not None:
                # parse the band edges back out of the stored "(lo, hi)" string
                xrange = [float(x) for x in self.freq_stats[key]['freq_range'][1:-1].split(",")]
                # fill spectra by band
                ax.fill_between(psd['freqs'], pwr, where=[xrange[0] <= x for x in psd['freqs']],
                                facecolor=color, alpha=alpha, zorder=zord)
    ax.set_xlim(0, 0.4)
    ax.margins(y=0)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xlabel('Frequency (Hz)')
    plt.ylabel(ylabel)
    plt.suptitle(title)
    if save:
        if savedir is None:
            print('ERROR: File not saved. Please specify savedir argument.')
        else:
            savename = os.path.join(savedir, self.metadata['file_info']['fname'].split('.')[0]) + '_psd.png'
            fig.savefig(savename, dpi=300)
    return fig
def generate_PS(self, denoised = True, confidence = 'bootstrapping'):
"""
Generate power spectrum with desired confidence levels.
Parameters
----------
denoised : bool default True
Denoise the mulitaper estimate of the power spectral density.
confidence : str {'bootstrapping', 'chi sq'}
Method with which to determine the confidence intervals.
See Also
---------
EKG.data_pre_processing : Load data to the workspace and perform pre-processing.
EKG.denoised_mt_spectral_estimation : Peform expectation maximization to estimate the denoised Eigen coefficients and denoised Multitaper spectral estimates.
EKG.direct_mt_spectral_estimation : Produce the classical multitaper estimate of the Power Spectral Density.
EKG.plot_estimates : Create figure containing the final PSD estimates with the confidence levels.
"""
# Main task 1. Load data into the workspace and specify the parameters - pre processing
# Specify the desired sampling frequency of the time series in Hz
fs = 4
# Extract the interpolated and zero centered NN time series
# NOTE(review): K returned by data_pre_processing is immediately overwritten
# by the series length on the next line — confirm which is intended.
NN_intervals_interpolated, K = self.data_pre_processing(fs)
K = NN_intervals_interpolated.shape[0]
# Set the parameters required for Spectral analysis - multi tapering
N = 512 # Number of frequency bins considered in the frequency interval [0,fs/2). This determines the frequency spacing.
NW = 2 # time half-bandwidth product of Multitapering
no_of_tapers = 3 # the number of tapers considered for Multitapering
# Set the parameters required for Confidence Intervals
CI = 0.95 # Specify the Required Confidence levels
bootstrapping_repeats = 5000 # Specify the number of bootstrap samples
scaling_fac = (1/fs)*(K / N) # Final scaling factor of the PSD estimates
freq_vector = np.arange(0.0, 0.5*fs, 0.5*fs/N)
multi_tapering_spectral_resolution = NW*fs/K
# Denoised path: EM-based estimate, then the chosen confidence method
if denoised==True:
denoised_MT_est, denoised_w_est_tapers = self.denoised_mt_spectral_estimation(NN_intervals_interpolated, N, NW, no_of_tapers,K, fs)
# Multiply by the required scaling factors to get the final spectral estimates
denoised_MT_est_final = scaling_fac*denoised_MT_est;
if confidence == "bootstrapping":
denoised_MT_est_Lower_confidence_bootstrap, denoised_MT_est_Upper_confidence_bootstrap = self.confidence_intervals_bootstrapping(denoised_MT_est, denoised_w_est_tapers, CI, bootstrapping_repeats, fs, K, N)
fig = self.plot_estimates(denoised_MT_est_final, denoised_MT_est_Lower_confidence_bootstrap, denoised_MT_est_Upper_confidence_bootstrap, fs)
plt.title('Denoised Multitaper Spectral Estimate: with %d%% Confidence Intervals - Bootstrapping'% (CI*100),fontdict = {'fontsize' : 16})
if confidence == "chi sq":
denoised_MT_est_Lower_confidence_Chi_squared, denoised_MT_est_Upper_confidence_Chi_squared = self.confidence_intervals_chi_squared(denoised_MT_est_final, CI, no_of_tapers, N)
fig = self.plot_estimates(denoised_MT_est_final, denoised_MT_est_Lower_confidence_Chi_squared, denoised_MT_est_Upper_confidence_Chi_squared, fs)
plt.title('Denoised Multitaper Spectral Estimate: with %d%% Confidence Intervals - Chi - squared test'% (CI*100),fontdict = {'fontsize' : 16})
# Direct (classical) multitaper path with the same two confidence options
if denoised==False:
direct_MT_est, direct_w_est_tapers = self.direct_mt_spectral_estimation(NN_intervals_interpolated, N, NW, no_of_tapers, fs)
# Multiply by the required scaling factors to get the final spectral estimates
direct_MT_est_final = scaling_fac*direct_MT_est
if confidence == 'bootstrapping':
direct_MT_est_Lower_confidence_bootstrap, direct_MT_est_Upper_confidence_bootstrap = self.confidence_intervals_bootstrapping(direct_MT_est, direct_w_est_tapers, CI, bootstrapping_repeats, fs, K, N)
fig = self.plot_estimates(direct_MT_est_final, direct_MT_est_Lower_confidence_bootstrap, direct_MT_est_Upper_confidence_bootstrap, fs)
plt.title('Direct Multitaper Spectral Estimate: with %d%% Confidence Intervals - Bootstrapping'% (CI*100),fontdict = {'fontsize' : 16})
if confidence == 'chi sq':
direct_MT_est_Lower_confidence_Chi_squared, direct_MT_est_Upper_confidence_Chi_squared = self.confidence_intervals_chi_squared(direct_MT_est_final, CI, no_of_tapers, N)
fig = self.plot_estimates(direct_MT_est_final, direct_MT_est_Lower_confidence_Chi_squared, direct_MT_est_Upper_confidence_Chi_squared, fs)
plt.title('Direct Multitaper Spectral Estimate: with %d%% Confidence Intervals - Chi - squared test'% (CI*100),fontdict = {'fontsize' : 16})
plt.xlabel("frequency ($Hz$)")
plt.show()
# plt.xlim([np.min(freq_vector), np.max(freq_vector)])
| CardioPy/CardioPy | cardiopy/ekg.py | ekg.py | py | 87,636 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_... |
import math
import datetime
# A literal is a constant value written directly in the code
s = 'This is String'
s2 = "This is also another String"
# text can already be placed on several different lines here
s3 = '''This is
a triple
String'''
sEmpty = ' '
# problem = 'this won't work'
solution = "This os how it´s done"
orThatWay = ' "like that" AS CITAT'
## ( \ ) a backslash must stand on its own with nothing written after it, and the statement continues on the next line
string = 'testa, ' "att" \
' Köra något nytt '
print(s3)
print(f'formatted string, want to print the value of pi : {math.pi:.4f}')
print(r'c:\user\right what do you want without having backslash to annoy you')
ss = 'this' ' and '
ss2 = 'that'
print(ss + ss2) # output: this and that
# Repeat the String
repString = 'Hej ' + 'Y' + 'a' * 2 + 'r' + 'a' * 3 + '!'
print(repString) # Output: Hej Yaaraaa!
stringen = "Iam Yara"
stringen.isdecimal() # Returns TRUE/False if all characters are decimal numbers
stringen.isalpha() # Returns TRUE/False if all characters are alphabetic
stringen.isupper() # Returns TRUE/False if all characters are uppercase
stringen.lower() # Returns copy of all characters in lowercase
stringen.upper() # Returns copy of all characters in uppercase
stringen.replace('old Str', 'new Str') # Returns copy of all characters replacing old "str1" with new "str2"
print(stringen.upper()) # Output: IAM YARA
print(stringen.replace('Y', 'S')) # OutPut: Iam Sara
#stringen[2] = 'W' -- Error
#print(stringen) -- Error
# We cannot assign to the characters of a string: strings are immutable.
print(stringen.isdecimal()) # OutPut: False
datetimeString = datetime.datetime.now()
print(datetimeString) # Output will be something like: 2023-06-20 13:48:54.283764
""" Attributes:
Datum: år, månad, dag, Tid: timme, minut, sekund, mikroSek? """
print(f'Formatted string datum & tid är {datetimeString.month}/{datetimeString.day}/{datetimeString.year}')
# Output will instead be: Formatted string datum & tid är 6/20/2023
| yararajjoub/pythonModulo | Modul3/Strings.py | Strings.py | py | 2,088 | python | sv | code | 0 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "attribute"
}
] |
438511318 | # -*- coding: utf_8 -*-
#pytho3.5
import csv
import urllib.request
import lxml
from bs4 import BeautifulSoup
import re
import codecs
# -------------------- main --------------------
if __name__ == '__main__':
    # Fetch the pedigree ("blood") page for one horse from netkeiba and
    # collect the ancestor names linked in the pedigree table.
    link = 'http://db.netkeiba.com/horse/ped/2015104976/'
    html = urllib.request.urlopen(link).read()
    soup = BeautifulSoup(html, 'lxml')
    # BUGFIX: findAll("title")[0] is a bs4 Tag; .strip() / .decode() are str
    # methods and fail on a Tag — extract the text first with get_text().
    horse_name = soup.findAll("title")[0].get_text().strip()
    print(horse_name)
    tables = soup.findAll("table", {"class": "blood_table detail"})
    if len(tables) == 0:
        # no pedigree table on the page: nothing to export
        raise SystemExit('No pedigree table found at ' + link)
    # rows of the pedigree table (<tr> elements)
    rows = tables[0].findAll("tr")
    # open the output csv in append mode as utf-8
    csvFile = codecs.open("BloodKeibaData.csv", "a", "utf-8")
    writer = csv.writer(csvFile)
    try:
        csvRow = []
        for row in rows:
            # each linked cell holds an ancestor name
            for cell in row.findAll(['a']):
                text = cell.get_text().strip()
                # skip the navigation links ("pedigree" / "offspring")
                if text == '血統' or text == '産駒':
                    continue
                csvRow.append(text)
        # NOTE: left disabled as in the original — enable to actually write
        # the collected row to BloodKeibaData.csv.
        #writer.writerow(csvRow)
    finally:
        csvFile.close()
| Ryota819/Data | 競馬/20180501/keiba_blood_to_csv.py | keiba_blood_to_csv.py | py | 2,462 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_nam... |
74082781542 | # import numpy as np
# from sympy import Symbol, Poly, simplify, re, im
# from dispersion_relation import longitudinal_disp, transverse_disp
# # Number of data points
# # N = 10
# k = Symbol('k')
# S0 = Symbol('S0')
# w = Symbol('w')
# S0 = 0.4
# w_array = np.linspace(100,1E7,5)
# for w in w_array:
# disp_rel = Poly(simplify(longitudinal_disp(S0,w,1)),k)
# print('w = ' + str(w))
# coeffs = disp_rel.all_coeffs()
# j = complex(0,1)
# real = np.array([float(re(c)) for c in coeffs])
# imag = np.array([float(im(c)) for c in coeffs])
# co = real + imag*j
# print(co)
# roots = np.roots(co)
# print(roots)
# print(np.poly(roots))
# Plotting speed and attenuation against initial saturation for longitudinal case
from config import epsilon
from dispersion_relation import longitudinal_disp
from sympy import nroots
from sympy import re, im, Symbol, simplify, Poly
import numpy as np
import matplotlib.pyplot as plt
from math import pi
plt.rc('text', usetex=True)
# Number of data points
N = 50
# Choose non-wetting material
# 0 - Air
# 1 - Oil
# 2 - Gas
material_mode = 2
# Angular-frequency grid: linear points for attenuation, log points for speed
w_lin = np.linspace(1, 1E7, N)
w_log = np.logspace(0, 7, N)
w = np.concatenate((w_lin, w_log), axis=0)
f = w/(2*pi)
# Initial saturations to sweep (epsilon keeps the last one strictly below 1)
S0_array = [0.2, 0.4, 0.6, 0.8, 1-epsilon]
S0_label = [str(i) for i in S0_array]
k = Symbol('k')
# NOTE(review): `j` is the imaginary unit here but is later reused as the
# figure-loop index below — rename one of them to avoid shadowing.
j = complex(0,1)
for i in range(len(S0_array)):
S0 = S0_array[i]
print('Progress: ' + str(int(100*(i+1)/5)) + '%')
# Dispersion relation for each frequency as a polynomial in k
disp_rel = longitudinal_disp(S0, w, material_mode)
disp_rel = [Poly(simplify(d), k) for d in disp_rel]
coeff = [d.all_coeffs() for d in disp_rel]
root_array = np.array([])
for c in coeff:
# Rebuild complex coefficients from sympy real/imag parts, then solve
real = np.array([float(re(a)) for a in c])
imag = np.array([float(im(a)) for a in c])
co = real + imag*j
roots = np.roots(co)
# keep every other root (conjugate pairs)
roots = roots[::2]
root_array = np.append(root_array, roots)
print('real')
reals = np.abs(np.real(root_array))
print(reals)
print('imag')
imags = np.abs(np.imag(root_array))
print(imags)
# One pass per wave mode (P3, P2, P1): strided slices pick that mode's roots
for l in range(3):
# speed_array = w[100:200]/[abs(re(k_array[i][j])) for i in range(len(k_array)) if i > N-1]
# attenuation_array = [abs(im(k_array[i][j])) for i in range(len(k_array)) if i < N]
print(l)
print('real_short')
realx = reals[l::3]
print(realx)
print('imag_short')
imagx = imags[l::3]
print(imagx)
# phase speed = w / Re(k) on the log grid; attenuation = Im(k) on the linear grid
speed_array = w[N:]/realx[N:]
attenuation_array = imagx[:N]
plt.figure(l)
plt.semilogx(f[N:], speed_array, label=S0_label[i])
plt.figure(l+3)
if l == 2:
plt.plot(f[:N], attenuation_array, label=S0_label[i])
else:
plt.semilogy(f[:N], attenuation_array, label=S0_label[i])
# Finalize and save one speed figure and one attenuation figure per wave mode
for j in range(3):
plt.figure(j)
plt.legend()
plt.xlabel('frequency / Hz')
plt.ylabel(r'velocity / $ms^{-1}$')
plt.title('P' + str(3-j) + '-wave Velocity Against Frequency')
plt.savefig('../plots/speed_freq_p' + str(3-j) + '.eps')
plt.clf()
plt.figure(j+3)
plt.legend()
plt.xlabel('frequency / Hz')
plt.ylabel(r'attenuation / $m^{-1}$')
plt.title('P' + str(3-j) + '-wave Attenuation Against Frequency')
plt.savefig('../plots/attenuation_freq_p' + str(3-j) + '.eps')
plt.clf() | tigeryst/alcbl | Numerical Simulation/source/archive/test_solve.py | test_solve.py | py | 3,330 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.logspace"... |
36445667629 | """add latest updates
Revision ID: 9d2bc5f28130
Revises: 98b564f17b54
Create Date: 2021-02-20 18:07:38.182527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d2bc5f28130'
down_revision = '98b564f17b54'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``latest_updates`` table (integer PK, timestamp, JSON payload)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('latest_updates',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updates', sa.JSON(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse :func:`upgrade` by dropping the ``latest_updates`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('latest_updates')
    # ### end Alembic commands ###
| mhelmetag/mammoth | alembic/versions/9d2bc5f28130_add_latest_updates.py | 9d2bc5f28130_add_latest_updates.py | py | 898 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
18316291673 | import abc
import devtools
from typing import Any, AsyncIterator, Optional
import kubernetes_asyncio.watch
from servo.logging import logger
class BaseKubernetesHelper(abc.ABC):
    """Shared watch/wait machinery for Kubernetes API object helpers.

    Subclasses supply ``watch_args`` (what to watch) and ``is_ready``
    (readiness predicate); this base class implements the two wait loops.
    """

    @classmethod
    @abc.abstractmethod
    async def watch_args(cls, api_object: object) -> AsyncIterator[dict[str, Any]]:
        # NOTE(review): used below via `async with`, so implementations
        # apparently return an async context manager yielding kwargs for
        # Watch().stream() — confirm against a concrete subclass.
        ...

    @classmethod
    @abc.abstractmethod
    def is_ready(cls, api_object: object, event_type: Optional[str] = None) -> bool:
        # Readiness predicate for one watch event's object.
        ...

    @classmethod
    async def wait_until_deleted(cls, api_object: object) -> None:
        """Block until a DELETED watch event is seen for *api_object*."""
        async with cls.watch_args(api_object) as watch_args:
            async with kubernetes_asyncio.watch.Watch().stream(**watch_args) as stream:
                async for event in stream:
                    cls.log_watch_event(event)
                    if event["type"] == "DELETED":
                        stream.stop()
                        return

    @classmethod
    async def wait_until_ready(cls, api_object: object) -> None:
        """Block until ``is_ready`` returns True for a watched event."""
        async with cls.watch_args(api_object) as watch_args:
            async with kubernetes_asyncio.watch.Watch().stream(**watch_args) as stream:
                async for event in stream:
                    cls.log_watch_event(event)
                    if cls.is_ready(event["object"], event["type"]):
                        stream.stop()
                        return

    @classmethod
    def log_watch_event(cls, event: dict[str, Any]) -> None:
        """Log a watch event at debug level (full object dump at trace level)."""
        event_type: str = event["type"]
        obj: dict = event["object"].to_dict()
        kind: str = obj.get("kind", "UNKNOWN")
        metadata = obj.get("metadata", {})
        name: str = metadata.get("name", "UNKNOWN")
        namespace: str = metadata.get("namespace", "UNKNOWN")
        logger.debug(
            f"watch yielded event: {event_type} on kind {kind} {name}"
            f" in namespace {namespace}"
        )
        logger.trace(devtools.pformat(obj))
| opsani/servox | servo/connectors/kubernetes_helpers/base.py | base.py | py | 1,920 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "abc.ABC",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.AsyncIterator",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any"... |
14863334950 | from argparse import ArgumentParser
from screenshot_handler.screen_grabber import ScreenGrabber
def read_arguments(argv=None):
    """Parse the command-line options for the screenshot grabber.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:], so existing callers
            are unaffected; passing a list makes the function testable.

    Returns:
        argparse.Namespace with ``dest_folder`` and ``monitor`` attributes
        (both None when the corresponding flag is absent).
    """
    parser = ArgumentParser()
    parser.add_argument('-d', '--dest-folder', help='Destination folder for the images')
    parser.add_argument('-m', '--monitor', help='Dimensions for cropped screenshots in the following format: TOPxLEFT_WIDTHxHEIGHT')
    return parser.parse_args(argv)
def screenshot_loop(screen_grabber):
    """Capture screenshots forever, naming each one with a running index."""
    index = 0
    while True:
        # Filename is just the stringified counter; never terminates on its own.
        screen_grabber.grab_screenshot(str(index))
        index += 1
def main():
    """Wire CLI arguments into a ScreenGrabber and capture until killed."""
    arguments = read_arguments()
    dest_folder = arguments.dest_folder
    monitor = arguments.monitor
    screen_grabber = ScreenGrabber(dest_folder, monitor=monitor)
    screenshot_loop(screen_grabber)
if __name__ == '__main__':
main()
| Luisetex/HexAI | grab_images.py | grab_images.py | py | 799 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "screenshot_handler.screen_grabber.ScreenGrabber",
"line_number": 21,
"usage_type": "call"
}
] |
38659585982 | from django.forms.models import model_to_dict
from django.http import JsonResponse
import re
class JsonBaseMixin:
    """Django view mixin: answer with JSON when the client asks for it.

    Content negotiation happens in ``dispatch``; ``render_to_response``
    switches to a JsonResponse when the negotiated type is application/json.
    """
    json_response_class = JsonResponse
    # NOTE(review): 'text/http' looks like a typo for 'text/html' — with this
    # value the HTML branch still works only because it is the fallback.
    response_type = 'text/http'
    accepted_types = ['text/http', 'application/json']
    def dispatch(self, response, *args, **kwargs):
        # NOTE(review): the parameter named `response` is actually the
        # incoming request (it carries META) — confirm before renaming.
        accept = response.META.get('HTTP_ACCEPT', 'text/html').split(',')
        for t in accept:
            # Remember the first Accept entry we support.
            if t in self.accepted_types:
                self.response_type = t
                break
        return super(JsonBaseMixin, self).dispatch(response, *args, **kwargs)
    def render_to_response(self, context, **response_kwargs):
        # Fall back to the normal (HTML) rendering unless JSON was negotiated.
        if not self.response_class or self.response_type != 'application/json':
            return super(JsonBaseMixin, self).render_to_response(context, **response_kwargs)
        return self.json_response_class({
            'object': self.get_object_dict()
        }, **response_kwargs)
    def get_object_dict(self):
        # Hook for subclasses; base implementation returns None.
        pass
class JsonListMixin(JsonBaseMixin):
    """JSON support for list views: serializes every object in the queryset."""

    # Optional whitelist/blacklist forwarded to model_to_dict.
    fields = None
    exclude = None

    def get_object_dict(self):
        """Return one plain dict per object in the queryset."""
        return list(map(self.serialize, self.get_queryset()))

    def serialize(self, item):
        """Convert a single model instance into a plain dict."""
        return model_to_dict(item, fields=self.fields, exclude=self.exclude)
class JsonDetailMixin(JsonBaseMixin):
    """JSON support for detail views: serializes the single resolved object."""

    # Optional whitelist/blacklist forwarded to model_to_dict.
    fields = None
    exclude = None

    def get_object_dict(self):
        """Return the dict form of the object this view resolves."""
        instance = self.get_object()
        return self.serialize(instance)

    def serialize(self, instance):
        """Convert the model instance into a plain dict."""
        return model_to_dict(instance, fields=self.fields, exclude=self.exclude)
class OrderableMixin(object):
    """View mixin validating a ``?order=<field>`` query parameter.

    The requested ordering is honoured only when the (sign-stripped) field
    name appears in ``orderable_fields``; otherwise the view's default
    ``ordering`` is used.
    """

    # '__all__' means any requested field may be used for ordering.
    orderable_fields = '__all__'

    def get_ordering(self):
        """Return the requested ordering if allowed, else the default."""
        requested = self.request.GET.get('order', self.ordering)
        if self.orderable_fields == '__all__':
            return requested
        match = re.match('-?([0-9a-zA-Z_]+)', requested)
        if match is not None and match.group(1) in self.orderable_fields:
            return requested
        return self.ordering
class FilterMixin(object):
    """View mixin that narrows the queryset from whitelisted GET parameters."""

    # Maps query-string parameter name -> ORM lookup expression.
    allowed_filters = {}

    def get_queryset(self):
        """Apply every whitelisted filter that is present in request.GET."""
        queryset = super(FilterMixin, self).get_queryset()
        params = self.request.GET
        criteria = {
            lookup: params.get(field)
            for field, lookup in self.allowed_filters.items()
            if field in params
        }
        # filter() is applied even with no criteria, matching the original flow.
        return queryset.filter(**criteria)
| niklasmh/it1901-band-booking-project | json_views/views.py | views.py | py | 2,002 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.http.JsonResponse",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.models.model_to_dict",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.forms.models.model_to_dict",
"line_number": 44,
"usage_type": "call"
}... |
27519507809 | from flask import Flask,render_template,request,redirect,url_for
import urllib
import urllib.request
from bs4 import BeautifulSoup
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
app = Flask(__name__)
app.secret_key="flash message"
@app.route('/')
def expatcoinAfrique():
    """Scrape apartment rental listings from coinafrique and expat-dakar.

    Ten result pages are fetched from each site; every listing becomes a
    dict with keys location/prix/lieu/source/image.  The combined list is
    dumped to a JSON file and rendered with per-neighbourhood counts.

    Fixes vs. the original: the ten copy-pasted per-page zip blocks are a
    loop, the JSON output file is closed via a with-statement (it leaked),
    and dead locals (playersave*, prix/labels/lieu/prixn) are removed.
    """
    grand = []

    def fetch_soup(url):
        # Both sites are fetched and parsed the same way.
        page = urllib.request.urlopen(url)
        return BeautifulSoup(page, "html.parser")

    # ------------------------- coinafrique -------------------------
    for link in range(0, 10):
        soup = fetch_soup("https://sn.coinafrique.com/categorie/appartements/51?page=" + str(link) + "")
        listing = soup.find('div', {'class': 'column four-fifth'})
        prix_page = [p.text for p in listing.findAll('p', {'class': 'card-title activator orange-text'})]
        location_page = [p.text.replace('\xa0', '') for p in listing.findAll('p', {'class': 'card-desc'})]
        lieu_page = [
            p.text.replace(' location_on\xa0\xa0', '').replace(", Dakar", "").replace("'", "").replace(" ", "").replace("-", "")
            for p in listing.findAll('p', {'class': 'card-location'})
        ]
        source_page = []
        image_page = []
        for card in soup.findAll('div', {'class': 'card-image waves-effect waves-block waves-light'}):
            image_page.append(card.find('img').get('src'))
            source_page.append("https://sn.coinafrique.com" + card.find('a').get('href'))
        # One dict per listing (the original repeated this zip once per page).
        for location, prix, lieu, source, image in zip(location_page, prix_page, lieu_page, source_page, image_page):
            grand.append({'location': location, 'prix': prix, 'lieu': lieu, 'source': source, 'image': image})

    # ------------------------- expat-dakar -------------------------
    for link in range(0, 10):
        soup = fetch_soup("https://www.expat-dakar.com/appartements-a-louer?type=1&page=" + str(link) + "")
        rows = soup.findAll('div', {'class': 'listing-details-row'})
        location_page = [
            r.find('h2').text.strip() + r.find('span').text.strip().replace('Nombre de chambres', ' Nombre de chambres')
            for r in rows
        ]
        lieu_page = [
            r.find('span', {'class': 'picto picto-place ed-icon-before icon-location'}).text.replace("-", "").replace(" ", "")
            for r in rows
        ]
        prix_page = [
            r.find('span', {'class': 'prix'}).text.replace('\n\t\t\t\t\t\t\t\t\t\t', '').replace('\t\t\t\t\t\t\t\t\t', '').replace('\xa0', ' ')
            for r in rows
        ]
        source_page = []
        image_page = []
        for card in soup.findAll('div', {'class': 'listing-card-inner'}):
            anchor = card.find('a')
            if anchor is not None:
                source_page.append("https://www.expat-dakar.com" + anchor.get('href'))
            try:
                # Lazy-loaded thumbnails keep their URL in data-src.
                image_page.append("https://www.expat-dakar.com" + card.a.find('img')['data-src'])
            except Exception:
                continue
        for location, prix, lieu, source, image in zip(location_page, prix_page, lieu_page, source_page, image_page):
            grand.append({'location': location, 'prix': prix, 'lieu': lieu, 'source': source, 'image': image})

    # Per-neighbourhood listing counts for the template's chart.
    df = pd.DataFrame(data=grand)
    fntion = df.groupby('lieu')['location'].count()
    indexe = fntion.index.tolist()
    nombre = list(fntion)

    # Persist the raw scrape; with-statement fixes the leaked file handle.
    with open("/home/moussa/Documents/PROJET/projethtml/web&scrapping/expat-coinafriq.json", 'w+') as outfile:
        outfile.write(json.dumps(grand, indent=2))
    return render_template('test1.html', grand=grand, indexe=indexe, nombre=nombre)
if __name__ == '__main__':
app.run(debug=True)
| mousaa32/web-scrapping | expat-coinAfrique.py | expat-coinAfrique.py | py | 9,320 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
8695805257 | from lxml import etree
from sys import stdout, stderr
def mk_comment(text):
    """Wrap *text* in a C-style block comment, trimming surrounding spaces only."""
    trimmed = text.strip(' ')
    return '/*%s*/\n' % trimmed
header_gen = u"""var wl = require('./build/Release/wayland_client');
var interfaces = {};
exports.interfaces = interfaces;
"""
interface_template = u"""function %(name)s(proxy) {
this.proxy = proxy;
proxy.spy(this);
};
%(name)s.prototype = {
%(prototype)s
};
%(name)s.interface = wl.get_interface_by_name(%(name)r);
interfaces[%(name)r] = %(name)s
"""
default_proxy_template = """ listen: function(listeners) {
var self = this;
this.proxy.listen(function(name){
if (listeners[name]) listeners[name].apply(self, Array.prototype.slice.call(arguments, 1));
});
},
destroy: function() {
this.proxy.destroy()
}"""
request_template = u""" %(name)s: function(%(args)s) {
this.proxy.marshal(%(argv)s);
}"""
factory_template = u""" %(name)s: function(%(args)s) {
new_id = this.proxy.create(%(spy)s.interface);
this.proxy.marshal(%(argv)s);
return new %(spy)s(new_id);
}"""
factory_dyn_template = u""" %(name)s: function(%(args)s) {
new_id = this.proxy.create(spy.interface);
this.proxy.marshal(%(argv)s);
return new spy(new_id);
}"""
def generate_request(index, request):
    """Render one Wayland <request> element as a JS method body.

    *index* is the request's opcode (position in the interface); the chosen
    template depends on the argument kinds found in the XML node.
    """
    data = dict(name = request.attrib['name'], magic=index)
    args = []
    argv = [str(index)]
    template = request_template
    for node in request:
        # Scalar arguments are passed straight through to marshal().
        if node.tag == 'arg' and node.attrib['type'] in ('int', 'uint', 'fd', 'string', 'fixed'):
            name = node.attrib['name']
            args.append(name)
            argv.append(name)
        elif node.tag == 'arg' and node.attrib['type'] == 'object':
            name = node.attrib['name']
            args.append(name)
            # Unwrap the JS wrapper to its proxy, tolerating null/undefined.
            argv.append('(%(var)s === null || %(var)s === undefined)?%(var)s:%(var)s.proxy' % dict(var=name))
        elif node.tag == 'arg' and node.attrib['type'] == 'new_id':
            if 'interface' in node.attrib:
                # The created object's interface is known statically.
                template = factory_template
                data['spy'] = node.attrib['interface']
                argv.append('new_id')
            else:
                # Interface chosen at runtime (e.g. wl_registry.bind).
                template = factory_dyn_template
                args.append('spy, version')
                argv.append('spy.interface.get_name(), version, new_id')
        elif node.tag == 'description':
            continue
        else:
            stderr.write("%s %r %r" % (node.tag, node.attrib, node[:]))
            stderr.write("\n")
            raise Exception("unknown argument node %r" % node)
    data['args'] = ', '.join(args)
    data['argv'] = ', '.join(argv)
    return template % data
def generate_enum_const(enum_name, const):
    """Render one enum <entry> as an upper-cased 'ENUM_NAME: value' JS property."""
    qualified = '%s_%s' % (enum_name, const.attrib['name'])
    return '%s: %s' % (qualified.upper(), const.attrib['value'])
def generate_interface(interface):
    """Collect the JS prototype body (methods + enum constants) for one interface.

    Returns a dict with a single 'prototype' key, ready to be merged into
    interface_template along with the XML node's attributes.
    """
    count = 0
    methods = []
    enums = []
    name = interface.attrib['name']
    if name != 'wl_display':
        # Every proxy except the display gets the generic listen/destroy pair.
        methods.append(default_proxy_template)
    for node in interface:
        if node.tag == 'description':
            continue
        elif node.tag == 'request':
            # Requests are numbered by position; that index is the opcode.
            methods.append(generate_request(count, node))
            count += 1
        elif node.tag == 'event':
            # Events are dispatched dynamically via listen(); nothing generated.
            continue
        elif node.tag == 'enum':
            enum_name = node.attrib['name']
            # NOTE: the inner loop reuses the name `node`, shadowing the outer one.
            for node in node:
                if node.tag == 'entry':
                    enums.append(generate_enum_const(enum_name, node))
                elif node.tag == 'description':
                    continue
                else:
                    stderr.write("%s %r %r" % (node.tag, node.attrib, node[:]))
                    stderr.write("\n")
                    raise Exception("unknown entry node %r" % node)
        elif node.tag == etree.Comment:
            continue
        else:
            raise Exception("unknown interface node %r" % node)
    return dict(prototype=',\n'.join(methods + enums))
# Parse the Wayland protocol description and emit the JS bindings to stdout.
root = etree.parse("wayland.xml").getroot()
stdout.write(header_gen)
for node in root:
    if node.tag == 'copyright':
        # Copyright text is passed through as a JS block comment.
        # NOTE(review): writes encoded bytes to stdout (Python 2 style),
        # while header_gen above is written unencoded — confirm target runtime.
        stdout.write(mk_comment(node.text).encode('utf-8'))
    elif node.tag == 'interface':
        data = generate_interface(node)
        data.update(node.attrib)
        stdout.write((interface_template % data).encode('utf-8'))
    else:
        raise Exception("unknown root node")
| cheery/node-wayland | tools/nodeland-scanner.py | nodeland-scanner.py | py | 4,464 | python | en | code | 61 | github-code | 36 | [
{
"api_name": "sys.stderr.write",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_n... |
11044632470 | import numpy as np
from scipy.special import expit as sig
import matplotlib.pyplot as plt
def sigmoid(arr):
    """Elementwise logistic activation; delegates to scipy's numerically stable expit."""
    return sig(arr)
def sigmoid_prime(arr):
    """Derivative of the logistic function, s(x) * (1 - s(x)), elementwise."""
    s = sig(arr)
    return s * (1.0 - s)
def train(epoches,X,Y,weightHidden,weightOutput,lR):
    """Gradient-descent training: sigmoid hidden layer, linear output.

    Returns the updated (weightHidden, weightOutput) pair after `epoches`
    full-batch passes at learning rate `lR`.
    """
    for epoch in range(epoches):
        # Forward pass.
        hiddenIn = np.dot(X,weightHidden)
        hiddenOut = sigmoid(hiddenIn)
        outIn = np.dot(hiddenOut,weightOutput)
        outOut = outIn
        error = Y - outOut
        # Output-layer update.
        dErrorByDW2 = np.dot(hiddenOut.T,error*lR)
        weightOutput = weightOutput+dErrorByDW2
        # NOTE(review): the hidden gradient uses the ALREADY-updated output
        # weights, which is not textbook backprop — confirm this is intended.
        dErrorByDW1 = np.dot(X.T,np.dot(error*lR,weightOutput.T)*sigmoid_prime(hiddenIn))
        weightHidden = weightHidden+dErrorByDW1
    return weightHidden,weightOutput
def test(testData,weightsHidden,weightsOutput):
act_hidden = sigmoid(np.dot(testData, weightHidden))
return (np.dot(act_hidden, weightOutput))
"""
X = np.array([[0,0],[0,1],[1,0],
[0.1,0.2],[0.1,0.4],[0.4,0.9],
[0.9,0],[0.99,0.99],[0.97,0.89],
[0.3,0.3],[0.89,0.78],[0.12,0.56]])
Y = np.array([[0],[1],[1],
[0],[0],[1],
[1],[1],[1],
[0],[1],[1]])
inputLayerSize, hiddenNeuronsSize, outputSize = 2, 3, 1
"""
# Training data: Celsius values 50..79 with their Fahrenheit equivalents.
X = []
Y = []
for i in range(50,80):
    X.append([i*1.0])
    Y.append([(i*1.8)+32.0])
X = np.array(X)
Y = np.array(Y)
# Network shape: 1 input, 4 hidden neurons, 1 linear output.
inputLayerSize, hiddenNeuronsSize, outputSize = 1, 4, 1
epoches = 100000
lR = 0.001
# Random initial weights in [0, 1).
weightHidden = np.random.uniform(size=(inputLayerSize, hiddenNeuronsSize))
weightOutput = np.random.uniform(size=(hiddenNeuronsSize, outputSize))
#weightHidden , weightOutput = train(epoches,X,Y,weightHidden,weightOutput,lR)
# Inline training loop; duplicates the body of train() above.
for epoch in range(epoches):
    hiddenIn = np.dot(X,weightHidden)
    hiddenOut = sigmoid(hiddenIn)
    outIn = np.dot(hiddenOut,weightOutput)
    outOut = outIn
    error = Y - outOut
    dErrorByDW2 = np.dot(hiddenOut.T,error*lR)
    weightOutput = weightOutput+dErrorByDW2
    dErrorByDW1 = np.dot(X.T,np.dot(error*lR,weightOutput.T)*sigmoid_prime(hiddenIn))
    weightHidden = weightHidden+dErrorByDW1
print (error)
# Predict 69 C -> expected 156.2 F.
output = test(np.array([69]),weightHidden,weightOutput)
print (output)
print ('expected ',(69*1.8)+32.0)
| mars-boy/deeplearning_handson | neuralnet.py | neuralnet.py | py | 2,134 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.special.expit",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number":... |
43701305099 | from tkinter import *
from errorMessage import ErrorMessage
from asignarMedico import AsignarMedico
import connection as con
import customtkinter as ct
class IngresoMedico:
    def __init__(self, parent):
        """Build the doctor-registration window as a Toplevel of *parent*."""
        self.parent = parent
        self.win = Toplevel(parent)
        self.win.title("Ingreso Medico")
        # Labeled entry fields for each doctor attribute.
        etiTitle = ct.CTkLabel(self.win, text="Ingreso medico", font=("Arial", 20, "bold"))
        etiNum_col = ct.CTkLabel(self.win, text="Numero colegiado")
        inputNum_col = ct.CTkEntry(self.win, width=200)
        etiNombre = ct.CTkLabel(self.win, text="Nombre")
        inputNombre = ct.CTkEntry(self.win, width=200)
        etiApellido = ct.CTkLabel(self.win, text="Apellido")
        inputApellido = ct.CTkEntry(self.win, width=200)
        etiEspecialidad = ct.CTkLabel(self.win, text="Especialidad")
        inputEspecialidad = ct.CTkEntry(self.win, width=200)
        etiTelefono = ct.CTkLabel(self.win, text="Telefono")
        inputTelefono = ct.CTkEntry(self.win, width=200)
        etiDireccion = ct.CTkLabel(self.win, text="Direccion")
        inputDireccion = ct.CTkEntry(self.win, width=200)
        etiEmail = ct.CTkLabel(self.win, text="Email")
        inputEmail = ct.CTkEntry(self.win, width=200)
        # Buttons capture the entry widgets in their lambdas so inputMedico
        # can read the current field values at click time.
        buttonSignup = ct.CTkButton(self.win, text="Registrar", command= lambda: self.inputMedico(inputNum_col, inputNombre, inputApellido, inputEspecialidad, inputTelefono, inputDireccion, inputEmail), width=100)
        buttonAsignar = ct.CTkButton(self.win, text="Asignar", command= lambda: AsignarMedico(self.win), width=100)
        buttonClose = ct.CTkButton(self.win, text="Close", command= lambda: self.close(), width=100)
        # Stack everything vertically.
        etiTitle.pack(pady=5)
        etiNum_col.pack()
        inputNum_col.pack(pady=5)
        etiNombre.pack()
        inputNombre.pack(pady=5)
        etiApellido.pack()
        inputApellido.pack(pady=5)
        etiEspecialidad.pack()
        inputEspecialidad.pack(pady=5)
        etiTelefono.pack()
        inputTelefono.pack(pady=5)
        etiDireccion.pack()
        inputDireccion.pack(pady=5)
        etiEmail.pack()
        inputEmail.pack(pady=5)
        buttonSignup.pack(pady=5)
        buttonAsignar.pack(pady=5)
        buttonClose.pack(pady=5)
        self.win.geometry("600x1000")
    def inputMedico(self,inputNum_col, inputNombre, inputApellido, inputEspecialidad, inputTelefono, inputDireccion, inputEmail):
        """Insert the doctor described by the form fields into the medico table."""
        # SECURITY(review): the INSERT is built by string concatenation from
        # user input — SQL injection risk. con.connect appears to take only a
        # raw query string; switch to parameterized queries if its API allows.
        query = r"""insert into medico values('""" + inputNum_col.get() + r"""', '""" + inputNombre.get() + r"""', '""" + inputApellido.get() + r"""', '""" + inputEspecialidad.get() + r"""', '""" + inputTelefono.get() + r"""', '""" + inputDireccion.get() + r"""', '""" + inputEmail.get() + r"""');"""
        results = con.connect(query)
        # Empty result string signals success; anything else is an error.
        if (results == ""):
            mensaje = "Se ha registrado correctamente"
            ErrorMessage(self.win, mensaje=mensaje)
        else:
            mensaje = "Ha ocurrido un error al registrar"
            ErrorMessage(self.win, mensaje=mensaje)
def close(self):
self.win.destroy() | angelcast2002/proyecto2_BD_python | ingresoMedico.py | ingresoMedico.py | py | 3,148 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "customtkinter.CTkLabel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkLabel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkEntry",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
7375849160 | #!/usr/bin/python3
import shamir
import sqlite3
import rsa_encrypt
import time
import base64
import settings
#Class to hold the database
class db:
    # Simple value holder pairing a database's name with a key.
    # NOTE(review): add_shares reads `.key` from the objects returned by
    # rsa_encrypt.get_keys(), which presumably have this shape — confirm.
    name = ""
    key = ""
    def __init__(self):
        self.name = ""
        self.key = ""
#add user secret to the secrets database
def add_secret(username, name, secret, currtime):
    """Store (or overwrite) a user's Shamir secret in the secrets database.

    Args:
        username: primary-key id of the user.
        name: human-readable name stored alongside the secret.
        secret: the master secret; stored via str().
        currtime: timestamp recorded with the row.
    """
    #initiate database connection
    conn = sqlite3.connect(settings.DBdir + "secrets.db")
    # try/finally fixes a resource leak: the original left the connection
    # open whenever either execute() raised.
    try:
        c = conn.cursor()
        #make sure table exists
        c.execute("CREATE TABLE IF NOT EXISTS secrets(id PRIMARY KEY, name, secret, timestamp DOUBLE)")
        # REPLACE keeps exactly one row per user id.
        c.execute("REPLACE INTO secrets VALUES (?,?,?,?)", [username, name, str(secret), currtime])
        conn.commit()
    finally:
        conn.close()
    return
#Encrypt shares with db_keys and store them into their respective databases
def add_shares(username, shares, keys, currtime):
    """Encrypt each share with its database's RSA key and store one per DB.

    Returns -1 when the number of shares does not match the number of
    databases in settings.DBS (prevents share loss or oversharing).
    """
    #Grab database keys
    db_keys = rsa_encrypt.get_keys(settings.DBS)
    #shares must be equal to dbs to prevent loss or oversharing
    if((not len(shares) == len(settings.DBS))):
        return -1
    #For each database
    for i in range(len(settings.DBS)):
        #initiate database connection
        conn = sqlite3.connect(settings.DBdir + settings.DBS[i] + ".db")
        c = conn.cursor()
        #make sure the shares table exists
        create = "CREATE TABLE IF NOT EXISTS enc_shares(id PRIMARY KEY, share, timestamp DOUBLE)"
        c.execute(create)
        # Serialize the share as "user:x:y:key" before encryption.
        payload = username + ":" + str(shares[i][0]) + ":" + str(shares[i][1]) + ":" + str(keys[i])
        #Grab the database key for the current database
        k = db_keys[settings.DBS[i]].key
        #encrypt the share string with the database public key
        payload = rsa_encrypt.encrypt_str(k, payload)
        #insert or replace the encrypted share, the username, and a timestamp into the database
        c.execute("REPLACE INTO enc_shares VALUES(?, ?, ?)", [username, payload, currtime])
        #commit the action and close the database
        conn.commit()
        conn.close()
    return
#Generate the secrets for the sharing scheme to use
def gen_secrets(username, name, keys):
    """Create a Shamir secret for the user and distribute encrypted shares.

    Returns -1 when there are fewer databases (or keys) than settings.TOTAL.
    """
    #Validate that there are enough databases
    if(len(settings.DBS) < settings.TOTAL) or len(keys) < settings.TOTAL:
        return -1
    #Generate the secret and shares
    secret, shares = shamir.make_random_shares(settings.THRESH, settings.TOTAL)
    # Single timestamp so the secret and all shares are versioned together.
    currtime = time.time()
    #add the secret to the secrets database
    add_secret(username, name, secret, currtime)
    #add encrypted shares to the shares db
    add_shares(username, shares, keys, currtime)
    return
#add a user to the system given a username, name, and key list
def add_user(username, name, keys_list):
    """Register a user; returns -1 without side effects if any key is empty."""
    # Every key must be non-empty before we generate anything.
    if any(key == "" for key in keys_list):
        return -1
    # Generate the user's secret and distribute the shares.
    gen_secrets(username, name, keys_list)
    return
#if run as main
if __name__ == "__main__":
#Exit if client node
if not settings.ID == 'auth':
print("run this on an auth node")
exit(0)
#Add test users
add_user("r3k", "Ryan Kennedy", ["111111"] * settings.TOTAL)
add_user("hal", "Halston Sellentin", ["111111"] * settings.TOTAL ) | sellenth/crow | shamir/code/shamir_gen.py | shamir_gen.py | py | 3,462 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "settings.DBdir",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "rsa_encrypt.get_keys",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "settings.DBS... |
29282168130 | # encoding=utf-8
import config
import datetime
import threading
import time
# Daily job: clear the list of advertised-site IPs held in config.
def clear_config_ip_days(interval):
    """Clear config.url_ip_list, then re-arm a timer to repeat every *interval* seconds."""
    config.url_ip_list.clear()
    # Bug fix: pass `interval` through to the rescheduled call. Without
    # args=(interval,), the recursive invocation fired by the Timer raised
    # TypeError (missing positional argument) and the job never repeated.
    threading.Timer(interval, clear_config_ip_days, args=(interval,)).start()
# Start a task so that it first runs at a fixed hour of the day.
def task_start_day_hour(task, which_hour=0, max_error=10, interval=86400):
    """Poll the clock and launch *task* once the given hour is reached.

    task : callable to start; invoked as task(interval)
    which_hour : hour of day (0-23) at which to launch the task
    max_error : polling granularity in seconds (maximum start delay)
    interval : seconds between repeated runs, forwarded to the task
    """
    while True:
        now = datetime.datetime.now()
        if now.hour == which_hour:
            task(interval)
            return
        else:
            # Not the target hour yet; sleep and check again.
            time.sleep(max_error)
| gaowenhao/AdProject | tools.py | tools.py | py | 757 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "config.url_ip_list.clear",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "config.url_ip_list",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "date... |
28248113208 | from telebot.types import Message
from loader import bot
from database.orm import User
from states.states import UserStates
from keyboards.inline.inline import get_language_keyboard
from utils.utils import get_user_state
from telebot.callback_data import CallbackData
ru_lang = CallbackData('ru_RU', prefix="search")
@bot.message_handler(commands=['language'])
def language_handler(message: Message):
    """Handle /language: verify registration, then show the language keyboard."""
    print('language_handler func')
    chat_id = message.chat.id
    user_id = message.from_user.id
    # Unregistered users are told to /start first.
    if User.get_or_none(User.user_id == user_id) is None:
        bot.send_message(user_id, "Вы не зарегистрированы. Напишите /start")
        return
    # Enter the language-selection state and stash context for the callback.
    bot.set_state(chat_id, UserStates.select_language)
    with bot.retrieve_data(chat_id) as data:
        data["user_id"] = user_id
        data["state"] = UserStates.select_language
    # bot.register_next_step_handler(message, process_select_language)
    bot.send_message(chat_id, "Select language", reply_markup=get_language_keyboard())
def process_select_language(message: Message, answer: str):
    """Validate the chosen language code and persist it on the user record."""
    print('process_select_language func')
    chat_id = message.chat.id
    user_id = message.from_user.id
    language = answer
    # Only the two supported locale codes are accepted.
    if language not in ['ru_RU', 'en_EN']:
        bot.send_message(chat_id, 'Incorrect language')
        bot.delete_state(chat_id)
        return
    user = User.get_or_none(User.user_id == user_id)
    if user:
        user.language = language
        user.save()
        bot.send_message(chat_id, 'Language saved!')
    # Leave the selection state whether or not the user was found.
    bot.delete_state(chat_id)
@bot.callback_query_handler(func=None, config=ru_lang.filter())
def callback_language_worker(call):
    """Route language-keyboard callbacks to the selection handler when in state."""
    print('language callback_worker func')
    chat_id = call.message.chat.id
    state = get_user_state(bot, chat_id)
    # Only react while the chat is actually choosing a language.
    if state == UserStates.select_language.name:
        process_select_language(call.message, call.data)
| makushatnik/travelbot | handlers/custom_handlers/language.py | language.py | py | 1,900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.callback_data.CallbackData",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "telebot.types.Message",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "database.orm.User.get_or_none",
"line_number": 17,
"usage_type": "call"
},
{
... |
27379695129 | import os
import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify
from tensorflow.keras.preprocessing import image
import tensorflow_hub as hub
from PIL import Image
import io
import cv2
import uuid
import datetime
import random
import firebase_admin
from firebase_admin import credentials, firestore, storage
cred = credentials.Certificate("wearitsuw.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
my_model = tf.keras.models.load_model('clothing-recognition.h5', custom_objects={'KerasLayer': hub.KerasLayer})
app = Flask(__name__)
def upload_image_to_storage(image, filename):
    """Upload a JPEG file-like object to Firebase Storage; return its public URL."""
    bucket = storage.bucket('wearitsuw.appspot.com')
    blob = bucket.blob(filename)
    blob.content_type = 'image/jpeg'
    # Rewind: the caller has already read() this stream once.
    image.seek(0)
    blob.upload_from_file(image, content_type='image/jpeg')
    # Grant public, unauthenticated read access to the stored object.
    blob.make_public()
    return blob.public_url
def get_random_image(num_images):
    """Return the imageUrl of *num_images* randomly sampled clothing documents."""
    documents = db.collection('clothing').get()
    # random.sample raises ValueError if fewer documents exist, as before.
    sampled = random.sample(documents, num_images)
    return [document.get('imageUrl') for document in sampled]
@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded clothing image and detect its dominant centre colour.

    Expects a multipart upload under key 'file'. Stores the image in Firebase
    Storage, writes the prediction document to the 'clothing' collection and
    returns it as JSON. Returns the string "Invalid Image" on failure (kept
    for backward compatibility with existing clients).
    """
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file uploaded'})
        # Index -> class name, in the order the model was trained with.
        label = ['Coat', 'Sweter', 'Skirt', 'Polo', 'T-Shirt', 'Shorts', 'Hoodie', 'Jacket', 'Shirt (Kemeja)', 'Dress', 'Denim_Jacket', 'Pants', 'Jeans', 'Gym_Jacket', 'Blazzer']
        label_mapping = {
            'Blazer': 11,
            'Coat': 12,
            'Denim_Jacket': 13,
            'Dress': 14,
            'Gym_Jacket': 15,
            'Hoodie': 16,
            'Jacket': 17,
            'Jeans': 18,
            'Shorts': 19,
            'Pants': 20,
            'Shirt': 21,
            'Skirt': 22,
            'Sweater': 23,
            'T-Shirt': 24
        }
        # The model's label list uses spellings that differ from the keys of
        # label_mapping ('Sweter', 'Blazzer', 'Shirt (Kemeja)'); previously
        # those predictions raised a KeyError that the bare `except` hid and
        # the endpoint answered "Invalid Image". Normalise them instead.
        label_aliases = {
            'Sweter': 'Sweater',
            'Blazzer': 'Blazer',
            'Shirt (Kemeja)': 'Shirt',
        }
        color_mapping = {
            'Black': 0,
            'White': 255,
            'Red': 178,
            'Orange': 69,
            'Yellow': 510,
            'Green': 250,
            'Blue': 205,
            'Violet': 127
        }
        filenya = request.files['file']
        img = Image.open(io.BytesIO(filenya.read()))
        img = img.resize((224, 224))
        x = image.img_to_array(img)
        x /= 255
        x = np.expand_dims(x, axis=0)
        images = np.vstack([x])
        pred_arr = my_model.predict(images, batch_size=5)
        # Renamed from `predict`, which shadowed this view function.
        predicted_idx = np.argmax(pred_arr, axis=1)
        prediction = label_aliases.get(label[predicted_idx[0]], label[predicted_idx[0]])
        # Detect the colour of the centre pixel of the (resized) image.
        img_cv2 = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        height, width, _ = img_cv2.shape
        x = width // 2
        y = height // 2
        color_code = img_cv2[y, x]
        red, green, blue = color_code[::-1]
        if red < 50 and green < 50 and blue < 50:
            color = "Black"
        elif red > 200 and green > 200 and blue > 200:
            color = "White"
        else:
            hsv = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2HSV)
            hue_value = hsv[y, x, 0]
            color = "Undefined"
            if hue_value < 5 or hue_value > 170:
                color = "Red"
            elif 5 <= hue_value < 22:
                color = "Orange"
            elif 22 <= hue_value < 33:
                color = "Yellow"
            elif 33 <= hue_value < 78:
                color = "Green"
            elif 78 <= hue_value < 131:
                color = "Blue"
            elif 131 <= hue_value < 170:
                color = "Violet"
        unique_id = str(uuid.uuid4())
        unique_filename = str(uuid.uuid4()) + '.jpg'
        image_url = upload_image_to_storage(filenya, unique_filename)
        response = {
            'string_label': prediction,
            'imageId': unique_id,
            # 'Polo' has no integer label; fall back to -1 instead of
            # crashing with a KeyError the old bare `except` swallowed.
            'integer_label': label_mapping.get(prediction, -1),
            'string_color': color,
            # 'Undefined' is likewise unmapped; -1 marks "unknown colour".
            'integer_color': color_mapping.get(color, -1),
            'imageUrl': image_url
        }
        doc_ref = db.collection('clothing').document(unique_id)
        doc_ref.set(response)
        return jsonify(response)
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit /
        # KeyboardInterrupt); legacy response body preserved for clients.
        return "Invalid Image"
@app.route('/recommendation', methods=['GET'])
def recommendation():
    """Return three randomly selected clothing image URLs."""
    urls = get_random_image(3)
    return jsonify({'image_urls': urls})
if __name__ == '__main__':
app.run(debug=True)
| nzwhd/wearit_cloud | app.py | app.py | py | 4,354 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "firebase_admin.initialize_app",
"line_number": 18,
"usage_type": "call"... |
1765068678 | import numpy as np
import matplotlib.pyplot as plt
from fact.io import read_h5py
import pandas as pd
import plotting
import click
import matplotlib
if matplotlib.get_backend() == 'pgf':
from matplotlib.backends.backend_pgf import PdfPages
else:
from matplotlib.backends.backend_pdf import PdfPages
# DL2 event columns required by the theta^2 / wobble-position analysis.
columns = [
    'source_x_prediction',
    'source_y_prediction',
    'dragon_time',
    'gammaness',
    'concentration_cog',
    'focal_length',
    'alt_tel',
    'az_tel'
]
@click.command()
@click.argument('outdir', type=click.Path(exists=True, dir_okay=True))
@click.argument('output', type=click.Path(exists=False, dir_okay=False))
def main(outdir, output):
    """Build theta^2 plots for the Mrk 421 runs in OUTDIR and save them to OUTPUT (PDF)."""
    runs = [
        f'{outdir}/dl2_v0.5.1_LST-1.Run02113.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02114.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02115.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02116.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02117.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02130.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02131.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02132.h5',
        f'{outdir}/dl2_v0.5.1_LST-1.Run02133.h5'
    ]
    # Read each run file exactly once (the original read every file twice and
    # called pd.concat inside the loop, which is quadratic). The per-run
    # frames are kept for the currently disabled per-run 2D plot.
    df_runs = [read_h5py(run, key='events', columns=columns) for run in runs]
    df = pd.concat(df_runs, ignore_index=True)

    figures = []
    theta2_cut = 0.04
    gammaness_threshold = 0.6

    figures.append(plt.figure())
    ax = figures[-1].add_subplot(1, 1, 1)
    plotting.theta2(df, theta2_cut, gammaness_threshold, df, ax=ax, coord='mrk 421', n_offs=3)
    ax.set_title('Mrk 421 coordinates, n_offs = 3')

    figures.append(plt.figure())
    ax = figures[-1].add_subplot(1, 1, 1)
    plotting.theta2(df, theta2_cut, gammaness_threshold, df, ax=ax, coord='mrk 421', n_offs=5)
    #ax.set_title('Mrk 421 coordinates, n_offs = 5')

    #figures.append(plt.figure())
    #ax = figures[-1].add_subplot(1, 1, 1)
    #plotting.theta2(df, theta2_cut, gammaness_threshold, ax=ax, range=None)
    #ax.set_title('Mrk 421 camera center')

    #mrk 421 coordinates
    #figures.append(plt.figure())
    #ax = figures[-1].add_subplot(1, 1, 1)
    #plotting.plot2D_runs(df_runs, runs, 'mrk 421', gammaness_threshold, ax)

    # Save every figure into a single multi-page PDF.
    with PdfPages(output) as pdf:
        for fig in figures:
            fig.tight_layout()
            pdf.savefig(fig)
if __name__ == '__main__':
main() | LukasBeiske/bachelor_thesis_cta | mrk421.py | mrk421.py | py | 2,591 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.get_backend",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "fact.io.read_h5p... |
36916000678 | import sys
import random
import osmnx as ox
from time import sleep
from direct.gui.DirectGui import DirectButton, DirectFrame
from direct.showbase.ShowBase import ShowBase
from direct.showbase.ShowBase import ShowBase
from panda3d.core import Geom, GeomNode, GeomVertexFormat, GeomVertexData, GeomTriangles, GeomLines, GeomVertexWriter
from panda3d.core import AmbientLight, DirectionalLight, PointLight, Vec4
from panda3d.core import LVector3, LVector4
from panda3d.core import Texture, PNMImage, LColor
from panda3d.core import TransparencyAttrib
from panda3d.core import LODNode
from panda3d.core import CardMaker
from panda3d.core import loadPrcFileData
from panda3d.core import TextNode
from panda3d.core import CollisionRay, CollisionNode, CollisionTraverser, CollisionHandlerQueue
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from shapely.geometry import Polygon
from shapely.ops import triangulate
from manipulator import Manipulator
def do_geocode(geolocator, address, attempt=1, max_attempts=5):
    """Geocode `address`, retrying up to `max_attempts` times on timeout.

    Sleeps one second between attempts; re-raises GeocoderTimedOut once
    the retry budget is exhausted.
    """
    while True:
        try:
            return geolocator.geocode(address)
        except GeocoderTimedOut:
            if attempt > max_attempts:
                raise
            sleep(1)  # brief back-off before the next try
            attempt += 1
class MyApp(ShowBase):
    """Panda3D viewer that renders OSM buildings, roads and water around
    Santa Monica Pier as simple extruded/flat geometry."""

    def __init__(self):
        # Window configuration must be set before ShowBase opens the window.
        loadPrcFileData("", "fullscreen true")
        loadPrcFileData("", "win-size 1920 1080")  # Set the resolution
        ShowBase.__init__(self)
        base.setFrameRateMeter(True)
        self.manipulator = Manipulator(self)
        # Left click picks an object under the cursor.
        self.accept('mouse1', self.show_mouse_position)
        # Set the background color to black
        self.setBackgroundColor(0, 0, 0)
        self.exit_button = DirectButton(text=("Exit", "Exit", "Exit", "Exit"),
                                        scale=.05,
                                        command=sys.exit,
                                        pos=(-1.2, 0, 0.9),
                                        text_align=TextNode.ALeft,
                                        frameColor=(0, 0, 0, 0),
                                        text_fg=(1, 0, 0, 1),
                                        text_shadow=(0, 0, 0, 1))
        # Get building and road data
        place_name = "Santa Monica Pier, California, USA"  # updated location
        geolocator = Nominatim(user_agent="your_app_name", timeout=10)
        location = do_geocode(geolocator, place_name)
        if location:
            point = (location.latitude, location.longitude)
            tags = {"building": True}
            osm_data = ox.features_from_point(point, tags=tags, dist=100)  # dist=1000
            road_data = ox.graph_from_point(point, dist=250, network_type='all')
            water_tags = {"natural": ["water", "coastline"], "water": ["sea", "ocean", "lake"]}
            water_data = ox.features_from_point(point, tags=water_tags, dist=10000)
            print(f"Number of water bodies fetched: {len(water_data)}")
        else:
            # NOTE(review): if geocoding fails, osm_data/road_data/water_data
            # stay undefined and the loops below raise NameError.
            print("Error: Location not found")
        # Create building models and add them to the scene
        for _, building in osm_data.iterrows():
            try:
                geometry = building['geometry']
                if geometry.geom_type == 'Polygon':
                    self.create_building(geometry, location)
            except Exception as e:
                print(f"Error drawing building: {e}")
        # Create road models and add them to the scene
        # NOTE(review): the loop variable rebinds `road_data` (the graph) to
        # each edge row after the first iteration of graph_to_gdfs().
        for _, road_data in ox.graph_to_gdfs(road_data, nodes=False).iterrows():
            try:
                geometry = road_data['geometry']
                if geometry.geom_type == 'LineString':
                    self.create_road(geometry, location)
            except Exception as e:
                print(f"Error drawing road: {e}")
        # Create water models and add them to the scene
        for _, water in water_data.iterrows():
            try:
                geometry = water['geometry']
                if geometry.geom_type == 'Polygon':
                    self.create_water_body(geometry, location)
            except Exception as e:
                print(f"Error drawing water body: {e}")
        # Set up the camera
        self.camera.set_pos(0, -50, 50)
        self.camera.set_hpr(0, -30, 0)
        # Set up lighting
        ambient_light = AmbientLight("ambientLight")
        ambient_light.setColor(LVector4(1, 1, 1, 1))
        self.render.setLight(self.render.attachNewNode(ambient_light))
        directional_light = DirectionalLight("directionalLight")
        directional_light.setDirection(LVector3(0, 8, -2.5))
        directional_light.setColor(LVector4(1, 1, 1, 1))
        self.render.setLight(self.render.attachNewNode(directional_light))

    def show_mouse_position(self):
        """Print the cursor position and pick the scene object under it via a collision ray."""
        # Check if the mouse is within the window
        if self.mouseWatcherNode.hasMouse():
            # Get the mouse position
            mouse_position = self.mouseWatcherNode.getMouse()
            # Convert the mouse position to world coordinates
            mouse_x = mouse_position.getX() * self.win.getXSize() / 2
            mouse_y = mouse_position.getY() * self.win.getYSize() / 2
            print(f"Mouse position: x={mouse_x}, y={mouse_y}")
            # Create a collision ray that starts at the camera and goes through the mouse position
            picker_ray = CollisionRay()
            picker_ray.setFromLens(self.camNode, mouse_position.getX(), mouse_position.getY())
            # Create a collision node to hold the ray, and attach it to the camera
            picker_node = CollisionNode('picker')
            picker_node.addSolid(picker_ray)
            picker_node.setFromCollideMask(GeomNode.getDefaultCollideMask())
            picker_node_path = self.camera.attachNewNode(picker_node)
            # The collision traverser will check for collisions between the ray and the scene
            traverser = CollisionTraverser()
            queue = CollisionHandlerQueue()
            # Add the collision node to the traverser
            traverser.addCollider(picker_node_path, queue)
            # Check for collisions
            traverser.traverse(self.render)
            if queue.getNumEntries() > 0:
                # Sort the collision entries by distance from the camera
                queue.sortEntries()
                # Get the first collision entry
                entry = queue.getEntry(0)
                # Get the node that the ray collided with
                collided_node = entry.getIntoNodePath().findNetTag('type')
                if not collided_node.isEmpty():
                    print(f"Selected object: {collided_node.getName()}")
                    self.manipulator.select_object(collided_node)
                else:
                    print("No object selected")

    def create_building(self, polygon, location):
        """Extrude `polygon` into walls plus a triangulated roof and add it to the scene.

        Coordinates are shifted relative to `location` and scaled by 1e5 to
        convert lon/lat degrees into scene units.
        """
        format = GeomVertexFormat.getV3()
        vdata = GeomVertexData('vertices', format, Geom.UHStatic)
        # Vertices for the building walls: for every footprint point, one
        # vertex at ground level and one at the (fixed) roof height.
        vdata.setNumRows(len(polygon.exterior.coords) * 2)
        vertex = GeomVertexWriter(vdata, 'vertex')
        for x, y in polygon.exterior.coords[:-1]:
            x, y = x - location.longitude, y - location.latitude
            vertex.addData3(x * 100000, y * 100000, 0)
            vertex.addData3(x * 100000, y * 100000, 10 * 10)
        prim = GeomTriangles(Geom.UHStatic)
        # Two triangles per wall segment; modulo wraps to the first pair.
        for i in range(0, len(polygon.exterior.coords) - 1):
            prim.addVertices(i * 2, i * 2 + 1, (i * 2 + 2) % (len(polygon.exterior.coords) * 2 - 2))
            prim.addVertices(i * 2 + 1, (i * 2 + 3) % (len(polygon.exterior.coords) * 2 - 2), (i * 2 + 2) % (len(polygon.exterior.coords) * 2 - 2))
        geom = Geom(vdata)
        geom.addPrimitive(prim)
        node = GeomNode('building')
        node.addGeom(geom)
        # Create the roof
        roof_coords = [(x - location.longitude, y - location.latitude) for x, y in polygon.exterior.coords[:-1]]
        roof_polygon = Polygon(roof_coords)
        triangles = triangulate(roof_polygon)
        # Vertices and triangles for the roof
        vdata_roof = GeomVertexData('vertices', format, Geom.UHStatic)
        vertex_roof = GeomVertexWriter(vdata_roof, 'vertex')
        prim_roof = GeomTriangles(Geom.UHStatic)
        for triangle in triangles:
            for x, y in triangle.exterior.coords[:-1]:
                vertex_roof.addData3(x * 100000, y * 100000, 10 * 10)
            start = vdata_roof.getNumRows() - 3
            prim_roof.addVertices(start, start + 1, start + 2)
        geom_roof = Geom(vdata_roof)
        geom_roof.addPrimitive(prim_roof)
        node_roof = GeomNode('roof')
        node_roof.addGeom(geom_roof)
        # Create a random blue color
        r, g, b = random.uniform(0, 0.5), random.uniform(0.5, 1), random.uniform(0.5, 1)
        # Create a semi-transparent texture
        image = PNMImage(1, 1)
        image.setXelA(0, 0, LColor(r, g, b, 0.5))  # set the color to random blue and alpha to 0.5
        texture = Texture()
        texture.load(image)
        # Set building color to random blue
        building_node = self.render.attachNewNode(node)
        building_node.setTag('type', 'building')
        building_node.setColor(r, g, b, 1)
        building_node.setTexture(texture, 1)
        building_node.setTransparency(TransparencyAttrib.MAlpha)
        # Set roof color to random blue
        roof_node = self.render.attachNewNode(node_roof)
        roof_node.setTag('type', 'roof')
        roof_node.setColor(r, g, b, 1)
        roof_node.setTexture(texture, 1)
        roof_node.setTransparency(TransparencyAttrib.MAlpha)
        print("Building node created and colored")

    def create_road(self, line, location):
        """Draw `line` (a LineString) as a thin white polyline slightly above ground."""
        format = GeomVertexFormat.getV3()
        vdata = GeomVertexData('vertices', format, Geom.UHStatic)
        vdata.setNumRows(len(line.coords))
        vertex = GeomVertexWriter(vdata, 'vertex')
        for x, y in line.coords:
            # Convert coordinates to a local coordinate system
            x, y = x - location.longitude, y - location.latitude
            # z=0.1 keeps roads from z-fighting with ground-level geometry.
            vertex.addData3(x * 100000, y * 100000, 0.1)
        prim = GeomLines(Geom.UHStatic)
        for i in range(len(line.coords) - 1):
            prim.addVertices(i, i + 1)
        geom = Geom(vdata)
        geom.addPrimitive(prim)
        node = GeomNode('road')
        node.addGeom(geom)
        # Set road color to white
        road_node = self.render.attachNewNode(node)
        road_node.setTag('type', 'road')
        road_node.setColor(1, 1, 1, 1)
        # Set the road width
        road_node.setRenderModeThickness(2)  # 2 is fine
    def create_water_body(self, polygon, location):
        """Render a (Multi)Polygon water body as flat semi-transparent blue geometry."""
        format = GeomVertexFormat.getV3()  # Format with just the position
        vdata = GeomVertexData('water', format, Geom.UHStatic)
        vertex = GeomVertexWriter(vdata, 'vertex')
        prim = GeomTriangles(Geom.UHStatic)
        if polygon.geom_type == 'Polygon':
            polygons = [polygon]
        elif polygon.geom_type == 'MultiPolygon':
            # NOTE(review): iterating a MultiPolygon directly was removed in
            # shapely 2.0 — confirm the installed version supports list(polygon).
            polygons = list(polygon)
        else:
            return
        for polygon in polygons:
            # Vertices for the water body
            for i, (x, y) in enumerate(polygon.exterior.coords[:-1]):
                x, y = x - location.longitude, y - location.latitude
                vertex.addData3(x * 100000, y * 100000, 0)
            # Triangles for the water body (simple fan over consecutive vertices)
            for i in range(len(polygon.exterior.coords) - 3):
                prim.addVertices(i, i + 1, i + 2)
        geom = Geom(vdata)
        geom.addPrimitive(prim)
        node = GeomNode('water')
        node.addGeom(geom)
        # Add the water body to the scene
        water_node = self.render.attachNewNode(node)
        water_node.setTag('type', 'water')
        water_node.setColor(0, 0, 1, 0.5)  # Set water color to semi-transparent blue
        water_node.setTransparency(TransparencyAttrib.MAlpha)  # Enable transparency
        print("Water body created")
app = MyApp()
app.run() | stressatoo/OpenPandaMap | main.py | main.py | py | 12,224 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "geopy.exc.GeocoderTimedOut",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "direct.showbase.ShowBase.ShowBase",
"line_number": 40,
"usage_type": "name"
},
{
"api_name"... |
26721591942 | import scrapy
from scrapy.http import Request, Response
from xdvideo.items import XdvideoItem
class CrawlSpider(scrapy.Spider):
    """Scrapy spider for lecture-video listing pages on sme.xidian.edu.cn.

    Walks `pages` listing pages built from the `url` template, follows each
    video detail page and yields one XdvideoItem per video with a global
    episode number.
    """
    name = 'xdvideo'
    allowed_domains = ['sme.xidian.edu.cn']
    # Listing pages show 15 videos each; used to derive the episode number.
    VIDEOS_PER_PAGE = 15
    # start_urls = [f'https://sme.xidian.edu.cn/html/bkjj/zxkt/bdtwl1/list_92_{i}.html' for i in range(1, 4)]

    def __init__(self, url="https://sme.xidian.edu.cn/html/bkjj/zxkt/bdtwl1/list_92_{}.html", pages=3, *args, **kwargs):
        # `url` must contain a '{}' placeholder for the 1-based page number.
        super().__init__(*args, **kwargs)
        self.url = url
        self.pages = int(pages)

    def start_requests(self):
        """Yield one request per listing page, tagging each with its page number."""
        for i in range(1, self.pages + 1):
            yield Request(self.url.format(i), meta={"page": i})

    def parse(self, response: Response):
        """Follow every video link on a listing page; meta 'n' is the running episode index."""
        XPATH_URL = "//body//div[@class='childinfo']//div//div[*]//a[1]/@href"
        urls = response.xpath(XPATH_URL).getall()
        for i in range(len(urls)):
            yield response.follow(urls[i], callback=self.parse_detail,
                meta = {"n": (response.meta["page"] - 1) * self.VIDEOS_PER_PAGE + i + 1})

    def parse_detail(self, response: Response):
        """Extract title, course name and the absolute video URL from a detail page."""
        XPATH_TITLE = "//div[@class='text']//h4[1]/text()"
        XPATH_COURSE = "//div[@class='childtitle']//p/text()"
        XPATH_VIDEO = "//video/@src"
        title = response.xpath(XPATH_TITLE).get()
        course = response.xpath(XPATH_COURSE).get()
        video_url = response.urljoin(response.xpath(XPATH_VIDEO).get())
        return XdvideoItem(title=title, course=course, file_urls=[video_url], episode=response.meta["n"])
| ttimasdf/XIDIAN-SME-OCW-Crawler | xdvideo/xdvideo/spiders/crawl.py | crawl.py | py | 1,554 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scrapy.http.Request",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scrapy.http.Response",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "scrapy.htt... |
40507960364 | from django import template
from apps.carts.models import *
from apps.products.models import Category
register = template.Library()
@register.simple_tag(takes_context=True)
def get_user_cart(context):
    """Template tag: return the current user's open (un-ordered) cart, or [] if none."""
    user = context['request'].user
    try:
        return Cart.objects.get(client=user, is_ordered=False)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt). Covers Cart.DoesNotExist and anonymous users.
        return []
@register.simple_tag(takes_context=True)
def get_user_wishlist(context):
    """Template tag: return the product ids on the current user's wishlist."""
    user = context['request'].user
    try:
        wl = Wishlist.objects.filter(user=user)
    except Exception:
        # Narrowed from a bare `except:`; e.g. anonymous user lookups.
        wl = []
    return [entry.product.id for entry in wl]
@register.simple_tag()
def categories():
    """Template tag: return all product categories, or None if the query fails."""
    try:
        return Category.objects.all()
    except Exception:
        # Narrowed from a bare `except:`; preserves the None fallback.
        return None
| MansurSobirjonov/ogani | apps/products/templatetags/cart_tag.py | cart_tag.py | py | 817 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "apps.products.models.Category.objects.all",
"line_number": 34,
"usage_type": "call"
},
{
"a... |
36060935686 | import requests
url = "https://ngl.link/api/submit"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "https://ngl.link",
"Referer": "https://ngl.link/z7hejan"
}
data = {
"username": "TARGET HERE",
"question": "UR QUESTION HERE",
"deviceId": "cfd278d9-b21d-444d-8a6f-6e7494f84bf8",
"gameSlug": "",
"referrer": ""
}
# amount of the spamms
num_requests = 100
for i in range(num_requests):
response = requests.post(url, headers=headers, data=data)
print(f"Request {i+1}: {response.status_code}")
| zxrby/NGL.Link-Spammer | ok.py | ok.py | py | 754 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 25,
"usage_type": "call"
}
] |
3182784604 | from datetime import datetime
from pyquery import PyQuery as pquery
import urllib3
urllib3.disable_warnings()
import webbrowser
def timestamp():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
def main():
    """Walk the Webb Telescope image gallery page by page, opening the best
    available download (full-res, non-TIFF preferred) in the browser."""
    active = 'https://webbtelescope.org/contents/media/images/2022/028/01G77Q8BTTSEB7ZSB2ZEY49HSQ'
    print(f'{timestamp()} Starting...')
    count=0
    while (active):
        py = pquery(active, verify=False)
        # First pagination <li> holds the current page number.
        page_num = int([i.text() for i in py.find("div.controls").parent().find('li').items()][0])
        span = py.find("span")
        spans = [i.text() for i in span.items()]
        try:
            next_idx = spans.index("Next")
            # NOTE(review): `next` shadows the builtin; if "Next" is missing
            # on a page other than the last, `next` keeps its previous value.
            next = 'https://webbtelescope.org' + [i.attr("href") for i in span.eq(next_idx).parent().items()][0]
        except ValueError:
            if page_num == 247:
                # 247 is the expected final page; no "Next" link there.
                pass
            else:
                print(active)
                print(page_num)
                print(timestamp())
                raise
        downloads = py.find("div.media-library-links-list").find('a')
        imgs_text = [i.text() for i in downloads.items()]
        imgs_links = [i.attr('href') for i in downloads.items()]
        full_res_str = [i for i in imgs_text if i.startswith('Full') and 'TIF' not in i] # Full Res first
        if not full_res_str:
            full_res_str = [i for i in imgs_text if 'TIF' not in i] # Png next or first jpg
        if not full_res_str:
            full_res_str = [imgs_text[0]] # Whatever is left over
        full_res_idx = imgs_text.index(full_res_str[0])
        # Gallery links are protocol-relative ("//..."); prepend the scheme.
        img_link = 'https:' + imgs_links[full_res_idx]
        webbrowser.open(img_link)
        count+=1
        if page_num < 247:
            active = next
        else:
            break
    print(f'{timestamp()} Done...')
if __name__ == "__main__":
main() | bmikolaj/WebbScrap | main.py | main.py | py | 1,853 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyque... |
15287690449 | from __future__ import print_function
import bisect
########################################################################
# official recepi from doc.python.org #
# https://docs.python.org/2/library/bisect.html#searching-sorted-lists #
########################################################################
def find_index(array, x):
    "Locate the leftmost value exactly equal to x"
    pos = bisect.bisect_left(array, x)
    if pos == len(array) or array[pos] != x:
        raise ValueError
    return pos
def find_lt(array, x):
    "Find rightmost value less than x"
    pos = bisect.bisect_left(array, x)
    if not pos:
        raise ValueError
    return array[pos - 1]
def find_le(array, x):
    "Find rightmost value less than or equal to x"
    pos = bisect.bisect_right(array, x)
    if not pos:
        raise ValueError
    return array[pos - 1]
def find_gt(array, x):
    "Find leftmost value greater than x"
    pos = bisect.bisect_right(array, x)
    if pos == len(array):
        raise ValueError
    return array[pos]
def find_ge(array, x):
    "Find leftmost item greater than or equal to x"
    pos = bisect.bisect_left(array, x)
    if pos == len(array):
        raise ValueError
    return array[pos]
def find_last_true(sorted_list, true_criterion):
    """Return the last item of ``sorted_list`` satisfying ``true_criterion``.

    Assumes the mapped truth values form the monotone pattern
    [True, True, ..., True, False, ..., False]; this routine locates the
    final True item with only O(log n) evaluations of ``true_criterion``,
    which pays off when the criterion itself is expensive.

    Example:
        index       [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        truth table [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
        -> the item at index 6 is returned.

    Algorithm: probe a middle index. If it tests False, lower the upper
    bound. If it tests True, check the next item: when that one is False
    the boundary has been found; otherwise raise the lower bound and probe
    again. Raises ValueError if even the first item fails the criterion.
    """
    # exam first item, if not true, then impossible to find result
    if not true_criterion(sorted_list[0]):
        raise ValueError
    # exam last item, if true, it is the one.
    if true_criterion(sorted_list[-1]):
        return sorted_list[-1]
    lower, upper = 0, len(sorted_list) - 1
    index = int((lower+upper)/2.0)
    while 1:
        if true_criterion(sorted_list[index]):
            if true_criterion(sorted_list[index+1]):
                lower = index
                index = int((index+upper)/2.0)
            else:
                return sorted_list[index]
        else:
            upper = index
            index = int((lower+index)/2.0)
def find_nearest(array, x):
    """find the nearest item of x from sorted array
    """
    if x <= array[0]:
        return array[0]
    if x >= array[-1]:
        return array[-1]
    # x lies strictly inside the array: compare its two neighbours.
    below = find_le(array, x)
    above = find_ge(array, x)
    return above if (x - below) > (above - x) else below
if __name__ == "__main__":
    from collections import OrderedDict
    import unittest
    import random
    import time

    class BiSearch():
        """A binary search class, doesn't have better performance than original implementation
        """
        def fit(self, array):
            self.train_dict = OrderedDict()
            for ind, value in enumerate(array):
                self.train_dict[ind] = value
            self.train_array = array

        def find_le(self, x):
            "Find rightmost value less than or equal to x"
            # NOTE(review): boundary check differs from the module-level
            # find_le (`if i:`) — x >= last element incorrectly raises here.
            i = bisect.bisect_right(self.train_array, x)
            if i != len(self.train_array):
                return self.train_dict[i-1]
            raise ValueError

        def find_ge(self, x):
            "Find leftmost item greater than or equal to x"
            i = bisect.bisect_left(self.train_array, x)
            if i != len(self.train_array):
                return self.train_dict[i]
            raise ValueError

    class FunctionsUnittest(unittest.TestCase):
        def setUp(self):
            self.sorted_array = list(range(1000))

        def test_index(self):
            self.assertEqual(find_index(self.sorted_array, 0), 0)
            self.assertEqual(find_index(self.sorted_array, 999), 999)
            self.assertEqual(find_index(self.sorted_array, 499), 499)
            self.assertRaises(ValueError, find_index, self.sorted_array, -1)
            self.assertRaises(ValueError, find_index, self.sorted_array, 1001)

        def test_find_nearest(self):
            self.assertEqual(find_nearest(self.sorted_array, 25), 25)
            self.assertEqual(find_nearest(self.sorted_array, 25.49), 25)
            self.assertEqual(find_nearest(self.sorted_array, 25.5), 25)
            self.assertEqual(find_nearest(self.sorted_array, 25.51), 26)
            self.assertEqual(find_nearest(self.sorted_array, -1), 0)
            self.assertEqual(find_nearest(self.sorted_array, 1000), 999)

    class PerformanceTest(unittest.TestCase):
        def setUp(self):
            self.sorted_array = list(range(1000*1000))
            self.bisearch = BiSearch()
            self.bisearch.fit(self.sorted_array)

        def test_speed(self):
            """because original recepi use list[index] to take item. I thought the speed can be
            improved if I use dict[index]. But failed.
            """
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # recommended replacement for interval timing.
            st = time.perf_counter()
            for _ in range(1000):
                find_le(self.sorted_array, 500*1000)
            original = time.perf_counter() - st

            st = time.perf_counter()
            for _ in range(1000):
                self.bisearch.find_le(500*1000)
            improved = time.perf_counter() - st
            self.assertFalse(improved < original)  # improved elapse not smaller than original

    class LastTrueTest(unittest.TestCase):
        def setUp(self):
            self.sorted_list = list({random.randint(1, 100000) for _ in range(1000)})
            self.sorted_list.sort()

        def true_criterion(self, item):
            return item <= 500

        def test(self):
            value = find_last_true(self.sorted_list, self.true_criterion)
            print("last True value is %s" % value)

    unittest.main()
| MacHu-GWU/Angora | angora/DATA/binarysearch.py | binarysearch.py | py | 6,977 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bisect.bisect_left",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bisect.bisect_left",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bisect.bisect_right",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "bisect.bise... |
28224928026 | from django.urls import path
from . import views
# URL routes for the lyric-library API: data endpoints first, then the
# session/auth endpoints used by the front end.
urlpatterns = [
    # API routes
    path('', views.index, name='index'),
    path('library', views.library, name='library'),
    path('library/<int:song_id>', views.song, name='song'),
    path('setlists', views.setlists, name='setlists'),
    path('setlists/<int:id>', views.setlist, name='setlist'),
    path('search', views.search_genius, name='search'),
    path('search/<int:id>', views.search_genius_by_id, name='search_by_id'),
    path('profile/<int:userId>', views.profile_view, name='profile'),
    # Auth routes
    path('session', views.session_view, name='session'),
    path('csrf', views.get_csrf_token, name='csrf'),
    path('register', views.register, name='register'),
    path('login', views.login_view, name='login'),
    path('logout', views.logout_view, name='logout'),
]
| alexboneham/lyric-library | backend/apis/urls.py | urls.py | py | 847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
19197360818 | import cv2, os, random, colorsys, onnxruntime, string, time, argparse, uuid, logging
import numpy as np
from utils import Processing
from glob import glob
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser("license")
parser.add_argument('-i',"--input", type = str, required = True, default = False, help = "path image ...")
logging.basicConfig(filename=f'log/ocr.log', filemode='w', format='%(asctime)s - %(message)s', level = logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
class Detection(Processing):
    """ONNX-runtime wrapper around a YOLOv4 licence-plate detector.

    Loads the model and class list once; calling the instance with an image
    path runs inference and returns the image with boxes/labels drawn on.
    """

    def __init__(self, path_model:str, path_classes:str, image_shape:list, padding:int):
        # NOTE(review): `padding` is accepted but never stored or used —
        # confirm whether it can be removed from the constructor.
        self.path_model = path_model
        self.path_classes = path_classes
        self.session = onnxruntime.InferenceSession(self.path_model)
        self.class_labels, self.num_names = self.get_classes(self.path_classes)
        self.image_shape = image_shape
        self.font = ImageFont.truetype('weights/font.otf', 8)
        self.class_colors = self.colors(self.class_labels)

    def boxes_detection(self, image, size):
        """Run the ONNX session; returns (boxes, scores, class indices)."""
        # The model takes two inputs: the preprocessed image tensor and the
        # original image size (used for box rescaling inside the graph).
        ort_inputs = {self.session.get_inputs()[0].name:image, self.session.get_inputs()[1].name:size}
        box_out, scores_out, classes_out = self.session.run(None, ort_inputs)
        return box_out, scores_out, classes_out

    def draw_detection(self, image, boxes_out, scores_out, classes_out):
        """Draw each detection (box + "<class>: <score>%" label) onto a copy of `image`."""
        image_pred = image.copy()
        for i, c in reversed(list(enumerate(classes_out))):
            draw = ImageDraw.Draw(image_pred)
            predicted_class = self.class_labels[c]
            box = boxes_out[i]
            score = scores_out[i]
            top, left, bottom, right = box
            # Clamp box corners to the image bounds.
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            label = '{}: {:.2f}%'.format(predicted_class, score*100)
            print(label)
            logging.info(f'{label}')
            label_size = draw.textsize(label, self.font)
            # Place the label above the box when there is room, else inside it.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            draw.rectangle([left, top, right, bottom], outline= tuple(self.class_colors[c]), width=1)
            draw.text(text_origin, label, fill = (255,255,0), font = self.font)
            del draw
        return np.array(image_pred)

    def __call__(self, input_image:str):
        """Detect objects in the image at `input_image`; return the annotated image as an array."""
        image = Image.open(input_image)
        # (height, width) of the original image, batched for the ONNX input.
        input_image_shape = np.expand_dims(np.array([image.size[1], image.size[0]], dtype='float32'), 0)
        image = self.cvtColor(image)
        image_data = self.resize_image(image, (self.image_shape[1], self.image_shape[0]))
        image_data = np.expand_dims(self.preprocess_input(np.array(image_data, dtype='float32')), 0)
        box_out, scores_out, classes_out = self.boxes_detection(image_data,input_image_shape)
        image_pred = self.draw_detection(image, box_out, scores_out, classes_out)
        return image_pred
if __name__ == '__main__':
    args = parser.parse_args()
    opt = {"path_model":"weights/yolo4.onnx","path_classes":"classes.txt","image_shape":[416,416],"padding":0}
    detector = Detection(**opt)
    image_pred = detector(args.input)
    # NOTE(review): image_pred comes from a PIL (RGB) image, so this
    # conversion swaps channels to BGR for cv2.imwrite despite the
    # COLOR_BGR2RGB constant name — confirm intended channel order.
    image = cv2.cvtColor(image_pred, cv2.COLOR_BGR2RGB)
    cv2.imwrite("out.jpg", image)
| Kurmangozhin/plates-recognition-yolo4 | module.py | module.py | py | 3,557 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "utils.Pr... |
35671367197 | from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from .models import Product, Inventory, OrderLine
from import_export import fields
from import_export.widgets import ForeignKeyWidget
class ProductResource(resources.ModelResource):
    """Import/export resource for Product; rows are matched on product_code."""
    class Meta:
        model = Product
        # Use the business key instead of the auto pk when importing.
        import_id_fields = ['product_code']
@admin.register(Product)
class ProductAdmin(ImportExportModelAdmin):
    """Admin page for Product with CSV/XLSX import-export support."""
    resource_class = ProductResource
    import_export_options = {'update': True}
class InventoryResource(resources.ModelResource):
    """Import/export resource for Inventory; the 'product_code' column is
    resolved to a Product foreign key."""
    product = fields.Field(
        column_name='product_code',
        attribute='product',
        widget=ForeignKeyWidget(Product, 'product_code')
    )
    class Meta:
        model = Inventory
        fields = ('id', 'product', 'current_stock')
@admin.register(Inventory)
class InventoryAdmin(ImportExportModelAdmin):
    """Admin with import-export support for inventory records."""
    resource_class = InventoryResource
class OrderLineResource(resources.ModelResource):
    """Import/export resource for OrderLine rows; the `product` FK is
    resolved from the `product_code` column (same pattern as Inventory)."""
    product = fields.Field(
        column_name='product_code',
        attribute='product',
        widget=ForeignKeyWidget(Product, 'product_code')
    )
    class Meta:
        model = OrderLine
        fields = ('id', 'product', 'reorder_point')
@admin.register(OrderLine)
class OrderLineAdmin(ImportExportModelAdmin):
    """Admin with import-export support for order lines."""
    resource_class = OrderLineResource
| HirokiShimoi/investoru_app | myapp/admin.py | admin.py | py | 1,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "import_export.resources.ModelResource",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "import_export.resources",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.Product",
"line_number": 11,
"usage_type": "name"
},
{
"a... |
14551216933 | from collections import defaultdict
from pathlib import Path
from typing import DefaultDict, List
from jubeatools import song
from jubeatools.formats.dump_tools import make_dumper_from_chart_file_dumper
from jubeatools.formats.filetypes import ChartFile
from jubeatools.utils import group_by
from .. import commons as konami
from ..commons import AnyNote
from ..dump_tools import make_events_from_chart
from . import construct
def _dump_jbsq(song: song.Song, **kwargs: dict) -> List[ChartFile]:
    """Serialize every chart of *song* to jbsq bytes, one ChartFile per chart.

    NOTE(review): the parameter shadows the module-level ``song`` import;
    the annotation still resolves because it is evaluated at def time,
    before the shadowing takes effect.
    """
    res = []
    for dif, chart, timing, hakus in song.iter_charts():
        # Flatten notes + timing + hakus into the konami event stream.
        events = make_events_from_chart(chart.notes, timing, hakus)
        jbsq_chart = make_jbsq_chart(events, chart.notes)
        chart_bytes = construct.jbsq.build(jbsq_chart)
        res.append(ChartFile(chart_bytes, song, dif, chart))
    return res
# Public dumper: wraps the per-chart dumper so each chart is written to a
# file named seq_<difficulty>.jbsq (lowercased difficulty).
dump_jbsq = make_dumper_from_chart_file_dumper(
    internal_dumper=_dump_jbsq, file_name_template=Path("seq_{difficulty:l}.jbsq")
)
def make_jbsq_chart(events: List[konami.Event], notes: List[AnyNote]) -> construct.JBSQ:
    """Assemble the binary JBSQ chart structure from timed events and notes."""
    jbsq_events = [convert_event_to_jbsq(e) for e in events]
    num_events = len(events)
    combo = compute_max_combo(notes)
    # Chart length is the timestamp of the END marker event.
    end_time = next(e for e in events if e.command == konami.Command.END).time
    first_note_time_in_beats = min((n.time for n in notes), default=0)
    starting_notes = [n for n in notes if n.time == first_note_time_in_beats]
    # Bitmask of panel positions hit by the very first note(s).
    starting_buttons = sum(1 << n.position.index for n in starting_notes)
    # Earliest tap or hold event, in ticks (0 when the chart has none).
    first_note_time = min(
        (
            e.time
            for e in events
            if e.command in (konami.Command.PLAY, konami.Command.LONG)
        ),
        default=0,
    )
    densities = compute_density_graph(events, end_time)
    jbsq_chart = construct.JBSQ(
        num_events=num_events,
        combo=combo,
        end_time=end_time,
        starting_buttons=starting_buttons,
        start_time=first_note_time,
        density_graph=densities,
        events=jbsq_events,
    )
    # File magic identifying the format.
    jbsq_chart.magic = b"JBSQ"
    return jbsq_chart
def convert_event_to_jbsq(event: konami.Event) -> construct.Event:
    """Translate an internal Event into its binary (construct) counterpart;
    command names map 1:1 onto the EventType enum."""
    return construct.Event(
        type_=construct.EventType[event.command.name],
        time_in_ticks=event.time,
        value=event.value,
    )
def compute_max_combo(notes: List[AnyNote]) -> int:
    """Maximum combo: tap notes count once, long notes twice (press + release)."""
    notes_by_type = group_by(notes, type)
    tap_notes = len(notes_by_type[song.TapNote])
    long_notes = len(notes_by_type[song.LongNote])
    return tap_notes + 2 * long_notes
def compute_density_graph(events: List[konami.Event], end_time: int) -> List[int]:
    """Bucket note events into 120 time slices and pack the per-slice counts
    (clamped to 15) into 60 bytes, two nibbles per byte."""
    events_by_type = group_by(events, lambda e: e.command)
    buckets: DefaultDict[int, int] = defaultdict(int)
    for tap in events_by_type[konami.Command.PLAY]:
        bucket = int((tap.time / end_time) * 120)
        buckets[bucket] += 1
    for long in events_by_type[konami.Command.LONG]:
        # A long note contributes twice: once at the press ...
        press_bucket = int((long.time / end_time) * 120)
        buckets[press_bucket] += 1
        # ... and once at the release, derived from its encoded duration.
        duration = konami.EveLong.from_value(long.value).duration
        release_time = long.time + duration
        release_bucket = int((release_time / end_time) * 120)
        buckets[release_bucket] += 1
    res = []
    for i in range(0, 120, 2):
        # The jbsq density graph in a array of nibbles, the twist is that for
        # some obscure reason each pair of nibbles is swapped in the byte ...
        # little-endianness is a hell of a drug, don't do drugs kids ...
        first_nibble = min(buckets[i], 15)
        second_nibble = min(buckets[i + 1], 15)
        density_byte = (second_nibble << 4) + first_nibble
        res.append(density_byte)
    return res
| Stepland/jubeatools | jubeatools/formats/konami/jbsq/dump.py | dump.py | py | 3,650 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "jubeatools.song.Song",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "jubeatools.song",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "jubeatools.song.iter_charts",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": ... |
10962724593 | import base64
import json
import requests
from cassandra.cqlengine import connection
from flask import make_response
from flask_restful import Resource
from conf.config import CASSANDRA_HOSTS, FRIEND_KEYSPACE
from conf.service import USER_INFO_BULK_URL
from model.friend import FriendRelation
from service.common import get_user_id_from_jwt
class GetFriendList(Resource):
    """REST resource returning the authenticated user's friend list,
    enriched with usernames fetched in bulk from the user service."""

    def get(self):
        # Identity is taken from the JWT forwarded in X-Endpoint-Api-Userinfo.
        user_id = get_user_id_from_jwt()
        if not user_id:
            return make_response("You must send the userInfo into the header X-Endpoint-Api-Userinfo", 405)
        connection.setup(hosts=CASSANDRA_HOSTS, default_keyspace=FRIEND_KEYSPACE)
        friend_rows = FriendRelation.filter(user_id=user_id)
        # Index friends by their id so usernames can be merged in below.
        friends = {}
        friend_ids = []
        for friend_row in friend_rows:
            friend = friend_row.to_object()
            friend_id = friend['user_id']
            friends[friend_id] = friend
            friend_ids.append(friend_id)
        # Resolve all usernames with a single bulk call to the user service.
        user_info_list = self._get_user_info_bulk(friend_ids)
        for user in user_info_list:
            # NOTE(review): this rebinds the outer `user_id`; harmless here
            # because the caller's id is not used again afterwards.
            user_id = user['user_id']
            friends[user_id]['username'] = user['username']
        return {
            "results": len(friends),
            "friends": friends
        }

    @staticmethod
    def _get_user_info_bulk(user_ids):
        """POST the id list to the user service and return its `users` payload."""
        payload = json.dumps({"user_ids": user_ids})
        headers = {'Content-Type': 'application/json'}
        response = requests.post(USER_INFO_BULK_URL, data=payload, headers=headers)
        data = response.json()
        return data.get("users")
| glimpseapp/glimpse-service-friend | service/get_friend_list.py | get_friend_list.py | py | 1,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "service.common.get_user_id_from_jwt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 19,
"usage_type": "call"
},
{
"ap... |
3051216703 |
#!/usr/bin/python
# tetrous.py
import wx
#All functions and objects from the basic modules will start with a wx.
APP_EXIT = 1
class tetrous(wx.Frame): #tetrous was changed from Example
    """Main game window (a wx.Frame) with a menu bar holding an exit item."""

    def __init__(self, parent, title, *args, **kwargs):
        super(tetrous, self).__init__(parent, title=title,
            size=(500, 800), *args, **kwargs)
        # NOTE(review): Centre()/Show() are called again inside InitUI(),
        # so these two calls look redundant — confirm before removing.
        self.Centre()
        self.Show()
        self.InitUI()

    def InitUI(self):
        """Build the menu bar ('Ask' menu with a Ctrl+Q exit item) and show
        the frame."""
        menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        qmi = wx.MenuItem(fileMenu, APP_EXIT, '&AnHero\tCtrl+Q')
        qmi.SetBitmap(wx.Bitmap('exit.png')) #needs exit.png on disk
        fileMenu.Append(qmi)
        # fitem = fileMenu.Append( wx.ID_EXIT, #'AnHero', 'Rage Quit')
        menubar.Append(fileMenu, '&Ask') #&Ask used to be called file
        self.SetMenuBar(menubar)
        # Route the menu item to the quit handler.
        self.Bind(wx.EVT_MENU, self.OnQuit, qmi)#qmifitem)
        self.SetSize((500, 800))
        self.SetTitle('notsuspicious.exe')
        self.Centre()
        self.Show(True)

    def OnQuit(self, e):
        """Close the frame (bound to the menu exit item)."""
        self.Close()
#_______________________________________________________________________
def main():
    """Create the wx application and the game window, then run the event loop."""
    app = wx.App()
    tetrous(None, title='Size')
    app.MainLoop()

if __name__ == '__main__':
    main()
| Shapez/Tetris_Project | tetrous.py | tetrous.py | py | 1,269 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wx.Frame",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wx.MenuBar",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "wx.Menu",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "wx.MenuItem",
"line_number": 24,
... |
70934643944 | import cv2,os,shutil
import numpy as np
import cv2
import os
import shutil
import argparse

# Interactive sorter: display each cropped image full-screen and move it into
# one of 14 buckets depending on the key pressed
# (a..m -> thumbi_1..thumbi_13, Esc -> thumbi_14).
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="directory of the cropped odonates")
ap.add_argument("-o", "--output", required=True,
                help="parent directory to which each one be stored")
args = vars(ap.parse_args())
input_dir = args['input']
out_dir = args['output']

# Destination directories thumbi_1 .. thumbi_14. A plain list replaces the
# original vars()["dirN"] trick, which created 14 dynamic globals.
# (out_dir is assumed to end with a path separator, as in the original.)
dirs = [out_dir + 'thumbi_%s/' % str(i + 1) for i in range(14)]

try:
    for i, d in enumerate(dirs):
        os.mkdir(d)
        print("directory %s created" % (i + 1))
except FileExistsError:
    print("Directories already exists")

# Key -> destination mapping replaces the original 14-branch if chain.
key_map = {ord(c): d for c, d in zip("abcdefghijklm", dirs[:13])}
key_map[27] = dirs[13]  # Esc sends the image to the last bucket

for fln in sorted(os.listdir(input_dir)):
    fname = fln.replace(".jpg", " ")
    img = cv2.imread(os.path.join(input_dir, fln))
    cv2.namedWindow(fname, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(fname, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    cv2.imshow(fname, img)
    # Wait until one of the mapped keys is pressed; other keys are ignored.
    while True:
        k = cv2.waitKey(0) & 0xFF
        if k in key_map:
            cv2.destroyAllWindows()
            shutil.move(os.path.join(input_dir, fln), key_map[k])
            break
| robinjacobroy/Odonata_detection | sorting.py | sorting.py | py | 2,940 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_num... |
29870177823 | import sys
import copy
from collections import deque
# Running script: given code can be run with the command:
# python file.py, ./path/to/init_state.txt ./output/output.txt
# Variable ordering heuristics: Most constrained variable + Most constraining variable
# Value ordering heuristics: Least constraining value
# Inference mechanisms: Arc consistency
class Sudoku(object):
    """Sudoku solver modelled as a CSP (Python 2 code: uses xrange and
    integer `/` division).

    Heuristics: most-constrained variable + most-constraining variable for
    variable ordering, least-constraining value for value ordering, with
    arc consistency (AC-3) as the inference step during backtracking.
    """

    def __init__(self, puzzle):
        self.puzzle = puzzle # self.puzzle is a list of lists
        self.var_domain, self.var_constraints, self.var_unassigned = self.csp(puzzle)

    def csp(self, puzzle):
        """Build the CSP: per-cell domains, per-cell peer sets (row, column
        and 3x3 subgrid) and the count of unassigned cells.

        Assigned cells hold their int value in var_domain; unassigned cells
        hold a set of candidate values.
        """
        var_domain = {}
        var_constraints = {}
        var_unassigned = 0
        for r in xrange(9):
            for c in xrange(9):
                var_domain[(r, c)] = None
                var_constraints[(r, c)] = set()
                if puzzle[r][c] == 0:
                    var_unassigned += 1
        possible_domain = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
        for var in var_domain:
            row, column = var[0], var[1]
            assigned_val = set()
            puzzle_val = puzzle[row][column]
            for c in xrange(9):
                val = puzzle[row][c]
                var_constraints[var].add((row, c))
                if val != 0 and puzzle_val == 0:
                    assigned_val.add(val)
            for r in xrange(9):
                val = puzzle[r][column]
                var_constraints[var].add((r, column))
                if val != 0 and puzzle_val == 0:
                    assigned_val.add(val)
            # Top-left corner of the enclosing 3x3 subgrid (integer division).
            subgrid_r = (row / 3) * 3
            subgrid_c = (column / 3) * 3
            for r in xrange(subgrid_r, subgrid_r + 3):
                for c in xrange(subgrid_c, subgrid_c + 3):
                    val = puzzle[r][c]
                    var_constraints[var].add((r, c))
                    if val != 0 and puzzle_val == 0:
                        assigned_val.add(val)
            # A cell is not its own peer.
            var_constraints[var].remove(var)
            if puzzle_val == 0:
                var_domain[var] = possible_domain - assigned_val
            else:
                var_domain[var] = puzzle_val
        return var_domain, var_constraints, var_unassigned

    def is_complete(self, var_unassigned):
        """True when every cell has been assigned a value."""
        return var_unassigned == 0

    def is_consistent(self, var, val, var_domain, var_constraints):
        """True if `val` clashes with no assigned peer of `var`."""
        return all(var_domain[constraint] != val for constraint in var_constraints[var])

    def select_unassigned_var(self, var_domain, var_constraints):
        """Pick the unassigned cell with the fewest legal values (most
        constrained); break ties by the one with the most unassigned peers
        (most constraining)."""
        most_constrained_var = set()
        fewest_legal_val = 9
        for var in var_domain:
            domain = var_domain[var]
            # Unassigned cells hold a set domain; assigned cells hold an int.
            if isinstance(domain, set):
                legal_val = len(domain)
                if legal_val < fewest_legal_val:
                    most_constrained_var = set()
                    fewest_legal_val = legal_val
                if legal_val == fewest_legal_val:
                    most_constrained_var.add(var)
        most_constraining_var = None
        most_constraints = 0
        for var in most_constrained_var:
            num_constraints = 0
            for constraint in var_constraints[var]:
                if isinstance(var_domain[constraint], set):
                    num_constraints += 1
            if num_constraints >= most_constraints:
                most_constraining_var = var
                most_constraints = num_constraints
        # last var in most_constrained_var with largest num_constraints
        # may not be only var with that num_constraints
        return most_constraining_var

    def order_domain_val(self, var, var_domain, var_constraints):
        """Order `var`'s candidate values by how many peer domains each
        would shrink — least constraining value first."""
        val_order = []
        for val in var_domain[var]:
            num_affected = 0
            for constraint in var_constraints[var]:
                if isinstance(var_domain[constraint], set):
                    if val in var_domain[constraint]:
                        num_affected += 1
            val_order.append((val, num_affected))
        val_order.sort(key = lambda c: c[1])
        return [v[0] for v in val_order]

    def revise(self, var_domain, x_i, x_j):
        """AC-3 revise: drop values of x_i that have no supporting value in
        x_j's domain. Returns True if x_i's domain changed.

        Note the domain is replaced with a fresh set rather than mutated, so
        the shallow dict copy taken in backtrack() stays valid.
        """
        revised = False
        domain_i = var_domain[x_i]
        delete = set()
        for val_x in domain_i:
            domain_j = var_domain[x_j]
            if isinstance(domain_j, set):
                if not any(val_y != val_x for val_y in domain_j):
                    delete.add(val_x)
                    revised = True
            else:
                # x_j already assigned: val_x is only supported if it differs.
                if not domain_j != val_x:
                    delete.add(val_x)
                    revised = True
        var_domain[x_i] = domain_i - delete
        return revised

    def inference(self, var, var_domain, var_constraints):
        """Arc consistency (AC-3) seeded with arcs from each unassigned peer
        of `var`. Returns False when some domain is wiped out."""
        # queue of arcs (x_i, x_j) for all x_i which are unassigned. x_j is var.
        queue = deque()
        for constraint in var_constraints[var]:
            if isinstance(var_domain[constraint], set):
                queue.append((constraint, var))
        while queue:
            x_i, x_j = queue.popleft()
            if self.revise(var_domain, x_i, x_j):
                if len(var_domain[x_i]) == 0:
                    # Domain wipe-out: the current assignment is inconsistent.
                    return False
                for x_k in var_constraints[x_i] - set([x_j]):
                    if isinstance(var_domain[x_k], set):
                        queue.append((x_k, x_i))
        return True

    def backtrack(self, var_domain, var_constraints, var_unassigned):
        """Backtracking search; returns the complete assignment dict, or
        False when no consistent assignment exists."""
        if self.is_complete(var_unassigned):
            return var_domain
        var = self.select_unassigned_var(var_domain, var_constraints)
        for val in self.order_domain_val(var, var_domain, var_constraints):
            # Shallow copy suffices: revise() replaces domains, never
            # mutates them in place.
            var_domain_prev = var_domain.copy()
            var_unassigned_prev = var_unassigned
            if self.is_consistent(var, val, var_domain, var_constraints):
                var_domain[var] = val
                var_unassigned -= 1
                inferences = self.inference(var, var_domain, var_constraints)
                if inferences != False:
                    result = self.backtrack(var_domain, var_constraints, var_unassigned)
                    if result != False:
                        return result
            # Undo the assignment and any pruning before trying the next value.
            var_domain = var_domain_prev
            var_unassigned = var_unassigned_prev
        return False

    def solve(self):
        """Solve the puzzle in place and return the 9x9 grid of ints."""
        complete_assignment = self.backtrack(self.var_domain, self.var_constraints, self.var_unassigned)
        for var in complete_assignment:
            r, c = var[0], var[1]
            self.puzzle[r][c] = complete_assignment[var]
        return self.puzzle
# you may add more classes/functions if you think is useful
# However, ensure all the classes/functions are in this file ONLY
# Note that our evaluation scripts only call the solve method.
# Any other methods that you write should be used within the solve() method.
if __name__ == "__main__":
# STRICTLY do NOT modify the code in the main function here
if len(sys.argv) != 3:
print ("\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\n")
raise ValueError("Wrong number of arguments!")
try:
f = open(sys.argv[1], 'r')
except IOError:
print ("\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\n")
raise IOError("Input file not found!")
puzzle = [[0 for i in range(9)] for j in range(9)]
lines = f.readlines()
i, j = 0, 0
for line in lines:
for number in line:
if '0' <= number <= '9':
puzzle[i][j] = int(number)
j += 1
if j == 9:
i += 1
j = 0
sudoku = Sudoku(puzzle)
ans = sudoku.solve()
with open(sys.argv[2], 'a') as f:
for i in range(9):
for j in range(9):
f.write(str(ans[i][j]) + " ")
f.write("\n")
| cs3243-ay1920s2-g30/sudoku | CS3243_P2_Sudoku_30_1.py | CS3243_P2_Sudoku_30_1.py | py | 7,898 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line... |
830008449 | #!/usr/bin/env python
import re
import lib.analysis.analyse as a
import lib.analysis.component as a_comp
import lib.dynamic.component as comp
import lib.independence.fs as fs
import lib.settings as s
import lib.ui.menu as menu
# The greater purpose of (functions in) this file is to
# ask user what results are to be analysed
class AnalysisMenuHandler(object):
    '''Object to handle sequential submenu calls,
    and to provide selection filters'''

    # Constructor
    # resultpath is the path where all results are kept
    # components is a list of available methodcomponent
    def __init__(self, resultpath, components):
        self.current_path = resultpath
        self.components = components

    # Returns a list of dirs with datetime name, found in Meizodon/results
    def get_result_dirs(self):
        if not fs.isdir(self.current_path):
            return []
        candidates = fs.lsonlydir(self.current_path, full_paths=True)
        if fs.isdir(s.analyseddir):
            already_analysed = fs.lsonlydir(s.analyseddir)
        else:
            already_analysed = []
        regex = '[0-9]+-[0-9]{2}-[0-9]{2}\([0-9]{2}:[0-9]{2}:[0-9]{2}\)'
        pattern = re.compile(regex)
        # Bug fix: the original removed items from the list while iterating
        # over it, which skips the element following every removal. Build a
        # filtered list instead: keep datetime-named dirs not yet analysed.
        return [item for item in candidates
                if pattern.match(fs.basename(item))
                and fs.basename(item) not in already_analysed]

    # Returns a list of dirs in Meizodon/results/<datetime>/,
    # with names corresponding to available methods
    def get_result_sub_dirs(self):
        subdirs = fs.lsonlydir(self.current_path)
        component_names = {str(x) for x in self.components}
        # Bug fix: same remove-while-iterating problem as get_result_dirs;
        # keep only dirs whose name matches a known component.
        return [item for item in subdirs if item in component_names]

    # Takes a full path to a result, returns an AnalysisComponent
    # path like: results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk
    def get_analysis_component(self, path):
        path_array = fs.split(path)
        datetime = path_array[-3]
        method_name = path_array[-2]
        apk_name = path_array[-1]
        method = comp.Component.get_component_for_name(self.components, method_name)
        results = fs.join(s.resultsdir, datetime, 'results.csv')
        analysis_result_loc = fs.join(s.analyseddir, datetime, method_name, apk_name)
        return a_comp.AnalysisComponent(path, analysis_result_loc, method, results)

    # Takes list of full paths to a result, returns list of AnalysisComponents
    # path like: [results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk, ...]
    def get_analysis_components(self, paths):
        return [self.get_analysis_component(path) for path in paths]

    # Takes list of paths to methods, returns list of AnalysisComponents
    # path like: [results/<datetime>/DroidSafe, results/<datetime>/JN-SAF]
    def get_sub_all_analysis_components(self, paths):
        analysiscomponents = []
        for path in paths:
            contents = fs.lsonlydir(path, full_paths=True)
            analysiscomponents.extend(self.get_analysis_components(contents))
        return analysiscomponents

    # Takes list of path to execution results, returns list of AnalysisComponents
    # path like: [results/2019-04-14(13:59:55), results/2019-05-24(01:23:34)]
    def get_all_analysis_components(self, paths):
        analysiscomponents = []
        for path in paths:
            contents = fs.lsonlydir(path, full_paths=True)
            analysiscomponents.extend(self.get_sub_all_analysis_components(contents))
        return analysiscomponents

    # Perform analysis on one or more full execution results
    # path like: [results/2019-04-14(13:59:55), results/2019-05-24(01:23:34)]
    def analyse_all(self, paths):
        analysiscomponents = self.get_all_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on one or more components from a execution results
    # path like: [results/<datetime>/DroidSafe, results/<datetime>/JN-SAF]
    def analyse_sub_all(self, paths):
        analysiscomponents = self.get_sub_all_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on one or more apks from one component from one execution result
    # path like: [results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk, ...]
    def analyse_sub_sub_all(self, paths):
        analysiscomponents = self.get_analysis_components(paths)
        a.analyse_all(analysiscomponents)

    # Perform analysis on exactly 1 apk's execution result for one component
    # path like: results/<datetime>/DroidSafe/adsvr.soporteweb.es.apk
    def analyse_sub_sub_single(self, path):
        analysiscomponent = self.get_analysis_component(path)
        a.analyse_all([analysiscomponent])

    # Shows user a menu to determine which analyse generated
    # result directories should be analysed
    def analysis_menu(self):
        if not fs.isdir(s.resultsdir) or not fs.ls(s.resultsdir):
            print('Nothing to analyse.')
            return
        while True:
            print('Results for which run do you want to analyse?')
            options = self.get_result_dirs()
            chosenopts, result = menu.standard_menu(options, lambda x: fs.basename(str(x)))
            if result == menu.MenuResults.CHOSEN:
                if len(chosenopts) == 1:
                    self.current_path = fs.join(self.current_path, chosenopts[0])
                    self.analysis_submenu()
                    return
                elif len(chosenopts) > 1:
                    self.analyse_all(chosenopts)
                    return
            elif result == menu.MenuResults.EVERYTHING:
                self.analyse_all(chosenopts)
                return
            elif result == menu.MenuResults.BACK:
                return

    # Shows user a menu to further decide which
    # execution results should be analysed
    def analysis_submenu(self):
        if not fs.ls(self.current_path):
            print('Nothing to analyse here')
            return
        while True:
            print('Results for which method do you want to analyse?')
            options = self.get_result_sub_dirs()
            chosenopts, result = menu.standard_menu(options, lambda x: str(x))
            if result == menu.MenuResults.CHOSEN:
                if len(chosenopts) == 1:
                    self.current_path = fs.join(self.current_path, chosenopts[0])
                    self.analysis_sub_submenu()
                    return
                elif len(chosenopts) > 1:
                    self.analyse_sub_all([fs.join(self.current_path, x) for x in chosenopts])
                    return
            elif result == menu.MenuResults.EVERYTHING:
                self.analyse_sub_all([fs.join(self.current_path, x) for x in chosenopts])
                return
            elif result == menu.MenuResults.BACK:
                self.current_path = fs.dirname(self.current_path)
                return

    # Shows user a menu to further decide which
    # execution results should be analysed
    def analysis_sub_submenu(self):
        if not fs.ls(self.current_path):
            print('Nothing to analyse here')
            return
        print('Results for which apk do you want to analyse?')
        options = fs.lsonlydir(self.current_path)
        chosenopts, result = menu.standard_menu(options, lambda x: str(x))
        if result == menu.MenuResults.CHOSEN:
            if len(chosenopts) == 1:
                self.current_path = fs.join(self.current_path, chosenopts[0])
                self.analyse_sub_sub_single(self.current_path)
            elif len(chosenopts) > 1:
                self.analyse_sub_sub_all([fs.join(self.current_path, x) for x in chosenopts])
        elif result == menu.MenuResults.EVERYTHING:
            self.analyse_sub_sub_all([fs.join(self.current_path, x) for x in chosenopts])
        elif result == menu.MenuResults.BACK:
            self.current_path = fs.dirname(self.current_path)
# Returns True if this window should be shown in the main menu
# (i.e. at least one unanalysed result directory exists).
# Otherwise, it returns False
def should_show(components):
    handler = AnalysisMenuHandler(s.resultsdir, components)
    return len(handler.get_result_dirs()) > 0
# Main function of this menu. Creates a handler-object and executes it
def analysis_menu(components):
    handler = AnalysisMenuHandler(s.resultsdir, components)
    handler.analysis_menu()
| Sebastiaan-Alvarez-Rodriguez/Meizodon | lib/analysis/menu.py | menu.py | py | 8,592 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "lib.independence.fs.isdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "lib.independence.fs",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "lib.independence.fs.lsonlydir",
"line_number": 30,
"usage_type": "call"
},
{
"api_n... |
34203818933 | import torch
import torch.nn.functional as F
def R_from_axis_angle(k: torch.Tensor, theta: torch.Tensor):
    """Rodrigues' formula: rotation matrix for a rotation of `theta` radians
    around axis `k` (which need not be unit length).

    Returns the 3x3 identity for a zero axis. The result is placed on `k`'s
    device/dtype (fix: the original returned a CPU identity in the
    degenerate branch, inconsistent with the `.to(k)` used elsewhere).
    """
    if torch.norm(k) == 0.:
        # Degenerate axis: no rotation; keep device/dtype consistent with k.
        return torch.eye(3).to(k)
    k = F.normalize(k, p=2., dim=0)
    kx, ky, kz = k[0], k[1], k[2]
    cos, sin = torch.cos(theta), torch.sin(theta)
    R = torch.zeros((3, 3)).to(k)
    R[0, 0] = cos + (kx**2) * (1 - cos)
    R[0, 1] = kx * ky * (1 - cos) - kz * sin
    R[0, 2] = kx * kz * (1 - cos) + ky * sin
    R[1, 0] = kx * ky * (1 - cos) + kz * sin
    R[1, 1] = cos + (ky**2) * (1 - cos)
    R[1, 2] = ky * kz * (1 - cos) - kx * sin
    R[2, 0] = kx * kz * (1 - cos) - ky * sin
    R[2, 1] = ky * kz * (1 - cos) + kx * sin
    R[2, 2] = cos + (kz**2) * (1 - cos)
    return R
def axis_angle_to_quaternions(axis: torch.tensor, angle: torch.tensor):
    """Convert an (axis, angle) rotation into a quaternion (r, i, j, k)."""
    unit_axis = F.normalize(axis, p=2., dim=0)
    half_angle = angle * 0.5
    sin_half = torch.sin(half_angle)
    cos_half = torch.cos(half_angle)
    # Real part first, then the imaginary parts along the unit axis.
    components = [cos_half,
                  unit_axis[0] * sin_half,
                  unit_axis[1] * sin_half,
                  unit_axis[2] * sin_half]
    return torch.tensor(components, dtype=torch.float32).to(axis)
def R_from_quaternions(quaternions: torch.tensor):
    """Convert a quaternion (r, i, j, k) to a 3x3 rotation matrix.

    The input is normalized first, so non-unit quaternions are accepted.
    """
    q = F.normalize(quaternions, p=2., dim=0)
    r, i, j, k = torch.unbind(q, -1)
    two_s = 2.0 / (q * q).sum(-1)
    # Build the matrix row by row instead of one flat 9-element stack.
    row0 = torch.stack(
        (1 - two_s * (j * j + k * k),
         two_s * (i * j - k * r),
         two_s * (i * k + j * r)), -1)
    row1 = torch.stack(
        (two_s * (i * j + k * r),
         1 - two_s * (i * i + k * k),
         two_s * (j * k - i * r)), -1)
    row2 = torch.stack(
        (two_s * (i * k - j * r),
         two_s * (j * k + i * r),
         1 - two_s * (i * i + j * j)), -1)
    o = torch.stack((row0, row1, row2), -2)
    return o.reshape(q.shape[:-1] + (3, 3)).to(q)
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to quaternions.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )

    # |q_r|, |q_i|, |q_j|, |q_k| up to scale, from the matrix trace terms.
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)
    return quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))
def R_from_6d(d6: torch.tensor):
    """
    Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
    using Gram--Schmidt orthogonalization per Section B of [1].
    Args:
        d6: 6D rotation representation, of size (*, 6)

    Returns:
        batch of rotation matrices of size (*, 3, 3)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    a1, a2 = d6[..., :3], d6[..., 3:]
    # Gram-Schmidt: b1 is a1 normalized, b2 is a2 with its b1 component
    # removed, b3 completes the right-handed orthonormal frame.
    b1 = F.normalize(a1, dim=-1)
    b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
    b2 = F.normalize(b2, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack((b1, b2, b3), dim=-2).to(d6)
def quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to axis/angle.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
            of shape (..., 3), where the magnitude is the angle
            turned anticlockwise in radians around the vector's
            direction.
    """
    norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
    half_angles = torch.atan2(norms, quaternions[..., :1])
    angles = 2 * half_angles
    eps = 1e-6
    small_angles = angles.abs() < eps
    sin_half_angles_over_angles = torch.empty_like(angles)
    sin_half_angles_over_angles[~small_angles] = (
        torch.sin(half_angles[~small_angles]) / angles[~small_angles]
    )
    # for x small, sin(x/2) is about x/2 - (x/2)^3/6
    # so sin(x/2)/x is about 1/2 - (x*x)/48
    sin_half_angles_over_angles[small_angles] = (
        0.5 - (angles[small_angles] * angles[small_angles]) / 48
    )
    axis = quaternions[..., 1:] / sin_half_angles_over_angles
    # NOTE(review): dim=0 is correct only for a single quaternion of shape
    # (4,); for batched input the normalization would run over the batch
    # axis — confirm callers only pass single quaternions.
    axis = F.normalize(axis, p=2., dim=0)
    return axis, angles
def axis_angle_from_6d(d6: torch.Tensor):
    """Recover an (axis, angle) pair from the 6D rotation representation,
    going 6D -> rotation matrix -> quaternion -> axis/angle."""
    rotation = R_from_6d(d6)
    quaternion = matrix_to_quaternion(rotation)
    return quaternion_to_axis_angle(quaternion)
def matrix_to_axis_angle(R: torch.Tensor):
    """Convert a rotation matrix to an (axis, angle) pair via quaternions."""
    return quaternion_to_axis_angle(matrix_to_quaternion(R))
{
"api_name": "torch.tensor",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.norm",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.normalize",
... |
73934948902 | # coding=utf-8
# Bouncing smiley-face game
import pygame
pygame.init()
screen = pygame.display.set_mode([800,600])
keep_going = True
# Step 1: load the image
# smilePic = pygame.image.load("smile.gif")
smilePic = pygame.image.load("data/asprite.bmp")
# Use the top-left pixel as the colorkey to drop the white background and
# border (apparently has no effect on GIFs)
colorkey = smilePic.get_at((0,0))
smilePic.set_colorkey(colorkey)
# Step 2: X/Y coordinates so the image can move
picX = 0
picY = 0
BLACK = (0,0,0)
timer = pygame.time.Clock()
# speed = 5
speedX = 5
speedY = 5
while keep_going:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            keep_going = False
    picX += speedX
    picY += speedY
    # Step 3: bounce back when hitting a wall
    if picX <= 0 or smilePic.get_width() + picX >= 800 :
        # Negating the speed reverses the direction of movement
        # speed = -speed
        speedX = -speedX
    if picY <= 0 or smilePic.get_height() + picY >= 600 :
        speedY = -speedY
    # Fill the screen with black each frame to erase the previous trail
    screen.fill(BLACK)
    # blit() copies pixels from one surface (e.g. the loaded image) onto
    # another (the display window); use it whenever pixels must be copied
    # between surfaces
    screen.blit(smilePic,(picX,picY))
    # pygame.display.update()
    pygame.display.flip()
    timer.tick(60)
pygame.quit()
| jellier/forPython2.7 | SmileBounce.py | SmileBounce.py | py | 1,375 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.loa... |
41289141746 | import itertools
from Atom import Atom
class Struct:
    """Memory-layout calculator for a struct composed of Atoms and nested
    Structs.

    Construction precomputes three (size, alignment, wasted_bytes) tuples:
      * unpackaged : fields in declaration order with natural alignment
      * packaged   : fields packed back to back, no padding
      * optimal    : best field permutation found by brute force (the
                     layout problem is NP-complete, so all permutations
                     are tried)
    """

    # computes the space when the instance is initialized
    def __init__(self, name, types):
        self.name = name
        self.types = types
        self.unpackaged = self._calculate_space_unpackaged()
        self.packaged = self._calculate_space_packaged()
        self.optimal = self._calculate_space_optimal()

    # computes the metrics when the stucture of the type is exactly the same
    # as the given by the user
    def _calculate_space_unpackaged(self):
        """Lay fields out in declaration order, padding each to its
        alignment. `align` tracks the running offset; `unused` counts
        padding bytes."""
        align = 0
        unused = 0
        for typ in self.types:
            if isinstance(typ, Atom):
                typ_align = typ.align
                typ_repre = typ.repre
            else:
                typ_align = typ.unpackaged[1]
                typ_repre = typ.unpackaged[0]
            if align % typ_align != 0:
                # Pad up to the next multiple of the field's alignment.
                desp = typ_align - (align % typ_align)
                unused += desp
                align += desp
            align += typ_repre
        # The struct's own alignment is given by its first field.
        # NOTE(review): for a nested Struct this reads packaged[1] while the
        # loop above used unpackaged[1] — confirm the asymmetry is intended.
        first = self.types[0]
        if isinstance(first, Atom):
            al = first.align
        else:
            al = first.packaged[1]
        return (align, al, unused)

    # computes the metrics without taking into consideration the alignment
    def _calculate_space_packaged(self):
        """Lay fields out with no padding: size is the sum of member sizes."""
        used = 0
        for typ in self.types:
            if isinstance(typ, Atom):
                typ_repre = typ.repre
            else:
                typ_repre = typ.packaged[0]
            used += typ_repre
        # The struct's own alignment is given by its first field.
        first = self.types[0]
        if isinstance(first, Atom):
            al = first.align
        else:
            al = first.packaged[1]
        return (used, al, 0)

    # checks which of the interations leave less spaces unused taking into
    # consideration the alignment. (bruteforce, as the problem is np-complete)
    def _calculate_space_optimal(self):
        """Try every field permutation and keep the aligned layout wasting
        the fewest bytes."""
        original = self.types
        permutations = list(itertools.permutations(self.types))
        optimal_perm = None
        for permutation in permutations:
            self.types = list(permutation)
            space = self._calculate_space_unpackaged()
            # keep the candidate if it wastes fewer bytes
            if optimal_perm:
                if optimal_perm[2] > space[2]:
                    optimal_perm = space
            else:
                optimal_perm = space
        # restore the user-declared field order
        self.types = original
        return optimal_perm

    def __str__(self):
        # Bug fix: the attribute is `name`; the original read `self.typ`,
        # which does not exist and raised AttributeError.
        return f'Soy un {self.name}'
| mfaria724/ci3641-examen2 | pregunta3/Struct.py | Struct.py | py | 2,603 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Atom.Atom",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "Atom.Atom",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "Atom.Atom",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "Atom.Atom",
"line_numbe... |
5951741639 | from pymongo import MongoClient
import tiktoken
from configsecrets import mongoConnection
def upload_to_mongo(data):
    """Insert a single document into the configured MongoDB collection.

    Connection parameters come from `configsecrets.mongoConnection`; the client
    is closed automatically when the context exits.
    """
    client = MongoClient(
        host=mongoConnection["host"],
        port=mongoConnection["port"],
        username=mongoConnection["login"],
        password=mongoConnection["password"],
    )
    with client:
        collection = client[mongoConnection["db"]][mongoConnection["collection"]]
        collection.insert_one(data)
def count_tokens(text: str) -> int:
    """Return the number of tokens `text` encodes to for text-embedding-ada-002."""
    enc = tiktoken.encoding_for_model("text-embedding-ada-002")
    tokens = enc.encode(text)
    return len(tokens)
def get_text_info(document) -> dict:
    """Extract text and metadata from a document.

    NOTE(review): this is a stub — `text`, `title`, `author`, and `url` are
    never assigned in this body, so calling it as-is raises NameError. Fill in
    the extraction logic for the actual document format before use.
    """
    # do something to get info from the document
    return {"text":text, "title":title, "author":author, "url":url, "token_count":count_tokens(text)}
def split_text(text: str, MAX_TOKENS) -> list:
    """Split `text` into chunks whose token count does not exceed MAX_TOKENS.

    BUG FIX: the previous body returned the undefined name `text_chunks`,
    which raised NameError. This implementation splits on whitespace and
    greedily packs words into chunks, measuring each candidate chunk with
    count_tokens(). A single word that alone exceeds MAX_TOKENS becomes its
    own (oversized) chunk rather than being dropped.
    """
    text_chunks = []
    current = ""
    for word in text.split():
        candidate = current + " " + word if current else word
        if count_tokens(candidate) <= MAX_TOKENS:
            current = candidate
        else:
            if current:
                text_chunks.append(current)
            current = word
    if current:
        text_chunks.append(current)
    return text_chunks
if __name__ == "__main__":
    # Maximum tokens allowed per uploaded chunk
    MAX_TOKENS = 512
    # placeholder list — replace with the real document source
    documents = ["**some","list", "of", "documents**"]
    for document in documents:
        text_info = get_text_info(document)
        # check if the text is too long, in which case, break into chunks as needed
        if text_info["token_count"] > MAX_TOKENS:
            text_chunks = split_text(text_info["text"],MAX_TOKENS)
            for chunk in text_chunks:
                token_count = count_tokens(chunk)
                # the same dict is reused and mutated for every chunk; only
                # `text` and `token_count` change between uploads
                text_info["token_count"] = token_count
                text_info["text"] = chunk
                upload_to_mongo(text_info)
        else:
            upload_to_mongo(text_info)
| wyler-m/docubot | load_documents.py | load_documents.py | py | 1,704 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "configsecrets.mongoConnection",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "configsecrets.mongoConnection",
"line_number": 8,
"usage_type": "name"
},
{
"api_... |
495151957 | from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions import Materialization, SolidHandle
from dagster.core.definitions.events import EventMetadataEntry
from dagster.core.serdes import whitelist_for_serdes
from dagster.core.types.runtime_type import RuntimeType
from dagster.utils import merge_dicts
from dagster.utils.error import SerializableErrorInfo
@whitelist_for_serdes
class StepOutputHandle(namedtuple('_StepOutputHandle', 'step_key output_name')):
    """Reference to a named output of a specific execution step."""

    @staticmethod
    def from_step(step, output_name='result'):
        """Build a handle pointing at `output_name` of `step`."""
        check.inst_param(step, 'step', ExecutionStep)
        return StepOutputHandle(step.key, output_name)

    def __new__(cls, step_key, output_name='result'):
        step_key = check.str_param(step_key, 'step_key')
        output_name = check.str_param(output_name, 'output_name')
        return super(StepOutputHandle, cls).__new__(
            cls, step_key=step_key, output_name=output_name
        )
@whitelist_for_serdes
class StepInputData(namedtuple('_StepInputData', 'input_name type_check_data')):
    """Serializable record of a step-input event: the input's name plus the
    optional result of type-checking its value."""

    def __new__(cls, input_name, type_check_data):
        return super(StepInputData, cls).__new__(
            cls,
            input_name=check.str_param(input_name, 'input_name'),
            # None is allowed: a type check may not have been performed
            type_check_data=check.opt_inst_param(type_check_data, 'type_check_data', TypeCheckData),
        )
@whitelist_for_serdes
class TypeCheckData(namedtuple('_TypeCheckData', 'success label description metadata_entries')):
    """Serializable result of a runtime type check.

    success: whether the value passed the check
    label: short display label for the check
    description: optional longer human-readable description
    metadata_entries: optional list of EventMetadataEntry with extra context
    """

    def __new__(cls, success, label, description=None, metadata_entries=None):
        return super(TypeCheckData, cls).__new__(
            cls,
            success=check.bool_param(success, 'success'),
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # BUG FIX: the second argument is the parameter *name* used in check's
            # error messages; previously the list itself was passed instead of the
            # string 'metadata_entries'.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
@whitelist_for_serdes
class UserFailureData(namedtuple('_UserFailureData', 'label description metadata_entries')):
    """User-provided failure information attached to a step failure event.

    label: short display label
    description: optional longer description
    metadata_entries: optional list of EventMetadataEntry with extra context
    """

    def __new__(cls, label, description=None, metadata_entries=None):
        return super(UserFailureData, cls).__new__(
            cls,
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # BUG FIX: pass the parameter name string 'metadata_entries' instead
            # of accidentally passing the list itself as the name argument.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
@whitelist_for_serdes
class StepOutputData(
    namedtuple('_StepOutputData', 'step_output_handle intermediate_materialization type_check_data')
):
    """Serializable record of a step-output event: which output was produced,
    plus optional intermediate materialization and type-check results."""

    def __new__(cls, step_output_handle, intermediate_materialization=None, type_check_data=None):
        return super(StepOutputData, cls).__new__(
            cls,
            step_output_handle=check.inst_param(
                step_output_handle, 'step_output_handle', StepOutputHandle
            ),
            intermediate_materialization=check.opt_inst_param(
                intermediate_materialization, 'intermediate_materialization', Materialization
            ),
            type_check_data=check.opt_inst_param(type_check_data, 'type_check_data', TypeCheckData),
        )

    @property
    def output_name(self):
        # convenience accessor delegating to the handle
        return self.step_output_handle.output_name
@whitelist_for_serdes
class StepFailureData(namedtuple('_StepFailureData', 'error user_failure_data')):
    """Serializable payload describing why a step failed."""

    def __new__(cls, error, user_failure_data):
        return super(StepFailureData, cls).__new__(
            cls,
            # framework-level error info (e.g. serialized traceback); optional
            error=check.opt_inst_param(error, 'error', SerializableErrorInfo),
            # user-supplied failure metadata; optional
            user_failure_data=check.opt_inst_param(
                user_failure_data, 'user_failure_data', UserFailureData
            ),
        )
@whitelist_for_serdes
class StepSuccessData(namedtuple('_StepSuccessData', 'duration_ms')):
    """Payload recorded when a step completes successfully."""

    def __new__(cls, duration_ms):
        duration_ms = check.float_param(duration_ms, 'duration_ms')
        return super(StepSuccessData, cls).__new__(cls, duration_ms=duration_ms)
class StepKind(Enum):
    # the only kind of step in this plan model: user compute
    COMPUTE = 'COMPUTE'
class StepInputSourceType(Enum):
    # value comes from a single upstream step output
    SINGLE_OUTPUT = 'SINGLE_OUTPUT'
    # value is assembled from several upstream step outputs
    MULTIPLE_OUTPUTS = 'MULTIPLE_OUTPUTS'
    # value is provided directly via config
    CONFIG = 'CONFIG'
class StepInput(
    namedtuple('_StepInput', 'name runtime_type source_type source_handles config_data')
):
    """Describes where one input of an execution step comes from."""

    def __new__(cls, name, runtime_type, source_type, source_handles=None, config_data=None):
        name = check.str_param(name, 'name')
        runtime_type = check.inst_param(runtime_type, 'runtime_type', RuntimeType)
        source_type = check.inst_param(source_type, 'source_type', StepInputSourceType)
        source_handles = check.opt_list_param(
            source_handles, 'source_handles', of_type=StepOutputHandle
        )
        # config_data is intentionally unchecked: it can be any type
        return super(StepInput, cls).__new__(
            cls, name, runtime_type, source_type, source_handles, config_data
        )

    @property
    def is_from_output(self):
        """True if the value originates from one or more upstream outputs."""
        return self.source_type in (
            StepInputSourceType.SINGLE_OUTPUT,
            StepInputSourceType.MULTIPLE_OUTPUTS,
        )

    @property
    def is_from_single_output(self):
        return self.source_type is StepInputSourceType.SINGLE_OUTPUT

    @property
    def is_from_multiple_outputs(self):
        return self.source_type is StepInputSourceType.MULTIPLE_OUTPUTS

    @property
    def dependency_keys(self):
        """Keys of the upstream steps this input depends on."""
        return set(handle.step_key for handle in self.source_handles)
class StepOutput(namedtuple('_StepOutput', 'name runtime_type optional')):
    """Describes one declared output of an execution step."""

    def __new__(cls, name, runtime_type, optional):
        name = check.str_param(name, 'name')
        runtime_type = check.inst_param(runtime_type, 'runtime_type', RuntimeType)
        optional = check.bool_param(optional, 'optional')
        return super(StepOutput, cls).__new__(cls, name, runtime_type, optional)
class ExecutionStep(
    namedtuple(
        '_ExecutionStep',
        (
            'pipeline_name key_suffix step_inputs step_input_dict step_outputs step_output_dict '
            'compute_fn kind solid_handle logging_tags metadata'
        ),
    )
):
    """One executable node of a pipeline's execution plan.

    The caller supplies input/output lists; name-indexed dicts are derived
    here so lookups by name are O(1). `logging_tags` always includes the
    step/pipeline/solid identity and is merged with any user-supplied tags.
    """

    def __new__(
        cls,
        pipeline_name,
        key_suffix,
        step_inputs,
        step_outputs,
        compute_fn,
        kind,
        solid_handle,
        logging_tags=None,
        metadata=None,
    ):
        return super(ExecutionStep, cls).__new__(
            cls,
            pipeline_name=check.str_param(pipeline_name, 'pipeline_name'),
            key_suffix=check.str_param(key_suffix, 'key_suffix'),
            step_inputs=check.list_param(step_inputs, 'step_inputs', of_type=StepInput),
            step_input_dict={si.name: si for si in step_inputs},
            step_outputs=check.list_param(step_outputs, 'step_outputs', of_type=StepOutput),
            step_output_dict={so.name: so for so in step_outputs},
            compute_fn=check.callable_param(compute_fn, 'compute_fn'),
            kind=check.inst_param(kind, 'kind', StepKind),
            solid_handle=check.inst_param(solid_handle, 'solid_handle', SolidHandle),
            # user tags may not override the identity tags (first dict wins on merge
            # per merge_dicts semantics — TODO confirm merge order if this matters)
            logging_tags=merge_dicts(
                {
                    'step_key': str(solid_handle) + '.' + key_suffix,
                    'pipeline': pipeline_name,
                    'solid': solid_handle.name,
                    'solid_definition': solid_handle.definition_name,
                },
                check.opt_dict_param(logging_tags, 'logging_tags'),
            ),
            metadata=check.opt_dict_param(metadata, 'metadata', key_type=str),
        )

    @property
    def key(self):
        # e.g. "outer.inner.compute" — solid handle path plus the step suffix
        return str(self.solid_handle) + '.' + self.key_suffix

    @property
    def solid_name(self):
        return self.solid_handle.name

    @property
    def solid_definition_name(self):
        return self.solid_handle.definition_name

    def has_step_output(self, name):
        check.str_param(name, 'name')
        return name in self.step_output_dict

    def step_output_named(self, name):
        check.str_param(name, 'name')
        return self.step_output_dict[name]

    def has_step_input(self, name):
        check.str_param(name, 'name')
        return name in self.step_input_dict

    def step_input_named(self, name):
        check.str_param(name, 'name')
        return self.step_input_dict[name]
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/execution/plan/objects.py | objects.py | py | 8,359 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dagster.check.inst_param",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "dagster.check",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dagster... |
2786391582 | from typing import Any, NoReturn, List
from numpy import ndarray
import socket
import select
import xml.etree.ElementTree as ElementTree
import logging
import errno
from base_classes import PostProcessorBase
class SocketServerPostProcessor(PostProcessorBase):
    """
    Outputs data to clients as a TCP server.

    Configuration info:

    - `Port`: The port number of the TCP server. This must follow the port usage rules specified by the FRC Game
      Manual. A port number in the range 5800-5810 is recommended.
    """
    port = int()
    sock = None

    async def setup(self, component_config_root: ElementTree.Element):
        """Bind and start listening on the configured port."""
        self.port = int(component_config_root.find("Port").text)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # allow quick restarts without waiting out TIME_WAIT
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('0.0.0.0', self.port))
        self.sock.listen(5)
        logging.debug("Server listening on port " + str(self.port))
        self.read_list = [self.sock]
        self.write_list = []

    # noinspection PyMethodMayBeStatic
    async def cleanup(self):
        """Close the listening socket."""
        self.sock.close()

    async def postprocess(self, data: List[Any], frame: ndarray) -> NoReturn:
        def disconnect(s: socket.socket) -> NoReturn:
            # drop the client from every bookkeeping list and close it
            for bucket in (self.read_list, self.write_list, readable, writable):
                if s in bucket:
                    bucket.remove(s)
            s.close()

        try:
            readable, writable, errored = select.select(self.read_list, self.write_list, self.read_list, 0.02)
        except select.error as e:
            # BUG FIX: previously execution fell through to the loops below,
            # where readable/writable/errored were undefined (NameError).
            logging.error("select() failed: %s", e)
            return

        for s in errored:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning("exceptional condition on " + str(s.getpeername()))
            disconnect(s)

        for s in readable:
            if s is self.sock:
                client_socket, address = self.sock.accept()
                logging.debug("Accepted connection from " + str(client_socket.getpeername()))
                client_socket.setblocking(0)
                self.read_list.append(client_socket)
                self.write_list.append(client_socket)
            else:
                data_in = s.recv(1024)
                if data_in:
                    # any message starting with "shutdown" stops the whole program
                    if data_in.startswith(b"shutdown"):
                        raise KeyboardInterrupt

        for s in writable:
            if len(data) > 0:
                message = self.to_string(data[0])
                try:
                    s.send(bytes(message, "utf-8"))
                except socket.error as err:
                    if err.errno == errno.EPIPE:
                        logging.warning("client unexpectedly disconnected")
                        disconnect(s)

    @staticmethod
    def to_string(data: Any):
        """Format a single detection result for the wire protocol."""
        return str(data.angle) + "\n"
| 1777TheVikings/FRC1777-Vision | postprocessors/socketserver.py | socketserver.py | py | 2,969 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "base_classes.PostProcessorBase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 25,
"usage_type": "name"
},
... |
3678024520 | import copy
import json
import logging
import os
import pickle
import warnings
import numpy as np
from typing import Any, List, Optional, Text, Dict, Tuple
import rasa.utils.io
from rasa.core.domain import Domain
from rasa.core.featurizers import (
TrackerFeaturizer,
FullDialogueTrackerFeaturizer,
LabelTokenizerSingleStateFeaturizer,
MaxHistoryTrackerFeaturizer,
)
from rasa.core.policies.policy import Policy
from rasa.core.constants import DEFAULT_POLICY_PRIORITY
from rasa.core.trackers import DialogueStateTracker
from rasa.utils import train_utils
import tensorflow as tf
# avoid warning println on contrib import - remove for tf 2
tf.contrib._warning = None
logger = logging.getLogger(__name__)
class EmbeddingPolicy(Policy):
    """Transformer Embedding Dialogue Policy (TEDP)

    Transformer version of the REDP used in our paper https://arxiv.org/abs/1811.11707
    """

    SUPPORTS_ONLINE_TRAINING = True

    # default properties (DOC MARKER - don't remove)
    defaults = {
        # nn architecture
        # a list of hidden layers sizes before user embed layer
        # number of hidden layers is equal to the length of this list
        "hidden_layers_sizes_pre_dial": [],
        # a list of hidden layers sizes before bot embed layer
        # number of hidden layers is equal to the length of this list
        "hidden_layers_sizes_bot": [],
        # number of units in transformer
        "transformer_size": 128,
        # number of transformer layers
        "num_transformer_layers": 1,
        # type of positional encoding in transformer
        "pos_encoding": "timing",  # string 'timing' or 'emb'
        # max sequence length if pos_encoding='emb'
        "max_seq_length": 256,
        # number of attention heads in transformer
        "num_heads": 4,
        # training parameters
        # initial and final batch sizes:
        # batch size will be linearly increased for each epoch
        "batch_size": [8, 32],
        # how to create batches
        "batch_strategy": "balanced",  # string 'sequence' or 'balanced'
        # number of epochs
        "epochs": 1,
        # set random seed to any int to get reproducible results
        "random_seed": None,
        # embedding parameters
        # dimension size of embedding vectors
        "embed_dim": 20,
        # the number of incorrect labels, the algorithm will minimize
        # their similarity to the user input during training
        "num_neg": 20,
        # the type of the similarity
        "similarity_type": "auto",  # string 'auto' or 'cosine' or 'inner'
        # the type of the loss function
        "loss_type": "softmax",  # string 'softmax' or 'margin'
        # how similar the algorithm should try
        # to make embedding vectors for correct labels
        "mu_pos": 0.8,  # should be 0.0 < ... < 1.0 for 'cosine'
        # maximum negative similarity for incorrect labels
        "mu_neg": -0.2,  # should be -1.0 < ... < 1.0 for 'cosine'
        # flag if minimize only maximum similarity over incorrect labels
        "use_max_sim_neg": True,
        # scale loss inverse proportionally to confidence of correct prediction
        "scale_loss": True,
        # regularization
        # the scale of L2 regularization
        "C2": 0.001,
        # the scale of how important is to minimize the maximum similarity
        # between embeddings of different labels
        "C_emb": 0.8,
        # dropout rate for dial nn
        "droprate_a": 0.1,
        # dropout rate for bot nn
        "droprate_b": 0.0,
        # visualization of accuracy
        # how often calculate validation accuracy
        "evaluate_every_num_epochs": 20,  # small values may hurt performance
        # how many examples to use for hold out validation set
        "evaluate_on_num_examples": 0,  # large values may hurt performance
    }
    # end default properties (DOC MARKER - don't remove)
    @staticmethod
    def _standard_featurizer(max_history: Optional[int] = None) -> "TrackerFeaturizer":
        """Return the default featurizer: full-dialogue when no max_history is
        given, otherwise a max-history featurizer."""
        if max_history is None:
            return FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer())
        else:
            return MaxHistoryTrackerFeaturizer(
                LabelTokenizerSingleStateFeaturizer(), max_history=max_history
            )
    def __init__(
        self,
        featurizer: Optional["TrackerFeaturizer"] = None,
        priority: int = DEFAULT_POLICY_PRIORITY,
        graph: Optional["tf.Graph"] = None,
        session: Optional["tf.Session"] = None,
        user_placeholder: Optional["tf.Tensor"] = None,
        bot_placeholder: Optional["tf.Tensor"] = None,
        similarity_all: Optional["tf.Tensor"] = None,
        pred_confidence: Optional["tf.Tensor"] = None,
        similarity: Optional["tf.Tensor"] = None,
        dial_embed: Optional["tf.Tensor"] = None,
        bot_embed: Optional["tf.Tensor"] = None,
        all_bot_embed: Optional["tf.Tensor"] = None,
        attention_weights: Optional["tf.Tensor"] = None,
        max_history: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Declare instance variables with default values.

        The tf.* arguments are non-None only when rebuilding a persisted
        policy via `load`; a freshly constructed policy builds them in `train`.
        """
        if not featurizer:
            featurizer = self._standard_featurizer(max_history)
        super().__init__(featurizer, priority)

        self._load_params(**kwargs)

        # encode all label_ids with numbers
        self._encoded_all_label_ids = None

        # tf related instances
        self.graph = graph
        self.session = session
        self.a_in = user_placeholder
        self.b_in = bot_placeholder
        self.sim_all = similarity_all
        self.pred_confidence = pred_confidence
        self.sim = similarity

        # persisted embeddings
        self.dial_embed = dial_embed
        self.bot_embed = bot_embed
        self.all_bot_embed = all_bot_embed

        self.attention_weights = attention_weights

        # internal tf instances (created during training)
        self._iterator = None
        self._train_op = None
        self._is_training = None
    def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None:
        """Read network-architecture and training-schedule settings from config."""
        self.hidden_layers_sizes = {
            "pre_dial": config["hidden_layers_sizes_pre_dial"],
            "bot": config["hidden_layers_sizes_bot"],
        }
        self.pos_encoding = config["pos_encoding"]
        self.max_seq_length = config["max_seq_length"]
        self.num_heads = config["num_heads"]

        self.transformer_size = config["transformer_size"]
        self.num_transformer_layers = config["num_transformer_layers"]

        self.batch_size = config["batch_size"]
        self.batch_strategy = config["batch_strategy"]

        self.epochs = config["epochs"]

        self.random_seed = config["random_seed"]
    def _load_embedding_params(self, config: Dict[Text, Any]) -> None:
        """Read embedding/similarity settings and resolve 'auto' similarity."""
        self.embed_dim = config["embed_dim"]
        self.num_neg = config["num_neg"]

        self.similarity_type = config["similarity_type"]
        self.loss_type = config["loss_type"]
        # 'auto' pairs the similarity with the loss:
        # softmax loss -> inner product, margin loss -> cosine
        if self.similarity_type == "auto":
            if self.loss_type == "softmax":
                self.similarity_type = "inner"
            elif self.loss_type == "margin":
                self.similarity_type = "cosine"

        self.mu_pos = config["mu_pos"]
        self.mu_neg = config["mu_neg"]
        self.use_max_sim_neg = config["use_max_sim_neg"]

        self.scale_loss = config["scale_loss"]
    def _load_regularization_params(self, config: Dict[Text, Any]) -> None:
        """Read L2/embedding regularization scales and dropout rates."""
        self.C2 = config["C2"]
        self.C_emb = config["C_emb"]
        self.droprate = {"bot": config["droprate_b"], "dial": config["droprate_a"]}
    def _load_visual_params(self, config: Dict[Text, Any]) -> None:
        """Read validation-accuracy evaluation settings."""
        self.evaluate_every_num_epochs = config["evaluate_every_num_epochs"]
        # a non-positive value means: evaluate only once, at the last epoch
        if self.evaluate_every_num_epochs < 1:
            self.evaluate_every_num_epochs = self.epochs
        self.evaluate_on_num_examples = config["evaluate_on_num_examples"]
    def _load_params(self, **kwargs: Dict[Text, Any]) -> None:
        """Merge kwargs over `defaults` and distribute them to attributes."""
        config = copy.deepcopy(self.defaults)
        config.update(kwargs)

        self._tf_config = train_utils.load_tf_config(config)
        self._load_nn_architecture_params(config)
        self._load_embedding_params(config)
        self._load_regularization_params(config)
        self._load_visual_params(config)
# data helpers
# noinspection PyPep8Naming
    @staticmethod
    def _label_ids_for_Y(data_Y: "np.ndarray") -> "np.ndarray":
        """Prepare Y data for training: extract label_ids.

        Assumes data_Y is one-hot over actions in its last axis — argmax
        recovers the integer ids.
        """
        return data_Y.argmax(axis=-1)
# noinspection PyPep8Naming
    def _label_features_for_Y(self, label_ids: "np.ndarray") -> "np.ndarray":
        """Prepare Y data for training: features for label_ids.

        Looks up each id in the precomputed `_encoded_all_label_ids` table.
        """
        if len(label_ids.shape) == 2:  # full dialogue featurizer is used
            return np.stack(
                [
                    np.stack(
                        [
                            self._encoded_all_label_ids[label_idx]
                            for label_idx in seq_label_ids
                        ]
                    )
                    for seq_label_ids in label_ids
                ]
            )
        else:  # max history featurizer is used
            return np.stack(
                [self._encoded_all_label_ids[label_idx] for label_idx in label_ids]
            )
# noinspection PyPep8Naming
    def _create_session_data(
        self, data_X: "np.ndarray", data_Y: Optional["np.ndarray"] = None
    ) -> "train_utils.SessionDataType":
        """Combine all tf session related data into dict."""
        if data_Y is not None:
            # training time
            label_ids = self._label_ids_for_Y(data_Y)
            Y = self._label_features_for_Y(label_ids)
            # explicitly add last dimension to label_ids
            # to track correctly dynamic sequences
            label_ids = np.expand_dims(label_ids, -1)
        else:
            # prediction time: no labels are available
            label_ids = None
            Y = None

        return {
            "dialogue_features": [data_X],
            "bot_features": [Y],
            "action_ids": [label_ids],
        }
    def _create_tf_bot_embed(self, b_in: "tf.Tensor") -> "tf.Tensor":
        """Create embedding bot vector."""
        # feed-forward stack before the embedding projection
        b = train_utils.create_tf_fnn(
            b_in,
            self.hidden_layers_sizes["bot"],
            self.droprate["bot"],
            self.C2,
            self._is_training,
            layer_name_suffix="bot",
        )
        return train_utils.create_tf_embed(
            b, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="bot"
        )
    def _create_tf_dial(self, a_in) -> Tuple["tf.Tensor", "tf.Tensor"]:
        """Create dialogue level embedding and mask."""
        # mask different length sequences
        # if there is at least one `-1` it should be masked
        mask = tf.sign(tf.reduce_max(self.a_in, -1) + 1)

        a = train_utils.create_tf_fnn(
            a_in,
            self.hidden_layers_sizes["pre_dial"],
            self.droprate["dial"],
            self.C2,
            self._is_training,
            layer_name_suffix="pre_dial",
        )

        self.attention_weights = {}
        hparams = train_utils.create_t2t_hparams(
            self.num_transformer_layers,
            self.transformer_size,
            self.num_heads,
            self.droprate["dial"],
            self.pos_encoding,
            self.max_seq_length,
            self._is_training,
        )

        a = train_utils.create_t2t_transformer_encoder(
            a, mask, self.attention_weights, hparams, self.C2, self._is_training
        )

        if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer):
            # pick last label if max history featurizer is used
            a = a[:, -1:, :]
            mask = mask[:, -1:]

        dial_embed = train_utils.create_tf_embed(
            a, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="dial"
        )

        return dial_embed, mask
    def _build_tf_train_graph(self) -> Tuple["tf.Tensor", "tf.Tensor"]:
        """Build train graph using iterator."""
        # iterator returns a_in, b_in, action_ids
        self.a_in, self.b_in, _ = self._iterator.get_next()

        if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer):
            # add time dimension if max history featurizer is used
            self.b_in = self.b_in[:, tf.newaxis, :]

        # constant tensor holding the encoded features of every possible action
        all_bot_raw = tf.constant(
            self._encoded_all_label_ids, dtype=tf.float32, name="all_bot_raw"
        )

        self.dial_embed, mask = self._create_tf_dial(self.a_in)

        self.bot_embed = self._create_tf_bot_embed(self.b_in)
        self.all_bot_embed = self._create_tf_bot_embed(all_bot_raw)

        return train_utils.calculate_loss_acc(
            self.dial_embed,
            self.bot_embed,
            self.b_in,
            self.all_bot_embed,
            all_bot_raw,
            self.num_neg,
            mask,
            self.loss_type,
            self.mu_pos,
            self.mu_neg,
            self.use_max_sim_neg,
            self.C_emb,
            self.scale_loss,
        )
# prepare for prediction
    def _create_tf_placeholders(
        self, session_data: "train_utils.SessionDataType"
    ) -> None:
        """Create placeholders for prediction."""
        dialogue_len = None  # use dynamic time
        self.a_in = tf.placeholder(
            dtype=tf.float32,
            shape=(None, dialogue_len, session_data["dialogue_features"][0].shape[-1]),
            name="a",
        )
        self.b_in = tf.placeholder(
            dtype=tf.float32,
            shape=(None, dialogue_len, None, session_data["bot_features"][0].shape[-1]),
            name="b",
        )
    def _build_tf_pred_graph(
        self, session_data: "train_utils.SessionDataType"
    ) -> "tf.Tensor":
        """Rebuild tf graph for prediction."""
        self._create_tf_placeholders(session_data)

        self.dial_embed, mask = self._create_tf_dial(self.a_in)

        # similarity of the dialogue embedding against every possible action
        self.sim_all = train_utils.tf_raw_sim(
            self.dial_embed[:, :, tf.newaxis, :],
            self.all_bot_embed[tf.newaxis, tf.newaxis, :, :],
            mask,
        )

        self.bot_embed = self._create_tf_bot_embed(self.b_in)

        self.sim = train_utils.tf_raw_sim(
            self.dial_embed[:, :, tf.newaxis, :], self.bot_embed, mask
        )

        return train_utils.confidence_from_sim(self.sim_all, self.similarity_type)
# training methods
    def train(
        self,
        training_trackers: List["DialogueStateTracker"],
        domain: "Domain",
        **kwargs: Any,
    ) -> None:
        """Train the policy on given training trackers."""
        logger.debug("Started training embedding policy.")

        # set numpy random seed
        np.random.seed(self.random_seed)

        # dealing with training data
        training_data = self.featurize_for_training(training_trackers, domain, **kwargs)

        # encode all label_ids with policies' featurizer
        state_featurizer = self.featurizer.state_featurizer
        self._encoded_all_label_ids = state_featurizer.create_encoded_all_actions(
            domain
        )

        # check if number of negatives is less than number of label_ids
        logger.debug(
            "Check if num_neg {} is smaller "
            "than number of label_ids {}, "
            "else set num_neg to the number of label_ids - 1"
            "".format(self.num_neg, domain.num_actions)
        )
        # noinspection PyAttributeOutsideInit
        self.num_neg = min(self.num_neg, domain.num_actions - 1)

        # extract actual training data to feed to tf session
        session_data = self._create_session_data(training_data.X, training_data.y)

        if self.evaluate_on_num_examples:
            # hold out a validation set for accuracy reporting
            session_data, eval_session_data = train_utils.train_val_split(
                session_data,
                self.evaluate_on_num_examples,
                self.random_seed,
                label_key="action_ids",
            )
        else:
            eval_session_data = None

        self.graph = tf.Graph()
        with self.graph.as_default():
            # set random seed in tf
            tf.set_random_seed(self.random_seed)

            # allows increasing batch size
            batch_size_in = tf.placeholder(tf.int64)

            (
                self._iterator,
                train_init_op,
                eval_init_op,
            ) = train_utils.create_iterator_init_datasets(
                session_data,
                eval_session_data,
                batch_size_in,
                self.batch_strategy,
                label_key="action_ids",
            )

            self._is_training = tf.placeholder_with_default(False, shape=())

            loss, acc = self._build_tf_train_graph()

            # define which optimizer to use
            self._train_op = tf.train.AdamOptimizer().minimize(loss)

            # train tensorflow graph
            self.session = tf.Session(config=self._tf_config)
            train_utils.train_tf_dataset(
                train_init_op,
                eval_init_op,
                batch_size_in,
                loss,
                acc,
                self._train_op,
                self.session,
                self._is_training,
                self.epochs,
                self.batch_size,
                self.evaluate_on_num_examples,
                self.evaluate_every_num_epochs,
            )

            # rebuild the graph for prediction
            self.pred_confidence = self._build_tf_pred_graph(session_data)

            self.attention_weights = train_utils.extract_attention(
                self.attention_weights
            )
    def continue_training(
        self,
        training_trackers: List["DialogueStateTracker"],
        domain: "Domain",
        **kwargs: Any,
    ) -> None:
        """Continue training an already trained policy."""
        batch_size = kwargs.get("batch_size", 5)
        epochs = kwargs.get("epochs", 50)

        with self.graph.as_default():
            for _ in range(epochs):
                training_data = self._training_data_for_continue_training(
                    batch_size, training_trackers, domain
                )

                session_data = self._create_session_data(
                    training_data.X, training_data.y
                )
                train_dataset = train_utils.create_tf_dataset(
                    session_data, batch_size, label_key="action_ids"
                )
                train_init_op = self._iterator.make_initializer(train_dataset)
                self.session.run(train_init_op)

                # fit to one extra example using updated trackers;
                # run batches until the dataset iterator is exhausted
                while True:
                    try:
                        self.session.run(
                            self._train_op, feed_dict={self._is_training: True}
                        )
                    except tf.errors.OutOfRangeError:
                        break
    def tf_feed_dict_for_prediction(
        self, tracker: "DialogueStateTracker", domain: "Domain"
    ) -> Dict["tf.Tensor", "np.ndarray"]:
        """Create feed dictionary for tf session."""
        # noinspection PyPep8Naming
        data_X = self.featurizer.create_X([tracker], domain)
        session_data = self._create_session_data(data_X)

        return {self.a_in: session_data["dialogue_features"][0]}
    def predict_action_probabilities(
        self, tracker: "DialogueStateTracker", domain: "Domain"
    ) -> List[float]:
        """Predict the next action the bot should take.

        Return the list of probabilities for the next actions.
        """
        if self.session is None:
            logger.error(
                "There is no trained tf.session: "
                "component is either not trained or "
                "didn't receive enough training data"
            )
            # uniform zeros: no prediction possible without a trained session
            return [0.0] * domain.num_actions

        tf_feed_dict = self.tf_feed_dict_for_prediction(tracker, domain)

        confidence = self.session.run(self.pred_confidence, feed_dict=tf_feed_dict)

        # confidences of the last dialogue turn for the single tracker
        return confidence[0, -1, :].tolist()
    def persist(self, path: Text) -> None:
        """Persists the policy to a storage."""
        if self.session is None:
            warnings.warn(
                "Method `persist(...)` was called "
                "without a trained model present. "
                "Nothing to persist then!"
            )
            return

        self.featurizer.persist(path)

        meta = {"priority": self.priority}
        meta_file = os.path.join(path, "embedding_policy.json")
        rasa.utils.io.dump_obj_as_json_to_file(meta_file, meta)

        file_name = "tensorflow_embedding.ckpt"
        checkpoint = os.path.join(path, file_name)
        rasa.utils.io.create_directory_for_file(checkpoint)

        with self.graph.as_default():
            # register tensors in the graph's collections so `load` can
            # retrieve them by name after import_meta_graph
            train_utils.persist_tensor("user_placeholder", self.a_in, self.graph)
            train_utils.persist_tensor("bot_placeholder", self.b_in, self.graph)

            train_utils.persist_tensor("similarity_all", self.sim_all, self.graph)
            train_utils.persist_tensor(
                "pred_confidence", self.pred_confidence, self.graph
            )
            train_utils.persist_tensor("similarity", self.sim, self.graph)

            train_utils.persist_tensor("dial_embed", self.dial_embed, self.graph)
            train_utils.persist_tensor("bot_embed", self.bot_embed, self.graph)
            train_utils.persist_tensor("all_bot_embed", self.all_bot_embed, self.graph)

            train_utils.persist_tensor(
                "attention_weights", self.attention_weights, self.graph
            )

            saver = tf.train.Saver()
            saver.save(self.session, checkpoint)

        with open(os.path.join(path, file_name + ".tf_config.pkl"), "wb") as f:
            pickle.dump(self._tf_config, f)
    @classmethod
    def load(cls, path: Text) -> "EmbeddingPolicy":
        """Loads a policy from the storage.

        **Needs to load its featurizer**
        """
        if not os.path.exists(path):
            raise Exception(
                "Failed to load dialogue model. Path '{}' "
                "doesn't exist".format(os.path.abspath(path))
            )

        featurizer = TrackerFeaturizer.load(path)

        file_name = "tensorflow_embedding.ckpt"
        checkpoint = os.path.join(path, file_name)

        if not os.path.exists(checkpoint + ".meta"):
            # no tf checkpoint was persisted: return an untrained policy
            return cls(featurizer=featurizer)

        meta_file = os.path.join(path, "embedding_policy.json")
        meta = json.loads(rasa.utils.io.read_file(meta_file))

        with open(os.path.join(path, file_name + ".tf_config.pkl"), "rb") as f:
            _tf_config = pickle.load(f)

        graph = tf.Graph()
        with graph.as_default():
            session = tf.Session(config=_tf_config)
            saver = tf.train.import_meta_graph(checkpoint + ".meta")

            saver.restore(session, checkpoint)

            # retrieve the tensors registered by `persist`
            a_in = train_utils.load_tensor("user_placeholder")
            b_in = train_utils.load_tensor("bot_placeholder")

            sim_all = train_utils.load_tensor("similarity_all")
            pred_confidence = train_utils.load_tensor("pred_confidence")
            sim = train_utils.load_tensor("similarity")

            dial_embed = train_utils.load_tensor("dial_embed")
            bot_embed = train_utils.load_tensor("bot_embed")
            all_bot_embed = train_utils.load_tensor("all_bot_embed")

            attention_weights = train_utils.load_tensor("attention_weights")

        return cls(
            featurizer=featurizer,
            priority=meta["priority"],
            graph=graph,
            session=session,
            user_placeholder=a_in,
            bot_placeholder=b_in,
            similarity_all=sim_all,
            pred_confidence=pred_confidence,
            similarity=sim,
            dial_embed=dial_embed,
            bot_embed=bot_embed,
            all_bot_embed=all_bot_embed,
            attention_weights=attention_weights,
        )
| msamogh/rasa-frames | rasa/core/policies/embedding_policy.py | embedding_policy.py | py | 24,298 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "tensorflow.contrib",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rasa.core.policies.policy.Policy",
"line_number": 31,
"usage_type": "name"
},
{
"api_na... |
6301255873 | from flask import current_app, g, Flask, flash, jsonify, redirect, render_template, request, session, Response
import logging
import sqlite3
import json
import requests
from db import DB, KeyNotFound, BadRequest
import datetime
# Configure application
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Needed to flash messages
app.secret_key = b'mEw6%7APK'
# path to database
DATABASE = 'splatDB.sqlite3'
# default path
@app.route('/')
def home():
    """Render the landing page."""
    return render_template("home.html")


# hello world
@app.route('/hello')
def hello_world():
    """Simple liveness check that returns a JSON greeting."""
    data = {"message": "Hello, World!"}
    return jsonify(data)
# -----------------
# Create/Read Endpoints
# These JSON/REST api endpoints are used to add new records
# and return lookups based on Ids
# -------------------
# creates required table for application.
# note having a web endpoint for this is not a standard approach, but used for quick testing
@app.route('/create', methods=["GET"])
def create_tables():
    """
    Drops existing tables and creates new tables
    from the DDL in schema/create.sql.

    NOTE(review): exposing schema resets over HTTP is for quick testing
    only, as the comment above this route already warns.
    """
    db = DB(get_db_conn())
    return db.create_db('schema/create.sql')
@app.route('/artist', methods=["POST"])
def add_artist():
    """
    Adds a new artist from the JSON request body.
    Returns 201 on success, 400 on a missing body.
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_artist(post_body)
    except BadRequest as e:
        # Client-side error reported by the DB layer; surface its code.
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)


@app.route('/album', methods=["POST"])
def add_album():
    """
    Adds a new album from the JSON request body.
    Returns 201 on success, 400 on a missing body.
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_album(post_body)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)


@app.route('/songs', methods=["POST"])
def add_song():
    """
    Loads a new appearance of song
    (and possibly a new song) into the database.
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_song_ms2(post_body)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)


@app.route('/playlists', methods=["POST"])
def add_playlist():
    """
    Adds a new playlist, with a list of ordered songs
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_playlist(post_body)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)


@app.route("/playcount", methods=["POST"])
def add_play():
    """
    Add a play count detail.
    Must have "play_count", "song_id", "date".
    May have a source as either a "playlist_id" OR "album_id" OR NEITHER (indicates a direct play)
    NOTE: Can have multiple calls for the same song, date, and source (playlist,album,or none)
    """
    post_body = request.json
    if not post_body:
        logging.error("No post body")
        return Response(status=400)
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        db.add_play(post_body)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=e.error_code)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=201)
@app.route('/songs/<song_id>', methods=["GET"])
def find_song(song_id):
    """
    Returns a song's info
    (song_id, name, length, artist name, album name) based on song_id
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_song(song_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    # NOTE(review): unreachable -- every path above returns or raises.
    # Same pattern applies to each GET handler below.
    return Response(status=400)


@app.route('/songs/by_album/<album_id>', methods=["GET"])
def find_songs_by_album(album_id):
    """
    Returns all an album's songs
    (song_id, name, length, artist name, album name) based on album_id
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_songs_by_album(album_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/songs/by_artist/<artist_id>', methods=["GET"])
def find_songs_by_artist(artist_id):
    """
    Returns all an artists' songs
    (song_id, name, length, artist name, album name) based on artist_id
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_songs_by_artist(artist_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/albums/<album_id>', methods=["GET"])
def find_album(album_id):
    """
    Returns an album's info
    (album_id, album_name, release_year).
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_album(album_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/albums/by_artist/<artist_id>', methods=["GET"])
def find_album_by_artist(artist_id):
    """
    Returns all albums by the given artist
    (album_id, album_name, release_year).
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_album_by_artist(artist_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/artists/<artist_id>', methods=["GET"])
def find_artist(artist_id):
    """
    Returns an artist's info
    (artist_id, artist_name, country).
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.find_artist(artist_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)
# -----------------
# Analytics Endpoints
# These JSON/REST api endpoints are used to run analysis
# over the dataset and calculate an aggregated answer
# -------------------
@app.route('/analytics/artists/avg_song_length/<artist_id>', methods=["GET"])
def avg_song_length(artist_id):
    """
    Returns the average length of an artist's songs (artist_id, avg_length)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.avg_song_length(artist_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    # NOTE(review): unreachable -- all paths above return or raise.
    return Response(status=400)


@app.route('/analytics/artists/cnt_singles/<artist_id>', methods=["GET"])
def cnt_singles(artist_id):
    """
    Returns the number of singles an artist has (artist_id, cnt_single)
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.cnt_singles(artist_id)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/analytics/artists/top_length/<num_artists>', methods=["GET"])
def top_length(num_artists):
    """
    Returns top (n=num_artists) artists based on total length of songs
    (artist_id, total_length).
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.top_length(num_artists)
        return jsonify(res)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/analytics/solo_albums', methods=["GET"])
def solo_albums():
    """
    Returns an array/list of album_ids where the album
    and all songs are by the same single artist_id
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        res = db.solo_albums()
        return jsonify(res)
    except BadRequest as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)
# Convert a YYYY-MM-DD string to a datetime.date. Raises BadRequest if not in the right format
# or if not a valid date.
# From https://stackoverflow.com/questions/53460391/passing-a-date-as-a-url-parameter-to-a-flask-route
# Better function exists in 3.7+ adding this to support 3.6+
def to_date(date_string):
    """Parse a YYYY-MM-DD string into a datetime.date.

    Raises BadRequest when the string is not a well-formed, valid date.
    """
    try:
        parsed = datetime.datetime.strptime(date_string, "%Y-%m-%d")
    except ValueError:
        raise BadRequest('{} is not valid date in the format YYYY-MM-DD'.format(date_string))
    return parsed.date()
@app.route('/analytics/playcount/top_song/<date_string>', methods=["GET"])
def top_song(date_string):
    """
    Get the top song played on a given date
    The test data does not account for ties/ have ties. if you want to break them use song_id ascending.
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        # to_date() raises BadRequest (-> 400) for malformed dates.
        check_date = to_date(date_string)
        res = db.top_song(check_date)
        return jsonify(res)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    # NOTE(review): unreachable -- all paths above return or raise.
    return Response(status=400)


@app.route('/analytics/playcount/top_source/<song_id>/<date_string>', methods=["GET"])
def top_source(song_id, date_string):
    """
    For a given song and date, return the source that contributed to the most plays
    This could be a given playlist_id, a given album_id, or None (a direct play)
    The test data does not account for ties.
    If you want to want to account for ties, give all sources that have the same (top) play_count
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        check_date = to_date(date_string)
        res = db.top_source(song_id, check_date)
        return jsonify(res)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)


@app.route('/analytics/playcount/top_country/<date_string>', methods=["GET"])
def top_country(date_string):
    """
    This is an extra credit MS that for a given date , it gives the country
    with the most play
    """
    # get DB class with new connection
    db = DB(get_db_conn())
    try:
        check_date = to_date(date_string)
        res = db.top_country(check_date)
        return jsonify(res)
    except BadRequest as e:
        raise InvalidUsage(e.message, status_code=400)
    except KeyNotFound as e:
        print(e)
        raise InvalidUsage(e.message, status_code=404)
    except sqlite3.Error as e:
        print(e)
        raise InvalidUsage(str(e))
    return Response(status=400)
# -----------------
# Web APIs
# These simply wrap requests from the website/browser and
# invoke the underlying REST / JSON API.
# -------------------
# paste in a query
@app.route('/web/query', methods=["GET", "POST"])
def query():
    """
    Runs a raw SQL query pasted into the web form and renders the result.
    """
    data = None
    if request.method == "POST":
        qry = request.form.get("query")
        # Ensure query was submitted
        # get DB class with new connection
        db = DB(get_db_conn())
        # note DO NOT EVER DO THIS NORMALLY (run SQL from a client/web directly)
        # https://xkcd.com/327/
        try:
            res = db.run_query(str(qry))
        except sqlite3.Error as e:
            print(e)
            return render_template("error.html", errmsg=str(e), errcode=400)
        data = res
    return render_template("query.html", data=data)


# post JSON data to one of the REST endpoints via the web form
@app.route('/web/post_data', methods=["GET", "POST"])
def post_song_web():
    """
    Forwards pasted JSON to the REST endpoint named in the form's
    "path" field and flashes the outcome.
    """
    data = None
    if request.method == "POST":
        parameter = request.form.get("path")
        if parameter is None or parameter.strip() == "":
            flash("Must set key")
            return render_template("post_data.html", data=data)
        get_url = "http://127.0.0.1:5000/%s" % parameter
        print("Making request to %s" % get_url)
        # grab the response
        j = json.loads(request.form.get("json_data").strip())
        print("Json from form: %s" % j)
        r = requests.post(get_url, json=j)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        else:
            flash("Ran post command")
            return render_template("post_data.html", data=None)
    return render_template("post_data.html", data=None)


@app.route('/web/create', methods=["GET"])
def create_web():
    """Web wrapper around the /create schema-reset endpoint."""
    get_url = "http://127.0.0.1:5000/create"
    print("Making request to %s" % get_url)
    # grab the response
    r = requests.get(get_url)
    if r.status_code >= 400:
        print("Error. %s Body: %s" % (r, r.content))
        return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
    else:
        flash("Ran create command")
    data = r.json()
    return render_template("home.html", data=data)
@app.route('/web/songs', methods=["GET", "POST"])
def song_landing():
    """Web wrapper for the /songs/* read endpoints."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        parameter = request.form.get("parameter")
        if parameter is None or parameter.strip() == "":
            flash("Must set key")
            return render_template("songs.html", data=data)
        get_url = "http://127.0.0.1:5000/songs/" + path + parameter
        print("Making request to %s" % get_url)
        # grab the response
        r = requests.get(get_url)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        else:
            data = r.json()
    return render_template("songs.html", data=data)


@app.route('/web/artists', methods=["GET", "POST"])
def artists_landing():
    """Web wrapper for the /artists/* read endpoints."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        # Ensure path was submitted
        parameter = request.form.get("parameter")
        if parameter is None or parameter.strip() == "":
            flash("Must set key")
            return render_template("artists.html", data=data)
        get_url = "http://127.0.0.1:5000/artists/" + path + parameter
        # grab the response
        r = requests.get(get_url)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        else:
            data = r.json()
    return render_template("artists.html", data=data)


@app.route('/web/albums', methods=["GET", "POST"])
def albums_landing():
    """Web wrapper for the /albums/* read endpoints."""
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        # Ensure path was submitted
        parameter = request.form.get("parameter")
        if parameter is None or parameter.strip() == "":
            flash("Must set key")
            return render_template("albums.html", data=data)
        get_url = "http://127.0.0.1:5000/albums/" + path + parameter
        # grab the response
        r = requests.get(get_url)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        else:
            data = r.json()
    return render_template("albums.html", data=data)


@app.route('/web/analytics', methods=["GET", "POST"])
def analytics_landing():
    """Web wrapper for the /analytics/* endpoints.

    Builds the backend URL from the selected path: solo_albums takes no
    argument, top_song/top_country take a date, top_source takes a song id
    plus a date, and everything else takes a single parameter.
    """
    data = None
    if request.method == "POST":
        path = request.form.get("path")
        # Ensure path was submitted
        if path == "solo_albums":
            get_url = "http://127.0.0.1:5000/analytics/" + path
        elif path == "playcount/top_song/" or path == "playcount/top_country/":
            date = request.form.get("date")
            if date is None or date.strip() == "":
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = "http://127.0.0.1:5000/analytics/" + path + date
        elif path == "playcount/top_source/":
            parameter = request.form.get("parameter")
            if parameter is None or parameter.strip() == "":
                flash("Must set key")
                return render_template("analytics.html", data=data)
            parameter2 = request.form.get("parameter2")
            if parameter2 is None or parameter2.strip() == "":
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = "http://127.0.0.1:5000/analytics/" + path + parameter + '/' + parameter2
        else:
            parameter = request.form.get("parameter")
            if parameter is None or parameter.strip() == "":
                flash("Must set key")
                return render_template("analytics.html", data=data)
            get_url = "http://127.0.0.1:5000/analytics/" + path + parameter
        # grab the response
        print(get_url)
        r = requests.get(get_url)
        if r.status_code >= 400:
            print("Error. %s Body: %s" % (r, r.content))
            return render_template("error.html", errmsg=r.json(), errcode=r.status_code)
        else:
            data = r.json()
    return render_template("analytics.html", data=data)
# -----------------
# Utilities / Errors
# -------------------
# gets connection to database
def get_db_conn():
    """Return the per-app-context sqlite3 connection, creating it on first use."""
    db = getattr(g, '_database', None)
    if db is None:
        # Cache the connection on flask.g so it is reused within a request
        # and closed by close_connection() at app-context teardown.
        db = g._database = sqlite3.connect(DATABASE)
    return db
# Error Class for managing Errors
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional payload dict."""

    # Default HTTP status used when none is supplied to the constructor.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        if status_code is not None:
            # Shadow the class-level default on this instance only.
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the error as a dict suitable for jsonify()."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    """Convert an InvalidUsage exception into a JSON error response."""
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
# called on close of response; closes db connection
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped DB connection when the app context tears down."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
# ########### post MS1 ############## #
| sarikam1/DatabaseModelling | Database Modelling/server/app.py | app.py | py | 20,869 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "db.DB",
"line_num... |
36633682423 | # I think there can be something cool to come from this eventually, I'm bad at picking colors
import json
from json import JSONDecoder
class ColorSchemaDeserializationError(Exception):
    """Raised when a color schema cannot be deserialized from JSON."""
    pass
class ColorSchema:
    """A named, ordered palette of colors addressable by index.

    Build it either empty (pass ``name``) or from the JSON document produced
    by ``serialize_json()`` (pass ``serialized_json``). Indices past the end
    of the palette wrap around.
    """

    def __init__(self, name: str = None, serialized_json: str = None):
        # BUG FIX: always initialize the palette. The original only set
        # _color_table in the ``name`` branch, so deserialization crashed
        # with AttributeError on the first append.
        self._color_table = []
        if serialized_json:
            try:
                data = json.loads(serialized_json)
                self._name = data["colorSchema"]["identifier"]
                for each_rgb_tuple in data["colorSchema"]["rgb_values"]:
                    # Store colors internally as 6-digit lowercase hex.
                    hex_code = "%02x%02x%02x" % (tuple(each_rgb_tuple))
                    self._color_table.append(hex_code)
            except (json.JSONDecodeError, KeyError, TypeError, ValueError):
                # BUG FIX: the original had ``except JSONDecoder:``, which is
                # a decoder class, not an exception type; json.loads raises
                # json.JSONDecodeError. Bad document shape raises
                # KeyError/TypeError/ValueError instead of IndexError.
                raise ColorSchemaDeserializationError
            except IndexError:
                raise ColorSchemaDeserializationError
        else:
            self._name = name

    def _get_color_for_index(self, index):
        """Return the hex string at ``index``, wrapping past the end."""
        if index < len(self._color_table):
            return self._color_table[index]
        else:
            # NOTE(review): still raises ZeroDivisionError on an empty
            # palette, as the original did.
            corrected_index = index % len(self._color_table)
            return self._color_table[corrected_index]

    def hex_code(self, index: int) -> str:
        """Hex color string ('rrggbb') for ``index``."""
        return self._get_color_for_index(index)

    def rgb(self, index: int) -> tuple:
        """(red, green, blue) integer tuple for ``index``."""
        rgb = self._get_color_for_index(index)
        red = int(rgb[0:2], 16)
        green = int(rgb[2:4], 16)
        blue = int(rgb[4:6], 16)
        return red, green, blue

    def __str__(self) -> str:
        if self._name:
            return self._name
        else:
            return "DEFAULT"

    def serialize_json(self) -> str:
        """Serialize as the JSON document accepted by the constructor."""
        output = {
            "colorSchema": {"identifier": str(self),
                            "rgb_values": []}
        }
        for x in range(len(self._color_table)):
            output["colorSchema"]["rgb_values"].append(self.rgb(x))
        return json.dumps(output)
class DefaultColorSchema(ColorSchema):
    """Built-in fallback palette named "DEFAULT"."""

    def __init__(self):
        super().__init__(name="DEFAULT")
        # BUG FIX: the original list was missing a comma after the first
        # "ff5959", so Python concatenated it with "4f9da6" into a single
        # invalid 12-character entry (and the palette had 14 entries, not 15).
        colors = ["233142",
                  "facf5a",
                  "ff5959",
                  "4f9da6",
                  "022c43",
                  "ffd700",
                  "115173",
                  "053f5e",
                  "3a9679",
                  "fabc60",
                  "11144c",
                  "085f63",
                  "49beb7",
                  "facf5a",
                  "ff5959"]
        self._color_table.extend(colors)
| picoevent/picoevent | PicoEvent/ColorSchema.py | ColorSchema.py | py | 2,536 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.JSONDecoder",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 58,
"usage_type": "call"
}
] |
22815747066 | # This figure requires the grackle chemistry and cooling library.
# Visit grackle.readthedocs.org for more information.
from matplotlib import pyplot
from utilities.testing import *
from pygrackle.grackle_wrapper import *
from pygrackle.fluid_container import FluidContainer
from utilities.api import \
setup_fluid_container, \
calculate_mean_molecular_weight, \
calculate_hydrogen_number_density, \
set_cosmology_units, \
get_cooling_units
from utilities.primordial_equilibrium import \
total_cooling, \
nHI, nHII, nHeI, nHeII, nHeIII, ne
from utilities.physical_constants import \
mass_hydrogen_cgs, \
sec_per_Myr, \
sec_per_Gyr, \
cm_per_mpc
# --- Grackle chemistry setup -------------------------------------------------
# Primordial (H/He only) chemistry; cooling rates are computed but not
# applied to the energy field (with_radiative_cooling = 0).
my_chem = chemistry_data()
my_chem.use_chemistry = 1
my_chem.with_radiative_cooling = 0
my_chem.primordial_chemistry = 1
my_chem.metal_cooling = 0
my_chem.UVbackground = 0
my_chem.include_metal_heating = 0
# Proper-frame unit system with densities in units of the hydrogen mass.
my_chem.comoving_coordinates = 0
my_chem.a_units = 1.0
my_chem.density_units = mass_hydrogen_cgs
my_chem.length_units = 1.0
my_chem.time_units = 1.0
my_chem.velocity_units = my_chem.length_units / my_chem.time_units

current_redshift = 0.0

# Evolve the fluid container until the species fractions converge to
# ionization equilibrium.
fc = setup_fluid_container(my_chem, current_redshift=current_redshift,
                           converge=True, tolerance=1e-6, max_iterations=np.inf,
                           dt=(0.0001 * sec_per_Myr / my_chem.time_units))

calculate_temperature(fc)
a = 1.0 / (1.0 + current_redshift) / my_chem.a_units
calculate_cooling_time(fc, a)

# Sort everything by temperature so the curves plot monotonically in T.
t_sort = np.argsort(fc["temperature"])
t_cool = fc["cooling_time"][t_sort] * my_chem.time_units
my_T = fc["temperature"][t_sort]
my_nH = calculate_hydrogen_number_density(my_chem, fc).mean()
# Cooling rate normalized by nH^2 [erg s^-1 cm^3].
cooling_rate = fc["energy"][t_sort] / t_cool * fc["density"] * \
    my_chem.density_units / my_nH**2

# Analytic equilibrium cooling for comparison: default rates and
# rates='cen' (Cen 1992).
eq_cooling = total_cooling(my_T, my_nH) / my_nH**2
eq_cooling_cen = total_cooling(my_T, my_nH, rates='cen') / my_nH**2

# --- Figure layout -----------------------------------------------------------
fontsize = 14
n_rows = 1
n_columns = 1
# blank space between edge of figure and active plot area
top_buffer = 0.03
bottom_buffer = 0.1
left_buffer = 0.12
right_buffer = 0.03
# blank space between plots
hor_buffer = 0.05
vert_buffer = 0.05
# calculate the height and width of each panel
panel_width = ((1.0 - left_buffer - right_buffer -
                ((n_columns-1)*hor_buffer)) / n_columns)
panel_height = ((1.0 - top_buffer - bottom_buffer -
                 ((n_rows-1)*vert_buffer)) / n_rows)

# create a figure (figsize is in inches)
pyplot.figure()

### Cooling figure
# dashed: analytic equilibrium, solid: grackle result, dotted: Cen rates.
axes = pyplot.axes((left_buffer, bottom_buffer,
                    panel_width, panel_height))
axes.loglog(my_T, eq_cooling, color='black', alpha=0.7,
            linestyle="--", linewidth=1.5)
axes.loglog(my_T, cooling_rate, color='black', alpha=0.7,
            linestyle="-", linewidth=1)
axes.loglog(my_T, eq_cooling_cen, color='black', alpha=0.7,
            linestyle=":", linewidth=1.5)
axes.xaxis.set_label_text('T [K]', fontsize=fontsize)
axes.yaxis.set_label_text('$\\Lambda$ / n${_{\\rm H}}^{2}$ [erg s$^{-1}$ cm$^{3}$]',
                          fontsize=fontsize)
axes.set_xlim(1e4, 1e9)
axes.set_ylim(1e-26, 2e-22)
tick_labels = axes.xaxis.get_ticklabels() + \
    axes.yaxis.get_ticklabels()
for tick_label in tick_labels:
    tick_label.set_size(fontsize)
pyplot.savefig('cooling.png')
pyplot.savefig('cooling.pdf')
pyplot.savefig('cooling.eps')
pyplot.clf()

### Ionization balance figure
# Line styles: dashed = analytic equilibrium, dotted = Cen rates,
# solid = grackle fluid-container fractions.
axes = pyplot.axes((left_buffer, bottom_buffer,
                    panel_width, panel_height))
# Plot H ions
axes.loglog(my_T, (nHI(my_T, my_nH) /
                   (nHI(my_T, my_nH) +
                    nHII(my_T, my_nH))),
            color="#B82E00", alpha=0.7, linestyle="--", linewidth=1.5)
axes.loglog(my_T, (nHII(my_T, my_nH) /
                   (nHI(my_T, my_nH) +
                    nHII(my_T, my_nH))),
            color="#B88A00", alpha=0.7, linestyle="--", linewidth=1.5)
axes.loglog(my_T, (nHI(my_T, my_nH, rates='cen') /
                   (nHI(my_T, my_nH, rates='cen') +
                    nHII(my_T, my_nH, rates='cen'))),
            color="#B82E00", alpha=0.7, linestyle=":", linewidth=1.5)
axes.loglog(my_T, (nHII(my_T, my_nH, rates='cen') /
                   (nHI(my_T, my_nH, rates='cen') +
                    nHII(my_T, my_nH, rates='cen'))),
            color="#B88A00", alpha=0.7, linestyle=":", linewidth=1.5)
axes.loglog(my_T, (fc["HI"] / (fc["HI"] + fc["HII"])),
            label="HI", color="#B82E00", alpha=0.7, linestyle="-", linewidth=1.)
axes.loglog(my_T, (fc["HII"] / (fc["HI"] + fc["HII"])),
            label="HII", color="#B88A00", alpha=0.7, linestyle="-", linewidth=1.)
# Plot He ions
axes.loglog(my_T, (nHeI(my_T, my_nH) /
                   (nHeI(my_T, my_nH) +
                    nHeII(my_T, my_nH) +
                    nHeIII(my_T, my_nH))),
            color="#002EB8", alpha=0.7, linestyle="--", linewidth=1.5)
axes.loglog(my_T, (nHeII(my_T, my_nH) /
                   (nHeI(my_T, my_nH) +
                    nHeII(my_T, my_nH) +
                    nHeIII(my_T, my_nH))),
            color="#008AB8", alpha=0.7, linestyle="--", linewidth=1.5)
axes.loglog(my_T, (nHeIII(my_T, my_nH) /
                   (nHeI(my_T, my_nH) +
                    nHeII(my_T, my_nH) +
                    nHeIII(my_T, my_nH))),
            color="#00B88A", alpha=0.7, linestyle="--", linewidth=1.5)
axes.loglog(my_T, (nHeI(my_T, my_nH, rates='cen') /
                   (nHeI(my_T, my_nH, rates='cen') +
                    nHeII(my_T, my_nH, rates='cen') +
                    nHeIII(my_T, my_nH, rates='cen'))),
            color="#002EB8", alpha=0.7, linestyle=":", linewidth=1.5)
axes.loglog(my_T, (nHeII(my_T, my_nH, rates='cen') /
                   (nHeI(my_T, my_nH, rates='cen') +
                    nHeII(my_T, my_nH, rates='cen') +
                    nHeIII(my_T, my_nH, rates='cen'))),
            color="#008AB8", alpha=0.7, linestyle=":", linewidth=1.5)
axes.loglog(my_T, (nHeIII(my_T, my_nH, rates='cen') /
                   (nHeI(my_T, my_nH, rates='cen') +
                    nHeII(my_T, my_nH, rates='cen') +
                    nHeIII(my_T, my_nH, rates='cen'))),
            color="#00B88A", alpha=0.7, linestyle=":", linewidth=1.5)
axes.loglog(my_T, (fc["HeI"] / (fc["HeI"] + fc["HeII"] + fc["HeIII"])),
            label="HeI", color="#002EB8", alpha=0.7, linestyle="-", linewidth=1.)
axes.loglog(my_T, (fc["HeII"] / (fc["HeI"] + fc["HeII"] + fc["HeIII"])),
            label="HeII", color="#008AB8", alpha=0.7, linestyle="-", linewidth=1.)
axes.loglog(my_T, (fc["HeIII"] / (fc["HeI"] + fc["HeII"] + fc["HeIII"])),
            label="HeIII", color="#00B88A", alpha=0.7, linestyle="-", linewidth=1.)
axes.xaxis.set_label_text('T [K]', fontsize=fontsize)
axes.yaxis.set_label_text('fraction', fontsize=fontsize)
axes.set_xlim(1e4, 1e9)
axes.set_ylim(1e-10, 1)
tick_labels = axes.xaxis.get_ticklabels() + \
    axes.yaxis.get_ticklabels()
for tick_label in tick_labels:
    tick_label.set_size(fontsize)
axes.legend(loc='best', prop=dict(size=fontsize))
pyplot.savefig('fractions.png')
pyplot.savefig('fractions.pdf')
pyplot.savefig('fractions.eps')
| enzo-project/enzo-method-paper-ApJ-2014 | test_problems/IonizationBalance/enzo_paper_equilibrium_figure.py | enzo_paper_equilibrium_figure.py | py | 7,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utilities.physical_constants.mass_hydrogen_cgs",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "utilities.api.setup_fluid_container",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "utilities.physical_constants.sec_per_Myr",
"line_number": ... |
12781939308 | """
This file contains a function to get the coordinates from the GPS
"""
import serial
import time
import string
import pynmea2
#GPS gets coordinates
#UART communication protocols
def getCoordinates():
    """Read one NMEA sentence from the GPS over UART and return [lat, lng].

    Returns None when the sentence read is not a $GPRMC fix (implicit
    fall-through, matching the original contract).
    """
    port = "/dev/ttyAMA0"
    ser = serial.Serial(port, baudrate=9600, timeout=0.5)
    try:
        # BUG FIX: readline() returns bytes on Python 3, so the original
        # ``newdata[0:6] == "$GPRMC"`` compared bytes to str and was always
        # False. Decode before comparing/parsing.
        newdata = ser.readline().decode("ascii", errors="replace")
        if newdata[0:6] == "$GPRMC":
            newmsg = pynmea2.parse(newdata)
            lat = newmsg.latitude
            lng = newmsg.longitude
            gps = [lat, lng]
            return gps
    finally:
        # Release the serial port instead of leaking a handle per call.
        # (The unused NMEAStreamReader from the original was dropped.)
        ser.close()
| JanPat/i-dont-know-go | GPS.py | GPS.py | py | 481 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pynmea2.NMEAStreamReader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pynmea2.parse",
"line_number": 18,
"usage_type": "call"
}
] |
24903490945 | # Importing the Pillow library
from PIL import Image, ImageDraw, ImageFont
import os
def text_wrapper(text, font, max_width):
    """Wrap ``text`` into newline-separated lines no wider than ``max_width``.

    Width is measured with ``font.getlength``. A single word longer than
    ``max_width`` gets a line of its own; every packed line keeps the
    trailing space produced while appending words (original behavior).
    Adapted from Eyong Kevin's gist.
    """
    # Fast path: the whole text already fits on one line.
    if font.getlength(text) <= max_width:
        return text

    words = text.split(' ')
    lines = []
    i = 0
    while i < len(words):
        current = ''
        # Pack as many words as fit, each followed by a space.
        while i < len(words) and font.getlength(current + words[i]) <= max_width:
            current += words[i] + " "
            i += 1
        if not current:
            # Over-long single word: emit it on its own line, unwrapped.
            current = words[i]
            i += 1
        lines.append(f"{current}")
    return "\n".join(lines)
def get_colors_from_colorcombo_image(colorcombo_path, colorcombo_outline_path):
    """Sample the palette colors out of a color-combination image pair.

    Returns (top_bg, top_font, bottom_bg, bottom_font, outline), where the
    outline color is re-packed with a fixed alpha of 190.
    """
    colorcombo_image = Image.open(colorcombo_path)
    colorcombo_outline = Image.open(colorcombo_outline_path)
    # BUG FIX: Image.convert() returns a NEW image; the original discarded
    # the result, so pixels could be sampled from a non-RGBA (e.g. paletted)
    # image and come back in the wrong representation.
    colorcombo_image = colorcombo_image.convert("RGBA")
    get_pixel_color = colorcombo_image.load()
    # The combo image encodes four swatches at fixed x offsets on row y=20.
    color_top = get_pixel_color[100, 20]
    color_top_font = get_pixel_color[500, 20]
    color_bottom = get_pixel_color[1000, 20]
    color_bottom_font = get_pixel_color[1500, 20]
    get_pixel_color = colorcombo_outline.load()
    color_outline = get_pixel_color[10, 10]
    color_outline = (color_outline[0], color_outline[1], color_outline[2], 190)
    return color_top, color_top_font, color_bottom, color_bottom_font, color_outline
def create_cover(thumbnail_author_path, video_thumbnail_path, author, title, colorcombo, sender, color_number, outline):
    """Compose a 1280x1650 ebook cover from a theme, thumbnails and text.

    The cover is built from a coloured top band (author name), the video
    thumbnail, and a coloured bottom band (title), optionally framed with an
    outline rectangle.  *sender* selects how/where the result is saved:
    "epub" saves tmp/cover.png, "thumbnail" saves a shrunken
    tmp/cover_thumbnail.png, anything else falls into the test-only branch
    controlled by *color_number*.

    NOTE(review): the "epub" branch saves but returns None, and in the else
    branch a *color_number* outside {0..4, 10, 42} leaves `path` unbound
    before `return path` -- confirm callers never hit those cases.
    """
    # Cover size = 1280 x 1650
    colorcombo_path = f"resources/color_combinations/{colorcombo}"
    colorcombo_outline_path = f"resources/color_combinations/{colorcombo[:6]}_outline.png"
    colors_list = get_colors_from_colorcombo_image(colorcombo_path, colorcombo_outline_path)
    myfont = ImageFont.truetype("resources/Ubuntu-R.ttf", 50)
    # Base canvas plus the two coloured text bands.
    image_cover = Image.new("RGBA", (1280, 1650), "white")
    image_top = Image.new("RGBA", (1280, 465), color=colors_list[0])
    image_icon = Image.open(thumbnail_author_path).convert("RGBA")
    image_thumbnail = Image.open(video_thumbnail_path).convert("RGBA")
    image_bottom = Image.new("RGBA", (1280, 465), color=colors_list[2])
    image_top_draw = ImageDraw.Draw(image_top)
    image_bottom_draw = ImageDraw.Draw(image_bottom)
    # FIXME: Implement text resizing based on height of the text.
    image_top_draw.text((100, 320), text_wrapper(author, myfont, 1000), font=myfont, fill=colors_list[1])
    image_bottom_draw.multiline_text((100, 100), text_wrapper(title, myfont, 1130), font=myfont, fill=colors_list[4])
    # Assemble the bands and thumbnails onto the canvas, top to bottom.
    image_cover.paste(image_top, (0, 0))
    image_cover.paste(image_icon, (100, 100))
    image_cover.paste(image_thumbnail, (0, 465))
    image_cover.paste(image_bottom, (0, 720 + 465))
    # FIXME, add this outside. When implementing cover editor
    proper_senders = ["epub", "showcover", "thumbnail"]
    if sender in proper_senders:
        outline_rectangle = (colors_list[4][0], colors_list[4][1], colors_list[4][2], 190)
    else:
        if color_number < 4:
            outline_rectangle = (colors_list[color_number][0], colors_list[color_number][1], colors_list[color_number][2], 190)
        else:
            outline_rectangle = (colors_list[4][0], colors_list[4][1], colors_list[4][2], 190)
    if outline:
        # Draw the frame on a transparent layer and composite it on top.
        layer_rectangle = Image.new("RGBA", image_cover.size, color=(0, 0, 0, 0))
        ImageDraw.Draw(layer_rectangle).rectangle((50, 50, 1230, 1600), outline=outline_rectangle, width=13)
        final_cover = Image.alpha_composite(image_cover, layer_rectangle)
    else:
        final_cover = image_cover
    # FIXME: Change from arbitray number "ifs" to more readeable form
    if sender == "epub":
        path = f"tmp/cover.png"
        final_cover.save(path, "PNG")
    elif sender == "thumbnail":
        final_cover.thumbnail((330,425))
        path = f"tmp/cover_thumbnail.png"
        final_cover.save(path, "PNG")
        return path
    else:
        # this should be used by cover.py for testing purposes only
        if color_number < 4:
            path = f"tmp/cover{colorcombo[:6]}{color_number}.png"
            final_cover.save(path, "PNG")
        elif color_number == 10:
            path = f"tmp/cover{colorcombo[:6]}{color_number}.png"
            final_cover.save(path, "PNG")
        elif color_number == 42:
            final_cover.thumbnail((256,330))
            path = f"tmp/{colorcombo[:6]}_thumbnail.png"
            final_cover.save(path, "PNG")
        elif color_number == 5:
            # This is for testing purposes only
            print("imhere")
            path = f"tmp/cover_thumbnail.png"
            final_cover.save(path, "PNG")
        return path
def generate_all(type_of_ouput):
    """Render test covers for every colour-combination theme on disk.

    type_of_ouput selects the variant: "thumbnails" renders thumbnail-sized
    covers, "all" renders one cover per base colour of each theme, and
    "outline" renders one outlined cover per theme file.
    """
    author_thumbnail_path = "resources/test_graphics/author_thumbnail_test.png"
    video_thumbnail_path = "resources/test_graphics/thumbnail_test.png"
    author = "This is a test of Author Name"
    title = "This is just a Title Test Case with some words when you fill out the Youtube URL field you will see " \
            "different result"
    path = "resources/color_combinations"
    color_combinations = [0, 1, 2, 3]
    for colorcombo in os.listdir(path):
        # The *_outline.png helper files are not themes themselves -- skip.
        if "outline" in colorcombo:
            continue
        if type_of_ouput == "thumbnails":
            create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", 42, True)
        elif type_of_ouput == "all":
            for color in color_combinations:
                create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", color, True)
        elif type_of_ouput == "outline":
            create_cover(author_thumbnail_path, video_thumbnail_path, author, title, colorcombo, "epub", 5, True)
def main():
    """Entry point: regenerate test covers for every theme.

    generate_all() accepts "thumbnails" (thumbnail-sized covers),
    "outline" (one outlined cover per theme file using its *_outline.png
    colour), or "all" (one cover per base colour of every theme, i.e.
    number_of_colors * number_of_combinations output files).
    """
    generate_all("all")
# Run the cover generator when executed as a script.
if __name__ == "__main__":
    main()
| maticahacic/transcript_to_ebook | cover.py | cover.py | py | 6,794 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
17140780409 | # Creates a basic BLE Advertiser
# Check the Advertiser.py class for more info
import sys
import time
from datetime import datetime, timedelta
from Advertiser import Advertiser
# Advertise
def main():
    """Advertise as 'Pi Range Tester 2' over BLE for 10 minutes, then stop."""
    started = datetime.now()
    print("Started at: %s" % started)
    advertiser = Advertiser()
    # Only advertise if the BLE stack came up successfully.
    if advertiser.setupBleStack('Pi Range Tester 2', interval=1000):
        advertiser.enableAdvertisements(en=True)
        time.sleep(600)  # advertise for 600 seconds
        advertiser.enableAdvertisements(en=False)
    print("Stopped at: %s" % datetime.now())
# Program start
# Program start
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Only report CTRL-C when it actually happened; the original printed
        # this message from `finally`, i.e. even after a normal run.
        print("CTRL-C Detected")
    finally:
        print("Stopped at: %s" % datetime.now())
        sys.exit()
| horace1024/ble | run_adv.py | run_adv.py | py | 900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "Advertiser.Advertiser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sl... |
6797316241 | from django.utils.translation import ugettext_lazy as _
from utils.faker_factory import faker
from ..mails import BaseMailView
class ProjectsMemberRoleChangedMailView(BaseMailView):
    """Mail sent to a project member when their role in the project changes.
    """
    # Template rendered as the mail body.
    template_name = 'mails/projects/member_role_changed.html'
    # Context keys callers must supply when sending this mail.
    mandatory_mail_args = [
        'name',
        'user_name',
        'roles',
        'public_url',
    ]
    subject = _('Your role has changed: You are now acting as %(roles)s in %(name)s')
    section = 'projects'

    def get_mock_data(self, optional=True):
        """Return example context for previewing/testing this mail.

        NOTE(review): *optional* is accepted for interface compatibility but
        is not used in this implementation.
        """
        mock_data = {
            'name': '[Project Name]',
            'roles': ['ExO Head Coach', 'Observer'],
            'user_name': '[Name]',
            'disable_notification_url': None,
            'public_url': '/{}'.format(faker.uri_path()),
        }
        return mock_data
| tomasgarzon/exo-services | service-exo-mail/mail/mailviews/projects_member_role_changed.py | projects_member_role_changed.py | py | 818 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mails.BaseMailView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utils.faker_factory.faker.uri_path",
"line_number": 28,
"usage_type": "call"
}... |
38665630567 | import matplotlib.pyplot as plt
import numpy as np
from sympy import *
x = Symbol('x')
#! INPUT
# Print the program banner (Vietnamese): fixed-point iteration method for
# f(x) = 0; inputs are f(x), the root-isolation interval (a, b) and the
# tolerance SaiSoE.
print("""
Bắt đầu chương trình
Đây là phương pháp lặp đơn
INPUT
f(x) với f(x)=0
khoảng phân li nghiệm (a,b)
Và thêm SaiSoE đã cho
INPUT
""")
def Phi(x):
    """Iteration function for the fixed-point method: Phi(x) = -x**3 / 3.

    (A previous experiment used 1/(x - 1.5) instead.)
    """
    # ** is exponentiation; the -1/3 coefficient is kept as a float division
    # so results match the original expression exactly.
    return (-1/3) * x ** 3
# Root-isolation interval [a, b] and the given tolerance.
a = -2
b = 2
SaiSoE = 10
print(f"""
INPUT
{Phi(x)} , \tvới f(x)=0
khoảng phân li nghiệm (a,b) = ({a},{b})
Và thêm SaiSoE đã cho: SaiSoE = {SaiSoE}
INPUT
""")
#! INPUT
#! KIỂM TRA ĐIỀU KIỆN PP
print(f"""
#! Điều kiện của phương pháp lặp đơn là:
1. Hàm Phi(x) co <=> 0<q<1 (q=MAX_đạo hàm Phi(x))
""")
# Symbolic derivative Phi'(x); q approximates its maximum over [a, b]
# sampled at 1000 points.
dao_ham_Phi= diff(Phi(x),x)
print(dao_ham_Phi)
x_ve_hinh=np.linspace(a,b,1000)
y_ve_hinh=[dao_ham_Phi.subs(x,i) for i in x_ve_hinh]
# NOTE(review): the contraction condition usually requires q = max |Phi'(x)|;
# this takes the max of the *signed* derivative values -- confirm intended.
q=max(y_ve_hinh)
print(f"q = {q}\n Xét 0<q<1 => ",end=" ")
# Contraction holds iff 0 < q < 1 (prints Vietnamese "Đúng"/"Sai" = true/false).
if(0<q and q<1):
    print(f"Đúng")
else:
    print(f"Sai")
# Plot Phi'(x) over [a, b] for visual inspection.
plt.plot(x_ve_hinh,y_ve_hinh)
plt.show()
#! END OF METHOD-CONDITION CHECK
print("Kết thúc chương trình")
| buihuyhau/HocLaiGTS | 2LapDon/a.py | a.py | py | 1,044 | python | vi | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
36343463761 | from django.shortcuts import render,redirect
from django.core.paginator import Paginator
from siteSettings.models import SiteSettings
from django.contrib import messages
from users.models import User
from carts.models import *
from .models import *
def courses_page(request):
    """List all active courses, nine per page.

    Anonymous visitors are sent to the login page; users without a verified
    phone go to the verification page; blocked users are redirected to the
    contact page with an error message (Persian).
    """
    if not request.user.is_authenticated:
        return redirect('account:login_page')
    if request.user.phone_authentication != True:
        return redirect('account:code_page')
    user_info = User.objects.filter(username=request.user.username).first()
    if user_info.block == True:
        messages.error(request, 'اکانت شما مسدود شده است لطفا با پشتیبانی تماس بگیرید')
        return redirect('contact:contactus_page')
    course_list = Courses.objects.filter(CourseStatus=True).all()
    paginator = Paginator(course_list, 9)
    current_page = paginator.get_page(request.GET.get('page'))
    settings = SiteSettings.objects.last()
    title = settings.title + ' ' + '-' + ' ' + 'دوره ها'
    return render(request, 'courses/courses_page/coursesPage.html', {
        'title': title,
        'posts': current_page,
    })
def ShowCourse_page(request,id):
    """Show one course: its videos, intro video, and whether it was purchased.

    Same access gating as courses_page: login required, phone must be
    verified, blocked users are redirected with an error.
    NOTE(review): *id* shadows the builtin; also the purchase lookup below is
    not scoped to the requesting user -- confirm that is intended.
    """
    if request.user.is_authenticated:
        if request.user.phone_authentication != True:
            return redirect('account:code_page')
        else:
            user_info = User.objects.filter(username=request.user.username).first()
            if user_info.block == True:
                messages.error(request, 'اکانت شما مسدود شده است لطفا با پشتیبانی تماس بگیرید')
                return redirect('contact:contactus_page')
    else: return redirect('account:login_page')
    # status flags whether any paid purchase record exists for this course.
    cart = Course.objects.filter(course_id=id,is_paid=True).first()
    status = False
    if cart is not None:
        status = True
    videos = Videos.objects.filter(course_id=id).all()
    videos_count = Videos.objects.filter(course_id=id).count()
    Introduction_Video = IntroductionVideo.objects.filter(course_id=id).last()
    course = Courses.objects.filter(id=id).first()
    settings = SiteSettings.objects.last()
    title = settings.title + ' ' + '-' + ' ' + f'{course.CourseName}'
    context = {
        'title': title,
        'status': status,
        'videos': videos,
        'videos_count': videos_count,
        'keyWord': course.keyWord,
        'Introduction_Video': Introduction_Video,
        'course': course,
    }
    return render(request,'courses/showCourse_page/show_course.html',context)
{
"api_name": "django.shortcuts.redirect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "users.models.User.objects.filter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "users.models.User.objects",
"line_number": 16,
"usage_type": "attribute"
},
... |
40954037620 | import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
def extract_features(word_list):
    """Map every word in *word_list* to True -- the bag-of-words feature
    format expected by NLTK's NaiveBayesClassifier."""
    return {word: True for word in word_list}
# Train a Naive Bayes sentiment classifier on the NLTK movie_reviews corpus,
# report accuracy and informative features, then classify sample reviews.
if __name__=='__main__':
    # Load positive and negative reviews
    positive_fileids = movie_reviews.fileids('pos')
    negative_fileids = movie_reviews.fileids('neg')
    # Each datapoint is (bag-of-words feature dict, label).
    features_positive = [(extract_features(movie_reviews.words(fileids=[f])),
                          'Positive') for f in positive_fileids]
    features_negative = [(extract_features(movie_reviews.words(fileids=[f])),
                          'Negative') for f in negative_fileids]
    # Split the data into train and test (80/20)
    threshold_factor = 0.8
    # threshold_factor = 0.7
    # threshold_factor = 0.5
    threshold_positive = int(threshold_factor * len(features_positive))
    threshold_negative = int(threshold_factor * len(features_negative))
    features_train = features_positive[:threshold_positive] + features_negative[:threshold_negative]
    features_test = features_positive[threshold_positive:] + features_negative[threshold_negative:]
    print("\nNumber of training datapoints:", len(features_train))
    print("Number of test datapoints:", len(features_test))
    # Train a Naive Bayes classifier
    classifier = NaiveBayesClassifier.train(features_train)
    print("\nAccuracy of the classifier:", nltk.classify.util.accuracy(classifier, features_test))
    print("\nTop 10 most informative words:")
    for item in classifier.most_informative_features()[:10]:
        print(item[0])
    # Sample input reviews
    # input_reviews = [
    #     "It is an amazing movie",
    #     "This is a dull movie. I would never recommend it to anyone.",
    #     "The cinematography is pretty great in this movie",
    #     "The direction was terrible and the story was all over the place"
    # ]
    # NOTE: "COMMA" tokens below are literal placeholders in the source data.
    input_reviews = [
        "This was easy to put together. It is sturdy ad a perfect fit for my daughter's room. We have one drawer that sticks a little COMMA but it works.",
        "I loved the look of this dresser in the store and decided to take the plunge. It was a major project to assemble (6-ish hours for one relatively handy person without power tools) COMMA but the finished product looks great and stores a ton of clothes! The directions could definitely be a little clearer on assembling the middle divider pieces COMMA which looks wrong even when done correctly and the dimples in the wood for orientation look like holes in the instructions. I couldn't get two of the four screws that connect the front face to the dresser top to go in (screws too short or holes not quite aligned) COMMA but thankfully there were many other points of attachment and it's not at all obvious that they're missing. And mine came with metal (not plastic) cam locks COMMA which is a good thing. Great buy!",
        "We were very disappointed to realize that the hemnes set in white is made of mostly particle board. We were under the impression that the all the hemnes line was made of solid wood. After further investigation it seems as though all the dressers are made of wood except the white ones. Not sure why this is and is very misleading",
        "I not only purchased the dresser but I bought the matching chest I'd drawers. The pieces took a while to put together but they are worth the time. Great product."
    ]
    print("\nPredictions:")
    for review in input_reviews:
        print("\nReview:", review)
        # Classify the review's bag-of-words and report the top label + prob.
        probdist = classifier.prob_classify(extract_features(review.split()))
        pred_sentiment = probdist.max()
        print("Predicted sentiment:", pred_sentiment)
        print("Probability:", round(probdist.prob(pred_sentiment), 2))
| pkankariya/CS5560_Knowledge_Discovery_Management | ICP_10/Sentiment_Analysis.py | Sentiment_Analysis.py | py | 3,729 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.movie_reviews.fileids",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.movie_reviews",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.movie_reviews.fileids",
"line_number": 11,
"usage_type": "call"
... |
33639010391 | '''
main driver file. Responsible for handling user inputs and displaying current GameState
'''
import pygame as p
from PIL import Image
import ChessEngine
# Initialise pygame before any display/image calls.
p.init()
WIDTH = HEIGHT = 512 # square window; divisible by 8
DIMENSION = 8 # Chess boards are 8x8
SQ_SIZE = HEIGHT // DIMENSION # pixel size of one board square
# Piece code -> scaled sprite surface; filled in by loadImages().
IMAGES = dict.fromkeys(['wR', 'wN', 'wB', 'wQ', 'wK', 'wp', 'bR', 'bN', 'bB', 'bQ', 'bK', 'bp'])
SET_FPS = 15 # frame-rate cap for the main loop
def loadImages ():
    """Populate the global IMAGES dict with piece sprites scaled to one square.

    Call once before the main loop; afterwards IMAGES['wp'] etc. hold
    ready-to-blit pygame surfaces.
    """
    for key in list (IMAGES.keys()): # Use a list instead of a view
        # Load from the repo-relative images/ directory instead of the
        # original hard-coded absolute path, which only worked on one machine.
        IMAGES[key] = p.transform.scale(p.image.load('images/{}.png'
                                                     .format(key)), (SQ_SIZE, SQ_SIZE))
def main():
    """Run the game loop: handle mouse/keyboard input, apply valid moves to
    the GameState, and redraw the board each frame at SET_FPS."""
    screen = p.display.set_mode((WIDTH, HEIGHT))
    clock = p.time.Clock()
    screen.fill(p.Color('White'))
    loadImages()
    gs = ChessEngine.GameState()
    validMoves = gs.getValidMoves()
    moveMade = False # flag for when a move is made
    sqSelected = () # Empty tuple which will save the position of the last square selected by user
    playerClicks = [] # List of two tuples tracking the clicks eg: [(6,4),(4,4)]
    running = True
    while running:
        for e in p.event.get():
            if e.type == p.QUIT:
                running = False
            elif e.type ==p.MOUSEBUTTONDOWN:
                # Convert the pixel position into board (row, col).
                location = p.mouse.get_pos()
                col = location[0]//SQ_SIZE
                row = location[1]//SQ_SIZE
                if sqSelected == (row, col): # If the same square is selected twice, it:
                    sqSelected = () # Deselects the square
                    playerClicks = [] # Resets playerClicks
                else:
                    sqSelected = (row, col)
                    playerClicks.append(sqSelected)
                if len(playerClicks)==2: # This indicates that 2 clicks have been made (done after second click)
                    move = ChessEngine.Move(playerClicks[0],playerClicks[1], gs.board)
                    if move in validMoves:
                        print(move.getChessNotation())
                        gs.makeMove(move)
                        moveMade = True
                        sqSelected = ()
                        playerClicks =[]
                    else:
                        # Illegal target: keep the latest click as the new selection.
                        playerClicks = [sqSelected]
            # Key handles
            elif e.type == p.KEYDOWN:
                if e.key == p.K_z: # Undo the move made
                    gs.undoMove()
                    moveMade = True
        # Recompute legal moves only after the position actually changed.
        if moveMade:
            validMoves = gs.getValidMoves()
            moveMade = False
        drawGameState(screen, gs)
        clock.tick(SET_FPS)
        p.display.flip()
def drawGameState(screen, gs):
    """Render the current game state: board squares first, then the pieces."""
    drawBoard(screen)
    drawPieces(screen, gs.board)
'''
drawBoard is going to draw just the board without the pieces.
*Note to self*
Top left square of the chess board is always white irrespective of which colour you're playing with
'''
def drawBoard(screen):
    """Paint the 8x8 checkerboard; the top-left square is always light."""
    palette = (p.Color('white'), p.Color('grey'))
    for row in range(DIMENSION):
        for col in range(DIMENSION):
            # Light squares are those whose coordinates sum to an even number.
            square_colour = palette[(row + col) % 2]
            p.draw.rect(screen, square_colour,
                        p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))
'''
drawPieces is going to draw the pieces on the board given the current GameState
'''
def drawPieces(screen, board):
    """Blit each piece sprite onto its square; '--' marks an empty square."""
    for r in range(DIMENSION):
        for c in range(DIMENSION):
            piece = board[r][c]
            if piece != '--':
                screen.blit(IMAGES[piece], p.Rect(c*SQ_SIZE, r*SQ_SIZE, SQ_SIZE, SQ_SIZE))
# Start the game when run as a script.
if __name__ == '__main__':
    main()
| aaliixii/Chess-Engine | ChessMain.py | ChessMain.py | py | 3,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.transform.scale",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.... |
41786469041 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 20:10:01 2018
@author: Suveen
"""
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
# html.parser is the HTML parser included in the standard Python 3 library.
# information on other HTML parsers is here:
# http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser
soup = BeautifulSoup(html, "html.parser")
# Retrieve all of the <span> tags (despite the original "anchor" comment).
tags = soup('span')
count = 0
com_sum = 0
numlist= list()
# Sum every integer found inside any <span> tag and remember each value.
for tag in tags:
    # Look at the parts of a tag
    count= count+1
    str_tag= str(tag)
   # print('TAg : ',tag)
   # print('String', str_tag)
    stuff=re.findall('[0-9]+',str_tag)
    for k in range(len(stuff)):
        num = float(stuff[k])
        com_sum = com_sum+ num
       # print (num)
        numlist.append(num)
#print(numlist)
print('Count ',count)
print('Sum ', int(com_sum))
{
"api_name": "ssl.create_default_context",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ssl.CERT_NONE",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "... |
21421821259 | from flask import Flask,render_template,request
from pymongo import MongoClient
import subprocess as sp
import os
# Local MongoDB server used to persist registrations.
mongo_server_url="mongodb://127.0.0.1:27017"
client = MongoClient(mongo_server_url)
app = Flask("chat app")
# Database and collection names used by the submit handler.
db ="lw"
collection="flask"
# IP and port this web app listens on (0.0.0.0 = all interfaces).
port=80
hostname="0.0.0.0"
@app.route('/')
def home():
    """Serve the registration form, first clearing any leftover playbook
    variable files from a previous submission."""
    for var_file in ("names.yml", "mobiles.yml", "emails.yml"):
        # Truncate in place; "r+" requires the file to already exist,
        # matching the original behaviour.
        handle = open(var_file, "r+")
        handle.truncate(0)
        handle.close()
    return render_template("index.html")
@app.route('/sumbitted' , methods=['POST'])
def sumbitted():
    """Handle the registration POST: store the record in MongoDB, write the
    submitted values into YAML variable files, then run the Ansible mail
    playbook.  Renders response.html on success, err.html otherwise.
    """
    name = request.form.get("name")
    mobile = request.form.get("mobile")
    email = request.form.get("email")
    # Persist the submission.  NOTE(review): Collection.insert() is
    # deprecated (removed in PyMongo 4); insert_one() is the replacement.
    client[db][collection].insert({"name": name , "mobile": mobile , "email": email})
    # Append each field as a "key: value" line for the Ansible playbook
    # (same file contents as before; unused locals removed, files now
    # closed via context managers).
    for fname, key, value in (
        ("names.yml", "name", name),
        ("mobiles.yml", "mobile", mobile),
        ("emails.yml", "email", email),
    ):
        with open(fname, "a") as handle:
            handle.write("{}: ".format(key))
            handle.write(value)
    # Run the mail playbook; getstatusoutput returns (exit_code, output).
    m = sp.getstatusoutput("ansible-playbook mail.yml --vault-password-file password.txt")
    exit_code = m[0]
    if exit_code == 0:
        return render_template("response.html")
    return render_template("err.html")
app.run(debug=True , port=port , host=hostname)
| MDMOQADDAS/RegistrationApp | app.py | app.py | py | 1,857 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request.for... |
11352936054 |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
def train_random_forest():
    """Train a 100-tree random forest on bag-of-centroids features and write
    test-set sentiment predictions to BagOfCentroids.csv.

    NOTE(review): `train_centroids`, `test_centroids`, `train` and `test`
    are not defined in this module -- they are presumably globals set up by
    the surrounding pipeline; confirm before calling this.
    """
    # ****** Fit a random forest and extract predictions
    #
    forest = RandomForestClassifier(n_estimators = 100)
    # Fitting the forest may take a few minutes
    print("Fitting a random forest to labeled training data...")
    forest = forest.fit(train_centroids, train["sentiment"])
    result = forest.predict(test_centroids)
    # Write the test results
    output = pd.DataFrame(data={"id": test["id"], "sentiment":result})
    # quoting=3 is csv.QUOTE_NONE.
    output.to_csv("BagOfCentroids.csv", index=False, quoting=3)
    print("Wrote BagOfCentroids.csv")
| electronick1/stairs_examples | bag_of_words/bag_of_words/model.py | model.py | py | 644 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
}
] |
23944279921 | import pyaudio
import numpy as np
from core.tools.buffer import AudioBuffer
class PyAudio():
    """Thin wrapper around a pyaudio.PyAudio client plus a single microphone
    input stream.

    NOTE(review): the class name shadows pyaudio.PyAudio, which is confusing
    at call sites -- consider renaming.
    """
    def __init__(self,verbose=True):
        """Instantiate the pyaudio client; the input stream itself is opened
        later by stream_start().
        """
        self.input_stream = None # set by stream_start(): mic input stream via pyaudio
        self.client=pyaudio.PyAudio()
        self.verbose=verbose
    def stream_read(self,chunk):
        """Read *chunk* frames from the open input stream and return them as
        a numpy int16 array.
        """
        data = np.frombuffer(self.input_stream.read(chunk),dtype=np.int16)
        return data
    def stream_start(self,rate,chunk):
        """Open a mono 16-bit input stream at *rate* Hz with *chunk* frames
        per buffer.
        """
        if self.verbose:
            print(" -- stream started")
        self.input_stream=self.client.open(format=pyaudio.paInt16,channels=1,
                            rate=rate,input=True,
                            frames_per_buffer=chunk)
    def stream_stop(self):
        """Stop and close the input stream but keep the PyAudio client alive
        so a new stream can be started.
        """
        self.input_stream.stop_stream()
        self.input_stream.close()
        if self.verbose:
            print("Programatically closing stream")
| AlexKingsland/GuitarGuru | core/tools/pythonaudio.py | pythonaudio.py | py | 1,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyaudio.PyAudio",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.paInt16",
... |
17981838498 | #!/usr/bin/python3
import requests
import subprocess # to execute bash commands
import sys
# Ensure the html2text package is present by piping `dpkg -s html2text`
# through `grep Status` and checking for the installed marker.
try:
    check_for_package = subprocess.Popen(("dpkg", "-s", "html2text"), stdout=subprocess.PIPE)
    output = subprocess.check_output(("grep", "Status"), stdin=check_for_package.stdout)
    check_for_package.wait()
    opstr = str(output, 'utf-8')
    print(opstr)
    if opstr == "Status: install ok installed\n":
        print("Package installed")
except Exception as e:
    # NOTE(review): ANY failure above (dpkg missing, grep matching nothing)
    # triggers an interactive `sudo apt install` -- very broad; confirm intended.
    print("installing html2text..............................")
    install_pkg = subprocess.check_call("sudo apt install html2text", shell=True)
# The tracking number is the first command-line argument.
try:
    tracking_number = str(sys.argv[1])
except(IndexError, ValueError):
    print("please enter a tracking number of a valid format")
    sys.exit(2)
# Fetch the India Post tracking page for the given item id.
request_url = "http://ipsweb.ptcmysore.gov.in/ipswebtracking/IPSWeb_item_events.aspx?itemid=" + tracking_number
# print(request_url)
r = requests.get(request_url)
print(r.status_code)
# Save the raw HTML, then render it as plain text via html2text.
f = open("raw_html", "w+")
f.write(r.text)
f.close()
view_html = subprocess.Popen(["html2text", "raw_html"])
output = view_html.communicate()
view_html.wait()
print(output)
| technodict/Parcel_Tracking_CLI | tracking.py | tracking.py | py | 1,131 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "subprocess... |
74791819944 | import click
import requests
from tabulate import tabulate
class github:
    """Small GitHub REST client used by the CLI.

    *ghf* carries shared CLI state (token, fields, debug, profile,
    export_csv, csv_separator, print_header_row).
    """
    def __init__(self,ghf):
        self.ghf=ghf
    def repos(self,org):
        """List an organisation's repositories (default field: full_name)."""
        url='https://api.github.com/orgs/'+org+'/repos'
        fields = ['full_name']
        headers=self.get_auth_header()
        self.make_github_get_request(fields,url,headers)
    def make_github_get_request(self,default_fields,url,headers,data=''):
        """GET *url* and print the JSON result as a table.

        CLI-supplied fields override *default_fields*; when profile mode is
        on, print key-frequency statistics instead of the rows themselves.
        """
        if self.ghf.debug: click.echo("url:"+url+" token:"+self.ghf.token+" fields:"+",".join(default_fields))
        r = requests.get(url, headers=headers, data=data)
        fields = default_fields
        if len(self.ghf.fields) > 0:
            fields = self.ghf.fields
        table = []
        if self.ghf.profile:
            (fields,table) = self.get_profile_table(r.json())
        else:
            table = self.get_table_from_object_array_with_fields(fields,r.json())
        self.print_table(fields,table)
    def print_table(self, field_names, table):
        """Emit *table* either as separator-joined CSV lines or via tabulate,
        honouring the header-row flag in both modes."""
        if self.ghf.export_csv:
            separator = self.ghf.csv_separator
            if self.ghf.print_header_row:
                click.echo(separator.join(field_names))
            for entry in table:
                click.echo(separator.join(entry))
        else:
            if self.ghf.print_header_row:
                click.echo(tabulate(table, field_names, tablefmt="simple"))
            else:
                click.echo(tabulate(table, tablefmt="simple"))
    def test_post(self,url,headers,data):
        """Debug helper: POST and echo the response (201 = body, 401 = error
        message, anything else = status + raw text)."""
        r = requests.post(url, headers=headers, data=data)
        if r.status_code == 201:
            click.echo(r.json())
        elif r.status_code == 401:
            click.echo("Error:"+r.json()['message'])
        else:
            click.echo('status:'+str(r.status_code))
            click.echo(r.text)
    def test_get(self,url,headers,data):
        """Debug helper: GET and echo the response, same handling as
        test_post.  NOTE(review): checks for 201, not 200 -- confirm."""
        r = requests.get(url, headers=headers, data=data)
        if r.status_code == 201:
            click.echo(r.json())
        elif r.status_code == 401:
            click.echo("Error:"+r.json()['message'])
        else:
            click.echo('status:'+str(r.status_code))
            click.echo(r.text)
    def get_auth_header(self):
        """Return the token-auth header dict for the configured token."""
        headers={'Authorization' : 'token '+self.ghf.token}
        return headers
    def get_profile_table(self,json):
        """Profile a JSON list: count string items (level1) and the keys of
        dict items (level2); return (field_names, rows sorted by name)."""
        outter_key_hash = {}
        inner_key_hash = {}
        if type(json) == type([]):
            for item in json:
                if type(item) == type(u''):
                    outter_key_hash[item] = 1 if item not in outter_key_hash else outter_key_hash[item] + 1
                if type(item) == type({}):
                    for inner_item in item:
                        if type(inner_item) == type(u''):
                            inner_key_hash[inner_item] = 1 if inner_item not in inner_key_hash else inner_key_hash[inner_item] + 1
        table = []
        for key, value in outter_key_hash.items():
            table.append(['level1',key,str(value)])
        for key, value in inner_key_hash.items():
            table.append(['level2',key,str(value)])
        field_names = ['level', 'name','count']
        table = sorted(table, key=lambda key: key[1])
        return (field_names,table)
    def get_table_from_object_array_with_fields(self,fields,json):
        """Project a JSON list of objects onto *fields*; missing keys become
        empty strings.  Non-list input yields an empty table."""
        table = []
        if type(json) == type([]):
            for item in json:
                if type(item) == type({}):
                    row = []
                    for field in fields:
                        if field in item:
                            row.append(item[field])
                        else:
                            row.append('')
                    table.append(row)
        headers = fields
        return table
# >>> js = ['name1', 'name2', {'iname1':11,'iname2':12}]
# >>> for item in js:
# ... print type(item)
# ...
# <type 'str'>
# <type 'str'>
# <type 'dict'> | DemandCube/github-flow | src/githubflow/github.py | github.py | py | 4,784 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "click.echo",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 49... |
9194485366 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TagArithmeticsRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'tag_id1': 'MongoObjectID',
'tag_id2': 'MongoObjectID',
'operation': 'TagArithmeticsOperation',
'new_tag_name': 'TagName',
'creator': 'TagCreator'
}
attribute_map = {
'tag_id1': 'tagId1',
'tag_id2': 'tagId2',
'operation': 'operation',
'new_tag_name': 'newTagName',
'creator': 'creator'
}
def __init__(self, tag_id1=None, tag_id2=None, operation=None, new_tag_name=None, creator=None, _configuration=None): # noqa: E501
"""TagArithmeticsRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._tag_id1 = None
self._tag_id2 = None
self._operation = None
self._new_tag_name = None
self._creator = None
self.discriminator = None
self.tag_id1 = tag_id1
self.tag_id2 = tag_id2
self.operation = operation
if new_tag_name is not None:
self.new_tag_name = new_tag_name
if creator is not None:
self.creator = creator
@property
def tag_id1(self):
"""Gets the tag_id1 of this TagArithmeticsRequest. # noqa: E501
:return: The tag_id1 of this TagArithmeticsRequest. # noqa: E501
:rtype: MongoObjectID
"""
return self._tag_id1
@tag_id1.setter
def tag_id1(self, tag_id1):
"""Sets the tag_id1 of this TagArithmeticsRequest.
:param tag_id1: The tag_id1 of this TagArithmeticsRequest. # noqa: E501
:type: MongoObjectID
"""
if self._configuration.client_side_validation and tag_id1 is None:
raise ValueError("Invalid value for `tag_id1`, must not be `None`") # noqa: E501
self._tag_id1 = tag_id1
@property
def tag_id2(self):
"""Gets the tag_id2 of this TagArithmeticsRequest. # noqa: E501
:return: The tag_id2 of this TagArithmeticsRequest. # noqa: E501
:rtype: MongoObjectID
"""
return self._tag_id2
@tag_id2.setter
def tag_id2(self, tag_id2):
"""Sets the tag_id2 of this TagArithmeticsRequest.
:param tag_id2: The tag_id2 of this TagArithmeticsRequest. # noqa: E501
:type: MongoObjectID
"""
if self._configuration.client_side_validation and tag_id2 is None:
raise ValueError("Invalid value for `tag_id2`, must not be `None`") # noqa: E501
self._tag_id2 = tag_id2
@property
def operation(self):
"""Gets the operation of this TagArithmeticsRequest. # noqa: E501
:return: The operation of this TagArithmeticsRequest. # noqa: E501
:rtype: TagArithmeticsOperation
"""
return self._operation
@operation.setter
def operation(self, operation):
"""Sets the operation of this TagArithmeticsRequest.
:param operation: The operation of this TagArithmeticsRequest. # noqa: E501
:type: TagArithmeticsOperation
"""
if self._configuration.client_side_validation and operation is None:
raise ValueError("Invalid value for `operation`, must not be `None`") # noqa: E501
self._operation = operation
@property
def new_tag_name(self):
"""Gets the new_tag_name of this TagArithmeticsRequest. # noqa: E501
:return: The new_tag_name of this TagArithmeticsRequest. # noqa: E501
:rtype: TagName
"""
return self._new_tag_name
@new_tag_name.setter
def new_tag_name(self, new_tag_name):
"""Sets the new_tag_name of this TagArithmeticsRequest.
:param new_tag_name: The new_tag_name of this TagArithmeticsRequest. # noqa: E501
:type: TagName
"""
self._new_tag_name = new_tag_name
@property
def creator(self):
"""Gets the creator of this TagArithmeticsRequest. # noqa: E501
:return: The creator of this TagArithmeticsRequest. # noqa: E501
:rtype: TagCreator
"""
return self._creator
@creator.setter
def creator(self, creator):
"""Sets the creator of this TagArithmeticsRequest.
:param creator: The creator of this TagArithmeticsRequest. # noqa: E501
:type: TagCreator
"""
self._creator = creator
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TagArithmeticsRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagArithmeticsRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TagArithmeticsRequest):
return True
return self.to_dict() != other.to_dict()
| tibe97/thesis-self-supervised-learning | lightly/openapi_generated/swagger_client/models/tag_arithmetics_request.py | tag_arithmetics_request.py | py | 6,991 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "lightly.openapi_generated.swagger_client.configuration.Configuration",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 212,
"usage_type": "c... |
15708723931 | import numpy as np
import cv2 as cv
import pandas as pd
import random
from math import ceil
from sklearn.utils import shuffle
dt = np.dtype(np.float32)
def generator(batch_size=50):
    """Endless generator of (images, boxes) training batches.

    Each yielded batch holds 100 crops: 64x64 RGB images scaled to [0, 1],
    paired with labels [1, x1, y1, x2, y2] for crops around annotated traffic
    lights (coordinates in resized-crop space) or [0, 0, 0, 0, 0] for
    negative crops taken around blob detections away from any light.

    NOTE(review): every batch has exactly 100 samples; `batch_size` only
    controls how many batches are produced per pass of the outer loop —
    confirm that is the intended meaning.
    """
    while True:
        for j in range(batch_size):
            Xs = []
            Ys = []
            count = 0
            while count < 100:
                # Pick a random day (0) or night (1) clip and load its annotations.
                day_or_night = random.randint(0,1)
                if day_or_night == 0:
                    folder_day = random.randint(1,13)
                    path_0 = '/../data/archive/Annotations/Annotations/dayTrain/dayClip{}/frameAnnotationsBOX.csv'.format(folder_day)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                else:
                    folder_night = random.randint(1,5)
                    path_0 = '/../data/archive/Annotations/Annotations/nightTrain/nightClip{}/frameAnnotationsBOX.csv'.format(folder_night)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                # choose picture
                i = random.randint(0, len(csv_file.iloc[:,0].unique())-1) # choose random number of picture in folder
                full_pic_name = csv_file.iloc[:,0].unique()[i] # with index above choose full name picture
                pic_name = csv_file.iloc[:,0].unique()[i].split('/')[1] # with index above choose picture
                if day_or_night == 0:
                    path_to_img = '/../data/archive/dayTrain/dayTrain/dayClip{}/frames/'.format(folder_day) + pic_name
                else:
                    path_to_img = '/../data/archive/nightTrain/nightTrain/nightClip{}/frames/'.format(folder_night) + pic_name
                img = cv.imread(path_to_img)
                img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
                # find coordinates
                number_of_same_pic = len(csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[:,0]) # how many pic with same name
                # Pad 200 px on every side so random crops near the border stay in-bounds.
                img = cv.copyMakeBorder(img, 200, 200, 200, 200, cv.BORDER_REPLICATE)
                # blobbing
                params = cv.SimpleBlobDetector_Params()
                params.minThreshold = 1
                params.maxThreshold = 255
                params.filterByArea = True
                params.minArea = 100
                params.filterByCircularity = False
                params.filterByConvexity = False
                params.filterByInertia = False
                detector = cv.SimpleBlobDetector_create(params)
                keypoints = detector.detect(img)
                # NOTE(review): `kps` is computed but never used afterwards.
                kps = np.array([key for key in keypoints])
                for i in range(number_of_same_pic):
                    if count < 100:
                        # coors of box
                        x1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,2]+200
                        y1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,3]+200
                        x2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,4]+200
                        y2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,5]+200
                        # condition for keypoins which are not boxes - TAKES MUCH TIME
                        # NOTE(review): the outer `for key ...` loop rebinds `keypoints`
                        # on every iteration; one filtering pass would suffice.
                        for key in keypoints:
                            keypoints = [key for key in keypoints if not ((x1-50 < key.pt[0] < x2+50) and (y1-50 < key.pt[1] < y2+50))]
                        # Randomly place the ground-truth box inside a 200x200 crop.
                        random_crop_x1 = random.randint(0, 200-(x2-x1))
                        random_crop_x2 = 200 - random_crop_x1
                        random_crop_y1 = random.randint(0, 200-(y2-y1))
                        random_crop_y2 = 200 - random_crop_y1
                        cropped_img = img[y1-random_crop_y1:y2+random_crop_y2, x1-random_crop_x1:x2+random_crop_x2]
                        new_x1 = random_crop_x1
                        new_y1 = random_crop_y1
                        new_x2 = new_x1 + (x2-x1)
                        new_y2 = new_y1 + (y2-y1)
                        w = cropped_img.shape[1]
                        h = cropped_img.shape[0]
                        # Rescale box coordinates into the 64x64 resized crop.
                        Rx = (64 / w)
                        Ry = (64 / h)
                        x1 = ceil(new_x1*Rx)
                        y1 = ceil(new_y1*Ry)
                        x2 = ceil(new_x2*Rx)
                        y2 = ceil(new_y2*Ry)
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([1, x1, y1, x2, y2], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.), Ys.append(box)
                        count += 1
                # Keep at most 4 of the remaining (non-light) blobs as negatives.
                keypoints = keypoints[-5:-1]
                for k in range(len(keypoints)):
                    if count < 100:
                        k_x1 = int(round(keypoints[k].pt[0]-100))
                        k_y1 = int(round(keypoints[k].pt[1]-100))
                        k_x2 = int(round(keypoints[k].pt[0]+100))
                        k_y2 = int(round(keypoints[k].pt[1]+100))
                        cropped_img = img[k_y1:k_y2, k_x1:k_x2]
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([0, 0, 0, 0, 0], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.), Ys.append(box)
                        count += 1
            Xs, Ys = shuffle(Xs, Ys)
            yield Xs, Ys
| JackDrinkwater/traffic_lights_detector | src/data_gen.py | data_gen.py | py | 5,719 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.dtype",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.randint",
"lin... |
18444612580 | from django.contrib.auth import get_user_model
from .models import Chat
def create_chat(user_id1, user_id2, room_name):
    """Create (or fetch) the chat room ``room_name`` and attach both users.

    :param user_id1: primary key of the first participant
    :param user_id2: primary key of the second participant
    :param room_name: unique name identifying the ``Chat`` row
    :return: the ``Chat`` instance with the matched participants added
    """
    # Fetch both participants in a single query.
    participants = get_user_model().objects.filter(id__in=[user_id1, user_id2])
    # Reuse an existing room with this name instead of creating a duplicate.
    chat, _ = Chat.objects.get_or_create(name=room_name)
    # Add every matched user in one call. Unlike indexing participants[0]
    # and participants[1], this cannot raise IndexError when an id does not
    # resolve to a user (e.g. deleted account) or when both ids are equal.
    chat.participants.add(*participants)
    chat.save()
    return chat
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Chat.objects.get_or_create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Chat.objects",
"line_number": 9,
"usage_type": "attribute"
},... |
20543918172 | from copy import deepcopy
from pandas import DataFrame as DF
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation as R
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
id = 0
USE_SLERP = False
class Sample:
    """One contiguous recording of a labeled activity.

    Holds one DataFrame per sensor (gyro, accelerometer, orientation,
    gravity); each frame carries a ``time`` column in integer nanoseconds
    plus the sensor's value columns.
    """
    def __init__(
        self,
        source: str,
        continuity_index: int,
        activity: str,
        gyro: DF,
        accelerometer: DF,
        orient: DF,
        gravity: DF,
    ):
        # Every instance gets a unique id from the module-level counter.
        global id
        self.id = id
        id += 1
        self.source = source
        self.continuity_index = continuity_index
        self.activity = activity
        self.gyro = gyro
        self.accelerometer = accelerometer
        self.orient = orient
        self.gravity = gravity
    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname} {self.id}: {self.activity}"
    def plot_sensor_data(self, sensor_data, title, domain = (0, float("inf"))):
        """Helper function to plot a specific sensor's data."""
        # NOTE(review): domain appears to be in seconds while `time` is in
        # nanoseconds, yet the bounds are scaled by 1e10 (not 1e9) — confirm.
        df = sensor_data
        filtered = df[df["time"] < domain[1] * 1e10]
        filtered = filtered[filtered["time"] > domain[0] * 1e10]
        for col in df.columns:
            # if col != "time":
            if col not in ["time", "roll", "pitch", "yaw"]:
                plt.plot(filtered["time"], filtered[col], label=col)
        plt.title(title)
        plt.legend()
    def getLength(self):
        # Duration of the recording in seconds (time column is nanoseconds).
        return self.gyro["time"].iloc[-1] * 1e-9 - self.gyro["time"].iloc[0]* 1e-9
    def graph(self, domain=(0, float("inf"))):
        """Plots the data for all sensors in separate subplots."""
        fig, axs = plt.subplots(4, 1, figsize=(10, 20))
        fig.suptitle(f"Data Source: {self.source}", fontsize=16, y=1.05)
        plt.subplot(4, 1, 1)
        self.plot_sensor_data(self.gyro, "Gyroscope Data", domain)
        plt.subplot(4, 1, 2)
        self.plot_sensor_data(self.accelerometer, "Accelerometer Data", domain)
        plt.subplot(4, 1, 3)
        self.plot_sensor_data(self.orient, "Orientation Data", domain)
        plt.subplot(4, 1, 4)
        self.plot_sensor_data(self.gravity, "Gravity Data", domain)
        plt.tight_layout()
        plt.show()
    def _plot_sensor_data_edges(self, subplot, sensor_data, title, margin_sec: int):
        """Helper function to plot a specific sensor's data."""
        # Split off the first and last `margin_sec` seconds of the recording
        # and plot them side by side (subplot and subplot + 1).
        df = sensor_data
        offset_nanosec = margin_sec * 1e9
        start_split = df[df["time"] > offset_nanosec].index[0]
        df_start = df.loc[: start_split - 1]
        maxtime = df["time"].iloc[-1]
        end_split = df[df["time"] > maxtime - offset_nanosec].index[0]
        df_end = df.loc[end_split:]
        plt.subplot(subplot)
        time_data = df_start["time"]
        for col in df_start.columns:
            if col != "time":
                plt.plot(time_data * 1e-9, df_start[col], label=col)
        plt.title(title)
        plt.legend()
        plt.subplot(subplot + 1)
        plt.title(title)
        # plt.legend() results in: No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
        time_data = df_end["time"]
        for col in df_end.columns:
            if col != "time":
                plt.plot((time_data - maxtime) * 1e-9, df_end[col], label=col)
    def graph_edges(self, margin):
        """Plots the data for all sensors in separate subplots."""
        fig, axs = plt.subplots(4, 2, figsize=(10, 20))
        fig.suptitle(f"Data Source: {self.source}", fontsize=16, y=1.05)
        self._plot_sensor_data_edges(421, self.gyro, "Gyroscope Data", margin)
        self._plot_sensor_data_edges(
            423, self.accelerometer, "Accelerometer Data", margin
        )
        self._plot_sensor_data_edges(425, self.orient, "Orientation Data", margin)
        self._plot_sensor_data_edges(427, self.gravity, "Gravity Data", margin)
        plt.tight_layout()
        plt.show(block=True)
    def showAccPlot(self, window=128):
        # Plot the first `window` accelerometer samples against sample index.
        acc_128 = self.accelerometer[0:window]
        print(acc_128.shape)
        x = acc_128['x']
        y = acc_128['y']
        z = acc_128['z']
        index = np.arange(0, window, 1)
        fig = plt.gcf()
        fig.set_size_inches(15, 8)
        plt.title(f"Accelerometer - {self.activity} - {window} samples")
        plt.plot(index, x, label="x")
        plt.plot(index, y, label="y")
        plt.plot(index, z, label="z")
        plt.legend()
        plt.show()
    def save_trim(self, start, end):
        # Persist a "start|end" trim marker next to the recording's source.
        with open(os.path.join(self.source, "trim.txt"), "w") as f:
            f.write(f"{start}|{end}")
    def resample(self, frequency_hz: float):
        # NOTE(review): 1 / frequency_hz is a period in *seconds*, but
        # resample_sensor interprets its argument as milliseconds —
        # 1000 / frequency_hz may have been intended; confirm.
        interval_ms = 1 / frequency_hz
        self.gyro = resample_sensor(self.gyro, interval_ms)
        self.accelerometer = resample_sensor(self.accelerometer, interval_ms)
        if USE_SLERP:
            self.orient = resample_sensor_quaternion(self.orient, interval_ms)
        else:
            self.orient = resample_sensor(self.orient, interval_ms)
        self.gravity = resample_sensor(self.gravity, interval_ms)
    def synchronize(self):
        """Effect:
        All sensors are split into the same time windows
        e.g. if sample had gyro readings in seconds [1, 5]
        but accelerometer [3, 6], synchronized would have readings
        for both in time window [3, 5].
        Necessary for models taking multiple sensors as inputs
        """
        pass # TODO
def resample_sensor(sensor: DF, interval_ms: float):
    """Resample one sensor's readings onto a regular time grid.

    :param sensor: frame with a ``time`` column in integer nanoseconds plus
        one column per measured axis
    :param interval_ms: target sample spacing in milliseconds
    :return: frame on the regular grid, linearly interpolated, clipped to
        the original time span, with ``time`` restored to integer nanoseconds
    """
    # Work on a copy so the caller's frame is never mutated.
    df = deepcopy(sensor)
    t_min = np.min(df['time'])
    t_max = np.max(df['time'])
    # Integer nanoseconds -> DatetimeIndex so pandas can resample.
    df['time'] = pd.to_datetime(df['time'])
    df.set_index('time', inplace=True)
    # 'ms' is the supported millisecond unit; the single-letter 'L' alias is
    # deprecated in pandas 2.x and removed in 3.0.
    resampled_df = df.resample(f'{interval_ms}ms').mean()
    # Grid points that fell between readings are NaN; fill them linearly.
    resampled_df_interpolated = resampled_df.interpolate(method='linear')
    df_reset = resampled_df_interpolated.reset_index()
    df_reset['time'] = df_reset['time'].astype('int64')
    df_reset.rename(columns={'index': 'time'}, inplace=True)
    # Resampling can extend past the original span; clip back to it.
    return df_reset[(df_reset['time'] >= t_min) & (df_reset['time'] <= t_max)]
def resample_sensor_quaternion(df: DF, interval_ms: float):
    """Resample orientation data, SLERP-interpolating the quaternion columns.

    The Euler columns (roll/pitch/yaw) are resampled linearly via
    resample_sensor; qx/qy/qz/qw are interpolated with scipy's Slerp at the
    same grid times and concatenated back on.

    NOTE(review): Slerp raises if any resampled time falls outside the
    original [times[0], times[-1]] span — presumably guaranteed by
    resample_sensor's clipping; confirm.
    """
    if df.empty:
        return df
    # Nanosecond timestamps -> seconds, matching `df.time / 1e9` below.
    times = pd.to_timedelta(df['time'] / 1e9, unit="seconds").dt.total_seconds()
    if df[['qx', 'qy', 'qz', 'qw']].isnull().values.any():
        raise ValueError("NaN values found in quaternion data.")
    quaternions = df[['qx', 'qy', 'qz', 'qw']].to_numpy()
    if quaternions.shape[1] != 4:
        raise ValueError(f"Quaternion data has invalid shape: {quaternions.shape}")
    # Resample the non-quaternion columns onto the regular grid first.
    df = resample_sensor(df[['time', 'roll', 'pitch', 'yaw']], interval_ms)
    rotations = R.from_quat(quaternions)
    slerp = Slerp(times, rotations)
    new_rotations = slerp(df.time / 1e9)
    new_quats = new_rotations.as_quat()
    quaternion_df = pd.DataFrame(new_quats, columns=['qx', 'qy', 'qz', 'qw'])
    final_df = pd.concat([df, quaternion_df], axis=1)
    return final_df
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",... |
31626287834 | import os
import bs4
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.api import CategorizedCorpusReader
import nltk
import time
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from sklearn.cluster import DBSCAN
import numpy as np
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.neighbors import NearestNeighbors
from functools import reduce
from sklearn.feature_extraction import text
from sklearn.decomposition import PCA
from numpy.linalg import svd
CAT_PATTERN = r'([a-z_\s]+)/.*'
DOC_PATTERN = r'(?!\.)[a-z_\s]+/[0-9]+\.html'
TAGS = []
title_TAGS = ['h1']
abstract_TAGS = ['blockquote']
class HTMLCorpusReader(CategorizedCorpusReader, CorpusReader):
    """
    A corpus reader for raw HTML documents to enable preprocessing.
    """
    def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8',
                 tags=TAGS, **kwargs):
        """
        Initialize the corpus reader. Categorization arguments
        (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
        the ``CategorizedCorpusReader`` constructor. The remaining
        arguments are passed to the ``CorpusReader`` constructor.
        """
        # Add the default category pattern if not passed into the class.
        if not any(key.startswith('cat_') for key in kwargs.keys()):
            kwargs['cat_pattern'] = CAT_PATTERN
        # Initialize the NLTK corpus reader objects
        CategorizedCorpusReader.__init__(self, kwargs)
        CorpusReader.__init__(self, root, fileids, encoding)
        # Save the tags that we specifically want to extract.
        self.tags = tags
    def resolve(self, fileids, categories):
        """
        Returns a list of fileids or categories depending on what is passed
        to each internal corpus reader function. Implemented similarly to
        the NLTK ``CategorizedPlaintextCorpusReader``.
        """
        # Exactly one of the two selectors may be given.
        if fileids is not None and categories is not None:
            raise ValueError("Specify fileids or categories, not both")
        if categories is not None:
            return self.fileids(categories)
        return fileids
    def docs(self, fileids=None, categories=None):
        """
        Returns the complete text of an HTML document, closing the document
        after we are done reading it and yielding it in a memory safe fashion.
        """
        # Resolve the fileids and the categories
        fileids = self.resolve(fileids, categories)
        # Create a generator, loading one document into memory at a time.
        for path in self.abspaths(fileids):
            with open(path, 'r', encoding='UTF-8') as f:
                yield f.read()
    def sizes(self, fileids=None, categories=None):
        """
        Returns a list of tuples, the fileid and size on disk of the file.
        This function is used to detect oddly large files in the corpus.
        """
        # Resolve the fileids and the categories
        fileids = self.resolve(fileids, categories)
        # Create a generator, getting every path and computing filesize
        for path in self.abspaths(fileids):
            yield os.path.getsize(path)
def describe(paragraphs, fileids, categories):
    """Summarize a corpus: element counts, vocabulary size, diversity
    ratios, and the wall-clock seconds the scan took."""
    started = time.time()
    n_paras = 0
    n_sents = 0
    n_words = 0
    vocab = nltk.FreqDist()
    # Single pass: tokenize each paragraph into sentences, then words.
    for paragraph in paragraphs:
        n_paras += 1
        for sentence in nltk.sent_tokenize(paragraph):
            n_sents += 1
            for token in nltk.wordpunct_tokenize(sentence):
                n_words += 1
                vocab[token] += 1
    return {
        'files': len(fileids),
        'topics': len(categories),
        'paragraphs': n_paras,
        'sentences': n_sents,
        'words': n_words,
        'vocabulary size': len(vocab),
        'lexical diversity': float(n_words) / float(len(vocab)),
        'paragraphs per document': float(n_paras) / float(len(fileids)),
        'sentences per paragraph': float(n_sents) / float(n_paras),
        'secs': time.time() - started,
    }
# Build readers for titles (h1 tags) and abstracts (blockquote tags).
# NOTE(review): HTMLCorpusReader.__init__ is (root, fileids, encoding, tags),
# so these calls pass CAT_PATTERN as `fileids`, DOC_PATTERN as `encoding`
# and (below) abstract_TAGS as the positional `tags` — confirm the argument
# order is intended.
title_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, tags=title_TAGS)
title_fileids = title_corpus.fileids()
title_documents = title_corpus.docs(categories=title_corpus.categories())
title_htmls = list(title_documents)
abstract_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, abstract_TAGS)
abstract_fileids = abstract_corpus.fileids()
abstract_documents = abstract_corpus.docs(categories=abstract_corpus.categories())
abstract_htmls = list(abstract_documents)
title_categories = title_corpus.categories()
abstract_categories = abstract_corpus.categories()
def paras(htmls, TAGS):
    """Yield the text of every matching tag from each HTML document.

    htmls: iterable of raw HTML strings; TAGS: tag names to extract.
    Each parsed tree is decomposed afterwards to release memory.
    """
    for markup in htmls:
        tree = bs4.BeautifulSoup(markup, 'lxml')
        for node in tree.find_all(TAGS):
            yield node.text
        tree.decompose()
# Keep only headings that contain a "Title:" marker and trim it off.
# NOTE(review): str.strip('Title:\n') strips a *character set* from both
# ends, not the literal prefix — str.removeprefix('Title:') was probably
# intended; titles starting/ending in T/i/t/l/e/: characters get clipped.
title_paragraphs = list(paras(title_htmls, title_TAGS))
temp_title_paragraphs = []
for para in title_paragraphs:
    if "Title:" in para: # and len(para)>30
        temp_title_paragraphs.append(para.strip('Title:\n'))
title_paragraphs = temp_title_paragraphs
print("title_paragraphs len: ", len(title_paragraphs))
print("descreibe title_paragraphs", describe(title_paragraphs, title_fileids, title_categories))
abstract_paragraphs = list(paras(abstract_htmls, abstract_TAGS))
print("abstract_paragraphs len: ", len(abstract_paragraphs))
print("descreibe abstract_paragraphs", describe(abstract_paragraphs, abstract_fileids, abstract_categories))
#temp_para = []
#print(abstract_paragraphs[0])
#for para in abstract_paragraphs:
#    temp_para.append(re.sub(r"[^a-zA-Z\s.]", "", para).lower()) # keep only letters + whitespace
#abstract_paragraphs = temp_para
#print("descreibe post abstract_paragraphs", describe(abstract_paragraphs, abstract_fileids, abstract_categories))
#print(abstract_paragraphs[0])
# Pair each title with its abstract (zip stops at the shorter list).
papers_list = []
for key, value in zip(title_paragraphs, abstract_paragraphs):
    temp_dict = dict()
    temp_dict['title'] = key
    temp_dict['abstract'] = value
    papers_list.append(temp_dict)
def sklearn_tfidf_vectorize(corpus):
    """TF-IDF vectorize *corpus*, dropping English stop words plus the
    token 'abstract' and every single ASCII letter."""
    extra_stop_words = ['abstract'] + [chr(c) for c in range(ord('a'), ord('z') + 1)]
    my_stop_words = text.ENGLISH_STOP_WORDS.union(extra_stop_words)
    vectorizer = TfidfVectorizer(stop_words=my_stop_words)
    return vectorizer.fit_transform(corpus)
# Assemble (title, abstract) rows. A list keeps the column order
# deterministic — the original passed a set literal ({'title', 'abstract'}),
# whose iteration order is arbitrary from run to run.
df = pd.DataFrame(papers_list, columns=['title', 'abstract'])
# Dense TF-IDF matrix over the abstracts, persisted alongside the rows.
tf_idf = sklearn_tfidf_vectorize(abstract_paragraphs).todense()
tf_idf_df = pd.DataFrame(tf_idf)
df.to_csv('df.csv')
tf_idf_df.to_csv('tf_idf_df.csv')
def clustering(df_, tf_idf_df_, tf_idf_, eps, min_samples):
    """Run DBSCAN for every paired (eps[i], min_samples[i]) setting and draw
    silhouette plots.

    Shows a k-distance plot first (to help pick eps), then one silhouette
    subplot per accepted configuration. Configurations producing 2 or fewer
    clusters (including noise) are skipped. Cluster labels and per-sample
    silhouette coefficients are written back into df_ under keys derived
    from the parameter values. Returns the list of label arrays.
    """
    # k-distance curve: sorted distance to the 4th nearest neighbour.
    neighbors = NearestNeighbors(n_neighbors=4)
    neighbors_fit = neighbors.fit(tf_idf_df_)
    distances, indices = neighbors_fit.kneighbors(tf_idf_df_)
    distances = np.sort(distances, axis=0)
    distances = distances[:,1]
    plt.plot(distances)
    plt.show()
    # With a single configuration, still request 2 columns so `axs`
    # stays an indexable array.
    if len(eps)*len(min_samples) == 1:
        ncols = 2
    else:
        ncols = len(eps)
    fig, axs = plt.subplots(figsize=(8 * 1, 8), nrows=1, ncols=ncols) #change col
    ind = 0
    result = []
    for i in range(len(eps)):
        print("e,s: ", eps[i], min_samples[i],'\n')
        model = DBSCAN(eps=eps[i], min_samples=min_samples[i])
        clusters = model.fit(tf_idf_df_)
        n_cluster = len(set(clusters.labels_))
        if n_cluster <= 2:
            print("cluster num of", eps[i], min_samples[i], "is 2 or less\n")
            continue
        result.append(model.fit_predict(tf_idf_df_))
        df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] = result[ind]
        score_samples = silhouette_samples(tf_idf_, df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])])
        df_['silhouette_coeff' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] = score_samples
        silhouette_s = silhouette_score(tf_idf_, df_['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])])
        # Average of the per-cluster mean silhouette coefficients.
        temp = 0
        for p in df_.groupby('cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i]))['silhouette_coeff' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])].mean():
            temp += p
        average_score = temp/len(set(clusters.labels_))
        y_lower = 10
        axs[i].set_title(
            'Number of Cluster : ' + str(n_cluster) + '\n' + 'Silhouette Score :' + str(round(silhouette_s, 3)))
        axs[i].set_xlabel("The silhouette coefficient values")
        axs[i].set_ylabel("Cluster label")
        axs[i].set_xlim([-0.1, 1])
        axs[i].set_ylim([0, len(tf_idf_df_) + (n_cluster + 1) * 10])
        axs[i].set_yticks([])  # Clear the yaxis labels / ticks
        axs[i].set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
        # Draw one fill_betweenx() bar per cluster (label -1 = noise).
        for j in range(-1, n_cluster-1):
            ith_cluster_sil_values = score_samples[result[ind] == j]
            ith_cluster_sil_values.sort()
            size_cluster_i = ith_cluster_sil_values.shape[0]
            y_upper = y_lower + size_cluster_i
            color = cm.nipy_spectral(float(j) / n_cluster)
            axs[i].fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_sil_values, \
                                 facecolor=color, edgecolor=color, alpha=0.7)
            axs[i].text(-0.05, y_lower + 0.5 * size_cluster_i, str(j))
            y_lower = y_upper + 10
        axs[i].axvline(x=silhouette_s, color="red", linestyle="--")
        ind += 1
    plt.show()
    return result
# svd로 s값 구해서 분산정도 구하고 pca 파라미터 구하기
#u, s, vt = svd(tf_idf)
#s = np.diag(s)
#s_list = []
#for i in range(0, 1000):
# s_list.append(s[i][i]/np.trace(s))
#
#for i in range(1, 1000):
# print(1-reduce(lambda a, b: a + b, s_list[:i]))
pca = PCA(n_components=30) # decide how many principal components to keep
principalComponents = pca.fit_transform(tf_idf_df)
principalDf = pd.DataFrame(data=principalComponents)
# NOTE(review): the labels below use df.shape[1] (the 2 columns of df), but
# principalComponents has 30 columns — this length mismatch looks like a bug
# (principalComponents.shape[1] was probably intended).
pca_df = pd.DataFrame(data=principalComponents, index=df.index,
                      columns=[f"pca{num+1}" for num in range(df.shape[1])])
result = pd.DataFrame({'설명가능한 분산 비율(고윳값)':pca.explained_variance_,
                       '기여율':pca.explained_variance_ratio_},
                      index=np.array([f"pca{num+1}" for num in range(df.shape[1])]))
result['누적기여율'] = result['기여율'].cumsum()
print(result)
# Paired sweep: (eps[i], min_samples[i]) define one DBSCAN configuration.
eps = [0.05, 0.03, 0.09, 0.05, 0.03, 0.09, 0.05, 0.03, 0.09]
min_samples = [2, 2, 2, 4, 4, 4, 7, 7, 7]
res = clustering(df, principalDf, principalComponents, eps, min_samples)
print(res[0])
print(type(res[0]))
"""
for i, r in enumerate(res):
if set(r) == None:
continue
else:
for cluster_num in set(r):
if cluster_num == -1 or cluster_num == 0:
continue
# -1,0은 노이즈 판별이 났거나 클러스터링이 안된 경우
print("cluster num : {}".format(cluster_num))
temp_df = dff[dff['cluster' + 'of' + str(eps[i]) + 'and' + str(min_samples[i])] == cluster_num] # cluster num 별로 조회
for title in temp_df['title']:
print(title) # 제목으로 살펴보자
print()
print("-----------------\n")
"""
"""
print(df.head())
print("num of clusters: ", len(set(clusters.labels_)))
print("average_score: " + 'of' + str(e) + 'and' + str(s) + ": ", average_score)
print("silhouette score: " + 'of' + str(e) + 'and' + str(s) + ": ", silhouette_s)
print(df.groupby('cluster' + 'of' + str(e) + 'and' + str(s))['silhouette_coeff' + 'of' + str(e) + 'and' + str(s)].mean())
print("eps, min_samples: " + 'of' + str(e) + 'and' + str(s) + ": ", e, s)""" | kyle1213/data-mining | _old/abstract_clustering_pca.py | abstract_clustering_pca.py | py | 12,035 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.reader.api.CategorizedCorpusReader",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.reader.api.CorpusReader",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.reader.api.CategorizedCorpusReader.__init__",
... |
35829929249 | from django.contrib import admin
from .models import Page, Carousel
class PageAdmin(admin.ModelAdmin):
    """Admin configuration for ``Page``: slug auto-fill plus inline editing."""

    # Pre-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {"slug": ("title",)}
    # Columns shown on the changelist page.
    list_display = ('pk', 'title', 'slug', 'status', 'updated_at')
    # Sidebar filter on publication status.
    list_filter = ('status',)
    # Columns editable directly from the changelist.
    list_editable = ('title', 'status')
class CarouselAdmin(admin.ModelAdmin):
    """Admin configuration for ``Carousel`` entries."""

    list_display = [
        'pk',
        'title',
        'cover_image',
        'status',
    ]
    list_filter = ['status', ]
    # Spell the editable columns out instead of aliasing ``list_filter``:
    # the original ``list_editable = list_filter`` bound both names to the
    # same mutable list, so mutating one would silently change the other.
    list_editable = ['status', ]
admin.site.register(Page, PageAdmin)
admin.site.register(Carousel, CarouselAdmin) | hakanyalcinkaya/kodluyoruz-org-python-ve-django-egitimi | kaft_clone/page/admin.py | admin.py | py | 648 | python | en | code | 81 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 21,
"usage_type": "attribute"
... |
37348679197 | from math import pi
from collections import defaultdict
import numpy as np
from my_gpaw.kpt_descriptor import KPointDescriptor
from .kpts import RSKPoint, to_real_space
def create_symmetry_map(kd: KPointDescriptor):  # -> List[List[int]]
    """Build the multiplication table of the symmetry operations.

    Entry ``[s1][s2]`` is the index ``s`` whose rotation satisfies
    ``U_s1 @ U_s = U_s2``; the parity of complex conjugation must be
    consistent under composition (asserted below).
    """
    sym = kd.symmetry
    U_scc = sym.op_scc
    nsym = len(U_scc)
    compconj_s = np.zeros(nsym, bool)
    if sym.time_reversal and not sym.has_inversion:
        # Time reversal without inversion doubles the group; the added
        # second half additionally complex-conjugates.
        U_scc = np.concatenate([U_scc, -U_scc])
        compconj_s = np.zeros(nsym * 2, bool)
        compconj_s[nsym:] = True
        nsym *= 2

    map_ss = np.zeros((nsym, nsym), int)
    for s1 in range(nsym):
        # All products U_s1 @ U_s at once, shape (nsym, 3, 3).
        prod_scc = U_scc[s1].dot(U_scc).transpose((1, 0, 2))
        for s2 in range(nsym):
            diff_s = abs(prod_scc - U_scc[s2]).sum(2).sum(1)
            matches = (diff_s == 0).nonzero()[0]
            assert len(matches) == 1
            s = matches[0]
            assert compconj_s[s1] ^ compconj_s[s2] == compconj_s[s]
            map_ss[s1, s2] = s
    return map_ss
class Symmetry:
    """Symmetry bookkeeping for k-point pairs.

    Wraps a KPointDescriptor with the group multiplication table from
    create_symmetry_map, the index of the identity operation (s0) and the
    per-operation inverse indices.
    """
    def __init__(self, kd: KPointDescriptor):
        self.kd = kd
        self.symmetry_map_ss = create_symmetry_map(kd)
        U_scc = kd.symmetry.op_scc
        # Locate the identity rotation among the operations.
        is_identity_s = (U_scc == np.eye(3, dtype=int)).all(2).all(1)
        self.s0 = is_identity_s.nonzero()[0][0]
        # Column s0 of the table gives, for each s1, the s with
        # U_s1 @ U_s == identity, i.e. the inverse operation.
        self.inverse_s = self.symmetry_map_ss[:, self.s0]
    def symmetry_operation(self, s: int, wfs, inverse=False):
        """Return (T, T_a, time_reversal) for operation ``s``.

        T maps a real-space grid array onto its symmetry-transformed image
        (conjugating when time reversal is involved); T_a lists, per atom a,
        the target atom b, the fractional shift S_c and the rotation U_ii
        acting on the projector coefficients.
        """
        if inverse:
            s = self.inverse_s[s]
        U_scc = self.kd.symmetry.op_scc
        nsym = len(U_scc)
        # Indices >= nsym encode the same rotation combined with
        # time reversal.
        time_reversal = s >= nsym
        s %= nsym
        U_cc = U_scc[s]
        if (U_cc == np.eye(3, dtype=int)).all():
            # Identity rotation: grid values are untouched.
            def T0(a_R):
                return a_R
        else:
            # Precompute the permutation of grid indices under U_cc
            # (periodic wrap at the cell boundaries).
            N_c = wfs.gd.N_c
            i_cr = np.dot(U_cc.T, np.indices(N_c).reshape((3, -1)))
            i = np.ravel_multi_index(i_cr, N_c, 'wrap')
            def T0(a_R):
                return a_R.ravel()[i].reshape(N_c)
        if time_reversal:
            def T(a_R):
                return T0(a_R).conj()
        else:
            T = T0
        T_a = []
        for a, id in enumerate(wfs.setups.id_a):
            b = self.kd.symmetry.a_sa[s, a]
            S_c = np.dot(wfs.spos_ac[a], U_cc) - wfs.spos_ac[b]
            U_ii = wfs.setups[a].R_sii[s].T
            T_a.append((b, S_c, U_ii))
        return T, T_a, time_reversal
    def apply_symmetry(self, s: int, rsk, wfs, spos_ac):
        """Return the real-space k-point ``rsk`` transformed by operation ``s``
        (new RSKPoint; the input is returned unchanged for the identity)."""
        U_scc = self.kd.symmetry.op_scc
        nsym = len(U_scc)
        time_reversal = s >= nsym
        s %= nsym
        # Time reversal flips the sign of the k-vector.
        sign = 1 - 2 * int(time_reversal)
        U_cc = U_scc[s]
        if (U_cc == np.eye(3)).all() and not time_reversal:
            return rsk
        u1_nR = rsk.u_nR
        proj1 = rsk.proj
        f_n = rsk.f_n
        k1_c = rsk.k_c
        weight = rsk.weight
        u2_nR = np.empty_like(u1_nR)
        proj2 = proj1.new()
        k2_c = sign * U_cc.dot(k1_c)
        # Permute the periodic grid values of every band under U_cc.
        N_c = u1_nR.shape[1:]
        i_cr = np.dot(U_cc.T, np.indices(N_c).reshape((3, -1)))
        i = np.ravel_multi_index(i_cr, N_c, 'wrap')
        for u1_R, u2_R in zip(u1_nR, u2_nR):
            u2_R[:] = u1_R.ravel()[i].reshape(N_c)
        # Rotate projections, including the Bloch phase from the
        # fractional translation S_c.
        for a, id in enumerate(wfs.setups.id_a):
            b = self.kd.symmetry.a_sa[s, a]
            S_c = np.dot(spos_ac[a], U_cc) - spos_ac[b]
            x = np.exp(2j * pi * np.dot(k1_c, S_c))
            U_ii = wfs.setups[a].R_sii[s].T * x
            proj2[a][:] = proj1[b].dot(U_ii)
        if time_reversal:
            np.conj(u2_nR, out=u2_nR)
            np.conj(proj2.array, out=proj2.array)
        return RSKPoint(u2_nR, proj2, f_n, k2_c, weight)
    def pairs(self, kpts, wfs, spos_ac):
        """Yield (i1, i2, s, rsk1, transformed rsk2, count) for every
        distinct IBZ k-point pair/operation, with multiplicities folded in.

        The second point of each pair is band-sliced across ``wfs.world``
        ranks, so each rank handles its own band block.
        """
        kd = self.kd
        nsym = len(kd.symmetry.op_scc)
        assert len(kpts) == kd.nibzkpts
        # For each IBZ point, the symmetry indices (with the time-reversal
        # half offset by nsym) that map it around the BZ.
        symmetries_k = []
        for k in range(kd.nibzkpts):
            indices = np.where(kd.bz2ibz_k == k)[0]
            sindices = (kd.sym_k[indices] +
                        kd.time_reversal_k[indices] * nsym)
            symmetries_k.append(sindices)
        # pairs: Dict[Tuple[int, int, int], int]
        # Count how often each (i1, i2, s3) combination occurs.
        pairs1 = defaultdict(int)
        for i1 in range(kd.nibzkpts):
            for s1 in symmetries_k[i1]:
                for i2 in range(kd.nibzkpts):
                    for s2 in symmetries_k[i2]:
                        s3 = self.symmetry_map_ss[s1, s2]
                        # s3 = self.inverse_s[s3]
                        if 1: # i1 < i2:
                            pairs1[(i1, i2, s3)] += 1
                        else:
                            s4 = self.inverse_s[s3]
                            if i1 == i2:
                                # pairs1[(i1, i1, min(s3, s4))] += 1
                                pairs1[(i1, i1, s3)] += 1
                            else:
                                pairs1[(i2, i1, s4)] += 1
        # Merge entries whose transformed BZ point coincides.
        pairs = {}
        seen = {}
        for (i1, i2, s), count in pairs1.items():
            k2 = kd.bz2bz_ks[kd.ibz2bz_k[i2], s]
            if (i1, k2) in seen:
                pairs[seen[(i1, k2)]] += count
            else:
                pairs[(i1, i2, s)] = count
                # seen[(i1, k2)] = (i1, i2, s)
        comm = wfs.world
        # Cache rsk1/rsk2 across consecutive iterations with the same
        # i1/i2 (pairs are iterated in sorted order).
        lasti1 = -1
        lasti2 = -1
        for (i1, i2, s), count in sorted(pairs.items()):
            if i1 != lasti1:
                k1 = kpts[i1]
                u1_nR = to_real_space(k1.psit)
                rsk1 = RSKPoint(u1_nR, k1.proj.broadcast(),
                                k1.f_n, k1.k_c,
                                k1.weight, k1.dPdR_aniv)
                lasti1 = i1
            if i2 == i1:
                if s == self.s0:
                    rsk2 = rsk1
                else:
                    # Slice this rank's band block out of rsk1.
                    N = len(rsk1.u_nR)
                    S = comm.size
                    B = (N + S - 1) // S
                    na = min(B * comm.rank, N)
                    nb = min(na + B, N)
                    rsk2 = RSKPoint(rsk1.u_nR[na:nb],
                                    rsk1.proj.view(na, nb),
                                    rsk1.f_n[na:nb],
                                    rsk1.k_c,
                                    rsk1.weight)
                lasti2 = i2
            elif i2 != lasti2:
                k2 = kpts[i2]
                # Band-slice the second point across ranks.
                N = len(k2.psit.array)
                S = comm.size
                B = (N + S - 1) // S
                na = min(B * comm.rank, N)
                nb = min(na + B, N)
                u2_nR = to_real_space(k2.psit, na, nb)
                rsk2 = RSKPoint(u2_nR, k2.proj.broadcast().view(na, nb),
                                k2.f_n[na:nb], k2.k_c,
                                k2.weight)
                lasti2 = i2
            yield (i1, i2, s, rsk1,
                   self.apply_symmetry(s, rsk2, wfs, spos_ac),
                   count)
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/hybrids/symmetry.py | symmetry.py | py | 6,817 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "my_gpaw.kpt_descriptor.KPointDescriptor",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": ... |
4543553383 | import numpy as np
import cv2
import matplotlib.pyplot as plt
# Load the two 2-D correspondence files (one point per line, space-separated).
# NOTE(review): f1/f2 are never closed — a `with open(...)` block would be safer.
pt_1 = './pt_2D_1.txt'
pt_2 = './pt_2D_2.txt'
f1 = open(pt_1, 'r')
f2 = open(pt_2, 'r')
pt_1 = []
pt_2 = []
for line in f1.readlines():
    x = line.rstrip('\n').split(' ')
    pt_1.append(x)
for line in f2.readlines():
    x = line.rstrip('\n').split(' ')
    pt_2.append(x)
# Drop the header row of each file; points stay as strings until converted.
pt_1 = pt_1[1:]
pt_2 = pt_2[1:]
pt_1 = np.array(pt_1)
pt_2 = np.array(pt_2)
img1 = cv2.imread('./image1.jpg')
img2 = cv2.imread('./image2.jpg')
def LIS_eight(a, b):
    """Estimate the fundamental matrix from >= 8 point correspondences.

    Each row of the design matrix encodes the epipolar constraint
    p2^T F p1 = 0 for one correspondence a[i] <-> b[i] (coordinates may
    be strings; they are parsed to float here).
    """
    n_pts = a.shape[0]
    design = np.zeros((n_pts, 9))
    for row in range(n_pts):
        u, v = float(a[row][0]), float(a[row][1])
        u_, v_ = float(b[row][0]), float(b[row][1])
        design[row] = np.array([u * u_, u_ * v, u_, v_ * u, v * v_, v_, u, v, 1])
    # The least-squares solution subject to ||f|| = 1 is the right singular
    # vector associated with the smallest singular value of the design matrix.
    U, D, V = np.linalg.svd(design, full_matrices=True)
    F = np.reshape(V.T[:, 8], (3, 3))
    # Enforce the rank-2 constraint by zeroing the smallest singular value.
    FU, FD, FV = np.linalg.svd(F, full_matrices=True)
    return np.dot(FU, np.dot(np.diag([*FD[:2], 0]), FV))
# fundamental matrix (un-normalized eight-point algorithm on raw pixel coords)
F = LIS_eight(pt_1, pt_2)
# normalized fundamental matrix
def normalized_points(m):
    """Hartley-normalize 2D points given as (string) coordinate pairs.

    Parameters
    ----------
    m : sequence of N rows whose first two entries are the x/y coordinates
        (possibly strings, as read from the point files).

    Returns
    -------
    uv : (N, 2) float ndarray
        The parsed, un-normalized points.
    trans_matrix : (3, 3) float ndarray
        Similarity transform that centers the points at the origin and
        scales them so their RMS distance from it is sqrt(2).
    """
    uv = np.asarray(m)[:, :2].astype(float)
    # Center
    mean = np.mean(uv, axis=0)
    center = uv - mean
    # Scale
    scale = np.sqrt(2 * len(m) / np.sum(np.power(center, 2)))
    # Build the transform as a plain float matrix; the original used
    # dtype=object, which slows down (and can break) later linear algebra.
    trans_matrix = np.array(
        [[scale, 0, -mean[0] * scale],
         [0, scale, -mean[1] * scale],
         [0, 0, 1]],
        dtype=float,
    )
    return uv, trans_matrix
# Normalize both point sets, lift them to homogeneous coordinates,
# run the eight-point algorithm on the normalized points, then
# de-normalize the result: F = T2^T * F_hat * T1.
uv1, trans_matrix1=normalized_points(pt_1)
uv2, trans_matrix2=normalized_points(pt_2)
# Append a homogeneous 1 to every point.
uv1 = np.insert(uv1,uv1.shape[1],values=1, axis=1)
uv2 = np.insert(uv2,uv2.shape[1],values=1, axis=1)
# q = Tp
points1 = (trans_matrix1 @ (uv1.T)).T
# q = T'p'
points2 = (trans_matrix2 @ (uv2.T)).T
F_norm = LIS_eight(points1, points2)
# T'FT
F_norm = trans_matrix2.T @ (F_norm) @ (trans_matrix1)
#print(points_norm)
# pFp = [points2[i].dot(F_norm.dot(points1[i]))
#        for i in range(points1.shape[0])]
# print("p'^T F p =", np.abs(pFp).max())
def plot_(pt1, pt2, img1, img2, f):
    """Draw epipolar lines for fundamental matrix ``f`` on both images.

    Left panel: lines f^T * pt2 on img1 with the pt1 markers.
    Right panel: lines f * pt1 on img2.
    Lines are clipped against the image borders before plotting.
    """
    plt.subplot(1,2,1)
    # That is epipolar line associated with p.
    ln1 = f.T.dot(pt2.T)
    # Ax + By + C = 0
    A,B,C = ln1
    for i in range(ln1.shape[1]):
        # when y as 0,x = - (C/A)
        # when y = image.shape[0], x = -(Bw + C / A)
        # when x as image.shape[1], y = - (Aw + C / B)
        # when x as 0, y = - (C / B)
        #plt.plot([-C[i]/A[i], img1.shape[1]], [0, -(A[i]*img1.shape[1] + C[i])/B[i]], 'r')
        # Choose the clipping case from where the line meets the x = 0 edge.
        if ((-C[i]/B[i]) <0):
            plt.plot([-C[i]/A[i],img1.shape[1]],[0, -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        elif ((-C[i]/B[i]) > img1.shape[0]):
            plt.plot([-(C[i] + B[i]*img1.shape[0])/A[i],img1.shape[1]],[img1.shape[0], -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        else:
            plt.plot([0, img1.shape[1]], [-C[i]/B[i], -(C[i] + A[i]*img1.shape[1])/B[i]], 'r')
        plt.plot([pt1[i][0]], [pt1[i][1]], 'b*')
    plt.imshow(img1, cmap='gray')
    plt.subplot(1,2,2)
    # That is the epipolar line associated with p’.
    ln2 = f.dot(pt1.T)
    # Ax + By + C = 0
    A,B,C = ln2
    for i in range(ln2.shape[1]):
        # when y as 0,x = - (C/A)
        # when y = image.shape[0], x = -(Bw + C / A)
        # when x as image.shape[1], y = - (Aw + C / B)
        # when x as 0, y = - (C / B)
        #plt.plot([-C[i]/A[i], img1.shape[1]], [0, -(A[i]*img1.shape[1] + C[i])/B[i]], 'r')
        if ((-C[i]/B[i]) <0):
            plt.plot([-C[i]/A[i],img2.shape[1]],[0, -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        elif ((-C[i]/B[i]) > img2.shape[0]):
            plt.plot([-(C[i] + B[i]*img2.shape[0])/A[i],img2.shape[1]],[img2.shape[0], -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        else:
            plt.plot([0, img2.shape[1]], [-C[i]/B[i], -(C[i] + A[i]*img2.shape[1])/B[i]], 'r')
        # NOTE(review): this plots pt1 on the second image; plot_norm uses
        # pt2 here — likely should be pt2[i]. Confirm intended behavior.
        plt.plot([pt1[i][0]], [pt1[i][1]], 'b*')
    plt.imshow(img2, cmap='gray')
    plt.show()
def plot_norm(pt1, pt2, img1, img2, f):
    """Draw epipolar lines of the (de-normalized) matrix ``f`` on both images.

    Unlike plot_, lines are drawn across the full image width without
    clipping against the top/bottom borders.
    """
    plt.subplot(1,2,1)
    # That is epipolar line associated with p.
    ln1 = f.T.dot(pt2.T)
    # Ax + By + C = 0
    A,B,C = ln1
    for i in range(ln1.shape[1]):
        # when x as 0,y = - (C/B)
        # when x as 512(w), y = - (Aw + C / B)
        plt.plot([0, img1.shape[1]], [-C[i]/B[i], -(C[i] + A[i]*img1.shape[1])*1.0/B[i]], 'r')
        plt.plot([pt1[i][0]], [pt1[i][1]], 'b*')
    plt.imshow(img1, cmap='gray')
    plt.subplot(1,2,2)
    # That is the epipolar line associated with p’.
    ln2 = f.dot(pt1.T)
    # Ax + By + C = 0
    A,B,C = ln2
    for i in range(ln2.shape[1]):
        plt.plot([0, img2.shape[1]], [-C[i]*1.0/B[i], -(A[i]*img2.shape[1] + C[i])/B[i]], 'r')
        plt.plot([pt2[i][0]], [pt2[i][1]], 'b*')
    plt.imshow(img2, cmap='gray')
    plt.show()
# Visualize epipolar geometry for both estimates.
plot_(uv1, uv2, img1, img2, F)
plot_norm(uv1, uv2, img1, img2, F_norm)
def calaulate_dist(pt1, pt2, f):
    """Mean distance from each point in pt1 to the epipolar line f^T * pt2.

    Both point sets are homogeneous (N, 3) arrays; the result is the average
    point-to-line distance in pixels (lower is better).
    """
    lines = f.T.dot(pt2.T)
    a, b, c = lines
    n_pts = pt1.shape[0]
    total = 0.0
    for i in range(n_pts):
        numerator = np.abs((a[i] * pt1[i][0] + b[i] * pt1[i][1] + c[i]))
        total += numerator / np.sqrt(np.power(a[i], 2) + np.power(b[i], 2))
    return total / n_pts
# Report the mean epipolar distance for both estimates, in both directions
# (F maps points of image 1 to lines in image 2; F.T the reverse).
# acc associated with point2
print('Accuracy of the fundamental matrices by point2:', calaulate_dist(uv1, uv2, F))
print('Accuracy of the normalized fundamental matrices by point2:', calaulate_dist(uv1, uv2, F_norm))
# acc associated with point1
print('Accuracy of the fundamental matrices by point1:', calaulate_dist(uv2, uv1, F.T))
print('Accuracy of the normalized fundamental matrices by point1:', calaulate_dist(uv2, uv1, F_norm.T))
| yehsin/CV_class_hw2 | 1/hw2-1.py | hw2-1.py | py | 5,803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 29... |
7373532913 | from tornado import ioloop, httpclient as hc, gen, escape
from . import _compat as _
from .graphite import GraphiteRecord
from .utils import convert_to_format, parse_interval, parse_rule, HISTORICAL, interval_to_graphite, gen_log
import math
from collections import deque, defaultdict
from itertools import islice
# Shared module logger (tornado's general-purpose logger).
LOGGER = gen_log
# Aggregation methods accepted by GraphiteAlert.configure.
METHODS = "average", "last_value"
# Severity ordering used to sort rules; lower number = more severe.
LEVELS = {
    'critical': 0,
    'warning': 10,
    'normal': 20,
}
class sliceable_deque(deque):
    """A deque that also supports slice indexing, returning a new deque."""

    def __getitem__(self, index):
        try:
            # Plain deques handle integer indices natively.
            return deque.__getitem__(self, index)
        except TypeError:
            # deque raises TypeError for slice objects; emulate them
            # lazily with islice and wrap the result in the same type.
            return type(self)(islice(self, index.start, index.stop, index.step))
class AlertFabric(type):
    """ Register alert's classes and produce an alert by source. """

    # Maps a ``source`` string (e.g. 'graphite', 'url') to the alert class.
    alerts = {}

    def __new__(mcs, name, bases, params):
        cls = super(AlertFabric, mcs).__new__(mcs, name, bases, params)
        source = params.get('source')
        # Only concrete alert classes declare ``source``; the abstract base
        # leaves it None and is therefore not registered.
        if source:
            mcs.alerts[source] = cls
            LOGGER.info('Register Alert: %s' % source)
        return cls

    def get(cls, reactor, source='graphite', **options):
        """Instantiate the alert class registered for ``source``."""
        return cls.alerts[source](reactor, **options)
class BaseAlert(_.with_metaclass(AlertFabric)):
    """ Abstract basic alert class.

    Periodically loads a metric (subclasses implement :meth:`load`), checks
    it against the configured rules and notifies the reactor on level
    changes. Per-target state prevents duplicate notifications.
    """
    source = None
    def __init__(self, reactor, **options):
        self.reactor = reactor
        self.options = options
        self.client = hc.AsyncHTTPClient()
        try:
            self.configure(**options)
        except Exception as e:
            # Surface any configuration problem as a single ValueError.
            raise ValueError("Invalid alert configuration: %s" % e)
        self.waiting = False
        # Last seen level per target; the special keys track internal events.
        self.state = {None: "normal", "waiting": "normal", "loading": "normal"}
        # Bounded per-target value history (used by HISTORICAL rules).
        self.history = defaultdict(lambda: sliceable_deque([], self.history_size))
        LOGGER.info("Alert '%s': has inited" % self)
    def __hash__(self):
        return hash(self.name) ^ hash(self.source)
    def __eq__(self, other):
        # Alerts are equal when name and source match (see __hash__).
        return hash(self) == hash(other)
    def __str__(self):
        return "%s (%s)" % (self.name, self.interval)
    def configure(self, name=None, rules=None, query=None, **options):
        """Validate and store the alert's options, falling back to reactor defaults."""
        assert name, "Alert's name is invalid"
        self.name = name
        assert rules, "%s: Alert's rules is invalid" % name
        self.rules = [parse_rule(rule) for rule in rules]
        # Most severe rules first so check() fires the strongest match.
        self.rules = list(sorted(self.rules, key=lambda r: LEVELS.get(r.get('level'), 99)))
        assert query, "%s: Alert's query is invalid" % self.name
        self.query = query
        self.interval = interval_to_graphite(
            options.get('interval', self.reactor.options['interval']))
        interval = parse_interval(self.interval)
        self._format = options.get('format', self.reactor.options['format'])
        self.request_timeout = options.get(
            'request_timeout', self.reactor.options['request_timeout'])
        # history_size is given as a time span; convert it to a number of
        # samples by dividing by the polling interval.
        self.history_size = options.get('history_size', self.reactor.options['history_size'])
        self.history_size = parse_interval(self.history_size)
        self.history_size = int(math.ceil(self.history_size / interval))
        if self.reactor.options.get('debug'):
            # Poll every 5 seconds in debug mode regardless of the interval.
            self.callback = ioloop.PeriodicCallback(self.load, 5000)
        else:
            self.callback = ioloop.PeriodicCallback(self.load, interval)
    def convert(self, value):
        """Format a raw metric value for humans (bytes, percent, ...)."""
        return convert_to_format(value, self._format)
    def reset(self):
        """ Reset state to normal for all targets.
        It will repeat notification if a metric is still failed.
        """
        for target in self.state:
            self.state[target] = "normal"
    def start(self):
        """Start periodic polling and trigger an immediate first load."""
        self.callback.start()
        self.load()
        return self
    def stop(self):
        """Stop periodic polling."""
        self.callback.stop()
        return self
    def check(self, records):
        """Evaluate (value, target) pairs against the rules and notify."""
        for value, target in records:
            LOGGER.info("%s [%s]: %s", self.name, target, value)
            for rule in self.rules:
                rvalue = self.get_value_for_rule(rule, target)
                if rvalue is None:
                    continue
                if rule['op'](value, rvalue):
                    self.notify(rule['level'], value, target, rule=rule)
                    break
            else:
                # No rule matched -> back to normal. NOTE(review): `rule`
                # here is the last rule from the loop; this raises NameError
                # if self.rules is empty (configure asserts it is not).
                self.notify('normal', value, target, rule=rule)
            self.history[target].append(value)
    def get_value_for_rule(self, rule, target):
        """Resolve a rule's threshold; HISTORICAL rules use the target's average."""
        rvalue = rule['value']
        if rvalue == HISTORICAL:
            history = self.history[target]
            # Not enough samples yet to compare against history.
            if len(history) < self.history_size:
                return None
            rvalue = sum(history) / len(history)
        rvalue = rule['mod'](rvalue)
        return rvalue
    def notify(self, level, value, target=None, ntype=None, rule=None):
        """ Notify main reactor about event. """
        # Did we see the event before?
        if target in self.state and level == self.state[target]:
            return False
        # Do we see the event first time?
        if target not in self.state and level == 'normal' \
                and not self.reactor.options['send_initial']:
            return False
        self.state[target] = level
        return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule)
    def load(self):
        # Subclasses fetch their metric source here.
        raise NotImplementedError()
class GraphiteAlert(BaseAlert):
    """Alert that polls a Graphite render endpoint for metric values."""
    source = 'graphite'
    def configure(self, **options):
        super(GraphiteAlert, self).configure(**options)
        # How to collapse a series into one value: 'average' or 'last_value'.
        self.method = options.get('method', self.reactor.options['method'])
        assert self.method in METHODS, "Method is invalid"
        self.auth_username = self.reactor.options.get('auth_username')
        self.auth_password = self.reactor.options.get('auth_password')
        query = escape.url_escape(self.query)
        # rawData=true makes Graphite return plain-text series lines.
        self.url = "%(base)s/render/?target=%(query)s&rawData=true&from=-%(interval)s" % {
            'base': self.reactor.options['graphite_url'], 'query': query,
            'interval': self.interval}
    @gen.coroutine
    def load(self):
        """Fetch the metric asynchronously and run the rule checks."""
        LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
        if self.waiting:
            # The previous fetch has not finished yet; warn instead of piling up.
            self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
        else:
            self.waiting = True
            try:
                response = yield self.client.fetch(self.url, auth_username=self.auth_username,
                                                   auth_password=self.auth_password,
                                                   request_timeout=self.request_timeout)
                records = (GraphiteRecord(line.decode('utf-8')) for line in response.buffer)
                # Collapse each series with the configured method before checking.
                self.check([(getattr(record, self.method), record.target) for record in records])
                self.notify('normal', 'Metrics are loaded', target='loading', ntype='common')
            except Exception as e:
                # self.notify('critical', 'Loading error: %s' % e, target=getattr(e, '_target', 'loading'), ntype=self.source)  # 'common')
                self.notify('critical', '%s' % e, target=getattr(e, '_target', 'loading'))
            self.waiting = False
    def get_graph_url(self, target, graphite_url=None):
        """Build a human-viewable Graphite graph URL for ``target``."""
        query = escape.url_escape(target)
        return "%(base)s/render/?target=%(query)s&from=-%(interval)s" % {
            'base': graphite_url or self.reactor.options['graphite_url'], 'query': query,
            'interval': self.interval}
class URLAlert(BaseAlert):
    """Alert that polls an arbitrary URL and checks the HTTP status code."""
    source = 'url'
    @gen.coroutine
    def load(self):
        LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
        if self.waiting:
            # The previous request is still in flight.
            self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
        else:
            self.waiting = True
            try:
                response = yield self.client.fetch(self.query,
                                                   method=self.options.get('method', 'GET'),
                                                   request_timeout=self.request_timeout)
                # The checked "value" is the HTTP status code of the response.
                self.check([(response.code, self.query)])
                self.notify('normal', 'Metrics are loaded', target='loading')
            except Exception as e:
                self.notify('critical', str(e), target='loading')
            self.waiting = False
| lixiaocheng18/testops | graphite/lib/beacon/alerts.py | alerts.py | py | 8,351 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.gen_log",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "collections.deque.__getitem__",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collect... |
12427469941 | import os, sys
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import multiprocessing
from functools import partial
import gensim
from gensim import models, matutils
from gensim.corpora import MmCorpus, Dictionary
from scipy.stats import entropy
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import svdvals
from nltk.corpus import stopwords
from tqdm import tqdm
#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
ignore_words = frozenset(stopwords.words('english'))
##############################################################################################################################
'''
extract_data method is copied directly from gensim.py in the pyLDAvis library
https://github.com/bmabey/pyLDAvis/blob/master/pyLDAvis/gensim.py
'''
##############################################################################################################################
def extract_data(topic_model, corpus, dictionary, doc_topic_dists=None):
    """Extract the distributions pyLDAvis-style from a gensim topic model.

    Returns a dict with the topic-term distributions, document-topic
    distributions, document lengths and number of topics.
    (Adapted from pyLDAvis.gensim.)
    """
    if not matutils.ismatrix(corpus):
        corpus_csc = matutils.corpus2csc(corpus, num_terms=len(dictionary))
    else:
        corpus_csc = corpus
    # Need corpus to be a streaming gensim list corpus for len and inference functions below:
    corpus = matutils.Sparse2Corpus(corpus_csc)
    # TODO: add the hyperparam to smooth it out? no beta in online LDA impl.. hmm..
    # for now, I'll just make sure we don't ever get zeros...
    fnames_argsort = np.asarray(list(dictionary.token2id.values()), dtype=np.int_)
    doc_lengths = corpus_csc.sum(axis=0).A.ravel()
    assert doc_lengths.shape[0] == len(corpus), 'Document lengths and corpus have different sizes {} != {}'.format(doc_lengths.shape[0], len(corpus))
    # HDP models expose lda_alpha/lda_beta instead of num_topics/inference.
    if hasattr(topic_model, 'lda_alpha'):
        num_topics = len(topic_model.lda_alpha)
    else:
        num_topics = topic_model.num_topics
    if doc_topic_dists is None:
        # If its an HDP model.
        if hasattr(topic_model, 'lda_beta'):
            gamma = topic_model.inference(corpus)
        else:
            gamma, _ = topic_model.inference(corpus)
        # Normalize each document's topic weights to a distribution.
        doc_topic_dists = gamma / gamma.sum(axis=1)[:, None]
    else:
        if isinstance(doc_topic_dists, list):
            doc_topic_dists = matutils.corpus2dense(doc_topic_dists, num_topics).T
        elif issparse(doc_topic_dists):
            # NOTE(review): `issparse` is not imported in this module; this
            # branch would raise NameError if ever reached. Verify imports.
            doc_topic_dists = doc_topic_dists.T.todense()
        doc_topic_dists = doc_topic_dists / doc_topic_dists.sum(axis=1)
    assert doc_topic_dists.shape[1] == num_topics, 'Document topics and number of topics do not match {} != {}'.format(doc_topic_dists.shape[1], num_topics)
    # get the topic-term distribution straight from gensim without iterating over tuples
    if hasattr(topic_model, 'lda_beta'):
        topic = topic_model.lda_beta
    else:
        topic = topic_model.state.get_lambda()
    topic = topic / topic.sum(axis=1)[:, None]
    topic_term_dists = topic[:, fnames_argsort]
    assert topic_term_dists.shape[0] == doc_topic_dists.shape[1]
    # NOTE(review): this coherence model is built but never used or returned.
    coherence_model = models.CoherenceModel(model=topic_model, corpus=corpus, dictionary=dictionary, coherence='u_mass')
    return {'topic_term_dists': topic_term_dists, 'doc_topic_dists': doc_topic_dists,
            'doc_lengths': doc_lengths, 'num_topics': num_topics}
##############################################################################################################################
def cao_juan_2009(topic_term_dists, num_topics):
    """Average pairwise cosine distance between topic-term distributions.

    Cao Juan et al. (2009) metric: lower means less redundant topics.
    """
    distances = squareform(pdist(topic_term_dists, metric='cosine'))
    n_pairs = num_topics * (num_topics - 1) / 2
    return np.sum(distances) / n_pairs
def arun_2010(topic_term_dists, doc_topic_dists, doc_lengths, num_topics):
    """Arun et al. (2010) metric: KL divergence between the singular values
    of the topic-term matrix and the length-weighted topic mixture.

    ``num_topics`` is accepted for interface symmetry with the other metrics.
    """
    singular_values = svdvals(topic_term_dists)
    weighted_topics = np.matmul(doc_lengths, doc_topic_dists) / np.linalg.norm(doc_lengths)
    # scipy's entropy normalizes both inputs to probability vectors.
    return entropy(singular_values, weighted_topics)
def deveaud_2014(topic_term_dists, num_topics):
    """Mean pairwise Jensen-Shannon divergence between topics (Deveaud 2014)."""
    divergences = squareform(pdist(topic_term_dists, metric=jensen_shannon))
    return np.sum(divergences) / (num_topics * (num_topics - 1))
def jensen_shannon(P, Q):
    """Jensen-Shannon divergence between distributions P and Q (natural log)."""
    mixture = 0.5 * (P + Q)
    # Symmetric average of the two KL divergences against the mixture.
    return (entropy(P, mixture) + entropy(Q, mixture)) / 2
def preprocess_text(text):
    """Read a text file and return its lowercase tokens (len >= 3, accents
    stripped) with English stopwords removed.

    ``text`` is a file path; the whole file is joined into one string.
    """
    with open(text, 'r') as inp:
        text = ' '.join(line.rstrip('\n') for line in inp)
    return [word for word in gensim.utils.simple_preprocess(text, deacc=True, min_len=3) if word not in ignore_words]
def files_to_gen(directory):
    """Yield the full path of every file under ``directory``, recursively."""
    for dirpath, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            yield os.path.join(dirpath, filename)
class DocCorpus(gensim.corpora.TextCorpus):
    """TextCorpus over a directory: each file becomes one tokenized document."""
    def get_texts(self):
        # Tokenize files in parallel; self.input is the directory path.
        pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() - 1))
        for tokens in pool.map(preprocess_text, files_to_gen(self.input)):
            yield tokens
        pool.terminate()
def build_coherence_models(topic_model, **kwargs):
    """Compute the four gensim coherence scores for one fitted LDA model.

    kwargs must supply ``corpus``, ``dictionary`` and ``texts``.
    Returns a dict of {'num_topics', 'u_mass', 'c_v', 'c_uci', 'c_npmi'}.
    """
    u_mass = models.CoherenceModel(model=topic_model, corpus=kwargs['corpus'], dictionary=kwargs['dictionary'], coherence='u_mass')
    c_v = models.CoherenceModel(model=topic_model, texts=kwargs['texts'], corpus=kwargs['corpus'], dictionary=kwargs['dictionary'], coherence='c_v')
    c_uci = models.CoherenceModel(model=topic_model, texts=kwargs['texts'], corpus=kwargs['corpus'], dictionary=kwargs['dictionary'], coherence='c_uci')
    c_npmi = models.CoherenceModel(model=topic_model, texts=kwargs['texts'], corpus=kwargs['corpus'], dictionary=kwargs['dictionary'], coherence='c_npmi')
    return {'num_topics': topic_model.num_topics, 'u_mass': u_mass.get_coherence(), 'c_v': c_v.get_coherence(), 'c_uci': c_uci.get_coherence(), 'c_npmi': c_npmi.get_coherence()}
''' a poor attempt at implementing useless statistical measures, the result seems meaningless and is
in the img folder of the github project. '''
def main(text_dir):
    """Fit LDA models over a grid of topic counts and plot coherence scores.

    Parameters
    ----------
    text_dir : str
        Directory of raw documents. Currently unused because the corpus is
        read from the pre-built simple-wiki files below; kept so the
        original directory-driven workflow can be restored.
    """
    # BUG FIX: range objects cannot be concatenated with `+` on Python 3;
    # build an explicit list of topic counts instead.
    topics = list(range(10, 101, 10)) + list(range(120, 201, 20)) + list(range(250, 451, 50))
    #corpus = DocCorpus(text_dir)
    #dictionary = corpus.dictionary
    corpus = MmCorpus('../twitter_LDA_topic_modeling/simple-wiki.mm')
    dictionary = Dictionary.load('../twitter_LDA_topic_modeling/simple-wiki.dict')
    print('Building LDA models')
    lda_models = [models.LdaMulticore(corpus=corpus, id2word=dictionary, num_topics=i, passes=5) for i in tqdm(topics)]
    print('Generating coherence models')
    # Reconstruct the token lists needed by the c_v/c_uci/c_npmi measures.
    texts = [[dictionary[word_id] for word_id, freq in doc] for doc in corpus]
    pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() - 1))
    func = partial(build_coherence_models,
                   corpus=corpus,
                   dictionary=dictionary,
                   texts=texts)
    coherence_models = pool.map(func, lda_models)
    pool.close()
    # Wait for the workers to shut down cleanly before plotting.
    pool.join()
    d = defaultdict(list)
    print('Generating output data')
    for data in tqdm(coherence_models):
        d['num_topics'].append(data['num_topics'])
        d['u_mass'].append(data['u_mass'])
        d['c_v'].append(data['c_v'])
        d['c_uci'].append(data['c_uci'])
        d['c_npmi'].append(data['c_npmi'])
    df = pd.DataFrame(d)
    df = df.set_index('num_topics')
    df.to_csv('coherence_simple_wiki', sep='\t')
    # Plot all four coherence measures onto one shared set of axes.
    # (The original also created an extra unsaved figure here; removed.)
    ax1 = df.plot(xticks=df.index, style='bs-', grid=True, y='u_mass')
    ax2 = df.plot(xticks=df.index, style='yo-', grid=True, y='c_v', ax=ax1)
    ax3 = df.plot(xticks=df.index, style='r^-', grid=True, y='c_npmi', ax=ax2)
    df.plot(xticks=df.index, style='gx-', grid=True, y='c_uci', ax=ax3)
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.17), fancybox=True, shadow=True, ncol=4, fontsize=9)
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(df.index, rotation=45, ha='right', fontsize=8)
    plt.savefig('coherence_simple_wiki')
    plt.close()
if __name__ == '__main__':
    # CLI entry point: first argument is the directory of raw documents.
    sys.exit(main(sys.argv[1]))
| karakayaonurr/Topic-Modelling-with-LDA-at-Twitter | lda_tuna.py | lda_tuna.py | py | 8,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "gensim.matutils.ismatrix",
"line_number": 29,
"usage_type": "call"
},
{
"api_na... |
35219043762 | from itertools import product
import sys
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import re
sys.path.append('../../..')
from lib import excelUtils
from lib import httpUtils
from lib import textUtil
from lib.htmlEleUtils import getNodeText
from lib.htmlEleUtils import getInnerHtml
import math
products1 = []
headers1=[
'link','Breadcrumb','Product Name','size/price','Category'
]
def addHeader(header, title):
    """Append ``title`` to the header list unless it is empty or already present."""
    if len(title) > 0 and title not in header:
        header.append(title)
def getProductInfo(url):
    """Scrape one product page and append its fields to the global ``products1``.

    Also extends the global ``headers1`` with any newly seen attribute names.
    """
    # Progress log: how many products collected so far, and the current URL.
    print(str(len(products1))+"====="+url)
    sope = httpUtils.getHtmlFromUrl(url)
    nav = sope.find("div", attrs={"class":"breadcrumbs"})
    pInfo={
        "link":url
    }
    pInfo["Breadcrumb"] = getNodeText(nav)
    pInfo["Product Name"] = getNodeText(sope.find("h1", attrs={"class":"page-title"}))
    # SKU-style attribute blocks: <strong>label</strong> + value div.
    attrs = sope.find_all("div", attrs={"class":"product attribute sku"})
    for attr in attrs:
        title = getNodeText(attr.find("strong"))
        value = getNodeText(attr.find("div", attrs={"class":"value"}))
        pInfo[title] = value
        addHeader(headers1, title)
    # Size options: join "label-price" pairs into one comma-terminated string.
    sizes = sope.find_all("div", attrs={"class":"field choice admin__field admin__field-option required"})
    sizeStr = ""
    for size in sizes:
        option = size.find("input")
        sizeStr += getNodeText(size.find("label")) + "-" + option["price"]+","
    pInfo["size/price"] = sizeStr
    category = sope.find("div", attrs={"class":"product category"})
    pInfo["Category"] = getNodeText(category.find("div", attrs={"class":"value"}))
    # Spec table: rows with exactly one <th> (label) and one <td> (value).
    trs = sope.find_all("tr")
    for tr in trs:
        tds = tr.find_all("td")
        ths = tr.find_all("th")
        if len(tds) == 1 and len(ths) == 1:
            title = getNodeText(ths[0])
            value = getNodeText(tds[0])
            pInfo[title] = value
            addHeader(headers1, title)
    products1.append(pInfo.copy())
def getProductList(url):
    """Scrape a search-results page and collect every linked product on it."""
    sope = httpUtils.getHtmlFromUrl(url)
    ps = sope.find_all("li", attrs={"class":"item product product-item"})
    for p in ps:
        # The first anchor inside the item links to the product page.
        pLink = p.find("a")
        getProductInfo(pLink["href"])
# Crawl result pages 1-8 of the 'autoimmune' search, then export everything
# collected in products1/headers1 to a single-sheet Excel workbook.
for pIndex in range(1, 9):
    getProductList('https://www.arp1.com/catalogsearch/result/index/?p='+str(pIndex)+'&product_list_limit=100&q=autoimmune')
# getProductInfo('https://www.arp1.com/aire-antibody-csb-pa001502ha01hu.html')
excelUtils.generateExcelMultipleSheet('arp1.xlsx', [
    {
        "name": 'arp1',
        "header": headers1 ,
        "data": products1
    }
])
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "lib.httpUtils.getHtmlFromUrl",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "lib.httpUtil... |
72485988903 | import logging
import sshpubkeys
from django.core.exceptions import ValidationError
LOGGER = logging.getLogger(__name__)
def ssh_public_key_validator(public_key):
    '''
    Validate an SSH public key string.

    Raises django's ValidationError (chained to the parser error) when
    the key cannot be parsed by sshpubkeys.
    '''
    try:
        key = sshpubkeys.SSHKey(public_key)
        key.parse()
    except (sshpubkeys.InvalidKeyError, sshpubkeys.exceptions.MalformedDataError, UnicodeEncodeError) as exc:
        # Log the full traceback, then surface a user-friendly form error.
        LOGGER.exception(exc)
        raise ValidationError('Malformed SSH Public Key') from exc
| bpereto/borg-hive | src/borghive/lib/validators.py | validators.py | py | 495 | python | en | code | 35 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sshpubkeys.SSHKey",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sshpubkeys.InvalidKeyError",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ss... |
26299686956 | import mysql.connector
from datetime import datetime, timedelta
import os
import sys
from pathlib import Path
sql_pass =os.environ["MYSQLPASSWORD"]
railway_host =os.environ["MYSQLHOST"]
railway_user =os.environ["MYSQLUSER"]
railway_database =os.environ["MYSQLDATABASE"]
railway_port = int(os.environ["MYSQLPORT"])
def get_connection():
    """Open a new MySQL connection using the Railway environment settings."""
    connection = mysql.connector.connect(
        host = railway_host,
        user= railway_user,
        password= sql_pass,
        database=railway_database,
        port = railway_port,
    )
    return connection
# Fetch the user's unfinished tasks
def get_tasks(user_id):
    """Return all of the user's tasks whose status is not '完了' (done).

    Rows come back as dicts (dictionary cursor). The cursor and connection
    are now closed even if the query raises.
    """
    connection = get_connection()
    try:
        cursor = connection.cursor(dictionary=True)
        try:
            cursor.execute(
                "SELECT * FROM tasks WHERE user_id = %s AND status !='完了' ",
                (user_id,)
            )
            tasks = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        connection.close()
    return tasks
# Fetch all of the user's tasks (any status)
def get_all_tasks(user_id):
    """Return every task belonging to the user, as a list of dicts.

    The cursor and connection are now closed even if the query raises.
    """
    connection = get_connection()
    try:
        cursor = connection.cursor(dictionary=True)
        try:
            cursor.execute(
                "SELECT * FROM tasks WHERE user_id = %s ",
                (user_id,)
            )
            tasks = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        connection.close()
    return tasks
# Insert a new task row
def add_task(user_id, startdatetime_str, task, status, priority, enddatetime_str):
    """Insert a new task for the user and commit the transaction.

    The cursor and connection are now closed even when the INSERT fails.
    """
    connection = get_connection()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute( "INSERT INTO tasks (user_id, starttime, task,status,priority,endtime) VALUES (%s, %s, %s,%s,%s,%s)",
                           (user_id, startdatetime_str, task, status, priority, enddatetime_str)
            )
            connection.commit()
        finally:
            cursor.close()
    finally:
        connection.close()
# Change a task's status
def update_status(task_id, user_id, status):
    """Set the status of one of the user's tasks and commit."""
    connection = get_connection()
    cursor = connection.cursor()
    try:
        query = "UPDATE tasks SET status = %s WHERE id = %s AND user_id = %s"
        cursor.execute(query, (status, task_id, user_id))
        connection.commit()
    finally:
        # Always release the cursor and connection, even on failure.
        cursor.close()
        connection.close()
# Unfinished tasks whose end time passed more than 24 hours ago
def get_expired_tasks(current_time):
    """Return unfinished tasks whose endtime is at least one day before
    ``current_time``.

    The cursor and connection are now closed even if the query raises.
    """
    connection = get_connection()
    try:
        cursor = connection.cursor(dictionary=True)
        try:
            # Tasks older than this cutoff are considered expired.
            expired_time = current_time - timedelta(days=1)
            cursor.execute(
                "SELECT * FROM tasks WHERE endtime <= %s AND status != '完了' ",
                (expired_time,)
            )
            expired_tasks = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        connection.close()
    return expired_tasks
| uninin3141/task_manage_bot | app/dataset/db.py | db.py | py | 2,618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_n... |
import matplotlib.pyplot as pt
import math
# Time steps used in the integrator comparison.
dt = [1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2]
dtl = [math.log(x) for x in dt]
# Measured energy drift per integrator. NOTE(review): the 1e9 entries
# presumably mark runs that diverged at the largest time steps — confirm.
dr = [1e-6, 2e-6, 4e-6, 2e-5, 9e-5, 6e-4, 1e-3]
dr2 = [2e-2, 7e-2, 1e-1, 5e-1, 1e0, 1e9, 1e9]
dr3 = [3e-1, 7e-1, 2e0, 8e0, 1.3e1, 1e9, 1e9]
# Log-log data for a straight-line order-of-accuracy comparison.
drl = [math.log(x) for x in dr]
drl2 = [math.log(x) for x in dr2]
drl3 = [math.log(x) for x in dr3]
pt.plot(dtl, drl, label = 'Verlet')
pt.plot(dtl, drl2, label = 'velocity Verlet')
pt.plot(dtl, drl3, label = 'Euler')
pt.xlim((-10, -4))
pt.ylim((-14, 4))
pt.xlabel(r'ln dt')
pt.ylabel(r'ln drift')
pt.legend()
pt.show()
{
"api_name": "math.log",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 11,
"usage... |
7813713746 | """add tree_parameters column to groups
Create Date: 2022-05-02 21:53:26.704275
"""
import enumtables # noqa: F401
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "20220502_215324"
down_revision = "20220502_171903"
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable JSONB ``tree_parameters`` column to aspen.groups."""
    op.add_column(
        "groups",
        sa.Column(
            "tree_parameters",
            postgresql.JSONB(astext_type=sa.Text()),
            # Existing rows default to an empty JSON object.
            server_default=sa.text("'{}'::jsonb"),
            nullable=True,
        ),
        schema="aspen",
    )
def downgrade():
    # Downgrades are deliberately unsupported for this migration.
    raise NotImplementedError("don't downgrade")
| chanzuckerberg/czgenepi | src/backend/database_migrations/versions/20220502_215324_add_tree_parameters_column_to_groups.py | 20220502_215324_add_tree_parameters_column_to_groups.py | py | 696 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialect... |
69808754346 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from scipy import interpolate
def floats_to_rgb(x, min=-1, max=1):
    """
    Translates floats in [min, max) to valid RGB integers in [0, 255].
    Values outside the range are clamped to 0 and 255.
    """
    scaled = 256 * (np.array(x) - min) / (max - min)
    # Clamp out-of-range values instead of letting them wrap on cast.
    scaled[scaled < 0] = 0
    scaled[scaled > 255] = 255
    assert np.min(scaled) >= 0 and np.max(scaled) <= 255
    return scaled.astype("uint8")
def rgb_to_floats(x, min=-1, max=1):
    """
    The "inverse" of floats_to_rgb: map byte values in [0, 255]
    back onto floats in [min, max].
    Note the denominator is 255, mirroring SLiM.
    """
    values = np.array(x, dtype='float')
    return min + (max - min) * values / 255
def xyz_to_array(x, y, z):
    """
    Given arrays of regularly-spaced x and y values, with z[i] the value at
    (x[i], y[i]), return (xx, yy, zz) where xx/yy are the sorted unique
    coordinates and zz[i, j] is the z value at (xx[j], yy[i]).
    """
    xx = np.unique(x)
    yy = np.unique(y)
    zz = np.zeros((len(yy), len(xx)))
    # Map each sample to its grid cell via its coordinate's sorted position.
    rows = np.searchsorted(yy, y)
    cols = np.searchsorted(xx, x)
    for r, c, val in zip(rows, cols, z):
        zz[r, c] = val
    return xx, yy, zz
def xyz_to_function(x, y, z, **kwargs):
    """
    Given arrays of regularly-spaced x and y values, with z[i] corresponding to the value at
    (x[i], y[i]), return the function that linearly interpolates the values of z to other
    values of x and y. Will extrapolate outside of the given domain.
    """
    xx, yy, zz = xyz_to_array(x, y, z)
    # fill_value=None + bounds_error=False makes the interpolator extrapolate.
    return interpolate.RegularGridInterpolator((xx, yy), zz.T, **kwargs, fill_value=None, bounds_error=False)
def slope_layers(height, f=None):
    """
    Given an (n + 1, m + 1)-layer ``height``, return the (n, m, 2) layer with
    the x- and y-components of the slope of ``height``: for a cell whose
    surrounding heights are
    > c d
    > a b
    the slope is ((b - a)/2 + (d - c)/2, (c - a)/2 + (d - b)/2).
    """
    if f is None:
        f = (1, 1)
    # Finite differences along each axis, optionally scaled per-axis by f.
    dx = f[0] * np.diff(height, axis=1)
    # Negated because images place (0, 0) in the lower-left corner.
    dy = f[1] * (-1) * np.diff(height, axis=0)
    x_slope = (dx[1:, :] + dx[:-1, :]) / 2
    y_slope = (dy[:, 1:] + dy[:, :-1]) / 2
    return np.stack([x_slope, y_slope], axis=-1)
def function_height(f, nrow, ncol, xrange, yrange, **kwargs):
    """
    Return an (nrow x ncol) numpy array whose entries are
    > out[i, j] = f(x[i], y[j])
    with x linearly spaced over xrange and y over yrange.
    """
    xvals = np.linspace(xrange[0], xrange[1], nrow)
    yvals = np.linspace(yrange[0], yrange[1], ncol)
    # Each x value is repeated ncol times while the y values cycle,
    # which lays out the full (x[i], y[j]) grid in row-major order.
    x = np.repeat([xvals], ncol, axis=1)
    y = np.repeat([yvals], nrow, axis=0).flatten()
    heights = f(x, y, **kwargs)
    heights.shape = (nrow, ncol)
    return heights
def bump_height(nrow, ncol, width=None, center=None):
    """
    Return an (nrow x ncol) numpy array with values given by the bump function
    > exp(- 1 / (1 - r^2) )
    where r = sqrt((x/width[0])^2 + (y/width[1])^2), measured from ``center``.
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    # Row-major grid of (row - cx, col - cy) offsets (see function_height).
    x = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    y = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    # Keep the radicand away from zero so the exponent stays finite.
    radicand = np.maximum(0.05, 1 - ((x / width[0]) ** 2 + (y / width[1]) ** 2))
    heights = np.exp(-1 / radicand)
    heights[heights < 0] = 0.0
    heights.shape = (nrow, ncol)
    return(heights)
def gaussian_height(nrow, ncol, width=None, center=None):
    """
    Return an (nrow x ncol) numpy array with values given by the gaussian density
    > exp(- r^2 / 2 )
    where r = sqrt((x/width[0])^2 + (y/width[1])^2), measured from ``center``.
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    # Row-major grid of (row - cx, col - cy) offsets (see function_height).
    x = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    y = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    r_squared = (x / width[0]) ** 2 + (y / width[1]) ** 2
    heights = np.exp(-r_squared / 2)
    heights[heights < 0] = 0.0
    heights.shape = (nrow, ncol)
    return(heights)
def saddle_height(nrow, ncol, width=None, center=None):
    """
    Return a (nrow x ncol) numpy array with values given by the saddle
    > exp( - ((x/width[0])^2 - (y/width[1])^2) / 2 ),
    where (x, y) are grid offsets from ``center`` (default: the array
    midpoint); ``width`` defaults to ``center``.

    (The original docstring said "gaussian density" — a copy-paste error;
    the sign difference between the x and y terms makes this a saddle.)
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if width is None:
        width = center
    # x varies along rows, y along columns; both flattened to nrow*ncol values.
    x = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    y = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    z = (x/width[0]) ** 2 - (y/width[1]) ** 2
    out = np.exp(- z/2)
    # (The original zeroed negative entries here, but exp() is strictly
    # positive, so that line was dead code and has been removed.)
    out.shape = (nrow, ncol)
    return out
def mountain_height(nrow, ncol, slope=None, center=None):
    """
    Return a (nrow x ncol) numpy array that has value 1.0 at ``center``
    and declines linearly with ``slope`` to zero (clipped at 0).
    """
    if center is None:
        center = np.array([(nrow - 1) / 2, (ncol - 1) / 2])
    if slope is None:
        # put 0.0 at the further edge of the smaller dimension.
        # Fixed: center[0] is the row coordinate (extent nrow) and
        # center[1] the column coordinate (extent ncol); the original
        # paired center[0] with ncol and center[1] with nrow, giving the
        # wrong default slope for non-square arrays.
        slope = 1.0 / min(max(nrow - center[0], center[0]),
                          max(ncol - center[1], center[1]))
    # x varies along rows, y along columns; both flattened to nrow*ncol values.
    x = np.repeat([np.arange(nrow) - center[0]], ncol, axis=1)
    y = np.repeat([np.arange(ncol) - center[1]], nrow, axis=0).flatten()
    dist = np.sqrt(x ** 2 + y ** 2)
    out = 1.0 - dist * slope
    # Clip the cone at ground level (NOT dead code here: the linear ramp
    # genuinely goes negative far from the center).
    out[out < 0] = 0.0
    out.shape = (nrow, ncol)
    return out
def make_slope_rgb(nrow, ncol, height_fn, f=None, **kwargs):
    # Build an (nrow, ncol, 4) RGBA image whose R/G channels encode the
    # x/y slopes of a height field.  The field is sampled on an enlarged
    # (nrow+1, ncol+1) grid so that slopes taken between grid nodes yield
    # exactly nrow x ncol values; `f` is forwarded to slope_layers.
    if 'center' in kwargs:
        center = kwargs['center']
        # Rescale the center so it lands at the same relative position on
        # the enlarged (nrow+1, ncol+1) grid.
        kwargs['center'] = [center[0] * (1 + 1/nrow), center[1] * (1 + 1/ncol)]
    height = height_fn(
        nrow + 1,
        ncol + 1,
        **kwargs)
    slope = slope_layers(height, f=f)
    # Normalize slopes into [-1, 1] before byte-encoding; pad with a zero
    # blue channel and a fully opaque alpha channel.
    # NOTE(review): divides by max |slope| — a perfectly flat height field
    # would divide by zero; confirm callers never pass one.
    out = np.concatenate([
        floats_to_rgb(slope / np.max(np.abs(slope)), min=-1, max=1),
        np.full((nrow, ncol, 1), 0, dtype='uint8'),
        np.full((nrow, ncol, 1), 255, dtype='uint8')
    ], axis=-1)
    return out.astype("uint8")
def make_sigma_rgb(nrow, ncol, height_fn, **kwargs):
    # Build an (nrow, ncol, 4) RGBA image carrying the height field (as a
    # "sigma" value, per the function name) in both R and G channels; B is
    # the byte-encoding of zero and A is fully opaque.
    # uses same sigma in x and y direction; no correlation
    # do it by averaging the +1 grid to agree with slope
    if 'center' in kwargs:
        center = kwargs['center']
        # Rescale the center onto the enlarged (nrow+1, ncol+1) grid,
        # mirroring make_slope_rgb.
        kwargs['center'] = [center[0] * (1 + 1/nrow), center[1] * (1 + 1/ncol)]
    height = height_fn(
        nrow + 1,
        ncol + 1,
        **kwargs)
    # Average each cell's four surrounding grid nodes down to one
    # nrow x ncol value, then byte-encode floats in [-1, 1].
    sigma = floats_to_rgb((height[:-1,:-1]
                           + height[1:,:-1]
                           + height[:-1,1:]
                           + height[1:,1:])[:,:,np.newaxis] / 4,
                          min=-1, max=1)
    zero = floats_to_rgb(np.full((nrow, ncol, 1), 0), min=-1, max=1)
    out = np.concatenate([
        sigma,
        sigma,
        zero,
        np.full((nrow, ncol, 1), 255, dtype='uint8')
    ], axis=-1)
    return out.astype("uint8")
def mountain_slope(nrow, ncol, slope=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels encode the
    downslope bias in x and y on a "stratovolcano" (linear cone); B is 0
    and A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_slope_rgb(nrow, ncol, mountain_height, slope=slope, center=ctr)
def mountain_sigma(nrow, ncol, slope=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels carry the
    sigma value on a "stratovolcano" (linear cone); B is 0 and A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_sigma_rgb(nrow, ncol, mountain_height, slope=slope, center=ctr)
def saddle_slope(nrow, ncol, width=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels encode the
    downslope bias in x and y on the saddle exp(-(x^2 - y^2)/2); B is 0
    and A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_slope_rgb(nrow, ncol, saddle_height, width=width, center=ctr)
def gaussian_slope(nrow, ncol, width=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels encode the
    downslope bias in x and y on a gaussian hill (gaussian_height); B is 0
    and A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_slope_rgb(nrow, ncol, gaussian_height, width=width, center=ctr)
def butte_slope(nrow, ncol, width=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels encode the
    downslope bias in x and y on a "butte" (bump function); B is 0 and
    A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_slope_rgb(nrow, ncol, bump_height, width=width, center=ctr)
def butte_sigma(nrow, ncol, width=None, center=None):
    """Build an (nrow, ncol, 4) RGBA array whose R/G channels carry the
    sigma value on a "butte" (bump function); B is 0 and A is 255.
    """
    ctr = np.array([nrow / 2, ncol / 2]) if center is None else center
    return make_sigma_rgb(nrow, ncol, bump_height, width=width, center=ctr)
| kr-colab/product-space-FEM | simulation/maps/map_utils.py | map_utils.py | py | 9,643 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
... |
4390242213 | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for this app: each path maps a URL suffix to the matching
# view function in views.py; `name` allows reverse() / {% url %} lookups
# from elsewhere in the project.
urlpatterns = [
    path("",views.index,name="loginmain"),
    path("login/",views.login,name="login"),
    path("logout/",views.logout,name="logout"),
    path("change/",views.change,name="change"),
    path("sendcode/",views.send_code,name="sendcode"),
    path("verify/",views.verify,name="verify"),
    path('reset/',views.reset,name="reset"),
]
| Hardik01101/BlogSite-1 | login/urls.py | urls.py | py | 447 | python | en | code | null | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
26473361587 | import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
def show_image(img):
    """Display an image array in a blocking matplotlib window."""
    plt.imshow(img)
    plt.show()
def show_class(idx):
    """Display the mask of pixels whose value equals ``idx``.

    Relies on the module-level ``img`` loaded below.  Comparing against
    [idx, idx, idx] produces a per-channel boolean array; ``* 1.0``
    converts it to floats for imshow.
    """
    celing = (img[:, :] == [idx, idx, idx]) * 1.0
    plt.imshow(celing)
    plt.show()
# input image in order calibration
INPUT_DIR = 'data/seg'
PATH = os.path.join(os.getcwd(), INPUT_DIR)
# NOTE(review): os.listdir order is arbitrary — sort `images` if a
# specific "first" image is expected.
images = os.listdir(PATH)
img = cv2.imread(os.path.join(PATH, images[0]))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Class ids are assumed to be the consecutive integers mi..ma.
mi, ma = np.min(img), np.max(img)
n_classes = ma - mi + 1
print('Class:{}'.format(n_classes))
# get shape of image
# NOTE(review): numpy image shape is (rows, cols, channels), so these
# names look swapped (W receives the height); both are unused below.
W, H, _ = img.shape
# Show the per-class masks for the first six class ids.
show_class(0)
show_class(1)
show_class(2)
show_class(3)
show_class(4)
show_class(5)
# celing = (img[:,:]==[187, 188,67])*1.0
# show_image(celing)
# celing [187, 188,67]
plt.imshow(img)
plt.show()
# mi, ma = np.min(img), np.max(img)
# n_classes = ma - mi + 1
| Naxalov/Seg2Dataset | main.py | main.py | py | 858 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplo... |
35029813116 | from pyglossary.plugins.formats_common import *
from struct import unpack
from zlib import decompress
from datetime import datetime
# Plugin registration metadata read by pyglossary's plugin loader.
enable = True
lname = "appledict_bin"
format = "AppleDictBin"  # NOTE: shadows the `format` builtin at module level
description = "AppleDict Binary"
extensions = (".dictionary", ".data",)
extensionCreate = ""
singleFile = True
kind = "binary"
wiki = ""
website = (
    "https://support.apple.com/en-gu/guide/dictionary/welcome/mac",
    "Dictionary User Guide for Mac",
)
# User-tunable read options exposed by the plugin (consumed as the
# Reader's _html / _html_full flags).
optionsProp = {
    "html": BoolOption(comment="Entries are HTML"),
    "html_full": BoolOption(
        comment="Turn every entry's definition into an HTML document",
    ),
}
class Reader(object):
    """Read an Apple Dictionary ``Body.data`` binary file and yield its
    entries as glossary entries.

    The file is scanned as a sequence of zlib-compressed buffers; each
    decompressed buffer holds size-prefixed ``<d:entry>`` XML chunks.
    Entries are parsed with lxml, and ``x-dictionary:`` links are
    rewritten to ``bword://`` links.
    """
    # Third-party requirements: import name -> pip package name.
    depends = {
        "lxml": "lxml",
    }
    # Option defaults (see module-level optionsProp): treat definitions as
    # HTML, and optionally wrap each one in a full HTML document.
    _html: bool = True
    _html_full: bool = False

    def __init__(self, glos):
        self._glos = glos
        self._filename = ""
        self._file = None  # open Body.data file object, set in open()
        self._encoding = "utf-8"
        self._buf = ""  # current decompressed buffer being scanned
        self._defiFormat = "m"
        # Matches opening <a ...> tags so hrefs can be rewritten.
        self._re_link = re.compile(f'<a [^<>]*>')
        self._titleById = {}  # entry id -> d:title, filled by readEntryIds()
        self._wordCount = 0
        # Fail early with an actionable message if lxml is missing.
        try:
            from lxml import etree
        except ModuleNotFoundError as e:
            e.msg += f", run `{pip} install lxml` to install"
            raise e

    def sub_link(self, m: "Match"):
        """re.sub callback: rewrite one ``<a ...>`` tag's href.

        ``x-dictionary:d:<word>`` and ``x-dictionary:r:<id>`` links become
        ``bword://`` links; http(s) links are kept; anything else is
        treated as a bare headword.
        """
        from lxml.html import fromstring, tostring
        a_raw = m.group(0)
        a = fromstring(a_raw)
        href = a.attrib.get("href", "")
        if href.startswith("x-dictionary:d:"):
            # Direct link by headword.
            word = href[len("x-dictionary:d:"):]
            a.attrib["href"] = href = f"bword://{word}"
        elif href.startswith("x-dictionary:r:"):
            # Link by internal entry id; resolve it to a title if known.
            # https://github.com/ilius/pyglossary/issues/343
            id_i = len("x-dictionary:r:")
            id_j = href.find(":", id_i)
            _id = href[id_i:id_j]
            title = self._titleById.get(_id)
            if title:
                a.attrib["href"] = href = f"bword://{title}"
            else:
                # Fall back to the tag's own title attribute, if any.
                title = a.attrib.get("title")
                if title:
                    a.attrib["href"] = href = f"bword://{title}"
        elif href.startswith("http://") or href.startswith("https://"):
            pass  # external links are kept as-is
        else:
            a.attrib["href"] = href = f"bword://{href}"
        a_new = tostring(a).decode("utf-8")
        a_new = a_new[:-4]  # remove '</a>'
        return a_new

    def fixLinksInDefi(self, defi: str) -> str:
        """Rewrite every ``<a>`` href in a definition (see sub_link)."""
        defi = self._re_link.sub(self.sub_link, defi)
        return defi

    def open(self, filename):
        """Locate and open Body.data, derive the glossary name from the
        path, read the data limit from the header, and index entry ids.
        """
        self._defiFormat = "h" if self._html else "m"
        parts = split(filename)
        dbname = parts[-1]
        if isdir(filename):
            # Accept the Contents dir itself, or any dir containing
            # Contents/Body.data or Contents/Resources/Body.data.
            if parts[-1] == "Contents":
                filename = join(filename, "Body.data")
                if len(parts) > 2:
                    dbname = parts[-2]
            elif isfile(join(filename, "Contents/Body.data")):
                filename = join(filename, "Contents/Body.data")
            elif isfile(join(filename, "Contents/Resources/Body.data")):
                filename = join(filename, "Contents/Resources/Body.data")
            else:
                raise IOError(
                    "could not find Body.data file, "
                    "please select Body.data file instead of directory"
                )
        elif dbname == "Body.data" and len(parts) > 1:
            # Walk up past Contents/Resources to reach the bundle name.
            dbname = parts[-2]
            if len(parts) > 2:
                if dbname == "Contents":
                    dbname = parts[-3]
                elif dbname == "Resources" and len(parts) > 3:
                    dbname = parts[-4]
        if not isfile(filename):
            # NOTE(review): this f-string has no placeholder — the message
            # looks scrubbed; presumably f"no such file: {filename}".
            raise IOError(f"no such file: (unknown)")
        if dbname.endswith(".dictionary"):
            dbname = dbname[:-len(".dictionary")]
        self._glos.setInfo("name", dbname)
        self._filename = filename
        self._file = open(filename, "rb")
        # The int32 at offset 0x40 gives the size of the data region
        # relative to 0x40; compressed buffers start at 0x60.
        self._file.seek(0x40)
        self._limit = 0x40 + unpack("i", self._file.read(4))[0]
        self._file.seek(0x60)
        t0 = datetime.now()
        self.readEntryIds()
        dt = datetime.now() - t0
        log.info(
            f"Reading entry IDs took {int(dt.total_seconds() * 1000)} ms, "
            f"number of entries: {self._wordCount}"
        )

    def __len__(self):
        # Number of entries, as counted by readEntryIds().
        return self._wordCount

    def close(self):
        """Close the underlying file, if open."""
        if self._file is not None:
            self._file.close()
            self._file = None

    def getChunkSize(self, pos):
        """Decode the integer size prefix (at most 4 bytes) that precedes
        the ``<d:entry`` at/after ``pos`` in the current buffer.

        Returns (chunkSize, plus) where ``plus`` is the prefix length;
        (0, 0) means no usable prefix was found.
        """
        plus = self._buf[pos:pos + 12].find(b"<d:entry")
        if plus < 1:
            return 0, 0
        bs = self._buf[pos:pos + plus]
        if plus < 4:
            # Pad to the 4 bytes struct.unpack("i") requires.
            bs = b"\x00" * (4 - plus) + bs
        try:
            chunkSize, = unpack("i", bs)
        except Exception as e:
            log.error(f"{self._buf[pos:pos+100]}")
            raise e
        return chunkSize, plus

    def _getDefi(self, entryElem: "Element") -> str:
        """Serialize one ``<d:entry>`` element into the definition string,
        rewriting links and (optionally) wrapping in a full HTML document.
        """
        from lxml import etree
        if not self._html:
            # Non-HTML mode: concatenate the serialized descendants.
            # FIXME: this produces duplicate text for Idioms.dictionary, see #301
            return "".join([
                etree.tostring(
                    child,
                    encoding="utf-8",
                ).decode("utf-8")
                for child in entryElem.iterdescendants()
            ])
        defi = etree.tostring(
            entryElem,
            encoding="utf-8",
        ).decode("utf-8")
        defi = self.fixLinksInDefi(defi)
        if self._html_full:
            defi = (
                f'<!DOCTYPE html><html><head>'
                f'<link rel="stylesheet" href="style.css">'
                f'</head><body>{defi}</body></html>'
            )
        return defi

    def _readEntryData(self, pos: int) -> "Tuple[bytes, int]":
        """Return (raw bytes of the entry starting at ``pos``, new pos).

        Falls back to scanning for the closing ``</d:entry>`` tag when no
        size prefix is found.
        """
        chunkSize, plus = self.getChunkSize(pos)
        pos += plus
        if chunkSize == 0:
            endI = self._buf[pos:].find(b"</d:entry>")
            if endI == -1:
                # No closing tag: consume the rest of the buffer.
                chunkSize = len(self._buf) - pos
            else:
                chunkSize = endI + 10  # len(b"</d:entry>") == 10
        entryBytes = self._buf[pos:pos + chunkSize]
        pos += chunkSize
        return entryBytes, pos

    def _readEntry(self, pos: int) -> "Tuple[BaseEntry, int]":
        """
        Parse the entry at ``pos``; returns (entry, pos).
        ``entry`` is None for blank chunks or chunks without a d:entry.
        """
        from lxml import etree
        entryBytes, pos = self._readEntryData(pos)
        entryFull = entryBytes.decode(self._encoding, errors="replace")
        entryFull = entryFull.strip()
        if not entryFull:
            return None, pos
        try:
            entryRoot = etree.fromstring(entryFull)
        except etree.XMLSyntaxError as e:
            log.error(
                f"pos={pos}, len(buf)={len(self._buf)}, "
                f"entryFull={entryFull!r}"
            )
            raise e
        entryElems = entryRoot.xpath("/d:entry", namespaces=entryRoot.nsmap)
        if not entryElems:
            return None, pos
        word = entryElems[0].xpath("./@d:title", namespaces=entryRoot.nsmap)[0]
        defi = self._getDefi(entryElems[0])
        if self._limit <= 0:
            raise ValueError(f"self._limit = {self._limit}")
        return self._glos.newEntry(
            word, defi,
            defiFormat=self._defiFormat,
            byteProgress=(self._absPos, self._limit),
        ), pos

    def readEntryIds(self):
        """Scan the whole data region once, mapping each entry's ``id``
        attribute to its ``d:title`` (used to resolve x-dictionary:r:
        links) and counting entries.  Rewinds the file to 0x60 when done.
        """
        _file = self._file
        limit = self._limit
        titleById = {}
        while True:
            absPos = _file.tell()
            if absPos >= limit:
                break
            # Each block: int32 compressed size, 8 bytes skipped, zlib data.
            bufSizeB = _file.read(4)  # type: bytes
            bufSize, = unpack("i", bufSizeB)  # type: int
            self._buf = decompress(_file.read(bufSize)[8:])
            pos = 0
            while pos < len(self._buf):
                b_entry, pos = self._readEntryData(pos)
                b_entry = b_entry.strip()
                if not b_entry:
                    continue
                # Extract id="..." and d:title="..." by raw byte search
                # rather than XML parsing.
                id_i = b_entry.find(b'id="')
                if id_i < 0:
                    log.error(f"id not found: {b_entry}, pos={pos}, buf={self._buf}")
                    continue
                id_j = b_entry.find(b'"', id_i + 4)
                if id_j < 0:
                    log.error(f"id closing not found: {b_entry.decode(self._encoding)}")
                    continue
                _id = b_entry[id_i + 4: id_j].decode(self._encoding)
                title_i = b_entry.find(b'd:title="')
                if title_i < 0:
                    log.error(f"title not found: {b_entry.decode(self._encoding)}")
                    continue
                title_j = b_entry.find(b'"', title_i + 9)
                if title_j < 0:
                    log.error(f"title closing not found: {b_entry.decode(self._encoding)}")
                    continue
                titleById[_id] = b_entry[title_i + 9: title_j].decode(self._encoding)
        self._titleById = titleById
        _file.seek(0x60)
        self._wordCount = len(titleById)

    def __iter__(self):
        """Yield a data entry for DefaultStyle.css (if present), then every
        dictionary entry in file order."""
        from os.path import dirname
        if self._file is None:
            raise RuntimeError("iterating over a reader while it's not open")
        glos = self._glos
        cssFilename = join(dirname(self._filename), "DefaultStyle.css")
        if isfile(cssFilename):
            with open(cssFilename, mode="rb") as cssFile:
                cssBytes = cssFile.read()
            yield glos.newDataEntry("style.css", cssBytes)
        _file = self._file
        limit = self._limit
        while True:
            # Track position for byteProgress reporting in _readEntry.
            self._absPos = _file.tell()
            if self._absPos >= limit:
                break
            bufSizeB = _file.read(4)  # type: bytes
            # alternative for buf, bufSize is calculated
            # ~ flag = f.tell()
            # ~ bufSize = 0
            # ~ while True:
            # ~ zipp = f.read(bufSize)
            # ~ try:
            # ~ # print(zipp)
            # ~ input(zipp.decode(self._encoding))
            # ~ buf = decompress(zipp[8:])
            # ~ # print(buf)
            # ~ break
            # ~ except:
            # ~ print(bufSize)
            # ~ f.seek(flag)
            # ~ bufSize = bufSize+1
            bufSize, = unpack("i", bufSizeB)  # type: int
            self._buf = decompress(_file.read(bufSize)[8:])
            pos = 0
            while pos < len(self._buf):
                entry, pos = self._readEntry(pos)
                if entry is not None:
                    yield entry
| xiuxi/pyglossary | pyglossary/plugins/appledict_bin.py | appledict_bin.py | py | 8,315 | python | en | code | null | github-code | 36 | [
{
"api_name": "lxml.html.fromstring",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "lxml.html.tostring",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
27625090373 | """empty message
Revision ID: 187613429bc6
Revises: f493fd2f04fa
Create Date: 2023-03-11 20:54:05.004095
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration's id and its parent in the Alembic revision chain.
revision = '187613429bc6'
down_revision = 'f493fd2f04fa'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the application tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('cat',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_name', sa.String(length=16), nullable=True),
    sa.Column('d_eat', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('customer',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('c_name', sa.String(length=16), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('dog',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_name', sa.String(length=16), nullable=True),
    sa.Column('d_legs', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('goods',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('g_name', sa.String(length=64), nullable=True),
    sa.Column('g_price', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('student',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('u_name', sa.String(length=16), nullable=True),
    sa.Column('u_des', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('u_name')
    )
    # `address` is created last: it carries a foreign key to `customer`,
    # which must already exist.
    op.create_table('address',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_position', sa.String(length=128), nullable=True),
    sa.Column('a_customer_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['a_customer_id'], ['customer.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the tables in reverse creation order
    (``address`` first, since it references ``customer``)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('address')
    op.drop_table('user')
    op.drop_table('student')
    op.drop_table('goods')
    op.drop_table('dog')
    op.drop_table('customer')
    op.drop_table('cat')
    # ### end Alembic commands ###
| operatorhs/python-flask | flask-stu/migrations/versions/187613429bc6_.py | 187613429bc6_.py | py | 2,518 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
30838793023 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 18:11:32 2021
@author: mathisagathe
"""
from pymongo import MongoClient
# NOTE(review): hard-coded credentials — move to environment variables or
# a config file before sharing/deploying.
client = MongoClient("10.35.7.4", username="mathis", password="MathisM21", authsource="mathisdb")
db = client.mathisdb
collection = db["TripAdvisor"]

# Total number of French restaurants on TripAdvisor.
# count_documents() replaces Cursor.count(), which was removed in PyMongo 4.
r1 = {"country": "France"}
nbrestoFR = collection.count_documents(r1)
print("Le nombre total de restaurants en France sur TA est de : ", nbrestoFR)

# Restaurants in France serving vegetarian and gluten-free meals.
r2 = {"$and":
    [
        {"country": "France"},
        {"vegetarian_friendly": "Y"},
        {"gluten_free": "Y"}
    ]
}
nbr2 = collection.count_documents(r2)
print("Le nombre total de restaurants en France servant des repas végétariens et sans gluten est de : ", nbr2)

# Top 5 European cities with the most restaurants.
# NOTE(review): the limit is 6, not 5 — confirm whether one extra row is
# intentional (e.g. to skip a placeholder city); otherwise use 5.
r3 = collection.aggregate([
    {"$group": {"_id": "$city", "nb": {"$sum": 1}}},
    {"$sort": {"nb": -1}},
    {"$limit": 6}
])
for i in r3:
    print(i)
| romanelollier/School_Project_BigData | requetes.py | requetes.py | py | 1,040 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
}
] |
25903248374 | # -*- coding: utf-8 -*-
# @Time : 2018/3/27 8:18
# @Author : glacier
# @Email : 2284711614@qq.com
# @File : get_plan_to_md.py
# @Software: PyCharm
import os,time
import pymysql
import datetime
if __name__ == '__main__':
    # Date whose plans are exported (formatted YYYY-MM-DD).
    # today = time.strftime('%Y-%m-%d',time.localtime(time.time()))
    today = '2018-03-23'
    db = pymysql.connect(
        "123.206.84.216",
        "user001",
        "123456",
        "glacier",
        charset='utf8'
    )
    # Create a cursor with the cursor() method.
    cursor = db.cursor()
    # Parameterized LIKE pattern instead of string concatenation
    # (avoids SQL injection and quoting bugs).
    sql = ("SELECT user_id,plan_content,plan_type FROM plan_list "
           "WHERE create_time LIKE %s")
    try:
        # Execute the query and fetch all matching plans.
        cursor.execute(sql, ('%' + today + '%',))
        table = cursor.fetchall()
        db.commit()
        # Open the markdown file once and append one checklist line per
        # plan; plan_type 0 = todo, 1 = in progress, 2 = done.
        with open('C:\\Users\\Administrator\\Desktop\\今日计划.md', 'a+', encoding='UTF-8') as f:
            for tt in table:
                if tt[2] == 0:
                    f.write('- [ ] ' + tt[1] + '\n')
                elif tt[2] == 1:
                    f.write('- [ ] ' + tt[1] + ' - 进行中 \n')
                elif tt[2] == 2:
                    f.write('- [x] ' + tt[1] + '\n')
    except Exception:
        # The original used a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception instead.
        print("出错啦!")
        # Roll back on error.
        db.rollback()
    # Close the database connection.
    db.close()
| GlacierBo/python_learn | python_base/get_plan_to_md.py | get_plan_to_md.py | py | 1,501 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.connect",
"line_number": 18,
"usage_type": "call"
}
] |
41165592833 | # Imported libraries
import random as r
import time as t
import colorama as col
# Functions
# little helper function that converts an array of numbers into a string
def arr_to_string(arr_nums):
    """Join numbers into a single space-separated string.

    Each element is followed by a space, so the result keeps a trailing
    space (e.g. [1, 2] -> "1 2 "); an empty list yields "".
    """
    # str.join is linear, unlike repeated += which is quadratic.
    return "".join(str(n) + " " for n in arr_nums)
# color(number, number, array, boolean)
# slices array from index1(inclusive) - index2(inclusive) from whole_arr.
# If switch = 1, pair is red. If switch = 0, pair is blue
def color(index1, index2, whole_arr, switch):
num1 = str(whole_arr[index1])
num2 = str(whole_arr[index2])
if switch:
pair = col.Fore.LIGHTRED_EX + num1 + " " + num2
else:
pair = col.Fore.LIGHTBLUE_EX + num1 + " " + num2
if index1 == 0:
rest_of_arr = whole_arr[2:]
str_rest = ""
for n in rest_of_arr:
str_rest += str(n) + " "
print(pair + " " + col.Fore.RESET + str_rest)
else:
arr_slice_1 = whole_arr[0:index1]
arr_slice_2 = whole_arr[index2+1:]
str_slice_1 = ""
str_slice_2 = ""
for n in arr_slice_1:
str_slice_1 += str(n) + " "
for n in arr_slice_2:
str_slice_2 += str(n) + " "
print(str_slice_1 + pair + " " + col.Fore.RESET + str_slice_2)
def bubble_sort(arr_sort, speed):
    """Bubble-sort a copy of ``arr_sort`` ascending, printing each step.

    ``speed`` is the delay in seconds between printed comparisons
    (0 = instant).  Swapped pairs are shown red and settled pairs blue
    via color().  Returns the sorted copy; the input list is untouched.
    """
    input("\nI will now attempt to sort your array. Press \"Enter\" when ready...")
    print()
    arr0 = arr_sort.copy()
    i = 0
    print(arr_to_string(arr0))
    t.sleep(speed)
    # i = how many swaps there aren't. So if i < len(arr0) - 1, it hasn't been fully sorted
    while i < len(arr0) - 1:
        i = 0
        # loops through array in pairs, which is why condition is (n < arr0.length - 1)
        for n in range(0, len(arr0) - 1):
            num1 = arr0[n]
            num2 = arr0[n + 1]
            # if 1st value in the pair is greater than 2nd value, swap
            if num1 - num2 > 0:
                color(n, n + 1, arr0, 1)
                t.sleep(speed)
                arr0[n] = num2
                arr0[n + 1] = num1
                color(n, n+1, arr0, 1)
                t.sleep(speed)
            # if 1st value in pair is not greater than 2nd value, add 1 to i
            else:
                i += 1
                color(n, n+1, arr0, 0)
                t.sleep(speed)
    # NOTE: %s binds to col.Fore.MAGENTA only; str(arr0) is concatenated
    # afterwards — it works, but reads oddly.
    print('\nFinished! Your newly sorted array is: \n%s' % col.Fore.MAGENTA + str(arr0))
    return arr0
# user generates a random array
# for arr_sort argument in bubble_sort function
def user_interface():
    """Interactively build a random integer array.

    Prompts for the element count and the max/min values, prints the
    generated array in magenta, and returns it.
    """
    input('\nWelcome to Array Sort 1.0! Press "Enter" to continue...')
    count = input('\nWe\'re going to generate a random array of numbers.'
                  '\nHow many numbers should be in the array? Please choose between 5 and 25: ')
    num_max = input('\nWhat is the maximum number each number in the array should be? '
                    '\nIn other words, no number will be greater than this number: ')
    num_min = input('\nWhat is the minimum number each number in the array should be?'
                    '\nIn other words, no number will be less than this number: ')
    # NOTE(review): inputs are not validated — non-numeric text raises
    # ValueError and min > max raises inside randint.
    random_array = [r.randint(int(num_min), int(num_max)) for a in range(0, int(count))]
    print('\nYour array is: \n%s' % col.Fore.MAGENTA + str(random_array) + col.Fore.RESET)
    return random_array
# for speed argument in bubble_sort function
def sorting_speed():
    """Prompt until the user picks a sorting animation speed.

    Returns the per-step delay in seconds: 0.5 (slow), 0.3 (normal)
    or 0 (instant).
    """
    # Accept either the menu number or the spelled-out word
    # (case-insensitive).
    speeds = {
        "1": .5, "slow": .5,
        "2": .3, "normal": .3,
        "3": 0, "instant": 0,
    }
    # Loop instead of recursing (the original recursed on every bad
    # input, which could eventually blow the stack).
    while True:
        user_speed = input("\nPlease choose the speed at which you would like to see your array sorted: "
                           "\n1. Slow"
                           "\n2. Normal"
                           "\n3. Instant")
        choice = speeds.get(user_speed.lower())
        if choice is not None:
            return choice
        print("\nI don't understand")
# Function calls
user_array = user_interface()  # prompt for array parameters and build it
speed_choice = sorting_speed()  # prompt for the animation delay
bubble_sort(user_array, speed_choice)
# End
print(col.Fore.RESET + "\nThank you for using Array Sort 1.0!")
| habit456/Python3_Projects_msc | bubble_sort.py | bubble_sort.py | py | 4,224 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "colorama.Fore",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "colorama.For... |
42324051999 | # schedule_post.py
# Author: Daniel Edades
# Last Modified: 11/21/2017
# Description: Formats a database row intended to represent a post scheduled
# for a future time, then inserts that row into a database table for later
# retrieval and posting at that actual time.
import sqlite3
def schedule_post(table_name, time, content):
    """Insert a scheduled post into ``table_name`` of scheduled_posts.db.

    The row stores the post time, the post contents, and posted=0 (not
    yet posted).  ``time`` and ``content`` are bound as SQL parameters;
    the table name cannot be parameterized in SQLite, so callers must
    pass a trusted table name.
    """
    conn = sqlite3.connect('scheduled_posts.db')
    try:
        command_string = ("INSERT INTO " + "\'" + table_name + "\'"
                          "(\'post_time\', \'contents\', \'posted\')"
                          " VALUES (?, ?, 0)")
        conn.execute(command_string, (time, content))
        conn.commit()
    finally:
        # The original never closed the connection; always release it.
        conn.close()
{
"api_name": "sqlite3.connect",
"line_number": 14,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.